Bug Summary

File: clang/lib/CodeGen/CGBuiltin.cpp
Warning: line 15448, column 5
Undefined or garbage value returned to caller
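
The checker behind this message (the core uninitialized-value checks, e.g. core.uninitialized.UndefReturn) reports functions that can return a value read from storage that is never written on some path. The code at line 15448 lies outside the excerpt reproduced below, so the following is only a hypothetical, minimal sketch of the pattern being flagged, not the actual finding:

// Hypothetical reproducer for "Undefined or garbage value returned to caller";
// not taken from CGBuiltin.cpp.
int lower(int Kind) {
  int Result;            // written only on the two handled paths
  if (Kind == 0)
    Result = 1;
  else if (Kind == 1)
    Result = 2;
  return Result;         // garbage value returned for any other Kind
}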

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CGBuiltin.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D CLANG_ROUND_TRIP_CC1_ARGS=ON -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/include -I tools/clang/include -I include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -ferror-limit 19 -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-11-10-160236-22541-1 -x c++ /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCUDARuntime.h"
14#include "CGCXXABI.h"
15#include "CGObjCRuntime.h"
16#include "CGOpenCLRuntime.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "PatternInit.h"
22#include "TargetInfo.h"
23#include "clang/AST/ASTContext.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/OSLog.h"
27#include "clang/Basic/TargetBuiltins.h"
28#include "clang/Basic/TargetInfo.h"
29#include "clang/CodeGen/CGFunctionInfo.h"
30#include "llvm/ADT/APFloat.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/SmallPtrSet.h"
33#include "llvm/ADT/StringExtras.h"
34#include "llvm/Analysis/ValueTracking.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/InlineAsm.h"
37#include "llvm/IR/Intrinsics.h"
38#include "llvm/IR/IntrinsicsAArch64.h"
39#include "llvm/IR/IntrinsicsAMDGPU.h"
40#include "llvm/IR/IntrinsicsARM.h"
41#include "llvm/IR/IntrinsicsBPF.h"
42#include "llvm/IR/IntrinsicsHexagon.h"
43#include "llvm/IR/IntrinsicsNVPTX.h"
44#include "llvm/IR/IntrinsicsPowerPC.h"
45#include "llvm/IR/IntrinsicsR600.h"
46#include "llvm/IR/IntrinsicsRISCV.h"
47#include "llvm/IR/IntrinsicsS390.h"
48#include "llvm/IR/IntrinsicsWebAssembly.h"
49#include "llvm/IR/IntrinsicsX86.h"
50#include "llvm/IR/MDBuilder.h"
51#include "llvm/IR/MatrixBuilder.h"
52#include "llvm/Support/ConvertUTF.h"
53#include "llvm/Support/ScopedPrinter.h"
54#include "llvm/Support/X86TargetParser.h"
55#include <sstream>
56
57using namespace clang;
58using namespace CodeGen;
59using namespace llvm;
60
61static
62int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
63 return std::min(High, std::max(Low, Value));
64}
65
66static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
67 Align AlignmentInBytes) {
68 ConstantInt *Byte;
69 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
70 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
71 // Nothing to initialize.
72 return;
73 case LangOptions::TrivialAutoVarInitKind::Zero:
74 Byte = CGF.Builder.getInt8(0x00);
75 break;
76 case LangOptions::TrivialAutoVarInitKind::Pattern: {
77 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
78 Byte = llvm::dyn_cast<llvm::ConstantInt>(
79 initializationPatternFor(CGF.CGM, Int8));
80 break;
81 }
82 }
83 if (CGF.CGM.stopAutoInit())
84 return;
85 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
86 I->addAnnotationMetadata("auto-init");
87}
88
89/// getBuiltinLibFunction - Given a builtin id for a function like
90/// "__builtin_fabsf", return a Function* for "fabsf".
91llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
92 unsigned BuiltinID) {
93 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
94
95 // Get the name, skip over the __builtin_ prefix (if necessary).
96 StringRef Name;
97 GlobalDecl D(FD);
98
99 // If the builtin has been declared explicitly with an assembler label,
100 // use the mangled name. This differs from the plain label on platforms
101 // that prefix labels.
102 if (FD->hasAttr<AsmLabelAttr>())
103 Name = getMangledName(D);
104 else
105 Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
106
107 llvm::FunctionType *Ty =
108 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
109
110 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
111}
112
113/// Emit the conversions required to turn the given value into an
114/// integer of the given size.
115static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
116 QualType T, llvm::IntegerType *IntType) {
117 V = CGF.EmitToMemory(V, T);
118
119 if (V->getType()->isPointerTy())
120 return CGF.Builder.CreatePtrToInt(V, IntType);
121
122 assert(V->getType() == IntType);
123 return V;
124}
125
126static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
127 QualType T, llvm::Type *ResultType) {
128 V = CGF.EmitFromMemory(V, T);
129
130 if (ResultType->isPointerTy())
131 return CGF.Builder.CreateIntToPtr(V, ResultType);
132
133 assert(V->getType() == ResultType);
134 return V;
135}
136
137/// Utility to insert an atomic instruction based on Intrinsic::ID
138/// and the expression node.
139static Value *MakeBinaryAtomicValue(
140 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
141 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
142 QualType T = E->getType();
143 assert(E->getArg(0)->getType()->isPointerType());
144 assert(CGF.getContext().hasSameUnqualifiedType(T,
145 E->getArg(0)->getType()->getPointeeType()));
146 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
147
148 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
149 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
150
151 llvm::IntegerType *IntType =
152 llvm::IntegerType::get(CGF.getLLVMContext(),
153 CGF.getContext().getTypeSize(T));
154 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
155
156 llvm::Value *Args[2];
157 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
158 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
159 llvm::Type *ValueType = Args[1]->getType();
160 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
161
162 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
163 Kind, Args[0], Args[1], Ordering);
164 return EmitFromInt(CGF, Result, T, ValueType);
165}
166
167static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
168 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
169 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
170
171 // Convert the type of the pointer to a pointer to the stored type.
172 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
173 Value *BC = CGF.Builder.CreateBitCast(
174 Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
175 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
176 LV.setNontemporal(true);
177 CGF.EmitStoreOfScalar(Val, LV, false);
178 return nullptr;
179}
180
181static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
182 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
183
184 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
185 LV.setNontemporal(true);
186 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
187}
188
189static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
190 llvm::AtomicRMWInst::BinOp Kind,
191 const CallExpr *E) {
192 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
193}
194
195/// Utility to insert an atomic instruction based Intrinsic::ID and
196/// the expression node, where the return value is the result of the
197/// operation.
198static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
199 llvm::AtomicRMWInst::BinOp Kind,
200 const CallExpr *E,
201 Instruction::BinaryOps Op,
202 bool Invert = false) {
203 QualType T = E->getType();
204 assert(E->getArg(0)->getType()->isPointerType());
205 assert(CGF.getContext().hasSameUnqualifiedType(T,
206 E->getArg(0)->getType()->getPointeeType()));
207 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
208
209 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
210 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
211
212 llvm::IntegerType *IntType =
213 llvm::IntegerType::get(CGF.getLLVMContext(),
214 CGF.getContext().getTypeSize(T));
215 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
216
217 llvm::Value *Args[2];
218 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
219 llvm::Type *ValueType = Args[1]->getType();
220 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
221 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
222
223 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
224 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
225 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
226 if (Invert)
227 Result =
228 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
229 llvm::ConstantInt::getAllOnesValue(IntType));
230 Result = EmitFromInt(CGF, Result, T, ValueType);
231 return RValue::get(Result);
232}
233
234/// Utility to insert an atomic cmpxchg instruction.
235///
236/// @param CGF The current codegen function.
237/// @param E Builtin call expression to convert to cmpxchg.
238/// arg0 - address to operate on
239/// arg1 - value to compare with
240/// arg2 - new value
241/// @param ReturnBool Specifies whether to return success flag of
242/// cmpxchg result or the old value.
243///
244/// @returns result of cmpxchg, according to ReturnBool
245///
246/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
247/// invoke the function EmitAtomicCmpXchgForMSIntrin.
248static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
249 bool ReturnBool) {
250 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
251 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
252 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
253
254 llvm::IntegerType *IntType = llvm::IntegerType::get(
255 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
256 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
257
258 Value *Args[3];
259 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
260 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
261 llvm::Type *ValueType = Args[1]->getType();
262 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
263 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
264
265 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
266 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
267 llvm::AtomicOrdering::SequentiallyConsistent);
268 if (ReturnBool)
269 // Extract boolean success flag and zext it to int.
270 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
271 CGF.ConvertType(E->getType()));
272 else
273 // Extract old value and emit it using the same type as compare value.
274 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
275 ValueType);
276}
277
278/// This function should be invoked to emit atomic cmpxchg for Microsoft's
279/// _InterlockedCompareExchange* intrinsics which have the following signature:
280/// T _InterlockedCompareExchange(T volatile *Destination,
281/// T Exchange,
282/// T Comparand);
283///
284/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
285/// cmpxchg *Destination, Comparand, Exchange.
286/// So we need to swap Comparand and Exchange when invoking
287/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
288/// function MakeAtomicCmpXchgValue since it expects the arguments to be
289/// already swapped.
290
291static
292Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
293 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
294 assert(E->getArg(0)->getType()->isPointerType());
295 assert(CGF.getContext().hasSameUnqualifiedType(
296 E->getType(), E->getArg(0)->getType()->getPointeeType()));
297 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
298 E->getArg(1)->getType()));
299 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
300 E->getArg(2)->getType()));
301
302 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
303 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
304 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
305
306 // For Release ordering, the failure ordering should be Monotonic.
307 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
308 AtomicOrdering::Monotonic :
309 SuccessOrdering;
310
311 // The atomic instruction is marked volatile for consistency with MSVC. This
312 // blocks the few atomics optimizations that LLVM has. If we want to optimize
313 // _Interlocked* operations in the future, we will have to remove the volatile
314 // marker.
315 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
316 Destination, Comparand, Exchange,
317 SuccessOrdering, FailureOrdering);
318 Result->setVolatile(true);
319 return CGF.Builder.CreateExtractValue(Result, 0);
320}
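
To make the operand swap described in the comment above concrete, here is a hedged usage sketch (hypothetical variable names, not code from CGBuiltin.cpp): the intrinsic's second argument becomes the cmpxchg new value and its third argument becomes the expected value.

// Illustrative only; requires <intrin.h> when built with MSVC.
// _InterlockedCompareExchange(&Dest, /*Exchange=*/42, /*Comparand=*/7) is lowered
// via CreateAtomicCmpXchg(Destination, Comparand, Exchange, ...), i.e. a volatile
// seq_cst compare-exchange expecting 7 and storing 42, and the old value
// (extractvalue index 0) is what the caller receives.
long Dest = 7;
long Old = _InterlockedCompareExchange(&Dest, 42, 7);   // Old == 7, Dest == 42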
321
322// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
323// prototyped like this:
324//
325// unsigned char _InterlockedCompareExchange128...(
326// __int64 volatile * _Destination,
327// __int64 _ExchangeHigh,
328// __int64 _ExchangeLow,
329// __int64 * _ComparandResult);
330static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
331 const CallExpr *E,
332 AtomicOrdering SuccessOrdering) {
333 assert(E->getNumArgs() == 4);
334 llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
335 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
336 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
337 llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
338
339 assert(Destination->getType()->isPointerTy());
340 assert(!ExchangeHigh->getType()->isPointerTy());
341 assert(!ExchangeLow->getType()->isPointerTy());
342 assert(ComparandPtr->getType()->isPointerTy());
343
344 // For Release ordering, the failure ordering should be Monotonic.
345 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
346 ? AtomicOrdering::Monotonic
347 : SuccessOrdering;
348
349 // Convert to i128 pointers and values.
350 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
351 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
352 Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
353 Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
354 CGF.getContext().toCharUnitsFromBits(128));
355
356 // (((i128)hi) << 64) | ((i128)lo)
357 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
358 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
359 ExchangeHigh =
360 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
361 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
362
363 // Load the comparand for the instruction.
364 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
365
366 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
367 SuccessOrdering, FailureOrdering);
368
369 // The atomic instruction is marked volatile for consistency with MSVC. This
370 // blocks the few atomics optimizations that LLVM has. If we want to optimize
371 // _Interlocked* operations in the future, we will have to remove the volatile
372 // marker.
373 CXI->setVolatile(true);
374
375 // Store the result as an outparameter.
376 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
377 ComparandResult);
378
379 // Get the success boolean and zero extend it to i8.
380 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
381 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
382}
383
384static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
385 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
386 assert(E->getArg(0)->getType()->isPointerType());
387
388 auto *IntTy = CGF.ConvertType(E->getType());
389 auto *Result = CGF.Builder.CreateAtomicRMW(
390 AtomicRMWInst::Add,
391 CGF.EmitScalarExpr(E->getArg(0)),
392 ConstantInt::get(IntTy, 1),
393 Ordering);
394 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
395}
396
397static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
398 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
399 assert(E->getArg(0)->getType()->isPointerType());
400
401 auto *IntTy = CGF.ConvertType(E->getType());
402 auto *Result = CGF.Builder.CreateAtomicRMW(
403 AtomicRMWInst::Sub,
404 CGF.EmitScalarExpr(E->getArg(0)),
405 ConstantInt::get(IntTy, 1),
406 Ordering);
407 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
408}
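
A quick aside on why the helpers above re-apply the +1/-1 after the atomicrmw (an illustrative sketch using std::atomic as an analogy, not code from this file): like atomicrmw, fetch_add returns the value held before the operation, while the Interlocked-style increment/decrement these helpers implement must return the value after it.

// Illustrative only.
#include <atomic>
#include <cassert>

int main() {
  std::atomic<long> V{5};
  long Old = V.fetch_add(1);        // returns the old value, 5 (like atomicrmw add)
  assert(Old == 5 && V.load() == 6);
  long Incremented = Old + 1;       // the post-increment value the intrinsic returns
  assert(Incremented == 6);
}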
409
410// Build a plain volatile load.
411static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
412 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
413 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
414 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
415 llvm::Type *ITy =
416 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
417 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
418 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
419 Load->setVolatile(true);
420 return Load;
421}
422
423// Build a plain volatile store.
424static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
425 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
426 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
427 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
428 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
429 llvm::Type *ITy =
430 llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
431 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
432 llvm::StoreInst *Store =
433 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
434 Store->setVolatile(true);
435 return Store;
436}
437
438// Emit a simple mangled intrinsic that has 1 argument and a return type
439// matching the argument type. Depending on mode, this may be a constrained
440// floating-point intrinsic.
441static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
442 const CallExpr *E, unsigned IntrinsicID,
443 unsigned ConstrainedIntrinsicID) {
444 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
445
446 if (CGF.Builder.getIsFPConstrained()) {
447 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
448 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
449 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
450 } else {
451 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
452 return CGF.Builder.CreateCall(F, Src0);
453 }
454}
455
456// Emit an intrinsic that has 2 operands of the same type as its result.
457// Depending on mode, this may be a constrained floating-point intrinsic.
458static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
459 const CallExpr *E, unsigned IntrinsicID,
460 unsigned ConstrainedIntrinsicID) {
461 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
462 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
463
464 if (CGF.Builder.getIsFPConstrained()) {
465 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
466 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
467 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
468 } else {
469 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
470 return CGF.Builder.CreateCall(F, { Src0, Src1 });
471 }
472}
473
474// Emit an intrinsic that has 3 operands of the same type as its result.
475// Depending on mode, this may be a constrained floating-point intrinsic.
476static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
477 const CallExpr *E, unsigned IntrinsicID,
478 unsigned ConstrainedIntrinsicID) {
479 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
480 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
481 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
482
483 if (CGF.Builder.getIsFPConstrained()) {
484 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
485 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
486 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
487 } else {
488 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
489 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
490 }
491}
492
493// Emit an intrinsic where all operands are of the same type as the result.
494// Depending on mode, this may be a constrained floating-point intrinsic.
495static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
496 unsigned IntrinsicID,
497 unsigned ConstrainedIntrinsicID,
498 llvm::Type *Ty,
499 ArrayRef<Value *> Args) {
500 Function *F;
501 if (CGF.Builder.getIsFPConstrained())
502 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
503 else
504 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
505
506 if (CGF.Builder.getIsFPConstrained())
507 return CGF.Builder.CreateConstrainedFPCall(F, Args);
508 else
509 return CGF.Builder.CreateCall(F, Args);
510}
511
512// Emit a simple mangled intrinsic that has 1 argument and a return type
513// matching the argument type.
514static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
515 const CallExpr *E,
516 unsigned IntrinsicID) {
517 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
518
519 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
520 return CGF.Builder.CreateCall(F, Src0);
521}
522
523// Emit an intrinsic that has 2 operands of the same type as its result.
524static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
525 const CallExpr *E,
526 unsigned IntrinsicID) {
527 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
528 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
529
530 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
531 return CGF.Builder.CreateCall(F, { Src0, Src1 });
532}
533
534// Emit an intrinsic that has 3 operands of the same type as its result.
535static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
536 const CallExpr *E,
537 unsigned IntrinsicID) {
538 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
539 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
540 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
541
542 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
543 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
544}
545
546// Emit an intrinsic that has 1 float or double operand, and 1 integer.
547static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
548 const CallExpr *E,
549 unsigned IntrinsicID) {
550 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
551 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
552
553 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
554 return CGF.Builder.CreateCall(F, {Src0, Src1});
555}
556
557// Emit an intrinsic that has overloaded integer result and fp operand.
558static Value *
559emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
560 unsigned IntrinsicID,
561 unsigned ConstrainedIntrinsicID) {
562 llvm::Type *ResultType = CGF.ConvertType(E->getType());
563 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
564
565 if (CGF.Builder.getIsFPConstrained()) {
566 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
567 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
568 {ResultType, Src0->getType()});
569 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
570 } else {
571 Function *F =
572 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
573 return CGF.Builder.CreateCall(F, Src0);
574 }
575}
576
577/// EmitFAbs - Emit a call to @llvm.fabs().
578static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
579 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
580 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
581 Call->setDoesNotAccessMemory();
582 return Call;
583}
584
585/// Emit the computation of the sign bit for a floating point value. Returns
586/// the i1 sign bit value.
587static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
588 LLVMContext &C = CGF.CGM.getLLVMContext();
589
590 llvm::Type *Ty = V->getType();
591 int Width = Ty->getPrimitiveSizeInBits();
592 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
593 V = CGF.Builder.CreateBitCast(V, IntTy);
594 if (Ty->isPPC_FP128Ty()) {
595 // We want the sign bit of the higher-order double. The bitcast we just
596 // did works as if the double-double was stored to memory and then
597 // read as an i128. The "store" will put the higher-order double in the
598 // lower address in both little- and big-Endian modes, but the "load"
599 // will treat those bits as a different part of the i128: the low bits in
600 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
601 // we need to shift the high bits down to the low before truncating.
602 Width >>= 1;
603 if (CGF.getTarget().isBigEndian()) {
604 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
605 V = CGF.Builder.CreateLShr(V, ShiftCst);
606 }
607 // We are truncating value in order to extract the higher-order
608 // double, which we will be using to extract the sign from.
609 IntTy = llvm::IntegerType::get(C, Width);
610 V = CGF.Builder.CreateTrunc(V, IntTy);
611 }
612 Value *Zero = llvm::Constant::getNullValue(IntTy);
613 return CGF.Builder.CreateICmpSLT(V, Zero);
614}
615
616static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
617 const CallExpr *E, llvm::Constant *calleeValue) {
618 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
619 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
620}
621
622/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
623/// depending on IntrinsicID.
624///
625/// \arg CGF The current codegen function.
626/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
627/// \arg X The first argument to the llvm.*.with.overflow.*.
628/// \arg Y The second argument to the llvm.*.with.overflow.*.
629/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
630/// \returns The result (i.e. sum/product) returned by the intrinsic.
631static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
632 const llvm::Intrinsic::ID IntrinsicID,
633 llvm::Value *X, llvm::Value *Y,
634 llvm::Value *&Carry) {
635 // Make sure we have integers of the same width.
636 assert(X->getType() == Y->getType() &&
637 "Arguments must be the same type. (Did you forget to make sure both "
638 "arguments have the same integer width?)");
639
640 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
641 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
642 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
643 return CGF.Builder.CreateExtractValue(Tmp, 0);
644}
645
646static Value *emitRangedBuiltin(CodeGenFunction &CGF,
647 unsigned IntrinsicID,
648 int low, int high) {
649 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
650 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
651 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
652 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
653 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
654 return Call;
655}
656
657namespace {
658 struct WidthAndSignedness {
659 unsigned Width;
660 bool Signed;
661 };
662}
663
664static WidthAndSignedness
665getIntegerWidthAndSignedness(const clang::ASTContext &context,
666 const clang::QualType Type) {
667 assert(Type->isIntegerType() && "Given type is not an integer.");
668 unsigned Width = Type->isBooleanType() ? 1
669 : Type->isExtIntType() ? context.getIntWidth(Type)
670 : context.getTypeInfo(Type).Width;
671 bool Signed = Type->isSignedIntegerType();
672 return {Width, Signed};
673}
674
675// Given one or more integer types, this function produces an integer type that
676// encompasses them: any value in one of the given types could be expressed in
677// the encompassing type.
678static struct WidthAndSignedness
679EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
680 assert(Types.size() > 0 && "Empty list of types.");
681
682 // If any of the given types is signed, we must return a signed type.
683 bool Signed = false;
684 for (const auto &Type : Types) {
685 Signed |= Type.Signed;
686 }
687
688 // The encompassing type must have a width greater than or equal to the width
689 // of the specified types. Additionally, if the encompassing type is signed,
690 // its width must be strictly greater than the width of any unsigned types
691 // given.
692 unsigned Width = 0;
693 for (const auto &Type : Types) {
694 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
695 if (Width < MinWidth) {
696 Width = MinWidth;
697 }
698 }
699
700 return {Width, Signed};
701}
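
As a worked example of the rule implemented above (a standalone illustrative sketch, not code from CGBuiltin.cpp): combining a signed 16-bit type with an unsigned 32-bit type forces a signed result, and the unsigned 32-bit member then needs one extra bit for the sign, so the encompassing type is a signed 33-bit integer.

// Illustrative restatement of EncompassingIntegerType for two inputs.
#include <algorithm>
#include <cassert>
#include <initializer_list>

struct WS { unsigned Width; bool Signed; };

static WS encompass(std::initializer_list<WS> Types) {
  bool Signed = false;
  for (const WS &T : Types)
    Signed |= T.Signed;                  // any signed input forces a signed result
  unsigned Width = 0;
  for (const WS &T : Types)
    Width = std::max(Width, T.Width + (Signed && !T.Signed)); // unsigned inputs need a sign bit
  return {Width, Signed};
}

int main() {
  WS R = encompass({{16, true}, {32, false}});
  assert(R.Width == 33 && R.Signed);
}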
702
703Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
704 llvm::Type *DestType = Int8PtrTy;
705 if (ArgValue->getType() != DestType)
706 ArgValue =
707 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
708
709 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
710 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
711}
712
713/// Checks if using the result of __builtin_object_size(p, @p From) in place of
714/// __builtin_object_size(p, @p To) is correct
715static bool areBOSTypesCompatible(int From, int To) {
716 // Note: Our __builtin_object_size implementation currently treats Type=0 and
717 // Type=2 identically. Encoding this implementation detail here may make
718 // improving __builtin_object_size difficult in the future, so it's omitted.
719 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
720}
721
722static llvm::Value *
723getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
724 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
725}
726
727llvm::Value *
728CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
729 llvm::IntegerType *ResType,
730 llvm::Value *EmittedE,
731 bool IsDynamic) {
732 uint64_t ObjectSize;
733 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
734 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
735 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
736}
737
738/// Returns a Value corresponding to the size of the given expression.
739/// This Value may be either of the following:
740/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
741/// it)
742/// - A call to the @llvm.objectsize intrinsic
743///
744/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
745/// and we wouldn't otherwise try to reference a pass_object_size parameter,
746/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
747llvm::Value *
748CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
749 llvm::IntegerType *ResType,
750 llvm::Value *EmittedE, bool IsDynamic) {
751 // We need to reference an argument if the pointer is a parameter with the
752 // pass_object_size attribute.
753 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
754 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
755 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
756 if (Param != nullptr && PS != nullptr &&
757 areBOSTypesCompatible(PS->getType(), Type)) {
758 auto Iter = SizeArguments.find(Param);
759 assert(Iter != SizeArguments.end());
760
761 const ImplicitParamDecl *D = Iter->second;
762 auto DIter = LocalDeclMap.find(D);
763 assert(DIter != LocalDeclMap.end());
764
765 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
766 getContext().getSizeType(), E->getBeginLoc());
767 }
768 }
769
770 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
771 // evaluate E for side-effects. In either case, we shouldn't lower to
772 // @llvm.objectsize.
773 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
774 return getDefaultBuiltinObjectSizeResult(Type, ResType);
775
776 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
777 assert(Ptr->getType()->isPointerTy() &&
778 "Non-pointer passed to __builtin_object_size?");
779
780 Function *F =
781 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
782
783 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
784 Value *Min = Builder.getInt1((Type & 2) != 0);
785 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
786 Value *NullIsUnknown = Builder.getTrue();
787 Value *Dynamic = Builder.getInt1(IsDynamic);
788 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
789}
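
For orientation, a small usage sketch of the builtin this helper lowers (semantics as documented for GCC/Clang, stated here as an assumption rather than derived from this file): types 0 and 1 request an upper bound and fold to (size_t)-1 when the size is unknown, types 2 and 3 request a lower bound and fold to 0, matching getDefaultBuiltinObjectSizeResult above; the (Type & 2) bit is what becomes the Min flag on @llvm.objectsize.

// Illustrative only.
#include <cstdio>

int main() {
  char Buf[16];
  // Upper-bound (type 0) and lower-bound (type 2) queries on a known object;
  // both are expected to print 12 (bytes remaining after Buf + 4).
  std::printf("%zu %zu\n", __builtin_object_size(Buf + 4, 0),
              __builtin_object_size(Buf + 4, 2));
}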
790
791namespace {
792/// A struct to generically describe a bit test intrinsic.
793struct BitTest {
794 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
795 enum InterlockingKind : uint8_t {
796 Unlocked,
797 Sequential,
798 Acquire,
799 Release,
800 NoFence
801 };
802
803 ActionKind Action;
804 InterlockingKind Interlocking;
805 bool Is64Bit;
806
807 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
808};
809} // namespace
810
811BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
812 switch (BuiltinID) {
813 // Main portable variants.
814 case Builtin::BI_bittest:
815 return {TestOnly, Unlocked, false};
816 case Builtin::BI_bittestandcomplement:
817 return {Complement, Unlocked, false};
818 case Builtin::BI_bittestandreset:
819 return {Reset, Unlocked, false};
820 case Builtin::BI_bittestandset:
821 return {Set, Unlocked, false};
822 case Builtin::BI_interlockedbittestandreset:
823 return {Reset, Sequential, false};
824 case Builtin::BI_interlockedbittestandset:
825 return {Set, Sequential, false};
826
827 // X86-specific 64-bit variants.
828 case Builtin::BI_bittest64:
829 return {TestOnly, Unlocked, true};
830 case Builtin::BI_bittestandcomplement64:
831 return {Complement, Unlocked, true};
832 case Builtin::BI_bittestandreset64:
833 return {Reset, Unlocked, true};
834 case Builtin::BI_bittestandset64:
835 return {Set, Unlocked, true};
836 case Builtin::BI_interlockedbittestandreset64:
837 return {Reset, Sequential, true};
838 case Builtin::BI_interlockedbittestandset64:
839 return {Set, Sequential, true};
840
841 // ARM/AArch64-specific ordering variants.
842 case Builtin::BI_interlockedbittestandset_acq:
843 return {Set, Acquire, false};
844 case Builtin::BI_interlockedbittestandset_rel:
845 return {Set, Release, false};
846 case Builtin::BI_interlockedbittestandset_nf:
847 return {Set, NoFence, false};
848 case Builtin::BI_interlockedbittestandreset_acq:
849 return {Reset, Acquire, false};
850 case Builtin::BI_interlockedbittestandreset_rel:
851 return {Reset, Release, false};
852 case Builtin::BI_interlockedbittestandreset_nf:
853 return {Reset, NoFence, false};
854 }
855 llvm_unreachable("expected only bittest intrinsics");
856}
857
858static char bitActionToX86BTCode(BitTest::ActionKind A) {
859 switch (A) {
860 case BitTest::TestOnly: return '\0';
861 case BitTest::Complement: return 'c';
862 case BitTest::Reset: return 'r';
863 case BitTest::Set: return 's';
864 }
865 llvm_unreachable("invalid action");
866}
867
868static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
869 BitTest BT,
870 const CallExpr *E, Value *BitBase,
871 Value *BitPos) {
872 char Action = bitActionToX86BTCode(BT.Action);
873 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
874
875 // Build the assembly.
876 SmallString<64> Asm;
877 raw_svector_ostream AsmOS(Asm);
878 if (BT.Interlocking != BitTest::Unlocked)
879 AsmOS << "lock ";
880 AsmOS << "bt";
881 if (Action)
882 AsmOS << Action;
883 AsmOS << SizeSuffix << " $2, ($1)";
884
885 // Build the constraints. FIXME: We should support immediates when possible.
886 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
887 std::string MachineClobbers = CGF.getTarget().getClobbers();
888 if (!MachineClobbers.empty()) {
889 Constraints += ',';
890 Constraints += MachineClobbers;
891 }
892 llvm::IntegerType *IntType = llvm::IntegerType::get(
893 CGF.getLLVMContext(),
894 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
895 llvm::Type *IntPtrType = IntType->getPointerTo();
896 llvm::FunctionType *FTy =
897 llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
898
899 llvm::InlineAsm *IA =
900 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
901 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
902}
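
As a concrete trace of the string building above (worked out by hand from the code, so treat the details as a sketch): for _interlockedbittestandset, decodeBitTestBuiltin yields {Set, Sequential, Is64Bit=false}, so Action is 's' and SizeSuffix is 'l'.

// Hand-derived worked example, illustrative only:
//   Asm         = "lock btsl $2, ($1)"
//   Constraints = "={@ccc},r,r,~{cc},~{memory}"  (plus any target machine clobbers)
//   Return type = i8, carrying the CPU carry flag that bts sets to the old bit value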
903
904static llvm::AtomicOrdering
905getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
906 switch (I) {
907 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
908 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
909 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
910 case BitTest::Release: return llvm::AtomicOrdering::Release;
911 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
912 }
913 llvm_unreachable("invalid interlocking");
914}
915
916/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
917/// bits and a bit position and read and optionally modify the bit at that
918/// position. The position index can be arbitrarily large, i.e. it can be larger
919/// than 31 or 63, so we need an indexed load in the general case.
920static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
921 unsigned BuiltinID,
922 const CallExpr *E) {
923 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
924 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
925
926 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
927
928 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
929 // indexing operation internally. Use them if possible.
930 if (CGF.getTarget().getTriple().isX86())
931 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
932
933 // Otherwise, use generic code to load one byte and test the bit. Use all but
934 // the bottom three bits as the array index, and the bottom three bits to form
935 // a mask.
936 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
937 Value *ByteIndex = CGF.Builder.CreateAShr(
938 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
939 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
940 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
941 ByteIndex, "bittest.byteaddr"),
942 CharUnits::One());
943 Value *PosLow =
944 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
945 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
946
947 // The updating instructions will need a mask.
948 Value *Mask = nullptr;
949 if (BT.Action != BitTest::TestOnly) {
950 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
951 "bittest.mask");
952 }
953
954 // Check the action and ordering of the interlocked intrinsics.
955 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
956
957 Value *OldByte = nullptr;
958 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
959 // Emit a combined atomicrmw load/store operation for the interlocked
960 // intrinsics.
961 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
962 if (BT.Action == BitTest::Reset) {
963 Mask = CGF.Builder.CreateNot(Mask);
964 RMWOp = llvm::AtomicRMWInst::And;
965 }
966 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
967 Ordering);
968 } else {
969 // Emit a plain load for the non-interlocked intrinsics.
970 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
971 Value *NewByte = nullptr;
972 switch (BT.Action) {
973 case BitTest::TestOnly:
974 // Don't store anything.
975 break;
976 case BitTest::Complement:
977 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
978 break;
979 case BitTest::Reset:
980 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
981 break;
982 case BitTest::Set:
983 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
984 break;
985 }
986 if (NewByte)
987 CGF.Builder.CreateStore(NewByte, ByteAddr);
988 }
989
990 // However we loaded the old byte, either by plain load or atomicrmw, shift
991 // the bit into the low position and mask it to 0 or 1.
992 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
993 return CGF.Builder.CreateAnd(
994 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
995}
996
997static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
998 unsigned BuiltinID,
999 const CallExpr *E) {
1000 Value *Addr = CGF.EmitScalarExpr(E->getArg(0));
1001
1002 SmallString<64> Asm;
1003 raw_svector_ostream AsmOS(Asm);
1004 llvm::IntegerType *RetType = CGF.Int32Ty;
1005
1006 switch (BuiltinID) {
1007 case clang::PPC::BI__builtin_ppc_ldarx:
1008 AsmOS << "ldarx ";
1009 RetType = CGF.Int64Ty;
1010 break;
1011 case clang::PPC::BI__builtin_ppc_lwarx:
1012 AsmOS << "lwarx ";
1013 RetType = CGF.Int32Ty;
1014 break;
1015 case clang::PPC::BI__builtin_ppc_lharx:
1016 AsmOS << "lharx ";
1017 RetType = CGF.Int16Ty;
1018 break;
1019 case clang::PPC::BI__builtin_ppc_lbarx:
1020 AsmOS << "lbarx ";
1021 RetType = CGF.Int8Ty;
1022 break;
1023 default:
1024 llvm_unreachable("Expected only PowerPC load reserve intrinsics");
1025 }
1026
1027 AsmOS << "$0, ${1:y}";
1028
1029 std::string Constraints = "=r,*Z,~{memory}";
1030 std::string MachineClobbers = CGF.getTarget().getClobbers();
1031 if (!MachineClobbers.empty()) {
1032 Constraints += ',';
1033 Constraints += MachineClobbers;
1034 }
1035
1036 llvm::Type *IntPtrType = RetType->getPointerTo();
1037 llvm::FunctionType *FTy =
1038 llvm::FunctionType::get(RetType, {IntPtrType}, false);
1039
1040 llvm::InlineAsm *IA =
1041 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1042 return CGF.Builder.CreateCall(IA, {Addr});
1043}
1044
1045namespace {
1046enum class MSVCSetJmpKind {
1047 _setjmpex,
1048 _setjmp3,
1049 _setjmp
1050};
1051}
1052
1053/// MSVC handles setjmp a bit differently on different platforms. On every
1054/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1055/// parameters can be passed as variadic arguments, but we always pass none.
1056static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1057 const CallExpr *E) {
1058 llvm::Value *Arg1 = nullptr;
1059 llvm::Type *Arg1Ty = nullptr;
1060 StringRef Name;
1061 bool IsVarArg = false;
1062 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1063 Name = "_setjmp3";
1064 Arg1Ty = CGF.Int32Ty;
1065 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1066 IsVarArg = true;
1067 } else {
1068 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1069 Arg1Ty = CGF.Int8PtrTy;
1070 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1071 Arg1 = CGF.Builder.CreateCall(
1072 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1073 } else
1074 Arg1 = CGF.Builder.CreateCall(
1075 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1076 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1077 }
1078
1079 // Mark the call site and declaration with ReturnsTwice.
1080 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1081 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1082 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1083 llvm::Attribute::ReturnsTwice);
1084 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1085 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1086 ReturnsTwiceAttr, /*Local=*/true);
1087
1088 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1089 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1090 llvm::Value *Args[] = {Buf, Arg1};
1091 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1092 CB->setAttributes(ReturnsTwiceAttr);
1093 return RValue::get(CB);
1094}
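// Illustrative sketch (not part of CGBuiltin.cpp): the runtime functions
// created above are declared roughly as below and marked returns_twice. On
// 32-bit x86 the variadic _setjmp3 form is called with a count of 0; on other
// targets the second argument is the frame address (llvm.frameaddress) or, on
// AArch64, the value of llvm.sponentry. These declarations are for exposition
// only, not the CRT headers.
extern "C" int _setjmp3(void *Buf, int Count, ...);   // 32-bit x86
extern "C" int _setjmpex(void *Buf, void *FrameAddr); // other targets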
1095
1096 // Many MSVC builtins are available on x64, ARM, and AArch64; to avoid
1097 // repeating code, we handle them here.
1098enum class CodeGenFunction::MSVCIntrin {
1099 _BitScanForward,
1100 _BitScanReverse,
1101 _InterlockedAnd,
1102 _InterlockedDecrement,
1103 _InterlockedExchange,
1104 _InterlockedExchangeAdd,
1105 _InterlockedExchangeSub,
1106 _InterlockedIncrement,
1107 _InterlockedOr,
1108 _InterlockedXor,
1109 _InterlockedExchangeAdd_acq,
1110 _InterlockedExchangeAdd_rel,
1111 _InterlockedExchangeAdd_nf,
1112 _InterlockedExchange_acq,
1113 _InterlockedExchange_rel,
1114 _InterlockedExchange_nf,
1115 _InterlockedCompareExchange_acq,
1116 _InterlockedCompareExchange_rel,
1117 _InterlockedCompareExchange_nf,
1118 _InterlockedCompareExchange128,
1119 _InterlockedCompareExchange128_acq,
1120 _InterlockedCompareExchange128_rel,
1121 _InterlockedCompareExchange128_nf,
1122 _InterlockedOr_acq,
1123 _InterlockedOr_rel,
1124 _InterlockedOr_nf,
1125 _InterlockedXor_acq,
1126 _InterlockedXor_rel,
1127 _InterlockedXor_nf,
1128 _InterlockedAnd_acq,
1129 _InterlockedAnd_rel,
1130 _InterlockedAnd_nf,
1131 _InterlockedIncrement_acq,
1132 _InterlockedIncrement_rel,
1133 _InterlockedIncrement_nf,
1134 _InterlockedDecrement_acq,
1135 _InterlockedDecrement_rel,
1136 _InterlockedDecrement_nf,
1137 __fastfail,
1138};
1139
1140static Optional<CodeGenFunction::MSVCIntrin>
1141translateArmToMsvcIntrin(unsigned BuiltinID) {
1142 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1143 switch (BuiltinID) {
1144 default:
1145 return None;
1146 case ARM::BI_BitScanForward:
1147 case ARM::BI_BitScanForward64:
1148 return MSVCIntrin::_BitScanForward;
1149 case ARM::BI_BitScanReverse:
1150 case ARM::BI_BitScanReverse64:
1151 return MSVCIntrin::_BitScanReverse;
1152 case ARM::BI_InterlockedAnd64:
1153 return MSVCIntrin::_InterlockedAnd;
1154 case ARM::BI_InterlockedExchange64:
1155 return MSVCIntrin::_InterlockedExchange;
1156 case ARM::BI_InterlockedExchangeAdd64:
1157 return MSVCIntrin::_InterlockedExchangeAdd;
1158 case ARM::BI_InterlockedExchangeSub64:
1159 return MSVCIntrin::_InterlockedExchangeSub;
1160 case ARM::BI_InterlockedOr64:
1161 return MSVCIntrin::_InterlockedOr;
1162 case ARM::BI_InterlockedXor64:
1163 return MSVCIntrin::_InterlockedXor;
1164 case ARM::BI_InterlockedDecrement64:
1165 return MSVCIntrin::_InterlockedDecrement;
1166 case ARM::BI_InterlockedIncrement64:
1167 return MSVCIntrin::_InterlockedIncrement;
1168 case ARM::BI_InterlockedExchangeAdd8_acq:
1169 case ARM::BI_InterlockedExchangeAdd16_acq:
1170 case ARM::BI_InterlockedExchangeAdd_acq:
1171 case ARM::BI_InterlockedExchangeAdd64_acq:
1172 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1173 case ARM::BI_InterlockedExchangeAdd8_rel:
1174 case ARM::BI_InterlockedExchangeAdd16_rel:
1175 case ARM::BI_InterlockedExchangeAdd_rel:
1176 case ARM::BI_InterlockedExchangeAdd64_rel:
1177 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1178 case ARM::BI_InterlockedExchangeAdd8_nf:
1179 case ARM::BI_InterlockedExchangeAdd16_nf:
1180 case ARM::BI_InterlockedExchangeAdd_nf:
1181 case ARM::BI_InterlockedExchangeAdd64_nf:
1182 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1183 case ARM::BI_InterlockedExchange8_acq:
1184 case ARM::BI_InterlockedExchange16_acq:
1185 case ARM::BI_InterlockedExchange_acq:
1186 case ARM::BI_InterlockedExchange64_acq:
1187 return MSVCIntrin::_InterlockedExchange_acq;
1188 case ARM::BI_InterlockedExchange8_rel:
1189 case ARM::BI_InterlockedExchange16_rel:
1190 case ARM::BI_InterlockedExchange_rel:
1191 case ARM::BI_InterlockedExchange64_rel:
1192 return MSVCIntrin::_InterlockedExchange_rel;
1193 case ARM::BI_InterlockedExchange8_nf:
1194 case ARM::BI_InterlockedExchange16_nf:
1195 case ARM::BI_InterlockedExchange_nf:
1196 case ARM::BI_InterlockedExchange64_nf:
1197 return MSVCIntrin::_InterlockedExchange_nf;
1198 case ARM::BI_InterlockedCompareExchange8_acq:
1199 case ARM::BI_InterlockedCompareExchange16_acq:
1200 case ARM::BI_InterlockedCompareExchange_acq:
1201 case ARM::BI_InterlockedCompareExchange64_acq:
1202 return MSVCIntrin::_InterlockedCompareExchange_acq;
1203 case ARM::BI_InterlockedCompareExchange8_rel:
1204 case ARM::BI_InterlockedCompareExchange16_rel:
1205 case ARM::BI_InterlockedCompareExchange_rel:
1206 case ARM::BI_InterlockedCompareExchange64_rel:
1207 return MSVCIntrin::_InterlockedCompareExchange_rel;
1208 case ARM::BI_InterlockedCompareExchange8_nf:
1209 case ARM::BI_InterlockedCompareExchange16_nf:
1210 case ARM::BI_InterlockedCompareExchange_nf:
1211 case ARM::BI_InterlockedCompareExchange64_nf:
1212 return MSVCIntrin::_InterlockedCompareExchange_nf;
1213 case ARM::BI_InterlockedOr8_acq:
1214 case ARM::BI_InterlockedOr16_acq:
1215 case ARM::BI_InterlockedOr_acq:
1216 case ARM::BI_InterlockedOr64_acq:
1217 return MSVCIntrin::_InterlockedOr_acq;
1218 case ARM::BI_InterlockedOr8_rel:
1219 case ARM::BI_InterlockedOr16_rel:
1220 case ARM::BI_InterlockedOr_rel:
1221 case ARM::BI_InterlockedOr64_rel:
1222 return MSVCIntrin::_InterlockedOr_rel;
1223 case ARM::BI_InterlockedOr8_nf:
1224 case ARM::BI_InterlockedOr16_nf:
1225 case ARM::BI_InterlockedOr_nf:
1226 case ARM::BI_InterlockedOr64_nf:
1227 return MSVCIntrin::_InterlockedOr_nf;
1228 case ARM::BI_InterlockedXor8_acq:
1229 case ARM::BI_InterlockedXor16_acq:
1230 case ARM::BI_InterlockedXor_acq:
1231 case ARM::BI_InterlockedXor64_acq:
1232 return MSVCIntrin::_InterlockedXor_acq;
1233 case ARM::BI_InterlockedXor8_rel:
1234 case ARM::BI_InterlockedXor16_rel:
1235 case ARM::BI_InterlockedXor_rel:
1236 case ARM::BI_InterlockedXor64_rel:
1237 return MSVCIntrin::_InterlockedXor_rel;
1238 case ARM::BI_InterlockedXor8_nf:
1239 case ARM::BI_InterlockedXor16_nf:
1240 case ARM::BI_InterlockedXor_nf:
1241 case ARM::BI_InterlockedXor64_nf:
1242 return MSVCIntrin::_InterlockedXor_nf;
1243 case ARM::BI_InterlockedAnd8_acq:
1244 case ARM::BI_InterlockedAnd16_acq:
1245 case ARM::BI_InterlockedAnd_acq:
1246 case ARM::BI_InterlockedAnd64_acq:
1247 return MSVCIntrin::_InterlockedAnd_acq;
1248 case ARM::BI_InterlockedAnd8_rel:
1249 case ARM::BI_InterlockedAnd16_rel:
1250 case ARM::BI_InterlockedAnd_rel:
1251 case ARM::BI_InterlockedAnd64_rel:
1252 return MSVCIntrin::_InterlockedAnd_rel;
1253 case ARM::BI_InterlockedAnd8_nf:
1254 case ARM::BI_InterlockedAnd16_nf:
1255 case ARM::BI_InterlockedAnd_nf:
1256 case ARM::BI_InterlockedAnd64_nf:
1257 return MSVCIntrin::_InterlockedAnd_nf;
1258 case ARM::BI_InterlockedIncrement16_acq:
1259 case ARM::BI_InterlockedIncrement_acq:
1260 case ARM::BI_InterlockedIncrement64_acq:
1261 return MSVCIntrin::_InterlockedIncrement_acq;
1262 case ARM::BI_InterlockedIncrement16_rel:
1263 case ARM::BI_InterlockedIncrement_rel:
1264 case ARM::BI_InterlockedIncrement64_rel:
1265 return MSVCIntrin::_InterlockedIncrement_rel;
1266 case ARM::BI_InterlockedIncrement16_nf:
1267 case ARM::BI_InterlockedIncrement_nf:
1268 case ARM::BI_InterlockedIncrement64_nf:
1269 return MSVCIntrin::_InterlockedIncrement_nf;
1270 case ARM::BI_InterlockedDecrement16_acq:
1271 case ARM::BI_InterlockedDecrement_acq:
1272 case ARM::BI_InterlockedDecrement64_acq:
1273 return MSVCIntrin::_InterlockedDecrement_acq;
1274 case ARM::BI_InterlockedDecrement16_rel:
1275 case ARM::BI_InterlockedDecrement_rel:
1276 case ARM::BI_InterlockedDecrement64_rel:
1277 return MSVCIntrin::_InterlockedDecrement_rel;
1278 case ARM::BI_InterlockedDecrement16_nf:
1279 case ARM::BI_InterlockedDecrement_nf:
1280 case ARM::BI_InterlockedDecrement64_nf:
1281 return MSVCIntrin::_InterlockedDecrement_nf;
1282 }
1283 llvm_unreachable("must return from switch")::llvm::llvm_unreachable_internal("must return from switch", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 1283)
;
1284}
1285
1286static Optional<CodeGenFunction::MSVCIntrin>
1287translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
1288 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1289 switch (BuiltinID) {
1290 default:
1291 return None;
1292 case AArch64::BI_BitScanForward:
1293 case AArch64::BI_BitScanForward64:
1294 return MSVCIntrin::_BitScanForward;
1295 case AArch64::BI_BitScanReverse:
1296 case AArch64::BI_BitScanReverse64:
1297 return MSVCIntrin::_BitScanReverse;
1298 case AArch64::BI_InterlockedAnd64:
1299 return MSVCIntrin::_InterlockedAnd;
1300 case AArch64::BI_InterlockedExchange64:
1301 return MSVCIntrin::_InterlockedExchange;
1302 case AArch64::BI_InterlockedExchangeAdd64:
1303 return MSVCIntrin::_InterlockedExchangeAdd;
1304 case AArch64::BI_InterlockedExchangeSub64:
1305 return MSVCIntrin::_InterlockedExchangeSub;
1306 case AArch64::BI_InterlockedOr64:
1307 return MSVCIntrin::_InterlockedOr;
1308 case AArch64::BI_InterlockedXor64:
1309 return MSVCIntrin::_InterlockedXor;
1310 case AArch64::BI_InterlockedDecrement64:
1311 return MSVCIntrin::_InterlockedDecrement;
1312 case AArch64::BI_InterlockedIncrement64:
1313 return MSVCIntrin::_InterlockedIncrement;
1314 case AArch64::BI_InterlockedExchangeAdd8_acq:
1315 case AArch64::BI_InterlockedExchangeAdd16_acq:
1316 case AArch64::BI_InterlockedExchangeAdd_acq:
1317 case AArch64::BI_InterlockedExchangeAdd64_acq:
1318 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1319 case AArch64::BI_InterlockedExchangeAdd8_rel:
1320 case AArch64::BI_InterlockedExchangeAdd16_rel:
1321 case AArch64::BI_InterlockedExchangeAdd_rel:
1322 case AArch64::BI_InterlockedExchangeAdd64_rel:
1323 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1324 case AArch64::BI_InterlockedExchangeAdd8_nf:
1325 case AArch64::BI_InterlockedExchangeAdd16_nf:
1326 case AArch64::BI_InterlockedExchangeAdd_nf:
1327 case AArch64::BI_InterlockedExchangeAdd64_nf:
1328 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1329 case AArch64::BI_InterlockedExchange8_acq:
1330 case AArch64::BI_InterlockedExchange16_acq:
1331 case AArch64::BI_InterlockedExchange_acq:
1332 case AArch64::BI_InterlockedExchange64_acq:
1333 return MSVCIntrin::_InterlockedExchange_acq;
1334 case AArch64::BI_InterlockedExchange8_rel:
1335 case AArch64::BI_InterlockedExchange16_rel:
1336 case AArch64::BI_InterlockedExchange_rel:
1337 case AArch64::BI_InterlockedExchange64_rel:
1338 return MSVCIntrin::_InterlockedExchange_rel;
1339 case AArch64::BI_InterlockedExchange8_nf:
1340 case AArch64::BI_InterlockedExchange16_nf:
1341 case AArch64::BI_InterlockedExchange_nf:
1342 case AArch64::BI_InterlockedExchange64_nf:
1343 return MSVCIntrin::_InterlockedExchange_nf;
1344 case AArch64::BI_InterlockedCompareExchange8_acq:
1345 case AArch64::BI_InterlockedCompareExchange16_acq:
1346 case AArch64::BI_InterlockedCompareExchange_acq:
1347 case AArch64::BI_InterlockedCompareExchange64_acq:
1348 return MSVCIntrin::_InterlockedCompareExchange_acq;
1349 case AArch64::BI_InterlockedCompareExchange8_rel:
1350 case AArch64::BI_InterlockedCompareExchange16_rel:
1351 case AArch64::BI_InterlockedCompareExchange_rel:
1352 case AArch64::BI_InterlockedCompareExchange64_rel:
1353 return MSVCIntrin::_InterlockedCompareExchange_rel;
1354 case AArch64::BI_InterlockedCompareExchange8_nf:
1355 case AArch64::BI_InterlockedCompareExchange16_nf:
1356 case AArch64::BI_InterlockedCompareExchange_nf:
1357 case AArch64::BI_InterlockedCompareExchange64_nf:
1358 return MSVCIntrin::_InterlockedCompareExchange_nf;
1359 case AArch64::BI_InterlockedCompareExchange128:
1360 return MSVCIntrin::_InterlockedCompareExchange128;
1361 case AArch64::BI_InterlockedCompareExchange128_acq:
1362 return MSVCIntrin::_InterlockedCompareExchange128_acq;
1363 case AArch64::BI_InterlockedCompareExchange128_nf:
1364 return MSVCIntrin::_InterlockedCompareExchange128_nf;
1365 case AArch64::BI_InterlockedCompareExchange128_rel:
1366 return MSVCIntrin::_InterlockedCompareExchange128_rel;
1367 case AArch64::BI_InterlockedOr8_acq:
1368 case AArch64::BI_InterlockedOr16_acq:
1369 case AArch64::BI_InterlockedOr_acq:
1370 case AArch64::BI_InterlockedOr64_acq:
1371 return MSVCIntrin::_InterlockedOr_acq;
1372 case AArch64::BI_InterlockedOr8_rel:
1373 case AArch64::BI_InterlockedOr16_rel:
1374 case AArch64::BI_InterlockedOr_rel:
1375 case AArch64::BI_InterlockedOr64_rel:
1376 return MSVCIntrin::_InterlockedOr_rel;
1377 case AArch64::BI_InterlockedOr8_nf:
1378 case AArch64::BI_InterlockedOr16_nf:
1379 case AArch64::BI_InterlockedOr_nf:
1380 case AArch64::BI_InterlockedOr64_nf:
1381 return MSVCIntrin::_InterlockedOr_nf;
1382 case AArch64::BI_InterlockedXor8_acq:
1383 case AArch64::BI_InterlockedXor16_acq:
1384 case AArch64::BI_InterlockedXor_acq:
1385 case AArch64::BI_InterlockedXor64_acq:
1386 return MSVCIntrin::_InterlockedXor_acq;
1387 case AArch64::BI_InterlockedXor8_rel:
1388 case AArch64::BI_InterlockedXor16_rel:
1389 case AArch64::BI_InterlockedXor_rel:
1390 case AArch64::BI_InterlockedXor64_rel:
1391 return MSVCIntrin::_InterlockedXor_rel;
1392 case AArch64::BI_InterlockedXor8_nf:
1393 case AArch64::BI_InterlockedXor16_nf:
1394 case AArch64::BI_InterlockedXor_nf:
1395 case AArch64::BI_InterlockedXor64_nf:
1396 return MSVCIntrin::_InterlockedXor_nf;
1397 case AArch64::BI_InterlockedAnd8_acq:
1398 case AArch64::BI_InterlockedAnd16_acq:
1399 case AArch64::BI_InterlockedAnd_acq:
1400 case AArch64::BI_InterlockedAnd64_acq:
1401 return MSVCIntrin::_InterlockedAnd_acq;
1402 case AArch64::BI_InterlockedAnd8_rel:
1403 case AArch64::BI_InterlockedAnd16_rel:
1404 case AArch64::BI_InterlockedAnd_rel:
1405 case AArch64::BI_InterlockedAnd64_rel:
1406 return MSVCIntrin::_InterlockedAnd_rel;
1407 case AArch64::BI_InterlockedAnd8_nf:
1408 case AArch64::BI_InterlockedAnd16_nf:
1409 case AArch64::BI_InterlockedAnd_nf:
1410 case AArch64::BI_InterlockedAnd64_nf:
1411 return MSVCIntrin::_InterlockedAnd_nf;
1412 case AArch64::BI_InterlockedIncrement16_acq:
1413 case AArch64::BI_InterlockedIncrement_acq:
1414 case AArch64::BI_InterlockedIncrement64_acq:
1415 return MSVCIntrin::_InterlockedIncrement_acq;
1416 case AArch64::BI_InterlockedIncrement16_rel:
1417 case AArch64::BI_InterlockedIncrement_rel:
1418 case AArch64::BI_InterlockedIncrement64_rel:
1419 return MSVCIntrin::_InterlockedIncrement_rel;
1420 case AArch64::BI_InterlockedIncrement16_nf:
1421 case AArch64::BI_InterlockedIncrement_nf:
1422 case AArch64::BI_InterlockedIncrement64_nf:
1423 return MSVCIntrin::_InterlockedIncrement_nf;
1424 case AArch64::BI_InterlockedDecrement16_acq:
1425 case AArch64::BI_InterlockedDecrement_acq:
1426 case AArch64::BI_InterlockedDecrement64_acq:
1427 return MSVCIntrin::_InterlockedDecrement_acq;
1428 case AArch64::BI_InterlockedDecrement16_rel:
1429 case AArch64::BI_InterlockedDecrement_rel:
1430 case AArch64::BI_InterlockedDecrement64_rel:
1431 return MSVCIntrin::_InterlockedDecrement_rel;
1432 case AArch64::BI_InterlockedDecrement16_nf:
1433 case AArch64::BI_InterlockedDecrement_nf:
1434 case AArch64::BI_InterlockedDecrement64_nf:
1435 return MSVCIntrin::_InterlockedDecrement_nf;
1436 }
1437 llvm_unreachable("must return from switch")::llvm::llvm_unreachable_internal("must return from switch", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 1437)
;
1438}
1439
1440static Optional<CodeGenFunction::MSVCIntrin>
1441translateX86ToMsvcIntrin(unsigned BuiltinID) {
1442 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1443 switch (BuiltinID) {
1444 default:
1445 return None;
1446 case clang::X86::BI_BitScanForward:
1447 case clang::X86::BI_BitScanForward64:
1448 return MSVCIntrin::_BitScanForward;
1449 case clang::X86::BI_BitScanReverse:
1450 case clang::X86::BI_BitScanReverse64:
1451 return MSVCIntrin::_BitScanReverse;
1452 case clang::X86::BI_InterlockedAnd64:
1453 return MSVCIntrin::_InterlockedAnd;
1454 case clang::X86::BI_InterlockedCompareExchange128:
1455 return MSVCIntrin::_InterlockedCompareExchange128;
1456 case clang::X86::BI_InterlockedExchange64:
1457 return MSVCIntrin::_InterlockedExchange;
1458 case clang::X86::BI_InterlockedExchangeAdd64:
1459 return MSVCIntrin::_InterlockedExchangeAdd;
1460 case clang::X86::BI_InterlockedExchangeSub64:
1461 return MSVCIntrin::_InterlockedExchangeSub;
1462 case clang::X86::BI_InterlockedOr64:
1463 return MSVCIntrin::_InterlockedOr;
1464 case clang::X86::BI_InterlockedXor64:
1465 return MSVCIntrin::_InterlockedXor;
1466 case clang::X86::BI_InterlockedDecrement64:
1467 return MSVCIntrin::_InterlockedDecrement;
1468 case clang::X86::BI_InterlockedIncrement64:
1469 return MSVCIntrin::_InterlockedIncrement;
1470 }
1471 llvm_unreachable("must return from switch")::llvm::llvm_unreachable_internal("must return from switch", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 1471)
;
1472}
1473
1474// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1475Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1476 const CallExpr *E) {
1477 switch (BuiltinID) {
1478 case MSVCIntrin::_BitScanForward:
1479 case MSVCIntrin::_BitScanReverse: {
1480 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1481 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1482
1483 llvm::Type *ArgType = ArgValue->getType();
1484 llvm::Type *IndexType =
1485 IndexAddress.getPointer()->getType()->getPointerElementType();
1486 llvm::Type *ResultType = ConvertType(E->getType());
1487
1488 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1489 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1490 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1491
1492 BasicBlock *Begin = Builder.GetInsertBlock();
1493 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1494 Builder.SetInsertPoint(End);
1495 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1496
1497 Builder.SetInsertPoint(Begin);
1498 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1499 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1500 Builder.CreateCondBr(IsZero, End, NotZero);
1501 Result->addIncoming(ResZero, Begin);
1502
1503 Builder.SetInsertPoint(NotZero);
1504
1505 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1506 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1507 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1508 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1509 Builder.CreateStore(ZeroCount, IndexAddress, false);
1510 } else {
1511 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1512 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1513
1514 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1515 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1516 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1517 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1518 Builder.CreateStore(Index, IndexAddress, false);
1519 }
1520 Builder.CreateBr(End);
1521 Result->addIncoming(ResOne, NotZero);
1522
1523 Builder.SetInsertPoint(End);
1524 return Result;
1525 }
1526 case MSVCIntrin::_InterlockedAnd:
1527 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1528 case MSVCIntrin::_InterlockedExchange:
1529 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1530 case MSVCIntrin::_InterlockedExchangeAdd:
1531 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1532 case MSVCIntrin::_InterlockedExchangeSub:
1533 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1534 case MSVCIntrin::_InterlockedOr:
1535 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1536 case MSVCIntrin::_InterlockedXor:
1537 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1538 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1539 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1540 AtomicOrdering::Acquire);
1541 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1542 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1543 AtomicOrdering::Release);
1544 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1545 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1546 AtomicOrdering::Monotonic);
1547 case MSVCIntrin::_InterlockedExchange_acq:
1548 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1549 AtomicOrdering::Acquire);
1550 case MSVCIntrin::_InterlockedExchange_rel:
1551 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1552 AtomicOrdering::Release);
1553 case MSVCIntrin::_InterlockedExchange_nf:
1554 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1555 AtomicOrdering::Monotonic);
1556 case MSVCIntrin::_InterlockedCompareExchange_acq:
1557 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1558 case MSVCIntrin::_InterlockedCompareExchange_rel:
1559 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1560 case MSVCIntrin::_InterlockedCompareExchange_nf:
1561 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1562 case MSVCIntrin::_InterlockedCompareExchange128:
1563 return EmitAtomicCmpXchg128ForMSIntrin(
1564 *this, E, AtomicOrdering::SequentiallyConsistent);
1565 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1566 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1567 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1568 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1569 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1570 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1571 case MSVCIntrin::_InterlockedOr_acq:
1572 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1573 AtomicOrdering::Acquire);
1574 case MSVCIntrin::_InterlockedOr_rel:
1575 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1576 AtomicOrdering::Release);
1577 case MSVCIntrin::_InterlockedOr_nf:
1578 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1579 AtomicOrdering::Monotonic);
1580 case MSVCIntrin::_InterlockedXor_acq:
1581 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1582 AtomicOrdering::Acquire);
1583 case MSVCIntrin::_InterlockedXor_rel:
1584 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1585 AtomicOrdering::Release);
1586 case MSVCIntrin::_InterlockedXor_nf:
1587 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1588 AtomicOrdering::Monotonic);
1589 case MSVCIntrin::_InterlockedAnd_acq:
1590 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1591 AtomicOrdering::Acquire);
1592 case MSVCIntrin::_InterlockedAnd_rel:
1593 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1594 AtomicOrdering::Release);
1595 case MSVCIntrin::_InterlockedAnd_nf:
1596 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1597 AtomicOrdering::Monotonic);
1598 case MSVCIntrin::_InterlockedIncrement_acq:
1599 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1600 case MSVCIntrin::_InterlockedIncrement_rel:
1601 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1602 case MSVCIntrin::_InterlockedIncrement_nf:
1603 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1604 case MSVCIntrin::_InterlockedDecrement_acq:
1605 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1606 case MSVCIntrin::_InterlockedDecrement_rel:
1607 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1608 case MSVCIntrin::_InterlockedDecrement_nf:
1609 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1610
1611 case MSVCIntrin::_InterlockedDecrement:
1612 return EmitAtomicDecrementValue(*this, E);
1613 case MSVCIntrin::_InterlockedIncrement:
1614 return EmitAtomicIncrementValue(*this, E);
1615
1616 case MSVCIntrin::__fastfail: {
1617 // Request immediate process termination from the kernel. The instruction
1618 // sequences to do this are documented on MSDN:
1619 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1620 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1621 StringRef Asm, Constraints;
1622 switch (ISA) {
1623 default:
1624 ErrorUnsupported(E, "__fastfail call for this architecture");
1625 break;
1626 case llvm::Triple::x86:
1627 case llvm::Triple::x86_64:
1628 Asm = "int $$0x29";
1629 Constraints = "{cx}";
1630 break;
1631 case llvm::Triple::thumb:
1632 Asm = "udf #251";
1633 Constraints = "{r0}";
1634 break;
1635 case llvm::Triple::aarch64:
1636 Asm = "brk #0xF003";
1637 Constraints = "{w0}";
1638 }
1639 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1640 llvm::InlineAsm *IA =
1641 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1642 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1643 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1644 llvm::Attribute::NoReturn);
1645 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1646 CI->setAttributes(NoReturnAttr);
1647 return CI;
1648 }
1649 }
1650 llvm_unreachable("Incorrect MSVC intrinsic!")::llvm::llvm_unreachable_internal("Incorrect MSVC intrinsic!"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 1650)
;
1651}
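// Illustrative sketch (not part of CGBuiltin.cpp): the _BitScanForward lowering
// above is semantically close to the C below -- return 0 and leave *Index
// unwritten for a zero input, otherwise store the index of the lowest set bit
// (llvm.cttz) and return 1. _BitScanReverse instead uses llvm.ctlz and stores
// (width - 1 - leading zeros).
static unsigned char sketchBitScanForward(unsigned long *Index,
                                          unsigned long Mask) {
  if (Mask == 0)
    return 0;                                   // branch to bitscan_end with 0
  *Index = (unsigned long)__builtin_ctzl(Mask); // count trailing zeros
  return 1;                                     // bitscan_end with result 1
}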
1652
1653namespace {
1654// ARC cleanup for __builtin_os_log_format
1655struct CallObjCArcUse final : EHScopeStack::Cleanup {
1656 CallObjCArcUse(llvm::Value *object) : object(object) {}
1657 llvm::Value *object;
1658
1659 void Emit(CodeGenFunction &CGF, Flags flags) override {
1660 CGF.EmitARCIntrinsicUse(object);
1661 }
1662};
1663}
1664
1665Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1666 BuiltinCheckKind Kind) {
1667   assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1668          && "Unsupported builtin check kind");
1669
1670 Value *ArgValue = EmitScalarExpr(E);
1671 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1672 return ArgValue;
1673
1674 SanitizerScope SanScope(this);
1675 Value *Cond = Builder.CreateICmpNE(
1676 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1677 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1678 SanitizerHandler::InvalidBuiltin,
1679 {EmitCheckSourceLocation(E->getExprLoc()),
1680 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1681 None);
1682 return ArgValue;
1683}
1684
1685/// Get the argument type for arguments to os_log_helper.
1686static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1687 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1688 return C.getCanonicalType(UnsignedTy);
1689}
1690
1691llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1692 const analyze_os_log::OSLogBufferLayout &Layout,
1693 CharUnits BufferAlignment) {
1694 ASTContext &Ctx = getContext();
1695
1696 llvm::SmallString<64> Name;
1697 {
1698 raw_svector_ostream OS(Name);
1699 OS << "__os_log_helper";
1700 OS << "_" << BufferAlignment.getQuantity();
1701 OS << "_" << int(Layout.getSummaryByte());
1702 OS << "_" << int(Layout.getNumArgsByte());
1703 for (const auto &Item : Layout.Items)
1704 OS << "_" << int(Item.getSizeByte()) << "_"
1705 << int(Item.getDescriptorByte());
1706 }
1707
1708 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1709 return F;
1710
1711 llvm::SmallVector<QualType, 4> ArgTys;
1712 FunctionArgList Args;
1713 Args.push_back(ImplicitParamDecl::Create(
1714 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1715 ImplicitParamDecl::Other));
1716 ArgTys.emplace_back(Ctx.VoidPtrTy);
1717
1718 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1719 char Size = Layout.Items[I].getSizeByte();
1720 if (!Size)
1721 continue;
1722
1723 QualType ArgTy = getOSLogArgType(Ctx, Size);
1724 Args.push_back(ImplicitParamDecl::Create(
1725 Ctx, nullptr, SourceLocation(),
1726 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1727 ImplicitParamDecl::Other));
1728 ArgTys.emplace_back(ArgTy);
1729 }
1730
1731 QualType ReturnTy = Ctx.VoidTy;
1732
1733 // The helper function has linkonce_odr linkage to enable the linker to merge
1734 // identical functions. To ensure the merging always happens, 'noinline' is
1735 // attached to the function when compiling with -Oz.
1736 const CGFunctionInfo &FI =
1737 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1738 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1739 llvm::Function *Fn = llvm::Function::Create(
1740 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1741 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1742 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
1743 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1744 Fn->setDoesNotThrow();
1745
1746 // Attach 'noinline' at -Oz.
1747 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1748 Fn->addFnAttr(llvm::Attribute::NoInline);
1749
1750 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1751 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
1752
1753 // Create a scope with an artificial location for the body of this function.
1754 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1755
1756 CharUnits Offset;
1757 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1758 BufferAlignment);
1759 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1760 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1761 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1762 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1763
1764 unsigned I = 1;
1765 for (const auto &Item : Layout.Items) {
1766 Builder.CreateStore(
1767 Builder.getInt8(Item.getDescriptorByte()),
1768 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1769 Builder.CreateStore(
1770 Builder.getInt8(Item.getSizeByte()),
1771 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1772
1773 CharUnits Size = Item.size();
1774 if (!Size.getQuantity())
1775 continue;
1776
1777 Address Arg = GetAddrOfLocalVar(Args[I]);
1778 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1779 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1780 "argDataCast");
1781 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1782 Offset += Size;
1783 ++I;
1784 }
1785
1786 FinishFunction();
1787
1788 return Fn;
1789}
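// Illustrative sketch (not part of CGBuiltin.cpp): the helper generated above
// fills the os_log buffer with a summary byte, an argument-count byte, and
// then, for each argument, a descriptor byte, a size byte, and the raw
// argument bytes. Ignoring padding (the real buffer is byte-packed), a
// hypothetical layout for two 8-byte arguments looks like:
//
//   struct OSLogBufferSketch {
//     uint8_t Summary;
//     uint8_t NumArgs;                 // 2
//     uint8_t Desc0, Size0; uint64_t Arg0;
//     uint8_t Desc1, Size1; uint64_t Arg1;
//   };
//
// The helper name encodes the buffer alignment, the summary and count bytes,
// and each (size, descriptor) pair, e.g. "__os_log_helper_1_2_2_8_0_8_0"
// (values hypothetical).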
1790
1791RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1792   assert(E.getNumArgs() >= 2 &&
1793          "__builtin_os_log_format takes at least 2 arguments");
1794 ASTContext &Ctx = getContext();
1795 analyze_os_log::OSLogBufferLayout Layout;
1796 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1797 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1798 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1799
1800 // Ignore argument 1, the format string. It is not currently used.
1801 CallArgList Args;
1802 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1803
1804 for (const auto &Item : Layout.Items) {
1805 int Size = Item.getSizeByte();
1806 if (!Size)
1807 continue;
1808
1809 llvm::Value *ArgVal;
1810
1811 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1812 uint64_t Val = 0;
1813 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1814 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1815 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1816 } else if (const Expr *TheExpr = Item.getExpr()) {
1817 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1818
1819 // If a temporary object that requires destruction after the full
1820 // expression is passed, push a lifetime-extended cleanup to extend its
1821 // lifetime to the end of the enclosing block scope.
1822 auto LifetimeExtendObject = [&](const Expr *E) {
1823 E = E->IgnoreParenCasts();
1824 // Extend lifetimes of objects returned by function calls and message
1825 // sends.
1826
1827 // FIXME: We should do this in other cases in which temporaries are
1828 // created including arguments of non-ARC types (e.g., C++
1829 // temporaries).
1830 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1831 return true;
1832 return false;
1833 };
1834
1835 if (TheExpr->getType()->isObjCRetainableType() &&
1836 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1837       assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1838              "Only scalar can be a ObjC retainable type");
1839 if (!isa<Constant>(ArgVal)) {
1840 CleanupKind Cleanup = getARCCleanupKind();
1841 QualType Ty = TheExpr->getType();
1842 Address Alloca = Address::invalid();
1843 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1844 ArgVal = EmitARCRetain(Ty, ArgVal);
1845 Builder.CreateStore(ArgVal, Addr);
1846 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1847 CodeGenFunction::destroyARCStrongPrecise,
1848 Cleanup & EHCleanup);
1849
1850 // Push a clang.arc.use call to ensure ARC optimizer knows that the
1851 // argument has to be alive.
1852 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1853 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1854 }
1855 }
1856 } else {
1857 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1858 }
1859
1860 unsigned ArgValSize =
1861 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1862 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1863 ArgValSize);
1864 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1865 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1866 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1867 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1868 Args.add(RValue::get(ArgVal), ArgTy);
1869 }
1870
1871 const CGFunctionInfo &FI =
1872 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1873 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1874 Layout, BufAddr.getAlignment());
1875 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1876 return RValue::get(BufAddr.getPointer());
1877}
1878
1879static bool isSpecialUnsignedMultiplySignedResult(
1880 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
1881 WidthAndSignedness ResultInfo) {
1882 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1883 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
1884 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
1885}
1886
1887static RValue EmitCheckedUnsignedMultiplySignedResult(
1888 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
1889 const clang::Expr *Op2, WidthAndSignedness Op2Info,
1890 const clang::Expr *ResultArg, QualType ResultQTy,
1891 WidthAndSignedness ResultInfo) {
1892   assert(isSpecialUnsignedMultiplySignedResult(
1893              Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
1894          "Cannot specialize this multiply");
1895
1896 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
1897 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
1898
1899 llvm::Value *HasOverflow;
1900 llvm::Value *Result = EmitOverflowIntrinsic(
1901 CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
1902
1903 // The intrinsic call will detect overflow when the value is > UINT_MAX,
1904 // however, since the original builtin had a signed result, we need to report
1905 // an overflow when the result is greater than INT_MAX.
1906 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
1907 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
1908
1909 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
1910 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
1911
1912 bool isVolatile =
1913 ResultArg->getType()->getPointeeType().isVolatileQualified();
1914 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1915 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1916 isVolatile);
1917 return RValue::get(HasOverflow);
1918}
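// Worked example (not from the source): with 32-bit operands,
// __builtin_mul_overflow(3000000000u, 1u, &signed_result) does not overflow
// the unsigned multiply (3000000000 <= UINT_MAX) but is still flagged here,
// because 3000000000 > INT_MAX (2147483647) and the result type is signed.
// A minimal scalar sketch of the same check:
static bool sketchUnsignedMulSignedResult(unsigned A, unsigned B, int *Res) {
  unsigned long long Wide = (unsigned long long)A * B; // exact 64-bit product
  *Res = (int)Wide;                                    // truncating store, as above
  return Wide > 0x7fffffffULL;                         // overflow past INT_MAX
}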
1919
1920/// Determine if a binop is a checked mixed-sign multiply we can specialize.
1921static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1922 WidthAndSignedness Op1Info,
1923 WidthAndSignedness Op2Info,
1924 WidthAndSignedness ResultInfo) {
1925 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1926 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1927 Op1Info.Signed != Op2Info.Signed;
1928}
1929
1930/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1931/// the generic checked-binop irgen.
1932static RValue
1933EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1934 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1935 WidthAndSignedness Op2Info,
1936 const clang::Expr *ResultArg, QualType ResultQTy,
1937 WidthAndSignedness ResultInfo) {
1938   assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1939                                     Op2Info, ResultInfo) &&
1940          "Not a mixed-sign multipliction we can specialize");
1941
1942 // Emit the signed and unsigned operands.
1943 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1944 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1945 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1946 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1947 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1948 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1949
1950 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1951 if (SignedOpWidth < UnsignedOpWidth)
1952 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1953 if (UnsignedOpWidth < SignedOpWidth)
1954 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1955
1956 llvm::Type *OpTy = Signed->getType();
1957 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1958 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1959 llvm::Type *ResTy = ResultPtr.getElementType();
1960 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1961
1962 // Take the absolute value of the signed operand.
1963 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1964 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1965 llvm::Value *AbsSigned =
1966 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1967
1968 // Perform a checked unsigned multiplication.
1969 llvm::Value *UnsignedOverflow;
1970 llvm::Value *UnsignedResult =
1971 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1972 Unsigned, UnsignedOverflow);
1973
1974 llvm::Value *Overflow, *Result;
1975 if (ResultInfo.Signed) {
1976     // Signed overflow occurs if the result is greater than INT_MAX or less
1977     // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1978 auto IntMax =
1979 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1980 llvm::Value *MaxResult =
1981 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1982 CGF.Builder.CreateZExt(IsNegative, OpTy));
1983 llvm::Value *SignedOverflow =
1984 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1985 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1986
1987 // Prepare the signed result (possibly by negating it).
1988 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1989 llvm::Value *SignedResult =
1990 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1991 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1992 } else {
1993 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1994 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1995 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1996 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1997 if (ResultInfo.Width < OpWidth) {
1998 auto IntMax =
1999 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2000 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2001 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2002 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2003 }
2004
2005 // Negate the product if it would be negative in infinite precision.
2006 Result = CGF.Builder.CreateSelect(
2007 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2008
2009 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2010 }
2011   assert(Overflow && Result && "Missing overflow or result");
2012
2013 bool isVolatile =
2014 ResultArg->getType()->getPointeeType().isVolatileQualified();
2015 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2016 isVolatile);
2017 return RValue::get(Overflow);
2018}
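// Worked example (not from the source): for __builtin_mul_overflow(-3, 5u, &r)
// with a signed 32-bit result, the code above takes |-3| = 3, computes the
// unsigned product 3 * 5 = 15, sees IsNegative, and selects the negated value,
// storing r = -15 with no overflow. Overflow would be reported if the unsigned
// multiply itself overflowed, or if the product exceeded INT_MAX + 1 (the
// extra 1 is allowed because the final result is negative).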
2019
2020static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
2021 Value *&RecordPtr, CharUnits Align,
2022 llvm::FunctionCallee Func, int Lvl) {
2023 ASTContext &Context = CGF.getContext();
2024 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
2025 std::string Pad = std::string(Lvl * 4, ' ');
2026
2027 Value *GString =
2028 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
2029 Value *Res = CGF.Builder.CreateCall(Func, {GString});
2030
2031 static llvm::DenseMap<QualType, const char *> Types;
2032 if (Types.empty()) {
2033 Types[Context.CharTy] = "%c";
2034 Types[Context.BoolTy] = "%d";
2035 Types[Context.SignedCharTy] = "%hhd";
2036 Types[Context.UnsignedCharTy] = "%hhu";
2037 Types[Context.IntTy] = "%d";
2038 Types[Context.UnsignedIntTy] = "%u";
2039 Types[Context.LongTy] = "%ld";
2040 Types[Context.UnsignedLongTy] = "%lu";
2041 Types[Context.LongLongTy] = "%lld";
2042 Types[Context.UnsignedLongLongTy] = "%llu";
2043 Types[Context.ShortTy] = "%hd";
2044 Types[Context.UnsignedShortTy] = "%hu";
2045 Types[Context.VoidPtrTy] = "%p";
2046 Types[Context.FloatTy] = "%f";
2047 Types[Context.DoubleTy] = "%f";
2048 Types[Context.LongDoubleTy] = "%Lf";
2049 Types[Context.getPointerType(Context.CharTy)] = "%s";
2050 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
2051 }
2052
2053 for (const auto *FD : RD->fields()) {
2054 Value *FieldPtr = RecordPtr;
2055 if (RD->isUnion())
2056 FieldPtr = CGF.Builder.CreatePointerCast(
2057 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
2058 else
2059 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
2060 FD->getFieldIndex());
2061
2062 GString = CGF.Builder.CreateGlobalStringPtr(
2063 llvm::Twine(Pad)
2064 .concat(FD->getType().getAsString())
2065 .concat(llvm::Twine(' '))
2066 .concat(FD->getNameAsString())
2067 .concat(" : ")
2068 .str());
2069 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2070 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2071
2072 QualType CanonicalType =
2073 FD->getType().getUnqualifiedType().getCanonicalType();
2074
2075     // If the field is itself a record type, dump it recursively.
2076 if (CanonicalType->isRecordType()) {
2077 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
2078 Res = CGF.Builder.CreateAdd(TmpRes, Res);
2079 continue;
2080 }
2081
2082 // We try to determine the best format to print the current field
2083 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
2084 ? Types[Context.VoidPtrTy]
2085 : Types[CanonicalType];
2086
2087 Address FieldAddress = Address(FieldPtr, Align);
2088 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
2089
2090 // FIXME Need to handle bitfield here
2091 GString = CGF.Builder.CreateGlobalStringPtr(
2092 Format.concat(llvm::Twine('\n')).str());
2093 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
2094 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2095 }
2096
2097 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
2098 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2099 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2100 return Res;
2101}
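// Illustrative sketch (not part of CGBuiltin.cpp): for a record such as
//   struct P { int x; const char *name; };
// __builtin_dump_struct(&p, &printf) drives one printf-style call per line,
// producing output shaped roughly like
//   struct P {
//   int x : <value via %d>
//   const char * name : <value via %s>
//   }
// with nested records dumped recursively at a deeper indentation level (Lvl).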
2102
2103static bool
2104TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2105 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2106 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2107 Ty = Ctx.getBaseElementType(Arr);
2108
2109 const auto *Record = Ty->getAsCXXRecordDecl();
2110 if (!Record)
2111 return false;
2112
2113 // We've already checked this type, or are in the process of checking it.
2114 if (!Seen.insert(Record).second)
2115 return false;
2116
2117   assert(Record->hasDefinition() &&
2118          "Incomplete types should already be diagnosed");
2119
2120 if (Record->isDynamicClass())
2121 return true;
2122
2123 for (FieldDecl *F : Record->fields()) {
2124 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2125 return true;
2126 }
2127 return false;
2128}
2129
2130/// Determine if the specified type requires laundering by checking if it is a
2131/// dynamic class type or contains a subobject which is a dynamic class type.
2132static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2133 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2134 return false;
2135 llvm::SmallPtrSet<const Decl *, 16> Seen;
2136 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2137}
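// Illustrative sketch (not part of CGBuiltin.cpp): under
// -fstrict-vtable-pointers a type requires laundering when it is, or contains
// (possibly through arrays or nested fields), a dynamic class:
struct LaunderSketchPlain   { int X; };                            // not required
struct LaunderSketchDynamic { virtual ~LaunderSketchDynamic(); };  // required
struct LaunderSketchHolder  { LaunderSketchDynamic D[2]; };        // required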
2138
2139RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2140 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2141 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2142
2143 // The builtin's shift arg may have a different type than the source arg and
2144 // result, but the LLVM intrinsic uses the same type for all values.
2145 llvm::Type *Ty = Src->getType();
2146 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2147
2148 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2149 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2150 Function *F = CGM.getIntrinsic(IID, Ty);
2151 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2152}
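// Illustrative sketch (not part of CGBuiltin.cpp): __builtin_rotateleft8(x, n)
// becomes llvm.fshl.i8(x, x, n); with both funnel-shift inputs equal this is a
// plain rotate, equivalent to the C expression below (the shift amount is
// taken modulo the bit width, as the intrinsic does).
static unsigned char sketchRotateLeft8(unsigned char X, unsigned char N) {
  N &= 7;                                             // fshl masks the amount
  return (unsigned char)((X << N) | (X >> ((8 - N) & 7)));
}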
2153
2154// Map math builtins for long-double to f128 version.
2155static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2156 switch (BuiltinID) {
2157#define MUTATE_LDBL(func) \
2158 case Builtin::BI__builtin_##func##l: \
2159 return Builtin::BI__builtin_##func##f128;
2160 MUTATE_LDBL(sqrt)
2161 MUTATE_LDBL(cbrt)
2162 MUTATE_LDBL(fabs)
2163 MUTATE_LDBL(log)
2164 MUTATE_LDBL(log2)
2165 MUTATE_LDBL(log10)
2166 MUTATE_LDBL(log1p)
2167 MUTATE_LDBL(logb)
2168 MUTATE_LDBL(exp)
2169 MUTATE_LDBL(exp2)
2170 MUTATE_LDBL(expm1)
2171 MUTATE_LDBL(fdim)
2172 MUTATE_LDBL(hypot)
2173 MUTATE_LDBL(ilogb)
2174 MUTATE_LDBL(pow)
2175 MUTATE_LDBL(fmin)
2176 MUTATE_LDBL(fmax)
2177 MUTATE_LDBL(ceil)
2178 MUTATE_LDBL(trunc)
2179 MUTATE_LDBL(rint)
2180 MUTATE_LDBL(nearbyint)
2181 MUTATE_LDBL(round)
2182 MUTATE_LDBL(floor)
2183 MUTATE_LDBL(lround)
2184 MUTATE_LDBL(llround)
2185 MUTATE_LDBL(lrint)
2186 MUTATE_LDBL(llrint)
2187 MUTATE_LDBL(fmod)
2188 MUTATE_LDBL(modf)
2189 MUTATE_LDBL(nan)
2190 MUTATE_LDBL(nans)
2191 MUTATE_LDBL(inf)
2192 MUTATE_LDBL(fma)
2193 MUTATE_LDBL(sin)
2194 MUTATE_LDBL(cos)
2195 MUTATE_LDBL(tan)
2196 MUTATE_LDBL(sinh)
2197 MUTATE_LDBL(cosh)
2198 MUTATE_LDBL(tanh)
2199 MUTATE_LDBL(asin)
2200 MUTATE_LDBL(acos)
2201 MUTATE_LDBL(atan)
2202 MUTATE_LDBL(asinh)
2203 MUTATE_LDBL(acosh)
2204 MUTATE_LDBL(atanh)
2205 MUTATE_LDBL(atan2)
2206 MUTATE_LDBL(erf)
2207 MUTATE_LDBL(erfc)
2208 MUTATE_LDBL(ldexp)
2209 MUTATE_LDBL(frexp)
2210 MUTATE_LDBL(huge_val)
2211 MUTATE_LDBL(copysign)
2212 MUTATE_LDBL(nextafter)
2213 MUTATE_LDBL(nexttoward)
2214 MUTATE_LDBL(remainder)
2215 MUTATE_LDBL(remquo)
2216 MUTATE_LDBL(scalbln)
2217 MUTATE_LDBL(scalbn)
2218 MUTATE_LDBL(tgamma)
2219 MUTATE_LDBL(lgamma)
2220#undef MUTATE_LDBL
2221 default:
2222 return BuiltinID;
2223 }
2224}
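// Illustrative note (not part of CGBuiltin.cpp): on a PPC64 target whose long
// double is IEEE quad (checked by the caller below), a call such as
// __builtin_sqrtl(x) is remapped here to __builtin_sqrtf128(x), so the rest of
// EmitBuiltinExpr emits the f128 flavor of the libcall/intrinsic.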
2225
2226RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2227 const CallExpr *E,
2228 ReturnValueSlot ReturnValue) {
2229 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2230 // See if we can constant fold this builtin. If so, don't emit it at all.
2231 Expr::EvalResult Result;
2232 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
2233 !Result.hasSideEffects()) {
2234 if (Result.Val.isInt())
2235 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2236 Result.Val.getInt()));
2237 if (Result.Val.isFloat())
2238 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2239 Result.Val.getFloat()));
2240 }
2241
2242   // If the current long-double semantics are IEEE 128-bit, replace math
2243   // builtins of long double with their f128 equivalents.
2244   // TODO: This mutation should also be applied to targets other than PPC,
2245   // once the backend supports IEEE 128-bit style libcalls.
2246 if (getTarget().getTriple().isPPC64() &&
2247 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2248 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2249
2250 // If the builtin has been declared explicitly with an assembler label,
2251 // disable the specialized emitting below. Ideally we should communicate the
2252 // rename in IR, or at least avoid generating the intrinsic calls that are
2253 // likely to get lowered to the renamed library functions.
2254 const unsigned BuiltinIDIfNoAsmLabel =
2255 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2256
2257 // There are LLVM math intrinsics/instructions corresponding to math library
2258 // functions, except that the LLVM op will never set errno while the math
2259 // library might. Also, math builtins have the same semantics as their math
2260 // library twins. Thus, we can transform math library and builtin calls to
2261 // their LLVM counterparts if the call is marked 'const' (known to never set errno).
2262 if (FD->hasAttr<ConstAttr>()) {
2263 switch (BuiltinIDIfNoAsmLabel) {
2264 case Builtin::BIceil:
2265 case Builtin::BIceilf:
2266 case Builtin::BIceill:
2267 case Builtin::BI__builtin_ceil:
2268 case Builtin::BI__builtin_ceilf:
2269 case Builtin::BI__builtin_ceilf16:
2270 case Builtin::BI__builtin_ceill:
2271 case Builtin::BI__builtin_ceilf128:
2272 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2273 Intrinsic::ceil,
2274 Intrinsic::experimental_constrained_ceil));
2275
2276 case Builtin::BIcopysign:
2277 case Builtin::BIcopysignf:
2278 case Builtin::BIcopysignl:
2279 case Builtin::BI__builtin_copysign:
2280 case Builtin::BI__builtin_copysignf:
2281 case Builtin::BI__builtin_copysignf16:
2282 case Builtin::BI__builtin_copysignl:
2283 case Builtin::BI__builtin_copysignf128:
2284 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
2285
2286 case Builtin::BIcos:
2287 case Builtin::BIcosf:
2288 case Builtin::BIcosl:
2289 case Builtin::BI__builtin_cos:
2290 case Builtin::BI__builtin_cosf:
2291 case Builtin::BI__builtin_cosf16:
2292 case Builtin::BI__builtin_cosl:
2293 case Builtin::BI__builtin_cosf128:
2294 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2295 Intrinsic::cos,
2296 Intrinsic::experimental_constrained_cos));
2297
2298 case Builtin::BIexp:
2299 case Builtin::BIexpf:
2300 case Builtin::BIexpl:
2301 case Builtin::BI__builtin_exp:
2302 case Builtin::BI__builtin_expf:
2303 case Builtin::BI__builtin_expf16:
2304 case Builtin::BI__builtin_expl:
2305 case Builtin::BI__builtin_expf128:
2306 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2307 Intrinsic::exp,
2308 Intrinsic::experimental_constrained_exp));
2309
2310 case Builtin::BIexp2:
2311 case Builtin::BIexp2f:
2312 case Builtin::BIexp2l:
2313 case Builtin::BI__builtin_exp2:
2314 case Builtin::BI__builtin_exp2f:
2315 case Builtin::BI__builtin_exp2f16:
2316 case Builtin::BI__builtin_exp2l:
2317 case Builtin::BI__builtin_exp2f128:
2318 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2319 Intrinsic::exp2,
2320 Intrinsic::experimental_constrained_exp2));
2321
2322 case Builtin::BIfabs:
2323 case Builtin::BIfabsf:
2324 case Builtin::BIfabsl:
2325 case Builtin::BI__builtin_fabs:
2326 case Builtin::BI__builtin_fabsf:
2327 case Builtin::BI__builtin_fabsf16:
2328 case Builtin::BI__builtin_fabsl:
2329 case Builtin::BI__builtin_fabsf128:
2330 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
2331
2332 case Builtin::BIfloor:
2333 case Builtin::BIfloorf:
2334 case Builtin::BIfloorl:
2335 case Builtin::BI__builtin_floor:
2336 case Builtin::BI__builtin_floorf:
2337 case Builtin::BI__builtin_floorf16:
2338 case Builtin::BI__builtin_floorl:
2339 case Builtin::BI__builtin_floorf128:
2340 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2341 Intrinsic::floor,
2342 Intrinsic::experimental_constrained_floor));
2343
2344 case Builtin::BIfma:
2345 case Builtin::BIfmaf:
2346 case Builtin::BIfmal:
2347 case Builtin::BI__builtin_fma:
2348 case Builtin::BI__builtin_fmaf:
2349 case Builtin::BI__builtin_fmaf16:
2350 case Builtin::BI__builtin_fmal:
2351 case Builtin::BI__builtin_fmaf128:
2352 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2353 Intrinsic::fma,
2354 Intrinsic::experimental_constrained_fma));
2355
2356 case Builtin::BIfmax:
2357 case Builtin::BIfmaxf:
2358 case Builtin::BIfmaxl:
2359 case Builtin::BI__builtin_fmax:
2360 case Builtin::BI__builtin_fmaxf:
2361 case Builtin::BI__builtin_fmaxf16:
2362 case Builtin::BI__builtin_fmaxl:
2363 case Builtin::BI__builtin_fmaxf128:
2364 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2365 Intrinsic::maxnum,
2366 Intrinsic::experimental_constrained_maxnum));
2367
2368 case Builtin::BIfmin:
2369 case Builtin::BIfminf:
2370 case Builtin::BIfminl:
2371 case Builtin::BI__builtin_fmin:
2372 case Builtin::BI__builtin_fminf:
2373 case Builtin::BI__builtin_fminf16:
2374 case Builtin::BI__builtin_fminl:
2375 case Builtin::BI__builtin_fminf128:
2376 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2377 Intrinsic::minnum,
2378 Intrinsic::experimental_constrained_minnum));
2379
2380 // fmod() is a special-case. It maps to the frem instruction rather than an
2381 // LLVM intrinsic.
2382 case Builtin::BIfmod:
2383 case Builtin::BIfmodf:
2384 case Builtin::BIfmodl:
2385 case Builtin::BI__builtin_fmod:
2386 case Builtin::BI__builtin_fmodf:
2387 case Builtin::BI__builtin_fmodf16:
2388 case Builtin::BI__builtin_fmodl:
2389 case Builtin::BI__builtin_fmodf128: {
2390 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2391 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2392 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2393 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2394 }
2395
2396 case Builtin::BIlog:
2397 case Builtin::BIlogf:
2398 case Builtin::BIlogl:
2399 case Builtin::BI__builtin_log:
2400 case Builtin::BI__builtin_logf:
2401 case Builtin::BI__builtin_logf16:
2402 case Builtin::BI__builtin_logl:
2403 case Builtin::BI__builtin_logf128:
2404 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2405 Intrinsic::log,
2406 Intrinsic::experimental_constrained_log));
2407
2408 case Builtin::BIlog10:
2409 case Builtin::BIlog10f:
2410 case Builtin::BIlog10l:
2411 case Builtin::BI__builtin_log10:
2412 case Builtin::BI__builtin_log10f:
2413 case Builtin::BI__builtin_log10f16:
2414 case Builtin::BI__builtin_log10l:
2415 case Builtin::BI__builtin_log10f128:
2416 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2417 Intrinsic::log10,
2418 Intrinsic::experimental_constrained_log10));
2419
2420 case Builtin::BIlog2:
2421 case Builtin::BIlog2f:
2422 case Builtin::BIlog2l:
2423 case Builtin::BI__builtin_log2:
2424 case Builtin::BI__builtin_log2f:
2425 case Builtin::BI__builtin_log2f16:
2426 case Builtin::BI__builtin_log2l:
2427 case Builtin::BI__builtin_log2f128:
2428 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2429 Intrinsic::log2,
2430 Intrinsic::experimental_constrained_log2));
2431
2432 case Builtin::BInearbyint:
2433 case Builtin::BInearbyintf:
2434 case Builtin::BInearbyintl:
2435 case Builtin::BI__builtin_nearbyint:
2436 case Builtin::BI__builtin_nearbyintf:
2437 case Builtin::BI__builtin_nearbyintl:
2438 case Builtin::BI__builtin_nearbyintf128:
2439 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2440 Intrinsic::nearbyint,
2441 Intrinsic::experimental_constrained_nearbyint));
2442
2443 case Builtin::BIpow:
2444 case Builtin::BIpowf:
2445 case Builtin::BIpowl:
2446 case Builtin::BI__builtin_pow:
2447 case Builtin::BI__builtin_powf:
2448 case Builtin::BI__builtin_powf16:
2449 case Builtin::BI__builtin_powl:
2450 case Builtin::BI__builtin_powf128:
2451 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2452 Intrinsic::pow,
2453 Intrinsic::experimental_constrained_pow));
2454
2455 case Builtin::BIrint:
2456 case Builtin::BIrintf:
2457 case Builtin::BIrintl:
2458 case Builtin::BI__builtin_rint:
2459 case Builtin::BI__builtin_rintf:
2460 case Builtin::BI__builtin_rintf16:
2461 case Builtin::BI__builtin_rintl:
2462 case Builtin::BI__builtin_rintf128:
2463 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2464 Intrinsic::rint,
2465 Intrinsic::experimental_constrained_rint));
2466
2467 case Builtin::BIround:
2468 case Builtin::BIroundf:
2469 case Builtin::BIroundl:
2470 case Builtin::BI__builtin_round:
2471 case Builtin::BI__builtin_roundf:
2472 case Builtin::BI__builtin_roundf16:
2473 case Builtin::BI__builtin_roundl:
2474 case Builtin::BI__builtin_roundf128:
2475 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2476 Intrinsic::round,
2477 Intrinsic::experimental_constrained_round));
2478
2479 case Builtin::BIsin:
2480 case Builtin::BIsinf:
2481 case Builtin::BIsinl:
2482 case Builtin::BI__builtin_sin:
2483 case Builtin::BI__builtin_sinf:
2484 case Builtin::BI__builtin_sinf16:
2485 case Builtin::BI__builtin_sinl:
2486 case Builtin::BI__builtin_sinf128:
2487 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2488 Intrinsic::sin,
2489 Intrinsic::experimental_constrained_sin));
2490
2491 case Builtin::BIsqrt:
2492 case Builtin::BIsqrtf:
2493 case Builtin::BIsqrtl:
2494 case Builtin::BI__builtin_sqrt:
2495 case Builtin::BI__builtin_sqrtf:
2496 case Builtin::BI__builtin_sqrtf16:
2497 case Builtin::BI__builtin_sqrtl:
2498 case Builtin::BI__builtin_sqrtf128:
2499 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2500 Intrinsic::sqrt,
2501 Intrinsic::experimental_constrained_sqrt));
2502
2503 case Builtin::BItrunc:
2504 case Builtin::BItruncf:
2505 case Builtin::BItruncl:
2506 case Builtin::BI__builtin_trunc:
2507 case Builtin::BI__builtin_truncf:
2508 case Builtin::BI__builtin_truncf16:
2509 case Builtin::BI__builtin_truncl:
2510 case Builtin::BI__builtin_truncf128:
2511 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2512 Intrinsic::trunc,
2513 Intrinsic::experimental_constrained_trunc));
2514
2515 case Builtin::BIlround:
2516 case Builtin::BIlroundf:
2517 case Builtin::BIlroundl:
2518 case Builtin::BI__builtin_lround:
2519 case Builtin::BI__builtin_lroundf:
2520 case Builtin::BI__builtin_lroundl:
2521 case Builtin::BI__builtin_lroundf128:
2522 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2523 *this, E, Intrinsic::lround,
2524 Intrinsic::experimental_constrained_lround));
2525
2526 case Builtin::BIllround:
2527 case Builtin::BIllroundf:
2528 case Builtin::BIllroundl:
2529 case Builtin::BI__builtin_llround:
2530 case Builtin::BI__builtin_llroundf:
2531 case Builtin::BI__builtin_llroundl:
2532 case Builtin::BI__builtin_llroundf128:
2533 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2534 *this, E, Intrinsic::llround,
2535 Intrinsic::experimental_constrained_llround));
2536
2537 case Builtin::BIlrint:
2538 case Builtin::BIlrintf:
2539 case Builtin::BIlrintl:
2540 case Builtin::BI__builtin_lrint:
2541 case Builtin::BI__builtin_lrintf:
2542 case Builtin::BI__builtin_lrintl:
2543 case Builtin::BI__builtin_lrintf128:
2544 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2545 *this, E, Intrinsic::lrint,
2546 Intrinsic::experimental_constrained_lrint));
2547
2548 case Builtin::BIllrint:
2549 case Builtin::BIllrintf:
2550 case Builtin::BIllrintl:
2551 case Builtin::BI__builtin_llrint:
2552 case Builtin::BI__builtin_llrintf:
2553 case Builtin::BI__builtin_llrintl:
2554 case Builtin::BI__builtin_llrintf128:
2555 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2556 *this, E, Intrinsic::llrint,
2557 Intrinsic::experimental_constrained_llrint));
2558
2559 default:
2560 break;
2561 }
2562 }
2563
2564 switch (BuiltinIDIfNoAsmLabel) {
2565 default: break;
2566 case Builtin::BI__builtin___CFStringMakeConstantString:
2567 case Builtin::BI__builtin___NSStringMakeConstantString:
2568 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
2569 case Builtin::BI__builtin_stdarg_start:
2570 case Builtin::BI__builtin_va_start:
2571 case Builtin::BI__va_start:
2572 case Builtin::BI__builtin_va_end:
2573 return RValue::get(
2574 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2575 ? EmitScalarExpr(E->getArg(0))
2576 : EmitVAListRef(E->getArg(0)).getPointer(),
2577 BuiltinID != Builtin::BI__builtin_va_end));
2578 case Builtin::BI__builtin_va_copy: {
2579 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2580 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2581
2582 llvm::Type *Type = Int8PtrTy;
2583
2584 DstPtr = Builder.CreateBitCast(DstPtr, Type);
2585 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
2586 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
2587 {DstPtr, SrcPtr}));
2588 }
2589 case Builtin::BI__builtin_abs:
2590 case Builtin::BI__builtin_labs:
2591 case Builtin::BI__builtin_llabs: {
2592 // X < 0 ? -X : X
2593 // The negation has 'nsw' because abs of INT_MIN is undefined.
2594 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2595 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
2596 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
2597 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2598 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
2599 return RValue::get(Result);
2600 }
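// A plain-C sketch of the abs lowering above, assuming a 32-bit int argument;
// as the comment notes, negating INT_MIN is undefined, which is what the
// 'nsw' flag on the negation encodes:
//
//   int abs_sketch(int x) {
//     return x < 0 ? -x : x;  // undefined behaviour for x == INT_MIN
//   }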
2601 case Builtin::BI__builtin_complex: {
2602 Value *Real = EmitScalarExpr(E->getArg(0));
2603 Value *Imag = EmitScalarExpr(E->getArg(1));
2604 return RValue::getComplex({Real, Imag});
2605 }
2606 case Builtin::BI__builtin_conj:
2607 case Builtin::BI__builtin_conjf:
2608 case Builtin::BI__builtin_conjl:
2609 case Builtin::BIconj:
2610 case Builtin::BIconjf:
2611 case Builtin::BIconjl: {
2612 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2613 Value *Real = ComplexVal.first;
2614 Value *Imag = ComplexVal.second;
2615 Imag = Builder.CreateFNeg(Imag, "neg");
2616 return RValue::getComplex(std::make_pair(Real, Imag));
2617 }
2618 case Builtin::BI__builtin_creal:
2619 case Builtin::BI__builtin_crealf:
2620 case Builtin::BI__builtin_creall:
2621 case Builtin::BIcreal:
2622 case Builtin::BIcrealf:
2623 case Builtin::BIcreall: {
2624 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2625 return RValue::get(ComplexVal.first);
2626 }
2627
2628 case Builtin::BI__builtin_dump_struct: {
2629 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2630 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2631 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2632
2633 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2634 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2635
2636 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2637 QualType Arg0Type = Arg0->getType()->getPointeeType();
2638
2639 Value *RecordPtr = EmitScalarExpr(Arg0);
2640 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2641 {LLVMFuncType, Func}, 0);
2642 return RValue::get(Res);
2643 }
2644
2645 case Builtin::BI__builtin_preserve_access_index: {
2646 // Only enable the preserved access index region when debuginfo
2647 // is available, as debuginfo is needed to preserve user-level
2648 // access patterns.
2649 if (!getDebugInfo()) {
2650 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2651 return RValue::get(EmitScalarExpr(E->getArg(0)));
2652 }
2653
2654 // Nested builtin_preserve_access_index() not supported
2655 if (IsInPreservedAIRegion) {
2656 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2657 return RValue::get(EmitScalarExpr(E->getArg(0)));
2658 }
2659
2660 IsInPreservedAIRegion = true;
2661 Value *Res = EmitScalarExpr(E->getArg(0));
2662 IsInPreservedAIRegion = false;
2663 return RValue::get(Res);
2664 }
2665
2666 case Builtin::BI__builtin_cimag:
2667 case Builtin::BI__builtin_cimagf:
2668 case Builtin::BI__builtin_cimagl:
2669 case Builtin::BIcimag:
2670 case Builtin::BIcimagf:
2671 case Builtin::BIcimagl: {
2672 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2673 return RValue::get(ComplexVal.second);
2674 }
2675
2676 case Builtin::BI__builtin_clrsb:
2677 case Builtin::BI__builtin_clrsbl:
2678 case Builtin::BI__builtin_clrsbll: {
2679 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2680 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2681
2682 llvm::Type *ArgType = ArgValue->getType();
2683 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2684
2685 llvm::Type *ResultType = ConvertType(E->getType());
2686 Value *Zero = llvm::Constant::getNullValue(ArgType);
2687 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2688 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2689 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2690 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2691 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2692 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2693 "cast");
2694 return RValue::get(Result);
2695 }
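// A rough C-level sketch of the clrsb lowering above (32-bit case). The IR
// uses ctlz with the is-zero-undef flag set to false, so zero is handled; the
// explicit check below models that:
//
//   int clrsb_sketch(int x) {
//     unsigned v = x < 0 ? ~(unsigned)x : (unsigned)x;
//     int lz = (v == 0) ? 32 : __builtin_clz(v);  // ctlz with zero defined
//     return lz - 1;
//   }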
2696 case Builtin::BI__builtin_ctzs:
2697 case Builtin::BI__builtin_ctz:
2698 case Builtin::BI__builtin_ctzl:
2699 case Builtin::BI__builtin_ctzll: {
2700 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2701
2702 llvm::Type *ArgType = ArgValue->getType();
2703 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2704
2705 llvm::Type *ResultType = ConvertType(E->getType());
2706 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2707 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2708 if (Result->getType() != ResultType)
2709 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2710 "cast");
2711 return RValue::get(Result);
2712 }
2713 case Builtin::BI__builtin_clzs:
2714 case Builtin::BI__builtin_clz:
2715 case Builtin::BI__builtin_clzl:
2716 case Builtin::BI__builtin_clzll: {
2717 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2718
2719 llvm::Type *ArgType = ArgValue->getType();
2720 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2721
2722 llvm::Type *ResultType = ConvertType(E->getType());
2723 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2724 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2725 if (Result->getType() != ResultType)
2726 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2727 "cast");
2728 return RValue::get(Result);
2729 }
2730 case Builtin::BI__builtin_ffs:
2731 case Builtin::BI__builtin_ffsl:
2732 case Builtin::BI__builtin_ffsll: {
2733 // ffs(x) -> x ? cttz(x) + 1 : 0
2734 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2735
2736 llvm::Type *ArgType = ArgValue->getType();
2737 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2738
2739 llvm::Type *ResultType = ConvertType(E->getType());
2740 Value *Tmp =
2741 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2742 llvm::ConstantInt::get(ArgType, 1));
2743 Value *Zero = llvm::Constant::getNullValue(ArgType);
2744 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2745 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2746 if (Result->getType() != ResultType)
2747 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2748 "cast");
2749 return RValue::get(Result);
2750 }
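// The ffs lowering above in plain C terms (sketch, 32-bit int); the select on
// 'iszero' makes the zero case well defined even though cttz itself is called
// with the is-zero-undef flag set:
//
//   int ffs_sketch(int x) {
//     return x == 0 ? 0 : __builtin_ctz(x) + 1;
//   }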
2751 case Builtin::BI__builtin_parity:
2752 case Builtin::BI__builtin_parityl:
2753 case Builtin::BI__builtin_parityll: {
2754 // parity(x) -> ctpop(x) & 1
2755 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2756
2757 llvm::Type *ArgType = ArgValue->getType();
2758 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2759
2760 llvm::Type *ResultType = ConvertType(E->getType());
2761 Value *Tmp = Builder.CreateCall(F, ArgValue);
2762 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2763 if (Result->getType() != ResultType)
2764 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2765 "cast");
2766 return RValue::get(Result);
2767 }
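// The parity lowering above as a one-line C sketch (32-bit unsigned):
//
//   unsigned parity_sketch(unsigned x) { return __builtin_popcount(x) & 1; }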
2768 case Builtin::BI__lzcnt16:
2769 case Builtin::BI__lzcnt:
2770 case Builtin::BI__lzcnt64: {
2771 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2772
2773 llvm::Type *ArgType = ArgValue->getType();
2774 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2775
2776 llvm::Type *ResultType = ConvertType(E->getType());
2777 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2778 if (Result->getType() != ResultType)
2779 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2780 "cast");
2781 return RValue::get(Result);
2782 }
2783 case Builtin::BI__popcnt16:
2784 case Builtin::BI__popcnt:
2785 case Builtin::BI__popcnt64:
2786 case Builtin::BI__builtin_popcount:
2787 case Builtin::BI__builtin_popcountl:
2788 case Builtin::BI__builtin_popcountll: {
2789 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2790
2791 llvm::Type *ArgType = ArgValue->getType();
2792 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2793
2794 llvm::Type *ResultType = ConvertType(E->getType());
2795 Value *Result = Builder.CreateCall(F, ArgValue);
2796 if (Result->getType() != ResultType)
2797 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2798 "cast");
2799 return RValue::get(Result);
2800 }
2801 case Builtin::BI__builtin_unpredictable: {
2802 // Always return the argument of __builtin_unpredictable. LLVM does not
2803 // handle this builtin. Metadata for this builtin should be added directly
2804 // to instructions such as branches or switches that use it.
2805 return RValue::get(EmitScalarExpr(E->getArg(0)));
2806 }
2807 case Builtin::BI__builtin_expect: {
2808 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2809 llvm::Type *ArgType = ArgValue->getType();
2810
2811 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2812 // Don't generate llvm.expect on -O0 as the backend won't use it for
2813 // anything.
2814 // Note, we still IRGen ExpectedValue because it could have side-effects.
2815 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2816 return RValue::get(ArgValue);
2817
2818 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2819 Value *Result =
2820 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2821 return RValue::get(Result);
2822 }
2823 case Builtin::BI__builtin_expect_with_probability: {
2824 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2825 llvm::Type *ArgType = ArgValue->getType();
2826
2827 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2828 llvm::APFloat Probability(0.0);
2829 const Expr *ProbArg = E->getArg(2);
2830 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2831 assert(EvalSucceed && "probability should be able to evaluate as float");
2832 (void)EvalSucceed;
2833 bool LoseInfo = false;
2834 Probability.convert(llvm::APFloat::IEEEdouble(),
2835 llvm::RoundingMode::Dynamic, &LoseInfo);
2836 llvm::Type *Ty = ConvertType(ProbArg->getType());
2837 Constant *Confidence = ConstantFP::get(Ty, Probability);
2838 // Don't generate llvm.expect.with.probability on -O0 as the backend
2839 // won't use it for anything.
2840 // Note, we still IRGen ExpectedValue because it could have side-effects.
2841 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2842 return RValue::get(ArgValue);
2843
2844 Function *FnExpect =
2845 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2846 Value *Result = Builder.CreateCall(
2847 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2848 return RValue::get(Result);
2849 }
2850 case Builtin::BI__builtin_assume_aligned: {
2851 const Expr *Ptr = E->getArg(0);
2852 Value *PtrValue = EmitScalarExpr(Ptr);
2853 Value *OffsetValue =
2854 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2855
2856 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2857 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2858 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2859 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2860 llvm::Value::MaximumAlignment);
2861
2862 emitAlignmentAssumption(PtrValue, Ptr,
2863 /*The expr loc is sufficient.*/ SourceLocation(),
2864 AlignmentCI, OffsetValue);
2865 return RValue::get(PtrValue);
2866 }
2867 case Builtin::BI__assume:
2868 case Builtin::BI__builtin_assume: {
2869 if (E->getArg(0)->HasSideEffects(getContext()))
2870 return RValue::get(nullptr);
2871
2872 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2873 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2874 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2875 }
2876 case Builtin::BI__arithmetic_fence: {
2877 // Create the builtin call if FastMath is selected and the target
2878 // supports the builtin; otherwise just return the argument.
2879 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2880 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
2881 bool isArithmeticFenceEnabled =
2882 FMF.allowReassoc() &&
2883 getContext().getTargetInfo().checkArithmeticFenceSupported();
2884 QualType ArgType = E->getArg(0)->getType();
2885 if (ArgType->isComplexType()) {
2886 if (isArithmeticFenceEnabled) {
2887 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
2888 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2889 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
2890 ConvertType(ElementType));
2891 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
2892 ConvertType(ElementType));
2893 return RValue::getComplex(std::make_pair(Real, Imag));
2894 }
2895 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2896 Value *Real = ComplexVal.first;
2897 Value *Imag = ComplexVal.second;
2898 return RValue::getComplex(std::make_pair(Real, Imag));
2899 }
2900 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2901 if (isArithmeticFenceEnabled)
2902 return RValue::get(
2903 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
2904 return RValue::get(ArgValue);
2905 }
2906 case Builtin::BI__builtin_bswap16:
2907 case Builtin::BI__builtin_bswap32:
2908 case Builtin::BI__builtin_bswap64: {
2909 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2910 }
2911 case Builtin::BI__builtin_bitreverse8:
2912 case Builtin::BI__builtin_bitreverse16:
2913 case Builtin::BI__builtin_bitreverse32:
2914 case Builtin::BI__builtin_bitreverse64: {
2915 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2916 }
2917 case Builtin::BI__builtin_rotateleft8:
2918 case Builtin::BI__builtin_rotateleft16:
2919 case Builtin::BI__builtin_rotateleft32:
2920 case Builtin::BI__builtin_rotateleft64:
2921 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2922 case Builtin::BI_rotl16:
2923 case Builtin::BI_rotl:
2924 case Builtin::BI_lrotl:
2925 case Builtin::BI_rotl64:
2926 return emitRotate(E, false);
2927
2928 case Builtin::BI__builtin_rotateright8:
2929 case Builtin::BI__builtin_rotateright16:
2930 case Builtin::BI__builtin_rotateright32:
2931 case Builtin::BI__builtin_rotateright64:
2932 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2933 case Builtin::BI_rotr16:
2934 case Builtin::BI_rotr:
2935 case Builtin::BI_lrotr:
2936 case Builtin::BI_rotr64:
2937 return emitRotate(E, true);
2938
2939 case Builtin::BI__builtin_constant_p: {
2940 llvm::Type *ResultType = ConvertType(E->getType());
2941
2942 const Expr *Arg = E->getArg(0);
2943 QualType ArgType = Arg->getType();
2944 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2945 // and likely a mistake.
2946 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2947 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2948 // Per the GCC documentation, only numeric constants are recognized after
2949 // inlining.
2950 return RValue::get(ConstantInt::get(ResultType, 0));
2951
2952 if (Arg->HasSideEffects(getContext()))
2953 // The argument is unevaluated, so be conservative if it might have
2954 // side-effects.
2955 return RValue::get(ConstantInt::get(ResultType, 0));
2956
2957 Value *ArgValue = EmitScalarExpr(Arg);
2958 if (ArgType->isObjCObjectPointerType()) {
2959 // Convert Objective-C objects to id because we cannot distinguish between
2960 // LLVM types for Obj-C classes as they are opaque.
2961 ArgType = CGM.getContext().getObjCIdType();
2962 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2963 }
2964 Function *F =
2965 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2966 Value *Result = Builder.CreateCall(F, ArgValue);
2967 if (Result->getType() != ResultType)
2968 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2969 return RValue::get(Result);
2970 }
2971 case Builtin::BI__builtin_dynamic_object_size:
2972 case Builtin::BI__builtin_object_size: {
2973 unsigned Type =
2974 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2975 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2976
2977 // We pass this builtin onto the optimizer so that it can figure out the
2978 // object size in more complex cases.
2979 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2980 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2981 /*EmittedE=*/nullptr, IsDynamic));
2982 }
2983 case Builtin::BI__builtin_prefetch: {
2984 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
2985 // FIXME: Technically these constants should be of type 'int', yes?
2986 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2987 llvm::ConstantInt::get(Int32Ty, 0);
2988 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2989 llvm::ConstantInt::get(Int32Ty, 3);
2990 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2991 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2992 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2993 }
2994 case Builtin::BI__builtin_readcyclecounter: {
2995 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2996 return RValue::get(Builder.CreateCall(F));
2997 }
2998 case Builtin::BI__builtin___clear_cache: {
2999 Value *Begin = EmitScalarExpr(E->getArg(0));
3000 Value *End = EmitScalarExpr(E->getArg(1));
3001 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3002 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3003 }
3004 case Builtin::BI__builtin_trap:
3005 return RValue::get(EmitTrapCall(Intrinsic::trap));
3006 case Builtin::BI__debugbreak:
3007 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
3008 case Builtin::BI__builtin_unreachable: {
3009 EmitUnreachable(E->getExprLoc());
3010
3011 // We do need to preserve an insertion point.
3012 EmitBlock(createBasicBlock("unreachable.cont"));
3013
3014 return RValue::get(nullptr);
3015 }
3016
3017 case Builtin::BI__builtin_powi:
3018 case Builtin::BI__builtin_powif:
3019 case Builtin::BI__builtin_powil: {
3020 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3021 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3022
3023 if (Builder.getIsFPConstrained()) {
3024 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3025 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3026 Src0->getType());
3027 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3028 }
3029
3030 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3031 { Src0->getType(), Src1->getType() });
3032 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3033 }
3034 case Builtin::BI__builtin_isgreater:
3035 case Builtin::BI__builtin_isgreaterequal:
3036 case Builtin::BI__builtin_isless:
3037 case Builtin::BI__builtin_islessequal:
3038 case Builtin::BI__builtin_islessgreater:
3039 case Builtin::BI__builtin_isunordered: {
3040 // Ordered comparisons: we know the arguments to these are matching scalar
3041 // floating point values.
3042 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3043 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3044 Value *LHS = EmitScalarExpr(E->getArg(0));
3045 Value *RHS = EmitScalarExpr(E->getArg(1));
3046
3047 switch (BuiltinID) {
3048 default: llvm_unreachable("Unknown ordered comparison");
3049 case Builtin::BI__builtin_isgreater:
3050 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3051 break;
3052 case Builtin::BI__builtin_isgreaterequal:
3053 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3054 break;
3055 case Builtin::BI__builtin_isless:
3056 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3057 break;
3058 case Builtin::BI__builtin_islessequal:
3059 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3060 break;
3061 case Builtin::BI__builtin_islessgreater:
3062 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3063 break;
3064 case Builtin::BI__builtin_isunordered:
3065 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3066 break;
3067 }
3068 // ZExt bool to int type.
3069 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3070 }
3071 case Builtin::BI__builtin_isnan: {
3072 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3073 Value *V = EmitScalarExpr(E->getArg(0));
3074 llvm::Type *Ty = V->getType();
3075 const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
3076 if (!Builder.getIsFPConstrained() ||
3077 Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
3078 !Ty->isIEEE()) {
3079 V = Builder.CreateFCmpUNO(V, V, "cmp");
3080 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3081 }
3082
3083 if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
3084 return RValue::get(Result);
3085
3086 // NaN has all exp bits set and a nonzero significand. Therefore:
3087 // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
3088 unsigned bitsize = Ty->getScalarSizeInBits();
3089 llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
3090 Value *IntV = Builder.CreateBitCast(V, IntTy);
3091 APInt AndMask = APInt::getSignedMaxValue(bitsize);
3092 Value *AbsV =
3093 Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
3094 APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
3095 Value *Sub =
3096 Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
3097 // V = sign bit (Sub) <=> V = (Sub < 0)
3098 V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
3099 if (bitsize > 32)
3100 V = Builder.CreateTrunc(V, ConvertType(E->getType()));
3101 return RValue::get(V);
3102 }
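// A worked example of the integer bit trick above for a 32-bit float:
//   exp mask = bits of +Inf = 0x7F800000
//   quiet NaN 0x7FC00000: abs bits 0x7FC00000,
//     0x7F800000 - 0x7FC00000 < 0  -> sign bit set  -> isnan == 1
//   1.0f      0x3F800000: abs bits 0x3F800000,
//     0x7F800000 - 0x3F800000 = 0x40000000 >= 0     -> isnan == 0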
3103
3104 case Builtin::BI__builtin_elementwise_abs: {
3105 Value *Op0 = EmitScalarExpr(E->getArg(0));
3106 Value *Result;
3107 if (Op0->getType()->isIntOrIntVectorTy())
3108 Result = Builder.CreateBinaryIntrinsic(
3109 llvm::Intrinsic::abs, Op0, Builder.getFalse(), nullptr, "elt.abs");
3110 else
3111 Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::fabs, Op0, nullptr,
3112 "elt.abs");
3113 return RValue::get(Result);
3114 }
3115 case Builtin::BI__builtin_elementwise_max: {
3116 Value *Op0 = EmitScalarExpr(E->getArg(0));
3117 Value *Op1 = EmitScalarExpr(E->getArg(1));
3118 Value *Result;
3119 if (Op0->getType()->isIntOrIntVectorTy()) {
3120 QualType Ty = E->getArg(0)->getType();
3121 if (auto *VecTy = Ty->getAs<VectorType>())
3122 Ty = VecTy->getElementType();
3123 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3124 ? llvm::Intrinsic::smax
3125 : llvm::Intrinsic::umax,
3126 Op0, Op1, nullptr, "elt.max");
3127 } else
3128 Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
3129 return RValue::get(Result);
3130 }
3131 case Builtin::BI__builtin_elementwise_min: {
3132 Value *Op0 = EmitScalarExpr(E->getArg(0));
3133 Value *Op1 = EmitScalarExpr(E->getArg(1));
3134 Value *Result;
3135 if (Op0->getType()->isIntOrIntVectorTy()) {
3136 QualType Ty = E->getArg(0)->getType();
3137 if (auto *VecTy = Ty->getAs<VectorType>())
3138 Ty = VecTy->getElementType();
3139 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3140 ? llvm::Intrinsic::smin
3141 : llvm::Intrinsic::umin,
3142 Op0, Op1, nullptr, "elt.min");
3143 } else
3144 Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
3145 return RValue::get(Result);
3146 }
3147
3148 case Builtin::BI__builtin_reduce_max: {
3149 auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
3150 if (IrTy->isIntOrIntVectorTy()) {
3151 if (auto *VecTy = QT->getAs<VectorType>())
3152 QT = VecTy->getElementType();
3153 if (QT->isSignedIntegerType())
3154 return llvm::Intrinsic::vector_reduce_smax;
3155 else
3156 return llvm::Intrinsic::vector_reduce_umax;
3157 }
3158 return llvm::Intrinsic::vector_reduce_fmax;
3159 };
3160 Value *Op0 = EmitScalarExpr(E->getArg(0));
3161 Value *Result = Builder.CreateUnaryIntrinsic(
3162 GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
3163 "rdx.min");
3164 return RValue::get(Result);
3165 }
3166
3167 case Builtin::BI__builtin_reduce_min: {
3168 auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
3169 if (IrTy->isIntOrIntVectorTy()) {
3170 if (auto *VecTy = QT->getAs<VectorType>())
3171 QT = VecTy->getElementType();
3172 if (QT->isSignedIntegerType())
3173 return llvm::Intrinsic::vector_reduce_smin;
3174 else
3175 return llvm::Intrinsic::vector_reduce_umin;
3176 }
3177 return llvm::Intrinsic::vector_reduce_fmin;
3178 };
3179 Value *Op0 = EmitScalarExpr(E->getArg(0));
3180 Value *Result = Builder.CreateUnaryIntrinsic(
3181 GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
3182 "rdx.min");
3183 return RValue::get(Result);
3184 }
3185
3186 case Builtin::BI__builtin_matrix_transpose: {
3187 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3188 Value *MatValue = EmitScalarExpr(E->getArg(0));
3189 MatrixBuilder<CGBuilderTy> MB(Builder);
3190 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
3191 MatrixTy->getNumColumns());
3192 return RValue::get(Result);
3193 }
3194
3195 case Builtin::BI__builtin_matrix_column_major_load: {
3196 MatrixBuilder<CGBuilderTy> MB(Builder);
3197 // Emit everything that isn't dependent on the first parameter type
3198 Value *Stride = EmitScalarExpr(E->getArg(3));
3199 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
3200 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
3201 assert(PtrTy && "arg0 must be of pointer type");
3202 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3203
3204 Address Src = EmitPointerWithAlignment(E->getArg(0));
3205 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3206 E->getArg(0)->getExprLoc(), FD, 0);
3207 Value *Result = MB.CreateColumnMajorLoad(
3208 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
3209 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
3210 "matrix");
3211 return RValue::get(Result);
3212 }
3213
3214 case Builtin::BI__builtin_matrix_column_major_store: {
3215 MatrixBuilder<CGBuilderTy> MB(Builder);
3216 Value *Matrix = EmitScalarExpr(E->getArg(0));
3217 Address Dst = EmitPointerWithAlignment(E->getArg(1));
3218 Value *Stride = EmitScalarExpr(E->getArg(2));
3219
3220 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3221 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
3222 assert(PtrTy && "arg1 must be of pointer type");
3223 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3224
3225 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
3226 E->getArg(1)->getExprLoc(), FD, 0);
3227 Value *Result = MB.CreateColumnMajorStore(
3228 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
3229 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
3230 return RValue::get(Result);
3231 }
3232
3233 case Builtin::BIfinite:
3234 case Builtin::BI__finite:
3235 case Builtin::BIfinitef:
3236 case Builtin::BI__finitef:
3237 case Builtin::BIfinitel:
3238 case Builtin::BI__finitel:
3239 case Builtin::BI__builtin_isinf:
3240 case Builtin::BI__builtin_isfinite: {
3241 // isinf(x) --> fabs(x) == infinity
3242 // isfinite(x) --> fabs(x) != infinity
3243 // x != NaN via the ordered compare in either case.
3244 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3245 Value *V = EmitScalarExpr(E->getArg(0));
3246 llvm::Type *Ty = V->getType();
3247 if (!Builder.getIsFPConstrained() ||
3248 Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
3249 !Ty->isIEEE()) {
3250 Value *Fabs = EmitFAbs(*this, V);
3251 Constant *Infinity = ConstantFP::getInfinity(V->getType());
3252 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
3253 ? CmpInst::FCMP_OEQ
3254 : CmpInst::FCMP_ONE;
3255 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
3256 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
3257 }
3258
3259 if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
3260 return RValue::get(Result);
3261
3262 // Inf values have all exp bits set and a zero significand. Therefore:
3263 // isinf(V) == ((V << 1) == ((exp mask) << 1))
3264 // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
3265 unsigned bitsize = Ty->getScalarSizeInBits();
3266 llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
3267 Value *IntV = Builder.CreateBitCast(V, IntTy);
3268 Value *Shl1 = Builder.CreateShl(IntV, 1);
3269 const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
3270 APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
3271 Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
3272 if (BuiltinID == Builtin::BI__builtin_isinf)
3273 V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
3274 else
3275 V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
3276 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3277 }
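// A worked example of the shift trick above for a 32-bit float, where
// (exp mask) << 1 == 0x7F800000 << 1 == 0xFF000000:
//   +Inf  0x7F800000 << 1 == 0xFF000000  -> equal             -> isinf
//   -Inf  0xFF800000 << 1 == 0xFF000000  -> equal (sign bit shifted out)
//   1.0f  0x3F800000 << 1 == 0x7F000000  -> ult 0xFF000000    -> isfinite
//   NaN   0x7FC00000 << 1 == 0xFF800000  -> not equal, not ult -> neither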
3278
3279 case Builtin::BI__builtin_isinf_sign: {
3280 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
3281 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3282 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3283 Value *Arg = EmitScalarExpr(E->getArg(0));
3284 Value *AbsArg = EmitFAbs(*this, Arg);
3285 Value *IsInf = Builder.CreateFCmpOEQ(
3286 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
3287 Value *IsNeg = EmitSignBit(*this, Arg);
3288
3289 llvm::Type *IntTy = ConvertType(E->getType());
3290 Value *Zero = Constant::getNullValue(IntTy);
3291 Value *One = ConstantInt::get(IntTy, 1);
3292 Value *NegativeOne = ConstantInt::get(IntTy, -1);
3293 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
3294 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
3295 return RValue::get(Result);
3296 }
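// Rough C equivalent of the isinf_sign lowering above (sketch; needs <math.h>):
//
//   int isinf_sign_sketch(double x) {
//     if (fabs(x) != INFINITY) return 0;
//     return signbit(x) ? -1 : 1;
//   }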
3297
3298 case Builtin::BI__builtin_isnormal: {
3299 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
3300 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3301 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3302 Value *V = EmitScalarExpr(E->getArg(0));
3303 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
3304
3305 Value *Abs = EmitFAbs(*this, V);
3306 Value *IsLessThanInf =
3307 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
3308 APFloat Smallest = APFloat::getSmallestNormalized(
3309 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
3310 Value *IsNormal =
3311 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
3312 "isnormal");
3313 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
3314 V = Builder.CreateAnd(V, IsNormal, "and");
3315 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3316 }
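// Rough C equivalent of the isnormal lowering above (sketch, float case;
// FLT_MIN is the smallest normalized positive float):
//
//   int isnormal_sketch(float x) {
//     return x == x && fabsf(x) < INFINITY && fabsf(x) >= FLT_MIN;
//   }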
3317
3318 case Builtin::BI__builtin_flt_rounds: {
3319 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
3320
3321 llvm::Type *ResultType = ConvertType(E->getType());
3322 Value *Result = Builder.CreateCall(F);
3323 if (Result->getType() != ResultType)
3324 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3325 "cast");
3326 return RValue::get(Result);
3327 }
3328
3329 case Builtin::BI__builtin_fpclassify: {
3330 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3331 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3332 Value *V = EmitScalarExpr(E->getArg(5));
3333 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
3334
3335 // Create Result
3336 BasicBlock *Begin = Builder.GetInsertBlock();
3337 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3338 Builder.SetInsertPoint(End);
3339 PHINode *Result =
3340 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3341 "fpclassify_result");
3342
3343 // if (V==0) return FP_ZERO
3344 Builder.SetInsertPoint(Begin);
3345 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3346 "iszero");
3347 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3348 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3349 Builder.CreateCondBr(IsZero, End, NotZero);
3350 Result->addIncoming(ZeroLiteral, Begin);
3351
3352 // if (V != V) return FP_NAN
3353 Builder.SetInsertPoint(NotZero);
3354 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3355 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3356 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3357 Builder.CreateCondBr(IsNan, End, NotNan);
3358 Result->addIncoming(NanLiteral, NotZero);
3359
3360 // if (fabs(V) == infinity) return FP_INFINITY
3361 Builder.SetInsertPoint(NotNan);
3362 Value *VAbs = EmitFAbs(*this, V);
3363 Value *IsInf =
3364 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3365 "isinf");
3366 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3367 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3368 Builder.CreateCondBr(IsInf, End, NotInf);
3369 Result->addIncoming(InfLiteral, NotNan);
3370
3371 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3372 Builder.SetInsertPoint(NotInf);
3373 APFloat Smallest = APFloat::getSmallestNormalized(
3374 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3375 Value *IsNormal =
3376 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3377 "isnormal");
3378 Value *NormalResult =
3379 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3380 EmitScalarExpr(E->getArg(3)));
3381 Builder.CreateBr(End);
3382 Result->addIncoming(NormalResult, NotInf);
3383
3384 // return Result
3385 Builder.SetInsertPoint(End);
3386 return RValue::get(Result);
3387 }
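// The basic-block chain built above mirrors this C-level sketch of
// __builtin_fpclassify(nan, inf, normal, subnormal, zero, x):
//
//   if (x == 0)                return zero;       // arg 4
//   if (x != x)                return nan;        // arg 0
//   if (fabs(x) == INFINITY)   return inf;        // arg 1
//   if (fabs(x) >= MIN_NORMAL) return normal;     // arg 2
//   return subnormal;                             // arg 3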
3388
3389 case Builtin::BIalloca:
3390 case Builtin::BI_alloca:
3391 case Builtin::BI__builtin_alloca: {
3392 Value *Size = EmitScalarExpr(E->getArg(0));
3393 const TargetInfo &TI = getContext().getTargetInfo();
3394 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3395 const Align SuitableAlignmentInBytes =
3396 CGM.getContext()
3397 .toCharUnitsFromBits(TI.getSuitableAlign())
3398 .getAsAlign();
3399 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3400 AI->setAlignment(SuitableAlignmentInBytes);
3401 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3402 return RValue::get(AI);
3403 }
3404
3405 case Builtin::BI__builtin_alloca_with_align: {
3406 Value *Size = EmitScalarExpr(E->getArg(0));
3407 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3408 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3409 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3410 const Align AlignmentInBytes =
3411 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3412 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3413 AI->setAlignment(AlignmentInBytes);
3414 initializeAlloca(*this, AI, Size, AlignmentInBytes);
3415 return RValue::get(AI);
3416 }
3417
3418 case Builtin::BIbzero:
3419 case Builtin::BI__builtin_bzero: {
3420 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3421 Value *SizeVal = EmitScalarExpr(E->getArg(1));
3422 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3423 E->getArg(0)->getExprLoc(), FD, 0);
3424 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3425 return RValue::get(nullptr);
3426 }
3427 case Builtin::BImemcpy:
3428 case Builtin::BI__builtin_memcpy:
3429 case Builtin::BImempcpy:
3430 case Builtin::BI__builtin_mempcpy: {
3431 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3432 Address Src = EmitPointerWithAlignment(E->getArg(1));
3433 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3434 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3435 E->getArg(0)->getExprLoc(), FD, 0);
3436 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3437 E->getArg(1)->getExprLoc(), FD, 1);
3438 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3439 if (BuiltinID == Builtin::BImempcpy ||
3440 BuiltinID == Builtin::BI__builtin_mempcpy)
3441 return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
3442 Dest.getPointer(), SizeVal));
3443 else
3444 return RValue::get(Dest.getPointer());
3445 }
3446
3447 case Builtin::BI__builtin_memcpy_inline: {
3448 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3449 Address Src = EmitPointerWithAlignment(E->getArg(1));
3450 uint64_t Size =
3451 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3452 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3453 E->getArg(0)->getExprLoc(), FD, 0);
3454 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3455 E->getArg(1)->getExprLoc(), FD, 1);
3456 Builder.CreateMemCpyInline(Dest, Src, Size);
3457 return RValue::get(nullptr);
3458 }
3459
3460 case Builtin::BI__builtin_char_memchr:
3461 BuiltinID = Builtin::BI__builtin_memchr;
3462 break;
3463
3464 case Builtin::BI__builtin___memcpy_chk: {
3465 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3466 Expr::EvalResult SizeResult, DstSizeResult;
3467 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3468 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3469 break;
3470 llvm::APSInt Size = SizeResult.Val.getInt();
3471 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3472 if (Size.ugt(DstSize))
3473 break;
3474 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3475 Address Src = EmitPointerWithAlignment(E->getArg(1));
3476 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3477 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3478 return RValue::get(Dest.getPointer());
3479 }
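// Example of the fold above (sketch): with both size arguments constant, as in
//   __builtin___memcpy_chk(dst, src, 16, 32)   // 16 <= 32
// the call is emitted as a plain 16-byte memcpy; if either size cannot be
// evaluated, or the copy size exceeds the destination size, the 'break' falls
// through to the normal library-call emission path instead.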
3480
3481 case Builtin::BI__builtin_objc_memmove_collectable: {
3482 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3483 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3484 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3485 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3486 DestAddr, SrcAddr, SizeVal);
3487 return RValue::get(DestAddr.getPointer());
3488 }
3489
3490 case Builtin::BI__builtin___memmove_chk: {
3491 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3492 Expr::EvalResult SizeResult, DstSizeResult;
3493 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3494 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3495 break;
3496 llvm::APSInt Size = SizeResult.Val.getInt();
3497 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3498 if (Size.ugt(DstSize))
3499 break;
3500 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3501 Address Src = EmitPointerWithAlignment(E->getArg(1));
3502 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3503 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3504 return RValue::get(Dest.getPointer());
3505 }
3506
3507 case Builtin::BImemmove:
3508 case Builtin::BI__builtin_memmove: {
3509 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3510 Address Src = EmitPointerWithAlignment(E->getArg(1));
3511 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3512 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3513 E->getArg(0)->getExprLoc(), FD, 0);
3514 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3515 E->getArg(1)->getExprLoc(), FD, 1);
3516 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3517 return RValue::get(Dest.getPointer());
3518 }
3519 case Builtin::BImemset:
3520 case Builtin::BI__builtin_memset: {
3521 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3522 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3523 Builder.getInt8Ty());
3524 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3525 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3526 E->getArg(0)->getExprLoc(), FD, 0);
3527 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3528 return RValue::get(Dest.getPointer());
3529 }
3530 case Builtin::BI__builtin___memset_chk: {
3531 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3532 Expr::EvalResult SizeResult, DstSizeResult;
3533 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3534 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3535 break;
3536 llvm::APSInt Size = SizeResult.Val.getInt();
3537 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3538 if (Size.ugt(DstSize))
3539 break;
3540 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3541 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3542 Builder.getInt8Ty());
3543 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3544 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3545 return RValue::get(Dest.getPointer());
3546 }
3547 case Builtin::BI__builtin_wmemchr: {
3548 // The MSVC runtime library does not provide a definition of wmemchr, so we
3549 // need an inline implementation.
3550 if (!getTarget().getTriple().isOSMSVCRT())
3551 break;
3552
3553 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3554 Value *Str = EmitScalarExpr(E->getArg(0));
3555 Value *Chr = EmitScalarExpr(E->getArg(1));
3556 Value *Size = EmitScalarExpr(E->getArg(2));
3557
3558 BasicBlock *Entry = Builder.GetInsertBlock();
3559 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
3560 BasicBlock *Next = createBasicBlock("wmemchr.next");
3561 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
3562 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3563 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
3564
3565 EmitBlock(CmpEq);
3566 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
3567 StrPhi->addIncoming(Str, Entry);
3568 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3569 SizePhi->addIncoming(Size, Entry);
3570 CharUnits WCharAlign =
3571 getContext().getTypeAlignInChars(getContext().WCharTy);
3572 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
3573 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
3574 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
3575 Builder.CreateCondBr(StrEqChr, Exit, Next);
3576
3577 EmitBlock(Next);
3578 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
3579 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3580 Value *NextSizeEq0 =
3581 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3582 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
3583 StrPhi->addIncoming(NextStr, Next);
3584 SizePhi->addIncoming(NextSize, Next);
3585
3586 EmitBlock(Exit);
3587 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
3588 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
3589 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
3590 Ret->addIncoming(FoundChr, CmpEq);
3591 return RValue::get(Ret);
3592 }
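// The inline expansion above behaves like this C sketch of wmemchr, returning
// a null pointer when the character is not found within 'n' elements:
//
//   wchar_t *wmemchr_sketch(const wchar_t *s, wchar_t c, size_t n) {
//     for (; n != 0; ++s, --n)
//       if (*s == c)
//         return (wchar_t *)s;
//     return 0;
//   }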
3593 case Builtin::BI__builtin_wmemcmp: {
3594 // The MSVC runtime library does not provide a definition of wmemcmp, so we
3595 // need an inline implementation.
3596 if (!getTarget().getTriple().isOSMSVCRT())
3597 break;
3598
3599 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3600
3601 Value *Dst = EmitScalarExpr(E->getArg(0));
3602 Value *Src = EmitScalarExpr(E->getArg(1));
3603 Value *Size = EmitScalarExpr(E->getArg(2));
3604
3605 BasicBlock *Entry = Builder.GetInsertBlock();
3606 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
3607 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
3608 BasicBlock *Next = createBasicBlock("wmemcmp.next");
3609 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
3610 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3611 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
3612
3613 EmitBlock(CmpGT);
3614 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
3615 DstPhi->addIncoming(Dst, Entry);
3616 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
3617 SrcPhi->addIncoming(Src, Entry);
3618 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3619 SizePhi->addIncoming(Size, Entry);
3620 CharUnits WCharAlign =
3621 getContext().getTypeAlignInChars(getContext().WCharTy);
3622 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
3623 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
3624 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
3625 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
3626
3627 EmitBlock(CmpLT);
3628 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
3629 Builder.CreateCondBr(DstLtSrc, Exit, Next);
3630
3631 EmitBlock(Next);
3632 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
3633 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
3634 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3635 Value *NextSizeEq0 =
3636 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3637 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
3638 DstPhi->addIncoming(NextDst, Next);
3639 SrcPhi->addIncoming(NextSrc, Next);
3640 SizePhi->addIncoming(NextSize, Next);
3641
3642 EmitBlock(Exit);
3643 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
3644 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
3645 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
3646 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
3647 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
3648 return RValue::get(Ret);
3649 }
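Likewise, a hedged sketch of the three-way comparison loop built above; note that the element comparison is unsigned, matching the CreateICmpUGT/CreateICmpULT calls:

    #include <stddef.h>
    // wmemcmp reference loop: 1 via the CmpGT exit, -1 via the CmpLT exit,
    // 0 when the size runs out (the Entry and Next exits).
    static int wmemcmp_sketch(const wchar_t *A, const wchar_t *B, size_t N) {
      for (; N != 0; --N, ++A, ++B) {
        if ((unsigned)*A > (unsigned)*B) return 1;
        if ((unsigned)*A < (unsigned)*B) return -1;
      }
      return 0;
    }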
3650 case Builtin::BI__builtin_dwarf_cfa: {
3651 // The offset in bytes from the first argument to the CFA.
3652 //
3653 // Why on earth is this in the frontend? Is there any reason at
3654 // all that the backend can't reasonably determine this while
3655 // lowering llvm.eh.dwarf.cfa()?
3656 //
3657 // TODO: If there's a satisfactory reason, add a target hook for
3658 // this instead of hard-coding 0, which is correct for most targets.
3659 int32_t Offset = 0;
3660
3661 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
3662 return RValue::get(Builder.CreateCall(F,
3663 llvm::ConstantInt::get(Int32Ty, Offset)));
3664 }
3665 case Builtin::BI__builtin_return_address: {
3666 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3667 getContext().UnsignedIntTy);
3668 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3669 return RValue::get(Builder.CreateCall(F, Depth));
3670 }
3671 case Builtin::BI_ReturnAddress: {
3672 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3673 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
3674 }
3675 case Builtin::BI__builtin_frame_address: {
3676 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3677 getContext().UnsignedIntTy);
3678 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
3679 return RValue::get(Builder.CreateCall(F, Depth));
3680 }
3681 case Builtin::BI__builtin_extract_return_addr: {
3682 Value *Address = EmitScalarExpr(E->getArg(0));
3683 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
3684 return RValue::get(Result);
3685 }
3686 case Builtin::BI__builtin_frob_return_addr: {
3687 Value *Address = EmitScalarExpr(E->getArg(0));
3688 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
3689 return RValue::get(Result);
3690 }
3691 case Builtin::BI__builtin_dwarf_sp_column: {
3692 llvm::IntegerType *Ty
3693 = cast<llvm::IntegerType>(ConvertType(E->getType()));
3694 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
3695 if (Column == -1) {
3696 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
3697 return RValue::get(llvm::UndefValue::get(Ty));
3698 }
3699 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
3700 }
3701 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
3702 Value *Address = EmitScalarExpr(E->getArg(0));
3703 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
3704 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
3705 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
3706 }
3707 case Builtin::BI__builtin_eh_return: {
3708 Value *Int = EmitScalarExpr(E->getArg(0));
3709 Value *Ptr = EmitScalarExpr(E->getArg(1));
3710
3711 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
3712    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
3713           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3714 Function *F =
3715 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
3716 : Intrinsic::eh_return_i64);
3717 Builder.CreateCall(F, {Int, Ptr});
3718 Builder.CreateUnreachable();
3719
3720 // We do need to preserve an insertion point.
3721 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
3722
3723 return RValue::get(nullptr);
3724 }
3725 case Builtin::BI__builtin_unwind_init: {
3726 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
3727 return RValue::get(Builder.CreateCall(F));
3728 }
3729 case Builtin::BI__builtin_extend_pointer: {
3730 // Extends a pointer to the size of an _Unwind_Word, which is
3731 // uint64_t on all platforms. Generally this gets poked into a
3732 // register and eventually used as an address, so if the
3733 // addressing registers are wider than pointers and the platform
3734 // doesn't implicitly ignore high-order bits when doing
3735 // addressing, we need to make sure we zext / sext based on
3736 // the platform's expectations.
3737 //
3738 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
3739
3740 // Cast the pointer to intptr_t.
3741 Value *Ptr = EmitScalarExpr(E->getArg(0));
3742 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
3743
3744 // If that's 64 bits, we're done.
3745 if (IntPtrTy->getBitWidth() == 64)
3746 return RValue::get(Result);
3747
3748 // Otherwise, ask the codegen data what to do.
3749 if (getTargetHooks().extendPointerWithSExt())
3750 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
3751 else
3752 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
3753 }
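A compact sketch of what this case computes, with TargetSignExtends standing in for getTargetHooks().extendPointerWithSExt() (illustration only, not the emitted code):

    #include <stdint.h>
    // __builtin_extend_pointer semantics sketch: ptrtoint, then widen to
    // 64 bits with whichever extension the target asks for.
    static uint64_t extend_pointer_sketch(void *P, bool TargetSignExtends) {
      uintptr_t V = (uintptr_t)P;                      // the "extend.cast" ptrtoint
      if (sizeof(void *) == 8)
        return V;                                      // already 64 bits: done
      return TargetSignExtends ? (uint64_t)(int64_t)(intptr_t)P   // extend.sext
                               : (uint64_t)V;                     // extend.zext
    }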
3754 case Builtin::BI__builtin_setjmp: {
3755 // Buffer is a void**.
3756 Address Buf = EmitPointerWithAlignment(E->getArg(0));
3757
3758 // Store the frame pointer to the setjmp buffer.
3759 Value *FrameAddr = Builder.CreateCall(
3760 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
3761 ConstantInt::get(Int32Ty, 0));
3762 Builder.CreateStore(FrameAddr, Buf);
3763
3764 // Store the stack pointer to the setjmp buffer.
3765 Value *StackAddr =
3766 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
3767 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
3768 Builder.CreateStore(StackAddr, StackSaveSlot);
3769
3770 // Call LLVM's EH setjmp, which is lightweight.
3771 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
3772 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3773 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
3774 }
3775 case Builtin::BI__builtin_longjmp: {
3776 Value *Buf = EmitScalarExpr(E->getArg(0));
3777 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3778
3779 // Call LLVM's EH longjmp, which is lightweight.
3780 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
3781
3782 // longjmp doesn't return; mark this as unreachable.
3783 Builder.CreateUnreachable();
3784
3785 // We do need to preserve an insertion point.
3786 EmitBlock(createBasicBlock("longjmp.cont"));
3787
3788 return RValue::get(nullptr);
3789 }
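A minimal usage sketch of the pair lowered by the two cases above; the five-word buffer and the constant 1 passed to __builtin_longjmp are requirements of these builtins:

    #include <stdio.h>
    // __builtin_setjmp returns 0 on the initial call and nonzero when control
    // comes back through __builtin_longjmp.
    static void *JmpBuf[5];
    static void jump_back() { __builtin_longjmp(JmpBuf, 1); }   // does not return
    void setjmp_longjmp_demo() {
      if (__builtin_setjmp(JmpBuf) == 0)
        jump_back();                         // control re-enters via setjmp
      else
        puts("returned via longjmp");
    }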
3790 case Builtin::BI__builtin_launder: {
3791 const Expr *Arg = E->getArg(0);
3792 QualType ArgTy = Arg->getType()->getPointeeType();
3793 Value *Ptr = EmitScalarExpr(Arg);
3794 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
3795 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
3796
3797 return RValue::get(Ptr);
3798 }
3799 case Builtin::BI__sync_fetch_and_add:
3800 case Builtin::BI__sync_fetch_and_sub:
3801 case Builtin::BI__sync_fetch_and_or:
3802 case Builtin::BI__sync_fetch_and_and:
3803 case Builtin::BI__sync_fetch_and_xor:
3804 case Builtin::BI__sync_fetch_and_nand:
3805 case Builtin::BI__sync_add_and_fetch:
3806 case Builtin::BI__sync_sub_and_fetch:
3807 case Builtin::BI__sync_and_and_fetch:
3808 case Builtin::BI__sync_or_and_fetch:
3809 case Builtin::BI__sync_xor_and_fetch:
3810 case Builtin::BI__sync_nand_and_fetch:
3811 case Builtin::BI__sync_val_compare_and_swap:
3812 case Builtin::BI__sync_bool_compare_and_swap:
3813 case Builtin::BI__sync_lock_test_and_set:
3814 case Builtin::BI__sync_lock_release:
3815 case Builtin::BI__sync_swap:
3816 llvm_unreachable("Shouldn't make it through sema")::llvm::llvm_unreachable_internal("Shouldn't make it through sema"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 3816)
;
3817 case Builtin::BI__sync_fetch_and_add_1:
3818 case Builtin::BI__sync_fetch_and_add_2:
3819 case Builtin::BI__sync_fetch_and_add_4:
3820 case Builtin::BI__sync_fetch_and_add_8:
3821 case Builtin::BI__sync_fetch_and_add_16:
3822 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
3823 case Builtin::BI__sync_fetch_and_sub_1:
3824 case Builtin::BI__sync_fetch_and_sub_2:
3825 case Builtin::BI__sync_fetch_and_sub_4:
3826 case Builtin::BI__sync_fetch_and_sub_8:
3827 case Builtin::BI__sync_fetch_and_sub_16:
3828 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
3829 case Builtin::BI__sync_fetch_and_or_1:
3830 case Builtin::BI__sync_fetch_and_or_2:
3831 case Builtin::BI__sync_fetch_and_or_4:
3832 case Builtin::BI__sync_fetch_and_or_8:
3833 case Builtin::BI__sync_fetch_and_or_16:
3834 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
3835 case Builtin::BI__sync_fetch_and_and_1:
3836 case Builtin::BI__sync_fetch_and_and_2:
3837 case Builtin::BI__sync_fetch_and_and_4:
3838 case Builtin::BI__sync_fetch_and_and_8:
3839 case Builtin::BI__sync_fetch_and_and_16:
3840 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
3841 case Builtin::BI__sync_fetch_and_xor_1:
3842 case Builtin::BI__sync_fetch_and_xor_2:
3843 case Builtin::BI__sync_fetch_and_xor_4:
3844 case Builtin::BI__sync_fetch_and_xor_8:
3845 case Builtin::BI__sync_fetch_and_xor_16:
3846 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
3847 case Builtin::BI__sync_fetch_and_nand_1:
3848 case Builtin::BI__sync_fetch_and_nand_2:
3849 case Builtin::BI__sync_fetch_and_nand_4:
3850 case Builtin::BI__sync_fetch_and_nand_8:
3851 case Builtin::BI__sync_fetch_and_nand_16:
3852 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
3853
3854 // Clang extensions: not overloaded yet.
3855 case Builtin::BI__sync_fetch_and_min:
3856 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3857 case Builtin::BI__sync_fetch_and_max:
3858 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3859 case Builtin::BI__sync_fetch_and_umin:
3860 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3861 case Builtin::BI__sync_fetch_and_umax:
3862 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3863
3864 case Builtin::BI__sync_add_and_fetch_1:
3865 case Builtin::BI__sync_add_and_fetch_2:
3866 case Builtin::BI__sync_add_and_fetch_4:
3867 case Builtin::BI__sync_add_and_fetch_8:
3868 case Builtin::BI__sync_add_and_fetch_16:
3869 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3870 llvm::Instruction::Add);
3871 case Builtin::BI__sync_sub_and_fetch_1:
3872 case Builtin::BI__sync_sub_and_fetch_2:
3873 case Builtin::BI__sync_sub_and_fetch_4:
3874 case Builtin::BI__sync_sub_and_fetch_8:
3875 case Builtin::BI__sync_sub_and_fetch_16:
3876 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3877 llvm::Instruction::Sub);
3878 case Builtin::BI__sync_and_and_fetch_1:
3879 case Builtin::BI__sync_and_and_fetch_2:
3880 case Builtin::BI__sync_and_and_fetch_4:
3881 case Builtin::BI__sync_and_and_fetch_8:
3882 case Builtin::BI__sync_and_and_fetch_16:
3883 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3884 llvm::Instruction::And);
3885 case Builtin::BI__sync_or_and_fetch_1:
3886 case Builtin::BI__sync_or_and_fetch_2:
3887 case Builtin::BI__sync_or_and_fetch_4:
3888 case Builtin::BI__sync_or_and_fetch_8:
3889 case Builtin::BI__sync_or_and_fetch_16:
3890 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3891 llvm::Instruction::Or);
3892 case Builtin::BI__sync_xor_and_fetch_1:
3893 case Builtin::BI__sync_xor_and_fetch_2:
3894 case Builtin::BI__sync_xor_and_fetch_4:
3895 case Builtin::BI__sync_xor_and_fetch_8:
3896 case Builtin::BI__sync_xor_and_fetch_16:
3897 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3898 llvm::Instruction::Xor);
3899 case Builtin::BI__sync_nand_and_fetch_1:
3900 case Builtin::BI__sync_nand_and_fetch_2:
3901 case Builtin::BI__sync_nand_and_fetch_4:
3902 case Builtin::BI__sync_nand_and_fetch_8:
3903 case Builtin::BI__sync_nand_and_fetch_16:
3904 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3905 llvm::Instruction::And, true);
3906
3907 case Builtin::BI__sync_val_compare_and_swap_1:
3908 case Builtin::BI__sync_val_compare_and_swap_2:
3909 case Builtin::BI__sync_val_compare_and_swap_4:
3910 case Builtin::BI__sync_val_compare_and_swap_8:
3911 case Builtin::BI__sync_val_compare_and_swap_16:
3912 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3913
3914 case Builtin::BI__sync_bool_compare_and_swap_1:
3915 case Builtin::BI__sync_bool_compare_and_swap_2:
3916 case Builtin::BI__sync_bool_compare_and_swap_4:
3917 case Builtin::BI__sync_bool_compare_and_swap_8:
3918 case Builtin::BI__sync_bool_compare_and_swap_16:
3919 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3920
3921 case Builtin::BI__sync_swap_1:
3922 case Builtin::BI__sync_swap_2:
3923 case Builtin::BI__sync_swap_4:
3924 case Builtin::BI__sync_swap_8:
3925 case Builtin::BI__sync_swap_16:
3926 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3927
3928 case Builtin::BI__sync_lock_test_and_set_1:
3929 case Builtin::BI__sync_lock_test_and_set_2:
3930 case Builtin::BI__sync_lock_test_and_set_4:
3931 case Builtin::BI__sync_lock_test_and_set_8:
3932 case Builtin::BI__sync_lock_test_and_set_16:
3933 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3934
3935 case Builtin::BI__sync_lock_release_1:
3936 case Builtin::BI__sync_lock_release_2:
3937 case Builtin::BI__sync_lock_release_4:
3938 case Builtin::BI__sync_lock_release_8:
3939 case Builtin::BI__sync_lock_release_16: {
3940 Value *Ptr = EmitScalarExpr(E->getArg(0));
3941 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3942 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3943 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3944 StoreSize.getQuantity() * 8);
3945 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3946 llvm::StoreInst *Store =
3947 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3948 StoreSize);
3949 Store->setAtomic(llvm::AtomicOrdering::Release);
3950 return RValue::get(nullptr);
3951 }
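A usage sketch pairing this release store of zero with the acquire exchange emitted for __sync_lock_test_and_set above; this is the flag-based spinlock pattern from the GCC builtin documentation:

    // 0 means free; __sync_lock_test_and_set returns the previous value.
    static int SpinFlag;
    void spin_lock()   { while (__sync_lock_test_and_set(&SpinFlag, 1)) { } }
    void spin_unlock() { __sync_lock_release(&SpinFlag); }   // release store of 0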
3952
3953 case Builtin::BI__sync_synchronize: {
3954 // We assume this is supposed to correspond to a C++0x-style
3955 // sequentially-consistent fence (i.e. this is only usable for
3956 // synchronization, not device I/O or anything like that). This intrinsic
3957 // is really badly designed in the sense that in theory, there isn't
3958 // any way to safely use it... but in practice, it mostly works
3959 // to use it with non-atomic loads and stores to get acquire/release
3960 // semantics.
3961 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3962 return RValue::get(nullptr);
3963 }
3964
3965 case Builtin::BI__builtin_nontemporal_load:
3966 return RValue::get(EmitNontemporalLoad(*this, E));
3967 case Builtin::BI__builtin_nontemporal_store:
3968 return RValue::get(EmitNontemporalStore(*this, E));
3969 case Builtin::BI__c11_atomic_is_lock_free:
3970 case Builtin::BI__atomic_is_lock_free: {
3971 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3972 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3973 // _Atomic(T) is always properly-aligned.
3974 const char *LibCallName = "__atomic_is_lock_free";
3975 CallArgList Args;
3976 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3977 getContext().getSizeType());
3978 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3979 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3980 getContext().VoidPtrTy);
3981 else
3982 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3983 getContext().VoidPtrTy);
3984 const CGFunctionInfo &FuncInfo =
3985 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3986 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3987 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3988 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3989 ReturnValueSlot(), Args);
3990 }
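A usage sketch for the library call this lowering can emit; the two arguments map onto the "size_t size, void *ptr" parameters described in the comment above:

    // Returns nonzero when atomic accesses of this size and alignment are
    // lock-free on the target.
    static long Counter;
    int counter_is_lock_free() {
      return __atomic_is_lock_free(sizeof(Counter), &Counter);
    }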
3991
3992 case Builtin::BI__atomic_test_and_set: {
3993 // Look at the argument type to determine whether this is a volatile
3994 // operation. The parameter type is always volatile.
3995 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3996 bool Volatile =
3997 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3998
3999 Value *Ptr = EmitScalarExpr(E->getArg(0));
4000 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
4001 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
4002 Value *NewVal = Builder.getInt8(1);
4003 Value *Order = EmitScalarExpr(E->getArg(1));
4004 if (isa<llvm::ConstantInt>(Order)) {
4005 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4006 AtomicRMWInst *Result = nullptr;
4007 switch (ord) {
4008 case 0: // memory_order_relaxed
4009 default: // invalid order
4010 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4011 llvm::AtomicOrdering::Monotonic);
4012 break;
4013 case 1: // memory_order_consume
4014 case 2: // memory_order_acquire
4015 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4016 llvm::AtomicOrdering::Acquire);
4017 break;
4018 case 3: // memory_order_release
4019 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4020 llvm::AtomicOrdering::Release);
4021 break;
4022 case 4: // memory_order_acq_rel
4023
4024 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4025 llvm::AtomicOrdering::AcquireRelease);
4026 break;
4027 case 5: // memory_order_seq_cst
4028 Result = Builder.CreateAtomicRMW(
4029 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4030 llvm::AtomicOrdering::SequentiallyConsistent);
4031 break;
4032 }
4033 Result->setVolatile(Volatile);
4034 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
4035 }
4036
4037 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4038
4039 llvm::BasicBlock *BBs[5] = {
4040 createBasicBlock("monotonic", CurFn),
4041 createBasicBlock("acquire", CurFn),
4042 createBasicBlock("release", CurFn),
4043 createBasicBlock("acqrel", CurFn),
4044 createBasicBlock("seqcst", CurFn)
4045 };
4046 llvm::AtomicOrdering Orders[5] = {
4047 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
4048 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
4049 llvm::AtomicOrdering::SequentiallyConsistent};
4050
4051 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4052 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
4053
4054 Builder.SetInsertPoint(ContBB);
4055 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
4056
4057 for (unsigned i = 0; i < 5; ++i) {
4058 Builder.SetInsertPoint(BBs[i]);
4059 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
4060 Ptr, NewVal, Orders[i]);
4061 RMW->setVolatile(Volatile);
4062 Result->addIncoming(RMW, BBs[i]);
4063 Builder.CreateBr(ContBB);
4064 }
4065
4066 SI->addCase(Builder.getInt32(0), BBs[0]);
4067 SI->addCase(Builder.getInt32(1), BBs[1]);
4068 SI->addCase(Builder.getInt32(2), BBs[1]);
4069 SI->addCase(Builder.getInt32(3), BBs[2]);
4070 SI->addCase(Builder.getInt32(4), BBs[3]);
4071 SI->addCase(Builder.getInt32(5), BBs[4]);
4072
4073 Builder.SetInsertPoint(ContBB);
4074 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
4075 }
4076
4077 case Builtin::BI__atomic_clear: {
4078 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
4079 bool Volatile =
4080 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
4081
4082 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
4083 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
4084 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
4085 Value *NewVal = Builder.getInt8(0);
4086 Value *Order = EmitScalarExpr(E->getArg(1));
4087 if (isa<llvm::ConstantInt>(Order)) {
4088 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4089 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
4090 switch (ord) {
4091 case 0: // memory_order_relaxed
4092 default: // invalid order
4093 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
4094 break;
4095 case 3: // memory_order_release
4096 Store->setOrdering(llvm::AtomicOrdering::Release);
4097 break;
4098 case 5: // memory_order_seq_cst
4099 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
4100 break;
4101 }
4102 return RValue::get(nullptr);
4103 }
4104
4105 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4106
4107 llvm::BasicBlock *BBs[3] = {
4108 createBasicBlock("monotonic", CurFn),
4109 createBasicBlock("release", CurFn),
4110 createBasicBlock("seqcst", CurFn)
4111 };
4112 llvm::AtomicOrdering Orders[3] = {
4113 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
4114 llvm::AtomicOrdering::SequentiallyConsistent};
4115
4116 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4117 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
4118
4119 for (unsigned i = 0; i < 3; ++i) {
4120 Builder.SetInsertPoint(BBs[i]);
4121 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
4122 Store->setOrdering(Orders[i]);
4123 Builder.CreateBr(ContBB);
4124 }
4125
4126 SI->addCase(Builder.getInt32(0), BBs[0]);
4127 SI->addCase(Builder.getInt32(3), BBs[1]);
4128 SI->addCase(Builder.getInt32(5), BBs[2]);
4129
4130 Builder.SetInsertPoint(ContBB);
4131 return RValue::get(nullptr);
4132 }
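Together, the __atomic_test_and_set and __atomic_clear cases above implement the atomic_flag pattern. A usage sketch, assuming the flag byte starts out clear:

    // Spin until the previous value was 0 (acquire), then clear it (release).
    static volatile bool Flag;
    void flag_lock()   { while (__atomic_test_and_set(&Flag, __ATOMIC_ACQUIRE)) { } }
    void flag_unlock() { __atomic_clear(&Flag, __ATOMIC_RELEASE); }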
4133
4134 case Builtin::BI__atomic_thread_fence:
4135 case Builtin::BI__atomic_signal_fence:
4136 case Builtin::BI__c11_atomic_thread_fence:
4137 case Builtin::BI__c11_atomic_signal_fence: {
4138 llvm::SyncScope::ID SSID;
4139 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
4140 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
4141 SSID = llvm::SyncScope::SingleThread;
4142 else
4143 SSID = llvm::SyncScope::System;
4144 Value *Order = EmitScalarExpr(E->getArg(0));
4145 if (isa<llvm::ConstantInt>(Order)) {
4146 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4147 switch (ord) {
4148 case 0: // memory_order_relaxed
4149 default: // invalid order
4150 break;
4151 case 1: // memory_order_consume
4152 case 2: // memory_order_acquire
4153 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4154 break;
4155 case 3: // memory_order_release
4156 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4157 break;
4158 case 4: // memory_order_acq_rel
4159 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4160 break;
4161 case 5: // memory_order_seq_cst
4162 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4163 break;
4164 }
4165 return RValue::get(nullptr);
4166 }
4167
4168 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
4169 AcquireBB = createBasicBlock("acquire", CurFn);
4170 ReleaseBB = createBasicBlock("release", CurFn);
4171 AcqRelBB = createBasicBlock("acqrel", CurFn);
4172 SeqCstBB = createBasicBlock("seqcst", CurFn);
4173 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4174
4175 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4176 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
4177
4178 Builder.SetInsertPoint(AcquireBB);
4179 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4180 Builder.CreateBr(ContBB);
4181 SI->addCase(Builder.getInt32(1), AcquireBB);
4182 SI->addCase(Builder.getInt32(2), AcquireBB);
4183
4184 Builder.SetInsertPoint(ReleaseBB);
4185 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4186 Builder.CreateBr(ContBB);
4187 SI->addCase(Builder.getInt32(3), ReleaseBB);
4188
4189 Builder.SetInsertPoint(AcqRelBB);
4190 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4191 Builder.CreateBr(ContBB);
4192 SI->addCase(Builder.getInt32(4), AcqRelBB);
4193
4194 Builder.SetInsertPoint(SeqCstBB);
4195 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4196 Builder.CreateBr(ContBB);
4197 SI->addCase(Builder.getInt32(5), SeqCstBB);
4198
4199 Builder.SetInsertPoint(ContBB);
4200 return RValue::get(nullptr);
4201 }
4202
4203 case Builtin::BI__builtin_signbit:
4204 case Builtin::BI__builtin_signbitf:
4205 case Builtin::BI__builtin_signbitl: {
4206 return RValue::get(
4207 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
4208 ConvertType(E->getType())));
4209 }
4210 case Builtin::BI__warn_memset_zero_len:
4211 return RValue::getIgnored();
4212 case Builtin::BI__annotation: {
4213 // Re-encode each wide string to UTF8 and make an MDString.
4214 SmallVector<Metadata *, 1> Strings;
4215 for (const Expr *Arg : E->arguments()) {
4216 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
4217      assert(Str->getCharByteWidth() == 2);
4218 StringRef WideBytes = Str->getBytes();
4219 std::string StrUtf8;
4220 if (!convertUTF16ToUTF8String(
4221 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
4222 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
4223 continue;
4224 }
4225 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
4226 }
4227
4228    // Build an MDTuple of MDStrings and emit the intrinsic call.
4229 llvm::Function *F =
4230 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
4231 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
4232 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
4233 return RValue::getIgnored();
4234 }
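A usage sketch of the MSVC intrinsic handled above; each wide-string argument becomes a UTF-8 MDString attached to the llvm.codeview.annotation call:

    // MSVC-compatible source (illustration only); the arguments must be wide
    // string literals, as the assert above requires 2-byte characters.
    void traced_region() {
      __annotation(L"region", L"begin");
    }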
4235 case Builtin::BI__builtin_annotation: {
4236 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
4237 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
4238 AnnVal->getType());
4239
4240 // Get the annotation string, go through casts. Sema requires this to be a
4241    // non-wide string literal, potentially cast, so the cast<> is safe.
4242 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
4243 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
4244 return RValue::get(
4245 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
4246 }
4247 case Builtin::BI__builtin_addcb:
4248 case Builtin::BI__builtin_addcs:
4249 case Builtin::BI__builtin_addc:
4250 case Builtin::BI__builtin_addcl:
4251 case Builtin::BI__builtin_addcll:
4252 case Builtin::BI__builtin_subcb:
4253 case Builtin::BI__builtin_subcs:
4254 case Builtin::BI__builtin_subc:
4255 case Builtin::BI__builtin_subcl:
4256 case Builtin::BI__builtin_subcll: {
4257
4258 // We translate all of these builtins from expressions of the form:
4259 // int x = ..., y = ..., carryin = ..., carryout, result;
4260 // result = __builtin_addc(x, y, carryin, &carryout);
4261 //
4262 // to LLVM IR of the form:
4263 //
4264 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
4265 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
4266 // %carry1 = extractvalue {i32, i1} %tmp1, 1
4267 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
4268 // i32 %carryin)
4269 // %result = extractvalue {i32, i1} %tmp2, 0
4270 // %carry2 = extractvalue {i32, i1} %tmp2, 1
4271 // %tmp3 = or i1 %carry1, %carry2
4272 // %tmp4 = zext i1 %tmp3 to i32
4273 // store i32 %tmp4, i32* %carryout
4274
4275 // Scalarize our inputs.
4276 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4277 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4278 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
4279 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
4280
4281 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
4282 llvm::Intrinsic::ID IntrinsicId;
4283 switch (BuiltinID) {
4284 default: llvm_unreachable("Unknown multiprecision builtin id.")::llvm::llvm_unreachable_internal("Unknown multiprecision builtin id."
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4284)
;
4285 case Builtin::BI__builtin_addcb:
4286 case Builtin::BI__builtin_addcs:
4287 case Builtin::BI__builtin_addc:
4288 case Builtin::BI__builtin_addcl:
4289 case Builtin::BI__builtin_addcll:
4290 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4291 break;
4292 case Builtin::BI__builtin_subcb:
4293 case Builtin::BI__builtin_subcs:
4294 case Builtin::BI__builtin_subc:
4295 case Builtin::BI__builtin_subcl:
4296 case Builtin::BI__builtin_subcll:
4297 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4298 break;
4299 }
4300
4301 // Construct our resulting LLVM IR expression.
4302 llvm::Value *Carry1;
4303 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
4304 X, Y, Carry1);
4305 llvm::Value *Carry2;
4306 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
4307 Sum1, Carryin, Carry2);
4308 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
4309 X->getType());
4310 Builder.CreateStore(CarryOut, CarryOutPtr);
4311 return RValue::get(Sum2);
4312 }
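A usage sketch of the carry chain these builtins exist for, here adding two 128-bit values held as 64-bit limbs (illustration only):

    #include <stdint.h>
    // __builtin_addcll returns the low 64 bits of x + y + carryin and writes the
    // outgoing carry, matching the two uadd.with.overflow calls described above.
    void add128(const uint64_t A[2], const uint64_t B[2], uint64_t Out[2]) {
      unsigned long long Carry;
      Out[0] = __builtin_addcll(A[0], B[0], 0, &Carry);
      Out[1] = __builtin_addcll(A[1], B[1], Carry, &Carry);
    }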
4313
4314 case Builtin::BI__builtin_add_overflow:
4315 case Builtin::BI__builtin_sub_overflow:
4316 case Builtin::BI__builtin_mul_overflow: {
4317 const clang::Expr *LeftArg = E->getArg(0);
4318 const clang::Expr *RightArg = E->getArg(1);
4319 const clang::Expr *ResultArg = E->getArg(2);
4320
4321 clang::QualType ResultQTy =
4322 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
4323
4324 WidthAndSignedness LeftInfo =
4325 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
4326 WidthAndSignedness RightInfo =
4327 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
4328 WidthAndSignedness ResultInfo =
4329 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
4330
4331 // Handle mixed-sign multiplication as a special case, because adding
4332 // runtime or backend support for our generic irgen would be too expensive.
4333 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
4334 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
4335 RightInfo, ResultArg, ResultQTy,
4336 ResultInfo);
4337
4338 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
4339 ResultInfo))
4340 return EmitCheckedUnsignedMultiplySignedResult(
4341 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
4342 ResultInfo);
4343
4344 WidthAndSignedness EncompassingInfo =
4345 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
4346
4347 llvm::Type *EncompassingLLVMTy =
4348 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
4349
4350 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
4351
4352 llvm::Intrinsic::ID IntrinsicId;
4353 switch (BuiltinID) {
4354 default:
4355 llvm_unreachable("Unknown overflow builtin id.")::llvm::llvm_unreachable_internal("Unknown overflow builtin id."
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4355)
;
4356 case Builtin::BI__builtin_add_overflow:
4357 IntrinsicId = EncompassingInfo.Signed
4358 ? llvm::Intrinsic::sadd_with_overflow
4359 : llvm::Intrinsic::uadd_with_overflow;
4360 break;
4361 case Builtin::BI__builtin_sub_overflow:
4362 IntrinsicId = EncompassingInfo.Signed
4363 ? llvm::Intrinsic::ssub_with_overflow
4364 : llvm::Intrinsic::usub_with_overflow;
4365 break;
4366 case Builtin::BI__builtin_mul_overflow:
4367 IntrinsicId = EncompassingInfo.Signed
4368 ? llvm::Intrinsic::smul_with_overflow
4369 : llvm::Intrinsic::umul_with_overflow;
4370 break;
4371 }
4372
4373 llvm::Value *Left = EmitScalarExpr(LeftArg);
4374 llvm::Value *Right = EmitScalarExpr(RightArg);
4375 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
4376
4377 // Extend each operand to the encompassing type.
4378 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
4379 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
4380
4381 // Perform the operation on the extended values.
4382 llvm::Value *Overflow, *Result;
4383 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
4384
4385 if (EncompassingInfo.Width > ResultInfo.Width) {
4386 // The encompassing type is wider than the result type, so we need to
4387 // truncate it.
4388 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
4389
4390 // To see if the truncation caused an overflow, we will extend
4391 // the result and then compare it to the original result.
4392 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
4393 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
4394 llvm::Value *TruncationOverflow =
4395 Builder.CreateICmpNE(Result, ResultTruncExt);
4396
4397 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
4398 Result = ResultTrunc;
4399 }
4400
4401 // Finally, store the result using the pointer.
4402 bool isVolatile =
4403 ResultArg->getType()->getPointeeType().isVolatileQualified();
4404 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
4405
4406 return RValue::get(Overflow);
4407 }
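A usage sketch showing why the encompassing type and the truncation check above are needed: the operands and the result may have different integer types:

    #include <stdint.h>
    // Returns true if A + B does not fit in *Out, including overflow introduced
    // when the wider intermediate sum is truncated down to uint8_t.
    bool add_into_u8(uint32_t A, uint32_t B, uint8_t *Out) {
      return __builtin_add_overflow(A, B, Out);
    }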
4408
4409 case Builtin::BI__builtin_uadd_overflow:
4410 case Builtin::BI__builtin_uaddl_overflow:
4411 case Builtin::BI__builtin_uaddll_overflow:
4412 case Builtin::BI__builtin_usub_overflow:
4413 case Builtin::BI__builtin_usubl_overflow:
4414 case Builtin::BI__builtin_usubll_overflow:
4415 case Builtin::BI__builtin_umul_overflow:
4416 case Builtin::BI__builtin_umull_overflow:
4417 case Builtin::BI__builtin_umulll_overflow:
4418 case Builtin::BI__builtin_sadd_overflow:
4419 case Builtin::BI__builtin_saddl_overflow:
4420 case Builtin::BI__builtin_saddll_overflow:
4421 case Builtin::BI__builtin_ssub_overflow:
4422 case Builtin::BI__builtin_ssubl_overflow:
4423 case Builtin::BI__builtin_ssubll_overflow:
4424 case Builtin::BI__builtin_smul_overflow:
4425 case Builtin::BI__builtin_smull_overflow:
4426 case Builtin::BI__builtin_smulll_overflow: {
4427
4428 // We translate all of these builtins directly to the relevant llvm IR node.
4429
4430 // Scalarize our inputs.
4431 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4432 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4433 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
4434
4435 // Decide which of the overflow intrinsics we are lowering to:
4436 llvm::Intrinsic::ID IntrinsicId;
4437 switch (BuiltinID) {
4438 default: llvm_unreachable("Unknown overflow builtin id.")::llvm::llvm_unreachable_internal("Unknown overflow builtin id."
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4438)
;
4439 case Builtin::BI__builtin_uadd_overflow:
4440 case Builtin::BI__builtin_uaddl_overflow:
4441 case Builtin::BI__builtin_uaddll_overflow:
4442 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4443 break;
4444 case Builtin::BI__builtin_usub_overflow:
4445 case Builtin::BI__builtin_usubl_overflow:
4446 case Builtin::BI__builtin_usubll_overflow:
4447 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4448 break;
4449 case Builtin::BI__builtin_umul_overflow:
4450 case Builtin::BI__builtin_umull_overflow:
4451 case Builtin::BI__builtin_umulll_overflow:
4452 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
4453 break;
4454 case Builtin::BI__builtin_sadd_overflow:
4455 case Builtin::BI__builtin_saddl_overflow:
4456 case Builtin::BI__builtin_saddll_overflow:
4457 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
4458 break;
4459 case Builtin::BI__builtin_ssub_overflow:
4460 case Builtin::BI__builtin_ssubl_overflow:
4461 case Builtin::BI__builtin_ssubll_overflow:
4462 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
4463 break;
4464 case Builtin::BI__builtin_smul_overflow:
4465 case Builtin::BI__builtin_smull_overflow:
4466 case Builtin::BI__builtin_smulll_overflow:
4467 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
4468 break;
4469 }
4470
4471
4472 llvm::Value *Carry;
4473 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
4474 Builder.CreateStore(Sum, SumOutPtr);
4475
4476 return RValue::get(Carry);
4477 }
4478 case Builtin::BI__builtin_addressof:
4479 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4480 case Builtin::BI__builtin_operator_new:
4481 return EmitBuiltinNewDeleteCall(
4482 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
4483 case Builtin::BI__builtin_operator_delete:
4484 return EmitBuiltinNewDeleteCall(
4485 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
4486
4487 case Builtin::BI__builtin_is_aligned:
4488 return EmitBuiltinIsAligned(E);
4489 case Builtin::BI__builtin_align_up:
4490 return EmitBuiltinAlignTo(E, true);
4491 case Builtin::BI__builtin_align_down:
4492 return EmitBuiltinAlignTo(E, false);
4493
4494 case Builtin::BI__noop:
4495 // __noop always evaluates to an integer literal zero.
4496 return RValue::get(ConstantInt::get(IntTy, 0));
4497 case Builtin::BI__builtin_call_with_static_chain: {
4498 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
4499 const Expr *Chain = E->getArg(1);
4500 return EmitCall(Call->getCallee()->getType(),
4501 EmitCallee(Call->getCallee()), Call, ReturnValue,
4502 EmitScalarExpr(Chain));
4503 }
4504 case Builtin::BI_InterlockedExchange8:
4505 case Builtin::BI_InterlockedExchange16:
4506 case Builtin::BI_InterlockedExchange:
4507 case Builtin::BI_InterlockedExchangePointer:
4508 return RValue::get(
4509 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
4510 case Builtin::BI_InterlockedCompareExchangePointer:
4511 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
4512 llvm::Type *RTy;
4513 llvm::IntegerType *IntType =
4514 IntegerType::get(getLLVMContext(),
4515 getContext().getTypeSize(E->getType()));
4516 llvm::Type *IntPtrType = IntType->getPointerTo();
4517
4518 llvm::Value *Destination =
4519 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
4520
4521 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
4522 RTy = Exchange->getType();
4523 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
4524
4525 llvm::Value *Comparand =
4526 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
4527
4528 auto Ordering =
4529 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
4530 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
4531
4532 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
4533 Ordering, Ordering);
4534 Result->setVolatile(true);
4535
4536 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
4537 0),
4538 RTy));
4539 }
4540 case Builtin::BI_InterlockedCompareExchange8:
4541 case Builtin::BI_InterlockedCompareExchange16:
4542 case Builtin::BI_InterlockedCompareExchange:
4543 case Builtin::BI_InterlockedCompareExchange64:
4544 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
4545 case Builtin::BI_InterlockedIncrement16:
4546 case Builtin::BI_InterlockedIncrement:
4547 return RValue::get(
4548 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
4549 case Builtin::BI_InterlockedDecrement16:
4550 case Builtin::BI_InterlockedDecrement:
4551 return RValue::get(
4552 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
4553 case Builtin::BI_InterlockedAnd8:
4554 case Builtin::BI_InterlockedAnd16:
4555 case Builtin::BI_InterlockedAnd:
4556 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
4557 case Builtin::BI_InterlockedExchangeAdd8:
4558 case Builtin::BI_InterlockedExchangeAdd16:
4559 case Builtin::BI_InterlockedExchangeAdd:
4560 return RValue::get(
4561 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
4562 case Builtin::BI_InterlockedExchangeSub8:
4563 case Builtin::BI_InterlockedExchangeSub16:
4564 case Builtin::BI_InterlockedExchangeSub:
4565 return RValue::get(
4566 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
4567 case Builtin::BI_InterlockedOr8:
4568 case Builtin::BI_InterlockedOr16:
4569 case Builtin::BI_InterlockedOr:
4570 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
4571 case Builtin::BI_InterlockedXor8:
4572 case Builtin::BI_InterlockedXor16:
4573 case Builtin::BI_InterlockedXor:
4574 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
4575
4576 case Builtin::BI_bittest64:
4577 case Builtin::BI_bittest:
4578 case Builtin::BI_bittestandcomplement64:
4579 case Builtin::BI_bittestandcomplement:
4580 case Builtin::BI_bittestandreset64:
4581 case Builtin::BI_bittestandreset:
4582 case Builtin::BI_bittestandset64:
4583 case Builtin::BI_bittestandset:
4584 case Builtin::BI_interlockedbittestandreset:
4585 case Builtin::BI_interlockedbittestandreset64:
4586 case Builtin::BI_interlockedbittestandset64:
4587 case Builtin::BI_interlockedbittestandset:
4588 case Builtin::BI_interlockedbittestandset_acq:
4589 case Builtin::BI_interlockedbittestandset_rel:
4590 case Builtin::BI_interlockedbittestandset_nf:
4591 case Builtin::BI_interlockedbittestandreset_acq:
4592 case Builtin::BI_interlockedbittestandreset_rel:
4593 case Builtin::BI_interlockedbittestandreset_nf:
4594 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
4595
4596 // These builtins exist to emit regular volatile loads and stores not
4597 // affected by the -fms-volatile setting.
4598 case Builtin::BI__iso_volatile_load8:
4599 case Builtin::BI__iso_volatile_load16:
4600 case Builtin::BI__iso_volatile_load32:
4601 case Builtin::BI__iso_volatile_load64:
4602 return RValue::get(EmitISOVolatileLoad(*this, E));
4603 case Builtin::BI__iso_volatile_store8:
4604 case Builtin::BI__iso_volatile_store16:
4605 case Builtin::BI__iso_volatile_store32:
4606 case Builtin::BI__iso_volatile_store64:
4607 return RValue::get(EmitISOVolatileStore(*this, E));
4608
4609 case Builtin::BI__exception_code:
4610 case Builtin::BI_exception_code:
4611 return RValue::get(EmitSEHExceptionCode());
4612 case Builtin::BI__exception_info:
4613 case Builtin::BI_exception_info:
4614 return RValue::get(EmitSEHExceptionInfo());
4615 case Builtin::BI__abnormal_termination:
4616 case Builtin::BI_abnormal_termination:
4617 return RValue::get(EmitSEHAbnormalTermination());
4618 case Builtin::BI_setjmpex:
4619 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4620 E->getArg(0)->getType()->isPointerType())
4621 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4622 break;
4623 case Builtin::BI_setjmp:
4624 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4625 E->getArg(0)->getType()->isPointerType()) {
4626 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
4627 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
4628 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
4629 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4630 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
4631 }
4632 break;
4633
4634 case Builtin::BI__GetExceptionInfo: {
4635 if (llvm::GlobalVariable *GV =
4636 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
4637 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
4638 break;
4639 }
4640
4641 case Builtin::BI__fastfail:
4642 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
4643
4644 case Builtin::BI__builtin_coro_size: {
4645 auto & Context = getContext();
4646 auto SizeTy = Context.getSizeType();
4647 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4648 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
4649 return RValue::get(Builder.CreateCall(F));
4650 }
4651
4652 case Builtin::BI__builtin_coro_id:
4653 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
4654 case Builtin::BI__builtin_coro_promise:
4655 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
4656 case Builtin::BI__builtin_coro_resume:
4657 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
4658 case Builtin::BI__builtin_coro_frame:
4659 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
4660 case Builtin::BI__builtin_coro_noop:
4661 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
4662 case Builtin::BI__builtin_coro_free:
4663 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
4664 case Builtin::BI__builtin_coro_destroy:
4665 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
4666 case Builtin::BI__builtin_coro_done:
4667 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
4668 case Builtin::BI__builtin_coro_alloc:
4669 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
4670 case Builtin::BI__builtin_coro_begin:
4671 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
4672 case Builtin::BI__builtin_coro_end:
4673 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
4674 case Builtin::BI__builtin_coro_suspend:
4675 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
4676 case Builtin::BI__builtin_coro_param:
4677 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
4678
4679 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
4680 case Builtin::BIread_pipe:
4681 case Builtin::BIwrite_pipe: {
4682 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4683 *Arg1 = EmitScalarExpr(E->getArg(1));
4684 CGOpenCLRuntime OpenCLRT(CGM);
4685 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4686 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4687
4688 // Type of the generic packet parameter.
4689 unsigned GenericAS =
4690 getContext().getTargetAddressSpace(LangAS::opencl_generic);
4691 llvm::Type *I8PTy = llvm::PointerType::get(
4692 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
4693
4694    // Determine which overloaded version we should generate the call for.
4695 if (2U == E->getNumArgs()) {
4696 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
4697 : "__write_pipe_2";
4698      // Create a generic function type so the call works with any builtin or
4699      // user-defined type.
4700 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
4701 llvm::FunctionType *FTy = llvm::FunctionType::get(
4702 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4703 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
4704 return RValue::get(
4705 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4706 {Arg0, BCast, PacketSize, PacketAlign}));
4707 } else {
4708      assert(4 == E->getNumArgs() &&
4709             "Illegal number of parameters to pipe function");
4710 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
4711 : "__write_pipe_4";
4712
4713 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
4714 Int32Ty, Int32Ty};
4715 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
4716 *Arg3 = EmitScalarExpr(E->getArg(3));
4717 llvm::FunctionType *FTy = llvm::FunctionType::get(
4718 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4719 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
4720 // We know the third argument is an integer type, but we may need to cast
4721 // it to i32.
4722 if (Arg2->getType() != Int32Ty)
4723 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
4724 return RValue::get(
4725 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4726 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
4727 }
4728 }
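For reference, the runtime prototypes implied by the ArgTys arrays above, written as a sketch with the generic-address-space i8 pointer shown as void * (assembled from this code, not from a header):

    // int __read_pipe_2 (pipe, void *ptr, int size, int align);
    // int __write_pipe_2(pipe, void *ptr, int size, int align);
    // int __read_pipe_4 (pipe, reserve_id_t id, int index, void *ptr, int size, int align);
    // int __write_pipe_4(pipe, reserve_id_t id, int index, void *ptr, int size, int align);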
4729 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
4730 // functions
4731 case Builtin::BIreserve_read_pipe:
4732 case Builtin::BIreserve_write_pipe:
4733 case Builtin::BIwork_group_reserve_read_pipe:
4734 case Builtin::BIwork_group_reserve_write_pipe:
4735 case Builtin::BIsub_group_reserve_read_pipe:
4736 case Builtin::BIsub_group_reserve_write_pipe: {
4738    // Compose the mangled name for the function.
4738 const char *Name;
4739 if (BuiltinID == Builtin::BIreserve_read_pipe)
4740 Name = "__reserve_read_pipe";
4741 else if (BuiltinID == Builtin::BIreserve_write_pipe)
4742 Name = "__reserve_write_pipe";
4743 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
4744 Name = "__work_group_reserve_read_pipe";
4745 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
4746 Name = "__work_group_reserve_write_pipe";
4747 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
4748 Name = "__sub_group_reserve_read_pipe";
4749 else
4750 Name = "__sub_group_reserve_write_pipe";
4751
4752 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4753 *Arg1 = EmitScalarExpr(E->getArg(1));
4754 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
4755 CGOpenCLRuntime OpenCLRT(CGM);
4756 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4757 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4758
4759 // Building the generic function prototype.
4760 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
4761 llvm::FunctionType *FTy = llvm::FunctionType::get(
4762 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4763 // We know the second argument is an integer type, but we may need to cast
4764 // it to i32.
4765 if (Arg1->getType() != Int32Ty)
4766 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
4767 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4768 {Arg0, Arg1, PacketSize, PacketAlign}));
4769 }
4770 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
4771 // functions
4772 case Builtin::BIcommit_read_pipe:
4773 case Builtin::BIcommit_write_pipe:
4774 case Builtin::BIwork_group_commit_read_pipe:
4775 case Builtin::BIwork_group_commit_write_pipe:
4776 case Builtin::BIsub_group_commit_read_pipe:
4777 case Builtin::BIsub_group_commit_write_pipe: {
4778 const char *Name;
4779 if (BuiltinID == Builtin::BIcommit_read_pipe)
4780 Name = "__commit_read_pipe";
4781 else if (BuiltinID == Builtin::BIcommit_write_pipe)
4782 Name = "__commit_write_pipe";
4783 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
4784 Name = "__work_group_commit_read_pipe";
4785 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
4786 Name = "__work_group_commit_write_pipe";
4787 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
4788 Name = "__sub_group_commit_read_pipe";
4789 else
4790 Name = "__sub_group_commit_write_pipe";
4791
4792 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4793 *Arg1 = EmitScalarExpr(E->getArg(1));
4794 CGOpenCLRuntime OpenCLRT(CGM);
4795 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4796 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4797
4798 // Building the generic function prototype.
4799 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
4800 llvm::FunctionType *FTy =
4801 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
4802 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4803
4804 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4805 {Arg0, Arg1, PacketSize, PacketAlign}));
4806 }
4807 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
4808 case Builtin::BIget_pipe_num_packets:
4809 case Builtin::BIget_pipe_max_packets: {
4810 const char *BaseName;
4811 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
4812 if (BuiltinID == Builtin::BIget_pipe_num_packets)
4813 BaseName = "__get_pipe_num_packets";
4814 else
4815 BaseName = "__get_pipe_max_packets";
4816 std::string Name = std::string(BaseName) +
4817 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
4818
4819 // Building the generic function prototype.
4820 Value *Arg0 = EmitScalarExpr(E->getArg(0));
4821 CGOpenCLRuntime OpenCLRT(CGM);
4822 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4823 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4824 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
4825 llvm::FunctionType *FTy = llvm::FunctionType::get(
4826 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4827
4828 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4829 {Arg0, PacketSize, PacketAlign}));
4830 }
4831
4832 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
4833 case Builtin::BIto_global:
4834 case Builtin::BIto_local:
4835 case Builtin::BIto_private: {
4836 auto Arg0 = EmitScalarExpr(E->getArg(0));
4837 auto NewArgT = llvm::PointerType::get(Int8Ty,
4838 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4839 auto NewRetT = llvm::PointerType::get(Int8Ty,
4840 CGM.getContext().getTargetAddressSpace(
4841 E->getType()->getPointeeType().getAddressSpace()));
4842 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
4843 llvm::Value *NewArg;
4844 if (Arg0->getType()->getPointerAddressSpace() !=
4845 NewArgT->getPointerAddressSpace())
4846 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
4847 else
4848 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
4849 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
4850 auto NewCall =
4851 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
4852 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
4853 ConvertType(E->getType())));
4854 }
4855
4856 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
4857 // It contains four different overload formats specified in Table 6.13.17.1.
4858 case Builtin::BIenqueue_kernel: {
4859 StringRef Name; // Generated function call name
4860 unsigned NumArgs = E->getNumArgs();
4861
4862 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4863 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4864 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4865
4866 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4867 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4868 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4869 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4870 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4871
4872 if (NumArgs == 4) {
4873 // The most basic form of the call with parameters:
4874 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4875 Name = "__enqueue_kernel_basic";
4876 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4877 GenericVoidPtrTy};
4878 llvm::FunctionType *FTy = llvm::FunctionType::get(
4879 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4880
4881 auto Info =
4882 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4883 llvm::Value *Kernel =
4884 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4885 llvm::Value *Block =
4886 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4887
4888 AttrBuilder B;
4889 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4890 llvm::AttributeList ByValAttrSet =
4891 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4892
4893 auto RTCall =
4894 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4895 {Queue, Flags, Range, Kernel, Block});
4896 RTCall->setAttributes(ByValAttrSet);
4897 return RValue::get(RTCall);
4898 }
4899 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4900
4901 // Create a temporary array to hold the sizes of local pointer arguments
4902 // for the block. \p First is the position of the first size argument.
4903 auto CreateArrayForSizeVar = [=](unsigned First)
4904 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4905 llvm::APInt ArraySize(32, NumArgs - First);
4906 QualType SizeArrayTy = getContext().getConstantArrayType(
4907 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4908 /*IndexTypeQuals=*/0);
4909 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4910 llvm::Value *TmpPtr = Tmp.getPointer();
4911 llvm::Value *TmpSize = EmitLifetimeStart(
4912 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4913 llvm::Value *ElemPtr;
4914 // Each of the following arguments specifies the size of the corresponding
4915 // argument passed to the enqueued block.
4916 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4917 for (unsigned I = First; I < NumArgs; ++I) {
4918 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4919 auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
4920 {Zero, Index});
4921 if (I == First)
4922 ElemPtr = GEP;
4923 auto *V =
4924 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4925 Builder.CreateAlignedStore(
4926 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4927 }
4928 return std::tie(ElemPtr, TmpSize, TmpPtr);
4929 };
4930
4931 // Could have events and/or varargs.
4932 if (E->getArg(3)->getType()->isBlockPointerType()) {
4933 // No events passed, but has variadic arguments.
4934 Name = "__enqueue_kernel_varargs";
4935 auto Info =
4936 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4937 llvm::Value *Kernel =
4938 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4939 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4940 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4941 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4942
4943 // Create a vector of the arguments, as well as a constant value to
4944 // express to the runtime the number of variadic arguments.
4945 llvm::Value *const Args[] = {Queue, Flags,
4946 Range, Kernel,
4947 Block, ConstantInt::get(IntTy, NumArgs - 4),
4948 ElemPtr};
4949 llvm::Type *const ArgTys[] = {
4950 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4951 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4952
4953 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4954 auto Call = RValue::get(
4955 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4956 if (TmpSize)
4957 EmitLifetimeEnd(TmpSize, TmpPtr);
4958 return Call;
4959 }
4960 // Any calls now have event arguments passed.
4961 if (NumArgs >= 7) {
4962 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4963 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4964 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4965
4966 llvm::Value *NumEvents =
4967 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4968
4969 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
4970 // to be a null pointer constant (including `0` literal), we can take it
4971 // into account and emit null pointer directly.
4972 llvm::Value *EventWaitList = nullptr;
4973 if (E->getArg(4)->isNullPointerConstant(
4974 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4975 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4976 } else {
4977 EventWaitList = E->getArg(4)->getType()->isArrayType()
4978 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4979 : EmitScalarExpr(E->getArg(4));
4980 // Convert to generic address space.
4981 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4982 }
4983 llvm::Value *EventRet = nullptr;
4984 if (E->getArg(5)->isNullPointerConstant(
4985 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4986 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4987 } else {
4988 EventRet =
4989 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4990 }
4991
4992 auto Info =
4993 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4994 llvm::Value *Kernel =
4995 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4996 llvm::Value *Block =
4997 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4998
4999 std::vector<llvm::Type *> ArgTys = {
5000 QueueTy, Int32Ty, RangeTy, Int32Ty,
5001 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
5002
5003 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
5004 NumEvents, EventWaitList, EventRet,
5005 Kernel, Block};
5006
5007 if (NumArgs == 7) {
5008 // Has events but no variadics.
5009 Name = "__enqueue_kernel_basic_events";
5010 llvm::FunctionType *FTy = llvm::FunctionType::get(
5011 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5012 return RValue::get(
5013 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5014 llvm::ArrayRef<llvm::Value *>(Args)));
5015 }
5016 // Has event info and variadics
5017 // Pass the number of variadics to the runtime function too.
5018 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
5019 ArgTys.push_back(Int32Ty);
5020 Name = "__enqueue_kernel_events_varargs";
5021
5022 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
5023 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
5024 Args.push_back(ElemPtr);
5025 ArgTys.push_back(ElemPtr->getType());
5026
5027 llvm::FunctionType *FTy = llvm::FunctionType::get(
5028 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5029 auto Call =
5030 RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5031 llvm::ArrayRef<llvm::Value *>(Args)));
5032 if (TmpSize)
5033 EmitLifetimeEnd(TmpSize, TmpPtr);
5034 return Call;
5035 }
5036 LLVM_FALLTHROUGH;
5037 }
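Recapping the dispatch above (an informal summary, not a separate specification), the four overload shapes of enqueue_kernel map onto four runtime entry points:
  //   enqueue_kernel(q, flags, ndrange, block)                        -> __enqueue_kernel_basic
  //   enqueue_kernel(q, flags, ndrange, block, size0, ...)            -> __enqueue_kernel_varargs
  //   enqueue_kernel(q, flags, ndrange, nevents, waitlist, ev, block) -> __enqueue_kernel_basic_events
  //   enqueue_kernel(q, flags, ndrange, nevents, waitlist, ev, block,
  //                  size0, ...)                                      -> __enqueue_kernel_events_varargs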
5038 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
5039 // parameter.
5040 case Builtin::BIget_kernel_work_group_size: {
5041 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
5042 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5043 auto Info =
5044 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5045 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
5046 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5047 return RValue::get(EmitRuntimeCall(
5048 CGM.CreateRuntimeFunction(
5049 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5050 false),
5051 "__get_kernel_work_group_size_impl"),
5052 {Kernel, Arg}));
5053 }
5054 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
5055 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
5056 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5057 auto Info =
5058 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5059 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
5060 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5061 return RValue::get(EmitRuntimeCall(
5062 CGM.CreateRuntimeFunction(
5063 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5064 false),
5065 "__get_kernel_preferred_work_group_size_multiple_impl"),
5066 {Kernel, Arg}));
5067 }
5068 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
5069 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
5070 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
5071 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5072 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
5073 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
5074 auto Info =
5075 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
5076 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
5077 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5078 const char *Name =
5079 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
5080 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
5081 : "__get_kernel_sub_group_count_for_ndrange_impl";
5082 return RValue::get(EmitRuntimeCall(
5083 CGM.CreateRuntimeFunction(
5084 llvm::FunctionType::get(
5085 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
5086 false),
5087 Name),
5088 {NDRange, Kernel, Block}));
5089 }
5090
5091 case Builtin::BI__builtin_store_half:
5092 case Builtin::BI__builtin_store_halff: {
5093 Value *Val = EmitScalarExpr(E->getArg(0));
5094 Address Address = EmitPointerWithAlignment(E->getArg(1));
5095 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
5096 return RValue::get(Builder.CreateStore(HalfVal, Address));
5097 }
5098 case Builtin::BI__builtin_load_half: {
5099 Address Address = EmitPointerWithAlignment(E->getArg(0));
5100 Value *HalfVal = Builder.CreateLoad(Address);
5101 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
5102 }
5103 case Builtin::BI__builtin_load_halff: {
5104 Address Address = EmitPointerWithAlignment(E->getArg(0));
5105 Value *HalfVal = Builder.CreateLoad(Address);
5106 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
5107 }
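A minimal sketch of the behaviour implemented above (the C-level argument types shown here are assumptions, not taken from this file): the store builtins truncate the value to half before storing, and the load builtins widen the loaded half to double or float respectively.
  //   __builtin_store_halff(f, p);          // fptrunc float %f to half; store to %p
  //   double d = __builtin_load_half(p);    // load half; fpext to double
  //   float  s = __builtin_load_halff(p);   // load half; fpext to float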
5108 case Builtin::BIprintf:
5109 if (getTarget().getTriple().isNVPTX())
5110 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
5111 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
5112 getLangOpts().HIP)
5113 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
5114 break;
5115 case Builtin::BI__builtin_canonicalize:
5116 case Builtin::BI__builtin_canonicalizef:
5117 case Builtin::BI__builtin_canonicalizef16:
5118 case Builtin::BI__builtin_canonicalizel:
5119 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
5120
5121 case Builtin::BI__builtin_thread_pointer: {
5122 if (!getContext().getTargetInfo().isTLSSupported())
5123 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
5124 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
5125 break;
5126 }
5127 case Builtin::BI__builtin_os_log_format:
5128 return emitBuiltinOSLogFormat(*E);
5129
5130 case Builtin::BI__xray_customevent: {
5131 if (!ShouldXRayInstrumentFunction())
5132 return RValue::getIgnored();
5133
5134 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5135 XRayInstrKind::Custom))
5136 return RValue::getIgnored();
5137
5138 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5139 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
5140 return RValue::getIgnored();
5141
5142 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
5143 auto FTy = F->getFunctionType();
5144 auto Arg0 = E->getArg(0);
5145 auto Arg0Val = EmitScalarExpr(Arg0);
5146 auto Arg0Ty = Arg0->getType();
5147 auto PTy0 = FTy->getParamType(0);
5148 if (PTy0 != Arg0Val->getType()) {
5149 if (Arg0Ty->isArrayType())
5150 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
5151 else
5152 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
5153 }
5154 auto Arg1 = EmitScalarExpr(E->getArg(1));
5155 auto PTy1 = FTy->getParamType(1);
5156 if (PTy1 != Arg1->getType())
5157 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
5158 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
5159 }
5160
5161 case Builtin::BI__xray_typedevent: {
5162 // TODO: There should be a way to always emit events even if the current
5163 // function is not instrumented. Losing events in a stream can cripple
5164 // a trace.
5165 if (!ShouldXRayInstrumentFunction())
5166 return RValue::getIgnored();
5167
5168 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5169 XRayInstrKind::Typed))
5170 return RValue::getIgnored();
5171
5172 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5173 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
5174 return RValue::getIgnored();
5175
5176 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
5177 auto FTy = F->getFunctionType();
5178 auto Arg0 = EmitScalarExpr(E->getArg(0));
5179 auto PTy0 = FTy->getParamType(0);
5180 if (PTy0 != Arg0->getType())
5181 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
5182 auto Arg1 = E->getArg(1);
5183 auto Arg1Val = EmitScalarExpr(Arg1);
5184 auto Arg1Ty = Arg1->getType();
5185 auto PTy1 = FTy->getParamType(1);
5186 if (PTy1 != Arg1Val->getType()) {
5187 if (Arg1Ty->isArrayType())
5188 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
5189 else
5190 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
5191 }
5192 auto Arg2 = EmitScalarExpr(E->getArg(2));
5193 auto PTy2 = FTy->getParamType(2);
5194 if (PTy2 != Arg2->getType())
5195 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
5196 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
5197 }
5198
5199 case Builtin::BI__builtin_ms_va_start:
5200 case Builtin::BI__builtin_ms_va_end:
5201 return RValue::get(
5202 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
5203 BuiltinID == Builtin::BI__builtin_ms_va_start));
5204
5205 case Builtin::BI__builtin_ms_va_copy: {
5206 // Lower this manually. We can't reliably determine whether or not any
5207 // given va_copy() is for a Win64 va_list from the calling convention
5208 // alone, because it's legal to do this from a System V ABI function.
5209 // With opaque pointer types, we won't have enough information in LLVM
5210 // IR to determine this from the argument types, either. Best to do it
5211 // now, while we have enough information.
5212 Address DestAddr = EmitMSVAListRef(E->getArg(0));
5213 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
5214
5215 llvm::Type *BPP = Int8PtrPtrTy;
5216
5217 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
5218 DestAddr.getAlignment());
5219 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
5220 SrcAddr.getAlignment());
5221
5222 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
5223 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
5224 }
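In effect (an illustrative reading of the code above, not a separate specification), a Win64 va_list is a single char*, so the copy is one load from the source list and one store into the destination:
  //   __builtin_ms_va_copy(dst, src);
  // is emitted roughly as:
  //   %ap.val = load i8*, i8** %ap   ; src list, viewed as i8**
  //   store i8* %ap.val, i8** %cp    ; dst list, viewed as i8**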
5225
5226 case Builtin::BI__builtin_get_device_side_mangled_name: {
5227 auto Name = CGM.getCUDARuntime().getDeviceSideName(
5228 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
5229 auto Str = CGM.GetAddrOfConstantCString(Name, "");
5230 llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
5231 llvm::ConstantInt::get(SizeTy, 0)};
5232 auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
5233 Str.getPointer(), Zeros);
5234 return RValue::get(Ptr);
5235 }
5236 }
5237
5238 // If this is an alias for a lib function (e.g. __builtin_sin), emit
5239 // the call using the normal call path, but using the unmangled
5240 // version of the function name.
5241 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
5242 return emitLibraryCall(*this, FD, E,
5243 CGM.getBuiltinLibFunction(FD, BuiltinID));
5244
5245 // If this is a predefined lib function (e.g. malloc), emit the call
5246 // using exactly the normal call path.
5247 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
5248 return emitLibraryCall(*this, FD, E,
5249 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
5250
5251 // Check that a call to a target specific builtin has the correct target
5252 // features.
5253 // This is down here to avoid non-target specific builtins; however, if
5254 // generic builtins start to require generic target features, then we
5255 // can move this up to the beginning of the function.
5256 checkTargetFeatures(E, FD);
5257
5258 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
5259 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
5260
5261 // See if we have a target specific intrinsic.
5262 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
5263 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
5264 StringRef Prefix =
5265 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
5266 if (!Prefix.empty()) {
5267 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
5268 // NOTE: we don't need to perform a compatibility flag check here, since the
5269 // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
5270 // MS builtins via ALL_MS_LANGUAGES, so they have already been filtered out earlier.
5271 if (IntrinsicID == Intrinsic::not_intrinsic)
5272 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
5273 }
5274
5275 if (IntrinsicID != Intrinsic::not_intrinsic) {
5276 SmallVector<Value*, 16> Args;
5277
5278 // Find out if any arguments are required to be integer constant
5279 // expressions.
5280 unsigned ICEArguments = 0;
5281 ASTContext::GetBuiltinTypeError Error;
5282 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5283 assert(Error == ASTContext::GE_None && "Should not codegen an error");
5284
5285 Function *F = CGM.getIntrinsic(IntrinsicID);
5286 llvm::FunctionType *FTy = F->getFunctionType();
5287
5288 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
5289 Value *ArgValue;
5290 // If this is a normal argument, just emit it as a scalar.
5291 if ((ICEArguments & (1 << i)) == 0) {
5292 ArgValue = EmitScalarExpr(E->getArg(i));
5293 } else {
5294 // If this is required to be a constant, constant fold it so that we
5295 // know that the generated intrinsic gets a ConstantInt.
5296 ArgValue = llvm::ConstantInt::get(
5297 getLLVMContext(),
5298 *E->getArg(i)->getIntegerConstantExpr(getContext()));
5299 }
5300
5301 // If the intrinsic arg type is different from the builtin arg type
5302 // we need to do a bit cast.
5303 llvm::Type *PTy = FTy->getParamType(i);
5304 if (PTy != ArgValue->getType()) {
5305 // XXX - vector of pointers?
5306 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
5307 if (PtrTy->getAddressSpace() !=
5308 ArgValue->getType()->getPointerAddressSpace()) {
5309 ArgValue = Builder.CreateAddrSpaceCast(
5310 ArgValue,
5311 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
5312 }
5313 }
5314
5315 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
5316        "Must be able to losslessly bit cast to param");
5317 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
5318 }
5319
5320 Args.push_back(ArgValue);
5321 }
5322
5323 Value *V = Builder.CreateCall(F, Args);
5324 QualType BuiltinRetType = E->getType();
5325
5326 llvm::Type *RetTy = VoidTy;
5327 if (!BuiltinRetType->isVoidType())
5328 RetTy = ConvertType(BuiltinRetType);
5329
5330 if (RetTy != V->getType()) {
5331 // XXX - vector of pointers?
5332 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
5333 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
5334 V = Builder.CreateAddrSpaceCast(
5335 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
5336 }
5337 }
5338
5339 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5340        "Must be able to losslessly bit cast result type");
5341 V = Builder.CreateBitCast(V, RetTy);
5342 }
5343
5344 return RValue::get(V);
5345 }
5346
5347 // Some target-specific builtins can have aggregate return values, e.g.
5348 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5349 // ReturnValue to be non-null, so that the target-specific emission code can
5350 // always just emit into it.
5351 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
5352 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
5353 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
5354 ReturnValue = ReturnValueSlot(DestPtr, false);
5355 }
5356
5357 // Now see if we can emit a target-specific builtin.
5358 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
5359 switch (EvalKind) {
5360 case TEK_Scalar:
5361 return RValue::get(V);
5362 case TEK_Aggregate:
5363 return RValue::getAggregate(ReturnValue.getValue(),
5364 ReturnValue.isVolatile());
5365 case TEK_Complex:
5366 llvm_unreachable("No current target builtin returns complex");
5367 }
5368 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
5369 }
5370
5371 ErrorUnsupported(E, "builtin function");
5372
5373 // Unknown builtin, for now just dump it out and return undef.
5374 return GetUndefRValue(E->getType());
5375}
5376
5377static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
5378 unsigned BuiltinID, const CallExpr *E,
5379 ReturnValueSlot ReturnValue,
5380 llvm::Triple::ArchType Arch) {
5381 switch (Arch) {
5382 case llvm::Triple::arm:
5383 case llvm::Triple::armeb:
5384 case llvm::Triple::thumb:
5385 case llvm::Triple::thumbeb:
5386 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
5387 case llvm::Triple::aarch64:
5388 case llvm::Triple::aarch64_32:
5389 case llvm::Triple::aarch64_be:
5390 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
5391 case llvm::Triple::bpfeb:
5392 case llvm::Triple::bpfel:
5393 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
5394 case llvm::Triple::x86:
5395 case llvm::Triple::x86_64:
5396 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
5397 case llvm::Triple::ppc:
5398 case llvm::Triple::ppcle:
5399 case llvm::Triple::ppc64:
5400 case llvm::Triple::ppc64le:
5401 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5402 case llvm::Triple::r600:
5403 case llvm::Triple::amdgcn:
5404 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5405 case llvm::Triple::systemz:
5406 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5407 case llvm::Triple::nvptx:
5408 case llvm::Triple::nvptx64:
5409 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5410 case llvm::Triple::wasm32:
5411 case llvm::Triple::wasm64:
5412 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5413 case llvm::Triple::hexagon:
5414 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5415 case llvm::Triple::riscv32:
5416 case llvm::Triple::riscv64:
5417 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
5418 default:
5419 return nullptr;
5420 }
5421}
5422
5423Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5424 const CallExpr *E,
5425 ReturnValueSlot ReturnValue) {
5426 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5427 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5428 return EmitTargetArchBuiltinExpr(
5429 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5430 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5431 }
5432
5433 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5434 getTarget().getTriple().getArch());
5435}
5436
5437static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5438 NeonTypeFlags TypeFlags,
5439 bool HasLegalHalfType = true,
5440 bool V1Ty = false,
5441 bool AllowBFloatArgsAndRet = true) {
5442 int IsQuad = TypeFlags.isQuad();
5443 switch (TypeFlags.getEltType()) {
5444 case NeonTypeFlags::Int8:
5445 case NeonTypeFlags::Poly8:
5446 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5447 case NeonTypeFlags::Int16:
5448 case NeonTypeFlags::Poly16:
5449 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5450 case NeonTypeFlags::BFloat16:
5451 if (AllowBFloatArgsAndRet)
5452 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5453 else
5454 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5455 case NeonTypeFlags::Float16:
5456 if (HasLegalHalfType)
5457 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5458 else
5459 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5460 case NeonTypeFlags::Int32:
5461 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5462 case NeonTypeFlags::Int64:
5463 case NeonTypeFlags::Poly64:
5464 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5465 case NeonTypeFlags::Poly128:
5466 // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
5467 // a lot of the i128 and f128 API is missing.
5468 // So we use v16i8 to represent poly128 and rely on pattern matching.
5469 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5470 case NeonTypeFlags::Float32:
5471 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5472 case NeonTypeFlags::Float64:
5473 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5474 }
5475 llvm_unreachable("Unknown vector element type!");
5476}
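A worked example of the lane arithmetic above (assuming V1Ty is false): IsQuad selects between 64-bit and 128-bit vectors by doubling the lane count.
  //   Int8,  IsQuad = 0:  8 << 0 = 8   ->  <8 x i8>    (64-bit D register)
  //   Int8,  IsQuad = 1:  8 << 1 = 16  ->  <16 x i8>   (128-bit Q register)
  //   Int32, IsQuad = 1:  2 << 1 = 4   ->  <4 x i32>   (128-bit Q register)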
5477
5478static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5479 NeonTypeFlags IntTypeFlags) {
5480 int IsQuad = IntTypeFlags.isQuad();
5481 switch (IntTypeFlags.getEltType()) {
5482 case NeonTypeFlags::Int16:
5483 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5484 case NeonTypeFlags::Int32:
5485 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5486 case NeonTypeFlags::Int64:
5487 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5488 default:
5489 llvm_unreachable("Type can't be converted to floating-point!");
5490 }
5491}
5492
5493Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5494 const ElementCount &Count) {
5495 Value *SV = llvm::ConstantVector::getSplat(Count, C);
5496 return Builder.CreateShuffleVector(V, V, SV, "lane");
5497}
5498
5499Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5500 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5501 return EmitNeonSplat(V, C, EC);
5502}
5503
5504Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5505 const char *name,
5506 unsigned shift, bool rightshift) {
5507 unsigned j = 0;
5508 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5509 ai != ae; ++ai, ++j) {
5510 if (F->isConstrainedFPIntrinsic())
5511 if (ai->getType()->isMetadataTy())
5512 continue;
5513 if (shift > 0 && shift == j)
5514 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5515 else
5516 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5517 }
5518
5519 if (F->isConstrainedFPIntrinsic())
5520 return Builder.CreateConstrainedFPCall(F, Ops, name);
5521 else
5522 return Builder.CreateCall(F, Ops, name);
5523}
5524
5525Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5526 bool neg) {
5527 int SV = cast<ConstantInt>(V)->getSExtValue();
5528 return ConstantInt::get(Ty, neg ? -SV : SV);
5529}
5530
5531// Right-shift a vector by a constant.
5532Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5533 llvm::Type *Ty, bool usgn,
5534 const char *name) {
5535 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5536
5537 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5538 int EltSize = VTy->getScalarSizeInBits();
5539
5540 Vec = Builder.CreateBitCast(Vec, Ty);
5541
5542 // lshr/ashr are undefined when the shift amount is equal to the vector
5543 // element size.
5544 if (ShiftAmt == EltSize) {
5545 if (usgn) {
5546 // Right-shifting an unsigned value by its size yields 0.
5547 return llvm::ConstantAggregateZero::get(VTy);
5548 } else {
5549 // Right-shifting a signed value by its size is equivalent
5550 // to a shift of size-1.
5551 --ShiftAmt;
5552 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
5553 }
5554 }
5555
5556 Shift = EmitNeonShiftVector(Shift, Ty, false);
5557 if (usgn)
5558 return Builder.CreateLShr(Vec, Shift, name);
5559 else
5560 return Builder.CreateAShr(Vec, Shift, name);
5561}
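A worked case of the clamping above (taking the NEON vshr_n intrinsics as an example): shifting a <4 x i32> element right by 32 would be undefined for lshr/ashr, so the helper folds the unsigned form to zero and clamps the signed form to a shift of 31.
  //   vshrq_n_u32(v, 32)  ->  zeroinitializer                  (unsigned: result is 0)
  //   vshrq_n_s32(v, 32)  ->  ashr <4 x i32> %v, <i32 31, ...> (signed: shift by size-1)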
5562
5563enum {
5564 AddRetType = (1 << 0),
5565 Add1ArgType = (1 << 1),
5566 Add2ArgTypes = (1 << 2),
5567
5568 VectorizeRetType = (1 << 3),
5569 VectorizeArgTypes = (1 << 4),
5570
5571 InventFloatType = (1 << 5),
5572 UnsignedAlts = (1 << 6),
5573
5574 Use64BitVectors = (1 << 7),
5575 Use128BitVectors = (1 << 8),
5576
5577 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
5578 VectorRet = AddRetType | VectorizeRetType,
5579 VectorRetGetArgs01 =
5580 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
5581 FpCmpzModifiers =
5582 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
5583};
5584
5585namespace {
5586struct ARMVectorIntrinsicInfo {
5587 const char *NameHint;
5588 unsigned BuiltinID;
5589 unsigned LLVMIntrinsic;
5590 unsigned AltLLVMIntrinsic;
5591 uint64_t TypeModifier;
5592
5593 bool operator<(unsigned RHSBuiltinID) const {
5594 return BuiltinID < RHSBuiltinID;
5595 }
5596 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
5597 return BuiltinID < TE.BuiltinID;
5598 }
5599};
5600} // end anonymous namespace
5601
5602#define NEONMAP0(NameBase) \
5603 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
5604
5605#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5606 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5607 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
5608
5609#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
5610 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5611 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
5612 TypeModifier }
5613
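For reference (a hand expansion following the macros above, not additional source), a NEONMAP1 entry in the tables below produces a fully populated ARMVectorIntrinsicInfo record, while NEONMAP0 leaves both intrinsic slots at 0 so the builtin is typically handled by custom lowering code instead:
  //   NEONMAP1(vabs_v, arm_neon_vabs, 0)
  //     => { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
  //   NEONMAP0(vadd_v)
  //     => { "vadd_v", NEON::BI__builtin_neon_vadd_v, 0, 0, 0 }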
5614static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
5615 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
5616 NEONMAP0(splat_lane_v),
5617 NEONMAP0(splat_laneq_v),
5618 NEONMAP0(splatq_lane_v),
5619 NEONMAP0(splatq_laneq_v),
5620 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5621 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5622 NEONMAP1(vabs_v, arm_neon_vabs, 0),
5623 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
5624 NEONMAP0(vadd_v),
5625 NEONMAP0(vaddhn_v),
5626 NEONMAP0(vaddq_v),
5627 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
5628 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
5629 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
5630 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
5631 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
5632 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
5633 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
5634 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
5635 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
5636 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
5637 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
5638 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5639 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5640 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5641 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5642 NEONMAP1(vcage_v, arm_neon_vacge, 0),
5643 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
5644 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
5645 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
5646 NEONMAP1(vcale_v, arm_neon_vacge, 0),
5647 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
5648 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
5649 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
5650 NEONMAP0(vceqz_v),
5651 NEONMAP0(vceqzq_v),
5652 NEONMAP0(vcgez_v),
5653 NEONMAP0(vcgezq_v),
5654 NEONMAP0(vcgtz_v),
5655 NEONMAP0(vcgtzq_v),
5656 NEONMAP0(vclez_v),
5657 NEONMAP0(vclezq_v),
5658 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
5659 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
5660 NEONMAP0(vcltz_v),
5661 NEONMAP0(vcltzq_v),
5662 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5663 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5664 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5665 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5666 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
5667 NEONMAP0(vcvt_f16_v),
5668 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
5669 NEONMAP0(vcvt_f32_v),
5670 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5671 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5672 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5673 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5674 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5675 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5676 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5677 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5678 NEONMAP0(vcvt_s16_v),
5679 NEONMAP0(vcvt_s32_v),
5680 NEONMAP0(vcvt_s64_v),
5681 NEONMAP0(vcvt_u16_v),
5682 NEONMAP0(vcvt_u32_v),
5683 NEONMAP0(vcvt_u64_v),
5684 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
5685 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
5686 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
5687 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
5688 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
5689 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
5690 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
5691 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
5692 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
5693 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
5694 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
5695 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
5696 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
5697 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
5698 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
5699 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
5700 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
5701 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
5702 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
5703 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
5704 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
5705 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
5706 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
5707 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
5708 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
5709 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
5710 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
5711 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
5712 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
5713 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
5714 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
5715 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
5716 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
5717 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
5718 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
5719 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
5720 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
5721 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
5722 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
5723 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
5724 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
5725 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
5726 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
5727 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
5728 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
5729 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
5730 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
5731 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
5732 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
5733 NEONMAP0(vcvtq_f16_v),
5734 NEONMAP0(vcvtq_f32_v),
5735 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5736 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5737 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5738 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5739 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5740 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5741 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5742 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5743 NEONMAP0(vcvtq_s16_v),
5744 NEONMAP0(vcvtq_s32_v),
5745 NEONMAP0(vcvtq_s64_v),
5746 NEONMAP0(vcvtq_u16_v),
5747 NEONMAP0(vcvtq_u32_v),
5748 NEONMAP0(vcvtq_u64_v),
5749 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
5750 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
5751 NEONMAP0(vext_v),
5752 NEONMAP0(vextq_v),
5753 NEONMAP0(vfma_v),
5754 NEONMAP0(vfmaq_v),
5755 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5756 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5757 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5758 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5759 NEONMAP0(vld1_dup_v),
5760 NEONMAP1(vld1_v, arm_neon_vld1, 0),
5761 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
5762 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
5763 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
5764 NEONMAP0(vld1q_dup_v),
5765 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
5766 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
5767 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
5768 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
5769 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
5770 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
5771 NEONMAP1(vld2_v, arm_neon_vld2, 0),
5772 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
5773 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
5774 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
5775 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
5776 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
5777 NEONMAP1(vld3_v, arm_neon_vld3, 0),
5778 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
5779 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
5780 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
5781 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
5782 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
5783 NEONMAP1(vld4_v, arm_neon_vld4, 0),
5784 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
5785 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
5786 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
5787 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5788 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
5789 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
5790 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5791 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5792 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
5793 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
5794 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5795 NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
5796 NEONMAP0(vmovl_v),
5797 NEONMAP0(vmovn_v),
5798 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
5799 NEONMAP0(vmull_v),
5800 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
5801 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5802 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5803 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
5804 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5805 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5806 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
5807 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
5808 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
5809 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
5810 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
5811 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5812 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5813 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
5814 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
5815 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
5816 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
5817 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
5818 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
5819 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
5820 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
5821 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
5822 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
5823 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
5824 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5825 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5826 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5827 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5828 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5829 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5830 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
5831 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
5832 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5833 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5834 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
5835 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5836 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5837 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
5838 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
5839 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5840 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5841 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
5842 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
5843 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
5844 NEONMAP0(vrndi_v),
5845 NEONMAP0(vrndiq_v),
5846 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
5847 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
5848 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
5849 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
5850 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
5851 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
5852 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
5853 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
5854 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
5855 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5856 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5857 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5858 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5859 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5860 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5861 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
5862 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
5863 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
5864 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
5865 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
5866 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
5867 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
5868 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
5869 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
5870 NEONMAP0(vshl_n_v),
5871 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5872 NEONMAP0(vshll_n_v),
5873 NEONMAP0(vshlq_n_v),
5874 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5875 NEONMAP0(vshr_n_v),
5876 NEONMAP0(vshrn_n_v),
5877 NEONMAP0(vshrq_n_v),
5878 NEONMAP1(vst1_v, arm_neon_vst1, 0),
5879 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5880 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5881 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5882 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5883 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5884 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5885 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5886 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5887 NEONMAP1(vst2_v, arm_neon_vst2, 0),
5888 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5889 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5890 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5891 NEONMAP1(vst3_v, arm_neon_vst3, 0),
5892 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5893 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5894 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5895 NEONMAP1(vst4_v, arm_neon_vst4, 0),
5896 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5897 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5898 NEONMAP0(vsubhn_v),
5899 NEONMAP0(vtrn_v),
5900 NEONMAP0(vtrnq_v),
5901 NEONMAP0(vtst_v),
5902 NEONMAP0(vtstq_v),
5903 NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5904 NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5905 NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5906 NEONMAP0(vuzp_v),
5907 NEONMAP0(vuzpq_v),
5908 NEONMAP0(vzip_v),
5909 NEONMAP0(vzipq_v)
5910};
5911
5912static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5913 NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5914 NEONMAP0(splat_lane_v),
5915 NEONMAP0(splat_laneq_v),
5916 NEONMAP0(splatq_lane_v),
5917 NEONMAP0(splatq_laneq_v),
5918 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5919 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5920 NEONMAP0(vadd_v),
5921 NEONMAP0(vaddhn_v),
5922 NEONMAP0(vaddq_p128),
5923 NEONMAP0(vaddq_v),
5924 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5925 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5926 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5927 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5928 NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
5929 NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5930 NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5931 NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5932 NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5933 NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5934 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5935 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5936 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5937 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5938 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5939 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5940 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5941 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5942 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5943 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5944 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5945 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5946 NEONMAP0(vceqz_v),
5947 NEONMAP0(vceqzq_v),
5948 NEONMAP0(vcgez_v),
5949 NEONMAP0(vcgezq_v),
5950 NEONMAP0(vcgtz_v),
5951 NEONMAP0(vcgtzq_v),
5952 NEONMAP0(vclez_v),
5953 NEONMAP0(vclezq_v),
5954 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5955 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5956 NEONMAP0(vcltz_v),
5957 NEONMAP0(vcltzq_v),
5958 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5959 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5960 NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
5961 NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
5962 NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
5963 NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
5964 NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
5965 NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
5966 NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
5967 NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
5968 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5969 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5970 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5971 NEONMAP0(vcvt_f16_v),
5972 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5973 NEONMAP0(vcvt_f32_v),
5974 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5975 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5976 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5977 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5978 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5979 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5980 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5981 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5982 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5983 NEONMAP0(vcvtq_f16_v),
5984 NEONMAP0(vcvtq_f32_v),
5985 NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5986 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5987 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5988 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5989 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5990 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5991 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5992 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5993 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5994 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5995 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
5996 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5997 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5998 NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
5999 NEONMAP0(vext_v),
6000 NEONMAP0(vextq_v),
6001 NEONMAP0(vfma_v),
6002 NEONMAP0(vfmaq_v),
6003 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
6004 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
6005 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
6006 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
6007 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
6008 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
6009 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
6010 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
6011 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6012 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6013 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6014 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6015 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
6016 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
6017 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
6018 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
6019 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
6020 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
6021 NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
6022 NEONMAP0(vmovl_v),
6023 NEONMAP0(vmovn_v),
6024 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
6025 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
6026 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
6027 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6028 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6029 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
6030 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
6031 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
6032 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6033 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6034 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
6035 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
6036 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
6037 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6038 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
6039 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
6040 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6041 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
6042 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
6043 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
6044 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
6045 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
6046 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
6047 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6048 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6049 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
6050 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6051 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6052 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
6053 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6054 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6055 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6056 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6057 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
6058 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6059 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
6060 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
6061 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6062 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6063 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
6064 NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0),
6065 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6066 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6067 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
6068 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
6069 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6070 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6071 NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
6072 NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
6073 NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
6074 NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
6075 NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
6076 NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
6077 NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
6078 NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
6079 NEONMAP0(vrndi_v),
6080 NEONMAP0(vrndiq_v),
6081 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6082 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6083 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6084 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6085 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6086 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6087 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
6088 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
6089 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
6090 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
6091 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
6092 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
6093 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
6094 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
6095 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
6096 NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0),
6097 NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0),
6098 NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0),
6099 NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0),
6100 NEONMAP0(vshl_n_v),
6101 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6102 NEONMAP0(vshll_n_v),
6103 NEONMAP0(vshlq_n_v),
6104 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6105 NEONMAP0(vshr_n_v),
6106 NEONMAP0(vshrn_n_v),
6107 NEONMAP0(vshrq_n_v),
6108 NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0),
6109 NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0),
6110 NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0),
6111 NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0),
6112 NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0),
6113 NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0),
6114 NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0),
6115 NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0),
6116 NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0),
6117 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
6118 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
6119 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
6120 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
6121 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
6122 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
6123 NEONMAP0(vsubhn_v),
6124 NEONMAP0(vtst_v),
6125 NEONMAP0(vtstq_v),
6126 NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
6127 NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
6128 NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
6129 NEONMAP1(vxarq_v, aarch64_crypto_xar, 0),
6130};
6131
6132static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
6133 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
6134 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
6135 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
6136 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6137 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6138 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6139 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6140 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6141 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6142 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6143 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6144 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
6145 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6146 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
6147 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6148 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6149 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6150 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6151 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6152 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6153 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6154 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6155 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6156 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6157 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6158 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6159 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6160 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6161 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6162 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6163 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6164 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6165 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6166 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6167 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
6168 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6169 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6170 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6171 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6172 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6173 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6174 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6175 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6176 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6177 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6178 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6179 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6180 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6181 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6182 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6183 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6184 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6185 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6186 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
6187 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6188 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6189 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6190 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6191 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6192 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6193 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6194 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6195 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6196 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6197 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6198 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6199 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6200 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6201 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6202 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6203 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6204 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6205 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6206 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6207 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
6208 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
6209 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
6210 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6211 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6212 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6213 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6214 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6215 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6216 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6217 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6218 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6219 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6220 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6221 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
6222 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6223 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
6224 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6225 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6226 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
6227 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
6228 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6229 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6230 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
6231 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
6232 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
6233 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
6234 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
6235 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
6236 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
6237 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
6238 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6239 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6240 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6241 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6242 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
6243 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6244 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6245 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6246 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
6247 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6248 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
6249 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
6250 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
6251 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6252 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6253 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
6254 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
6255 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6256 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6257 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
6258 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
6259 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
6260 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
6261 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6262 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6263 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6264 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6265 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
6266 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6267 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6268 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6269 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6270 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6271 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6272 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
6273 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
6274 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6275 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6276 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6277 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6278 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
6279 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
6280 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
6281 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
6282 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6283 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6284 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
6285 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
6286 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
6287 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6288 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6289 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6290 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6291 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
6292 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6293 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6294 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6295 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6296 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
6297 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
6298 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6299 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6300 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
6301 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
6302 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
6303 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
6304 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
6305 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
6306 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
6307 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
6308 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
6309 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
6310 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
6311 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
6312 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
6313 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
6314 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
6315 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
6316 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
6317 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
6318 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
6319 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
6320 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6321 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
6322 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6323 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
6324 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
6325 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
6326 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6327 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
6328 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6329 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
6330  // FP16 scalar intrinsics go here.
6331 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
6332 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6333 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6334 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6335 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6336 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6337 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6338 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6339 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6340 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6341 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6342 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6343 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6344 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6345 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6346 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6347 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6348 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6349 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6350 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6351 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6352 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6353 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6354 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6355 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6356 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6357 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6358 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6359 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6360 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
6361 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
6362 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
6363 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
6364 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
6365};
6366
6367#undef NEONMAP0
6368#undef NEONMAP1
6369#undef NEONMAP2
6370
6371#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
6372 { \
6373 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
6374 TypeModifier \
6375 }
6376
6377#define SVEMAP2(NameBase, TypeModifier) \
6378 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
6379static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
6380#define GET_SVE_LLVM_INTRINSIC_MAP
6381#include "clang/Basic/arm_sve_builtin_cg.inc"
6382#undef GET_SVE_LLVM_INTRINSIC_MAP
6383};
6384
6385#undef SVEMAP1
6386#undef SVEMAP2
6387
6388static bool NEONSIMDIntrinsicsProvenSorted = false;
6389
6390static bool AArch64SIMDIntrinsicsProvenSorted = false;
6391static bool AArch64SISDIntrinsicsProvenSorted = false;
6392static bool AArch64SVEIntrinsicsProvenSorted = false;
6393
6394static const ARMVectorIntrinsicInfo *
6395findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
6396 unsigned BuiltinID, bool &MapProvenSorted) {
6397
6398#ifndef NDEBUG
6399 if (!MapProvenSorted) {
6400    assert(llvm::is_sorted(IntrinsicMap));
6401 MapProvenSorted = true;
6402 }
6403#endif
6404
6405 const ARMVectorIntrinsicInfo *Builtin =
6406 llvm::lower_bound(IntrinsicMap, BuiltinID);
6407
6408 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
6409 return Builtin;
6410
6411 return nullptr;
6412}
6413
6414Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
6415 unsigned Modifier,
6416 llvm::Type *ArgType,
6417 const CallExpr *E) {
6418 int VectorSize = 0;
6419 if (Modifier & Use64BitVectors)
6420 VectorSize = 64;
6421 else if (Modifier & Use128BitVectors)
6422 VectorSize = 128;
6423
6424 // Return type.
6425 SmallVector<llvm::Type *, 3> Tys;
6426 if (Modifier & AddRetType) {
6427 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
6428 if (Modifier & VectorizeRetType)
6429 Ty = llvm::FixedVectorType::get(
6430 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
6431
6432 Tys.push_back(Ty);
6433 }
6434
6435 // Arguments.
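  // Illustrative note (not in the original source): with Use64BitVectors and an
  // i16 ArgType, the VectorizeArgTypes path below produces a <4 x i16> overload
  // type (64 / 16 == 4 elements).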
6436 if (Modifier & VectorizeArgTypes) {
6437 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
6438 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
6439 }
6440
6441 if (Modifier & (Add1ArgType | Add2ArgTypes))
6442 Tys.push_back(ArgType);
6443
6444 if (Modifier & Add2ArgTypes)
6445 Tys.push_back(ArgType);
6446
6447 if (Modifier & InventFloatType)
6448 Tys.push_back(FloatTy);
6449
6450 return CGM.getIntrinsic(IntrinsicID, Tys);
6451}
6452
6453static Value *EmitCommonNeonSISDBuiltinExpr(
6454 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
6455 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
6456 unsigned BuiltinID = SISDInfo.BuiltinID;
6457 unsigned int Int = SISDInfo.LLVMIntrinsic;
6458 unsigned Modifier = SISDInfo.TypeModifier;
6459 const char *s = SISDInfo.NameHint;
6460
6461 switch (BuiltinID) {
6462 case NEON::BI__builtin_neon_vcled_s64:
6463 case NEON::BI__builtin_neon_vcled_u64:
6464 case NEON::BI__builtin_neon_vcles_f32:
6465 case NEON::BI__builtin_neon_vcled_f64:
6466 case NEON::BI__builtin_neon_vcltd_s64:
6467 case NEON::BI__builtin_neon_vcltd_u64:
6468 case NEON::BI__builtin_neon_vclts_f32:
6469 case NEON::BI__builtin_neon_vcltd_f64:
6470 case NEON::BI__builtin_neon_vcales_f32:
6471 case NEON::BI__builtin_neon_vcaled_f64:
6472 case NEON::BI__builtin_neon_vcalts_f32:
6473 case NEON::BI__builtin_neon_vcaltd_f64:
6474    // Only one direction of these comparisons actually exists; cmle is really a
6475    // cmge with swapped operands. The table gives us the right intrinsic, but we
6476    // still need to do the swap.
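    // Illustrative example (not in the original source): vcaled_f64(a, b) maps to
    // aarch64_neon_facge via the table above and is emitted as facge(b, a) after
    // this swap.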
6477 std::swap(Ops[0], Ops[1]);
6478 break;
6479 }
6480
6481  assert(Int && "Generic code assumes a valid intrinsic");
6482
6483 // Determine the type(s) of this overloaded AArch64 intrinsic.
6484 const Expr *Arg = E->getArg(0);
6485 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
6486 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
6487
6488 int j = 0;
6489 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
6490 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
6491 ai != ae; ++ai, ++j) {
6492 llvm::Type *ArgTy = ai->getType();
6493 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
6494 ArgTy->getPrimitiveSizeInBits())
6495 continue;
6496
6497    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6498 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
6499 // it before inserting.
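    // Illustrative example (not in the original source): an i32 immediate operand
    // destined for a <4 x i16> parameter is truncated to i16 and inserted into
    // lane 0 of an undef <4 x i16> vector.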
6500 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
6501 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
6502 Ops[j] =
6503 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
6504 }
6505
6506 Value *Result = CGF.EmitNeonCall(F, Ops, s);
6507 llvm::Type *ResultType = CGF.ConvertType(E->getType());
6508 if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
6509 Result->getType()->getPrimitiveSizeInBits().getFixedSize())
6510 return CGF.Builder.CreateExtractElement(Result, C0);
6511
6512 return CGF.Builder.CreateBitCast(Result, ResultType, s);
6513}
6514
6515Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
6516 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
6517 const char *NameHint, unsigned Modifier, const CallExpr *E,
6518 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
6519 llvm::Triple::ArchType Arch) {
6520 // Get the last argument, which specifies the vector type.
6521 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
6522 Optional<llvm::APSInt> NeonTypeConst =
6523 Arg->getIntegerConstantExpr(getContext());
6524 if (!NeonTypeConst)
6525 return nullptr;
6526
6527 // Determine the type of this overloaded NEON intrinsic.
6528 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
6529 bool Usgn = Type.isUnsigned();
6530 bool Quad = Type.isQuad();
6531 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
6532 const bool AllowBFloatArgsAndRet =
6533 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
6534
6535 llvm::FixedVectorType *VTy =
6536 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
6537 llvm::Type *Ty = VTy;
6538 if (!Ty)
6539 return nullptr;
6540
6541 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6542 return Builder.getInt32(addr.getAlignment().getQuantity());
6543 };
6544
6545 unsigned Int = LLVMIntrinsic;
6546 if ((Modifier & UnsignedAlts) && !Usgn)
6547 Int = AltLLVMIntrinsic;
6548
6549 switch (BuiltinID) {
6550 default: break;
6551 case NEON::BI__builtin_neon_splat_lane_v:
6552 case NEON::BI__builtin_neon_splat_laneq_v:
6553 case NEON::BI__builtin_neon_splatq_lane_v:
6554 case NEON::BI__builtin_neon_splatq_laneq_v: {
6555 auto NumElements = VTy->getElementCount();
6556 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
6557 NumElements = NumElements * 2;
6558 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
6559 NumElements = NumElements.divideCoefficientBy(2);
6560
6561 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6562 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
6563 }
6564 case NEON::BI__builtin_neon_vpadd_v:
6565 case NEON::BI__builtin_neon_vpaddq_v:
6566 // We don't allow fp/int overloading of intrinsics.
6567 if (VTy->getElementType()->isFloatingPointTy() &&
6568 Int == Intrinsic::aarch64_neon_addp)
6569 Int = Intrinsic::aarch64_neon_faddp;
6570 break;
6571 case NEON::BI__builtin_neon_vabs_v:
6572 case NEON::BI__builtin_neon_vabsq_v:
6573 if (VTy->getElementType()->isFloatingPointTy())
6574 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
6575 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
6576 case NEON::BI__builtin_neon_vadd_v:
6577 case NEON::BI__builtin_neon_vaddq_v: {
6578 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
6579 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6580 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
6581 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
6582 return Builder.CreateBitCast(Ops[0], Ty);
6583 }
6584 case NEON::BI__builtin_neon_vaddhn_v: {
6585 llvm::FixedVectorType *SrcTy =
6586 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6587
6588 // %sum = add <4 x i32> %lhs, %rhs
6589 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6590 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6591 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
6592
6593 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6594 Constant *ShiftAmt =
6595 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6596 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
6597
6598 // %res = trunc <4 x i32> %high to <4 x i16>
6599 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
6600 }
6601 case NEON::BI__builtin_neon_vcale_v:
6602 case NEON::BI__builtin_neon_vcaleq_v:
6603 case NEON::BI__builtin_neon_vcalt_v:
6604 case NEON::BI__builtin_neon_vcaltq_v:
6605 std::swap(Ops[0], Ops[1]);
6606    LLVM_FALLTHROUGH;
6607 case NEON::BI__builtin_neon_vcage_v:
6608 case NEON::BI__builtin_neon_vcageq_v:
6609 case NEON::BI__builtin_neon_vcagt_v:
6610 case NEON::BI__builtin_neon_vcagtq_v: {
6611 llvm::Type *Ty;
6612 switch (VTy->getScalarSizeInBits()) {
6613    default: llvm_unreachable("unexpected type");
6614 case 32:
6615 Ty = FloatTy;
6616 break;
6617 case 64:
6618 Ty = DoubleTy;
6619 break;
6620 case 16:
6621 Ty = HalfTy;
6622 break;
6623 }
6624 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
6625 llvm::Type *Tys[] = { VTy, VecFlt };
6626 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6627 return EmitNeonCall(F, Ops, NameHint);
6628 }
6629 case NEON::BI__builtin_neon_vceqz_v:
6630 case NEON::BI__builtin_neon_vceqzq_v:
6631 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
6632 ICmpInst::ICMP_EQ, "vceqz");
6633 case NEON::BI__builtin_neon_vcgez_v:
6634 case NEON::BI__builtin_neon_vcgezq_v:
6635 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
6636 ICmpInst::ICMP_SGE, "vcgez");
6637 case NEON::BI__builtin_neon_vclez_v:
6638 case NEON::BI__builtin_neon_vclezq_v:
6639 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
6640 ICmpInst::ICMP_SLE, "vclez");
6641 case NEON::BI__builtin_neon_vcgtz_v:
6642 case NEON::BI__builtin_neon_vcgtzq_v:
6643 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
6644 ICmpInst::ICMP_SGT, "vcgtz");
6645 case NEON::BI__builtin_neon_vcltz_v:
6646 case NEON::BI__builtin_neon_vcltzq_v:
6647 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
6648 ICmpInst::ICMP_SLT, "vcltz");
6649 case NEON::BI__builtin_neon_vclz_v:
6650 case NEON::BI__builtin_neon_vclzq_v:
6651    // We generate a target-independent intrinsic, which needs a second argument
6652    // indicating whether or not clz of zero is undefined; on ARM it isn't.
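    // Illustrative example (not in the original source): vclz_s32 becomes a call
    // to llvm.ctlz.v2i32 with the extra i1 argument set to false, since CLZ(0) is
    // well defined on ARM.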
6653 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
6654 break;
6655 case NEON::BI__builtin_neon_vcvt_f32_v:
6656 case NEON::BI__builtin_neon_vcvtq_f32_v:
6657 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6658 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
6659 HasLegalHalfType);
6660 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6661 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6662 case NEON::BI__builtin_neon_vcvt_f16_v:
6663 case NEON::BI__builtin_neon_vcvtq_f16_v:
6664 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6665 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
6666 HasLegalHalfType);
6667 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6668 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6669 case NEON::BI__builtin_neon_vcvt_n_f16_v:
6670 case NEON::BI__builtin_neon_vcvt_n_f32_v:
6671 case NEON::BI__builtin_neon_vcvt_n_f64_v:
6672 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
6673 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
6674 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
6675 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
6676 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6677 Function *F = CGM.getIntrinsic(Int, Tys);
6678 return EmitNeonCall(F, Ops, "vcvt_n");
6679 }
6680 case NEON::BI__builtin_neon_vcvt_n_s16_v:
6681 case NEON::BI__builtin_neon_vcvt_n_s32_v:
6682 case NEON::BI__builtin_neon_vcvt_n_u16_v:
6683 case NEON::BI__builtin_neon_vcvt_n_u32_v:
6684 case NEON::BI__builtin_neon_vcvt_n_s64_v:
6685 case NEON::BI__builtin_neon_vcvt_n_u64_v:
6686 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
6687 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
6688 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
6689 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
6690 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
6691 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
6692 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6693 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6694 return EmitNeonCall(F, Ops, "vcvt_n");
6695 }
6696 case NEON::BI__builtin_neon_vcvt_s32_v:
6697 case NEON::BI__builtin_neon_vcvt_u32_v:
6698 case NEON::BI__builtin_neon_vcvt_s64_v:
6699 case NEON::BI__builtin_neon_vcvt_u64_v:
6700 case NEON::BI__builtin_neon_vcvt_s16_v:
6701 case NEON::BI__builtin_neon_vcvt_u16_v:
6702 case NEON::BI__builtin_neon_vcvtq_s32_v:
6703 case NEON::BI__builtin_neon_vcvtq_u32_v:
6704 case NEON::BI__builtin_neon_vcvtq_s64_v:
6705 case NEON::BI__builtin_neon_vcvtq_u64_v:
6706 case NEON::BI__builtin_neon_vcvtq_s16_v:
6707 case NEON::BI__builtin_neon_vcvtq_u16_v: {
6708 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
6709 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
6710 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
6711 }
6712 case NEON::BI__builtin_neon_vcvta_s16_v:
6713 case NEON::BI__builtin_neon_vcvta_s32_v:
6714 case NEON::BI__builtin_neon_vcvta_s64_v:
6715 case NEON::BI__builtin_neon_vcvta_u16_v:
6716 case NEON::BI__builtin_neon_vcvta_u32_v:
6717 case NEON::BI__builtin_neon_vcvta_u64_v:
6718 case NEON::BI__builtin_neon_vcvtaq_s16_v:
6719 case NEON::BI__builtin_neon_vcvtaq_s32_v:
6720 case NEON::BI__builtin_neon_vcvtaq_s64_v:
6721 case NEON::BI__builtin_neon_vcvtaq_u16_v:
6722 case NEON::BI__builtin_neon_vcvtaq_u32_v:
6723 case NEON::BI__builtin_neon_vcvtaq_u64_v:
6724 case NEON::BI__builtin_neon_vcvtn_s16_v:
6725 case NEON::BI__builtin_neon_vcvtn_s32_v:
6726 case NEON::BI__builtin_neon_vcvtn_s64_v:
6727 case NEON::BI__builtin_neon_vcvtn_u16_v:
6728 case NEON::BI__builtin_neon_vcvtn_u32_v:
6729 case NEON::BI__builtin_neon_vcvtn_u64_v:
6730 case NEON::BI__builtin_neon_vcvtnq_s16_v:
6731 case NEON::BI__builtin_neon_vcvtnq_s32_v:
6732 case NEON::BI__builtin_neon_vcvtnq_s64_v:
6733 case NEON::BI__builtin_neon_vcvtnq_u16_v:
6734 case NEON::BI__builtin_neon_vcvtnq_u32_v:
6735 case NEON::BI__builtin_neon_vcvtnq_u64_v:
6736 case NEON::BI__builtin_neon_vcvtp_s16_v:
6737 case NEON::BI__builtin_neon_vcvtp_s32_v:
6738 case NEON::BI__builtin_neon_vcvtp_s64_v:
6739 case NEON::BI__builtin_neon_vcvtp_u16_v:
6740 case NEON::BI__builtin_neon_vcvtp_u32_v:
6741 case NEON::BI__builtin_neon_vcvtp_u64_v:
6742 case NEON::BI__builtin_neon_vcvtpq_s16_v:
6743 case NEON::BI__builtin_neon_vcvtpq_s32_v:
6744 case NEON::BI__builtin_neon_vcvtpq_s64_v:
6745 case NEON::BI__builtin_neon_vcvtpq_u16_v:
6746 case NEON::BI__builtin_neon_vcvtpq_u32_v:
6747 case NEON::BI__builtin_neon_vcvtpq_u64_v:
6748 case NEON::BI__builtin_neon_vcvtm_s16_v:
6749 case NEON::BI__builtin_neon_vcvtm_s32_v:
6750 case NEON::BI__builtin_neon_vcvtm_s64_v:
6751 case NEON::BI__builtin_neon_vcvtm_u16_v:
6752 case NEON::BI__builtin_neon_vcvtm_u32_v:
6753 case NEON::BI__builtin_neon_vcvtm_u64_v:
6754 case NEON::BI__builtin_neon_vcvtmq_s16_v:
6755 case NEON::BI__builtin_neon_vcvtmq_s32_v:
6756 case NEON::BI__builtin_neon_vcvtmq_s64_v:
6757 case NEON::BI__builtin_neon_vcvtmq_u16_v:
6758 case NEON::BI__builtin_neon_vcvtmq_u32_v:
6759 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
6760 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6761 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6762 }
6763 case NEON::BI__builtin_neon_vcvtx_f32_v: {
6764 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
6765 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6766
6767 }
6768 case NEON::BI__builtin_neon_vext_v:
6769 case NEON::BI__builtin_neon_vextq_v: {
6770 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
6771 SmallVector<int, 16> Indices;
6772 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6773 Indices.push_back(i+CV);
6774
6775 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6776 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6777 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
6778 }
6779 case NEON::BI__builtin_neon_vfma_v:
6780 case NEON::BI__builtin_neon_vfmaq_v: {
6781 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6782 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6783 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6784
6785    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma intrinsic.
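    // Illustrative: vfma(acc, x, y) computes acc + x*y, so the call below passes
    // {x, y, acc} to match llvm.fma(x, y, acc).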
6786 return emitCallMaybeConstrainedFPBuiltin(
6787 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
6788 {Ops[1], Ops[2], Ops[0]});
6789 }
6790 case NEON::BI__builtin_neon_vld1_v:
6791 case NEON::BI__builtin_neon_vld1q_v: {
6792 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6793 Ops.push_back(getAlignmentValue32(PtrOp0));
6794 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
6795 }
6796 case NEON::BI__builtin_neon_vld1_x2_v:
6797 case NEON::BI__builtin_neon_vld1q_x2_v:
6798 case NEON::BI__builtin_neon_vld1_x3_v:
6799 case NEON::BI__builtin_neon_vld1q_x3_v:
6800 case NEON::BI__builtin_neon_vld1_x4_v:
6801 case NEON::BI__builtin_neon_vld1q_x4_v: {
6802 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6803 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
6804 llvm::Type *Tys[2] = { VTy, PTy };
6805 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6806 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
6807 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6808 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6809 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6810 }
6811 case NEON::BI__builtin_neon_vld2_v:
6812 case NEON::BI__builtin_neon_vld2q_v:
6813 case NEON::BI__builtin_neon_vld3_v:
6814 case NEON::BI__builtin_neon_vld3q_v:
6815 case NEON::BI__builtin_neon_vld4_v:
6816 case NEON::BI__builtin_neon_vld4q_v:
6817 case NEON::BI__builtin_neon_vld2_dup_v:
6818 case NEON::BI__builtin_neon_vld2q_dup_v:
6819 case NEON::BI__builtin_neon_vld3_dup_v:
6820 case NEON::BI__builtin_neon_vld3q_dup_v:
6821 case NEON::BI__builtin_neon_vld4_dup_v:
6822 case NEON::BI__builtin_neon_vld4q_dup_v: {
6823 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6824 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6825 Value *Align = getAlignmentValue32(PtrOp1);
6826 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
6827 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6828 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6829 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6830 }
6831 case NEON::BI__builtin_neon_vld1_dup_v:
6832 case NEON::BI__builtin_neon_vld1q_dup_v: {
6833 Value *V = UndefValue::get(Ty);
6834 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
6835 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
6836 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
6837 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
6838 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
6839 return EmitNeonSplat(Ops[0], CI);
6840 }
6841 case NEON::BI__builtin_neon_vld2_lane_v:
6842 case NEON::BI__builtin_neon_vld2q_lane_v:
6843 case NEON::BI__builtin_neon_vld3_lane_v:
6844 case NEON::BI__builtin_neon_vld3q_lane_v:
6845 case NEON::BI__builtin_neon_vld4_lane_v:
6846 case NEON::BI__builtin_neon_vld4q_lane_v: {
6847 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6848 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6849 for (unsigned I = 2; I < Ops.size() - 1; ++I)
6850 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
6851 Ops.push_back(getAlignmentValue32(PtrOp1));
6852 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
6853 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6854 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6855 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6856 }
6857 case NEON::BI__builtin_neon_vmovl_v: {
6858 llvm::FixedVectorType *DTy =
6859 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6860 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
6861 if (Usgn)
6862 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
6863 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
6864 }
6865 case NEON::BI__builtin_neon_vmovn_v: {
6866 llvm::FixedVectorType *QTy =
6867 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6868 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
6869 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
6870 }
6871 case NEON::BI__builtin_neon_vmull_v:
6872 // FIXME: the integer vmull operations could be emitted in terms of pure
6873 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
6874 // hoisting the exts outside loops. Until global ISel comes along that can
6875  // see through such movement, this leads to bad CodeGen. So we need an
6876 // intrinsic for now.
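  // Illustrative sketch (not in the original source) of the pure-IR form for a
  // signed vmull_s16:
  //   %lhs.ext = sext <4 x i16> %lhs to <4 x i32>
  //   %rhs.ext = sext <4 x i16> %rhs to <4 x i32>
  //   %prod    = mul <4 x i32> %lhs.ext, %rhs.ext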
6877 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
6878 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
6879 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
6880 case NEON::BI__builtin_neon_vpadal_v:
6881 case NEON::BI__builtin_neon_vpadalq_v: {
6882 // The source operand type has twice as many elements of half the size.
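    // Illustrative example (not in the original source): for a <2 x i64>
    // accumulator, NarrowTy below is <4 x i32>.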
6883 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6884 llvm::Type *EltTy =
6885 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6886 auto *NarrowTy =
6887 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6888 llvm::Type *Tys[2] = { Ty, NarrowTy };
6889 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6890 }
6891 case NEON::BI__builtin_neon_vpaddl_v:
6892 case NEON::BI__builtin_neon_vpaddlq_v: {
6893 // The source operand type has twice as many elements of half the size.
6894 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6895 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6896 auto *NarrowTy =
6897 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6898 llvm::Type *Tys[2] = { Ty, NarrowTy };
6899 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
6900 }
6901 case NEON::BI__builtin_neon_vqdmlal_v:
6902 case NEON::BI__builtin_neon_vqdmlsl_v: {
6903 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
6904 Ops[1] =
6905 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
6906 Ops.resize(2);
6907 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
6908 }
6909 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
6910 case NEON::BI__builtin_neon_vqdmulh_lane_v:
6911 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
6912 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
6913 auto *RTy = cast<llvm::FixedVectorType>(Ty);
6914 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
6915 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
6916 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
6917 RTy->getNumElements() * 2);
6918 llvm::Type *Tys[2] = {
6919 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6920 /*isQuad*/ false))};
6921 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6922 }
6923 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6924 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6925 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6926 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6927 llvm::Type *Tys[2] = {
6928 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6929 /*isQuad*/ true))};
6930 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6931 }
6932 case NEON::BI__builtin_neon_vqshl_n_v:
6933 case NEON::BI__builtin_neon_vqshlq_n_v:
6934 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6935 1, false);
6936 case NEON::BI__builtin_neon_vqshlu_n_v:
6937 case NEON::BI__builtin_neon_vqshluq_n_v:
6938 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6939 1, false);
6940 case NEON::BI__builtin_neon_vrecpe_v:
6941 case NEON::BI__builtin_neon_vrecpeq_v:
6942 case NEON::BI__builtin_neon_vrsqrte_v:
6943 case NEON::BI__builtin_neon_vrsqrteq_v:
6944 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6945 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6946 case NEON::BI__builtin_neon_vrndi_v:
6947 case NEON::BI__builtin_neon_vrndiq_v:
6948 Int = Builder.getIsFPConstrained()
6949 ? Intrinsic::experimental_constrained_nearbyint
6950 : Intrinsic::nearbyint;
6951 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6952 case NEON::BI__builtin_neon_vrshr_n_v:
6953 case NEON::BI__builtin_neon_vrshrq_n_v:
6954 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6955 1, true);
6956 case NEON::BI__builtin_neon_vsha512hq_v:
6957 case NEON::BI__builtin_neon_vsha512h2q_v:
6958 case NEON::BI__builtin_neon_vsha512su0q_v:
6959 case NEON::BI__builtin_neon_vsha512su1q_v: {
6960 Function *F = CGM.getIntrinsic(Int);
6961 return EmitNeonCall(F, Ops, "");
6962 }
6963 case NEON::BI__builtin_neon_vshl_n_v:
6964 case NEON::BI__builtin_neon_vshlq_n_v:
6965 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6966 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6967 "vshl_n");
6968 case NEON::BI__builtin_neon_vshll_n_v: {
6969 llvm::FixedVectorType *SrcTy =
6970 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6971 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6972 if (Usgn)
6973 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6974 else
6975 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6976 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6977 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6978 }
6979 case NEON::BI__builtin_neon_vshrn_n_v: {
6980 llvm::FixedVectorType *SrcTy =
6981 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6982 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6983 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6984 if (Usgn)
6985 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6986 else
6987 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6988 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6989 }
6990 case NEON::BI__builtin_neon_vshr_n_v:
6991 case NEON::BI__builtin_neon_vshrq_n_v:
6992 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6993 case NEON::BI__builtin_neon_vst1_v:
6994 case NEON::BI__builtin_neon_vst1q_v:
6995 case NEON::BI__builtin_neon_vst2_v:
6996 case NEON::BI__builtin_neon_vst2q_v:
6997 case NEON::BI__builtin_neon_vst3_v:
6998 case NEON::BI__builtin_neon_vst3q_v:
6999 case NEON::BI__builtin_neon_vst4_v:
7000 case NEON::BI__builtin_neon_vst4q_v:
7001 case NEON::BI__builtin_neon_vst2_lane_v:
7002 case NEON::BI__builtin_neon_vst2q_lane_v:
7003 case NEON::BI__builtin_neon_vst3_lane_v:
7004 case NEON::BI__builtin_neon_vst3q_lane_v:
7005 case NEON::BI__builtin_neon_vst4_lane_v:
7006 case NEON::BI__builtin_neon_vst4q_lane_v: {
7007 llvm::Type *Tys[] = {Int8PtrTy, Ty};
7008 Ops.push_back(getAlignmentValue32(PtrOp0));
7009 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
7010 }
7011 case NEON::BI__builtin_neon_vsm3partw1q_v:
7012 case NEON::BI__builtin_neon_vsm3partw2q_v:
7013 case NEON::BI__builtin_neon_vsm3ss1q_v:
7014 case NEON::BI__builtin_neon_vsm4ekeyq_v:
7015 case NEON::BI__builtin_neon_vsm4eq_v: {
7016 Function *F = CGM.getIntrinsic(Int);
7017 return EmitNeonCall(F, Ops, "");
7018 }
7019 case NEON::BI__builtin_neon_vsm3tt1aq_v:
7020 case NEON::BI__builtin_neon_vsm3tt1bq_v:
7021 case NEON::BI__builtin_neon_vsm3tt2aq_v:
7022 case NEON::BI__builtin_neon_vsm3tt2bq_v: {
7023 Function *F = CGM.getIntrinsic(Int);
7024 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
7025 return EmitNeonCall(F, Ops, "");
7026 }
7027 case NEON::BI__builtin_neon_vst1_x2_v:
7028 case NEON::BI__builtin_neon_vst1q_x2_v:
7029 case NEON::BI__builtin_neon_vst1_x3_v:
7030 case NEON::BI__builtin_neon_vst1q_x3_v:
7031 case NEON::BI__builtin_neon_vst1_x4_v:
7032 case NEON::BI__builtin_neon_vst1q_x4_v: {
7033 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
7034 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
7035    // in AArch64 it comes last. We may want to stick to one or the other.
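    // Illustrative: for st1x2 on AArch64 the rotate below turns {ptr, v0, v1}
    // into {v0, v1, ptr}, matching the AArch64 operand order.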
7036 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
7037 Arch == llvm::Triple::aarch64_32) {
7038 llvm::Type *Tys[2] = { VTy, PTy };
7039 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
7040 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7041 }
7042 llvm::Type *Tys[2] = { PTy, VTy };
7043 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7044 }
7045 case NEON::BI__builtin_neon_vsubhn_v: {
7046 llvm::FixedVectorType *SrcTy =
7047 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7048
7049    // %diff = sub <4 x i32> %lhs, %rhs
7050 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7051 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7052 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
7053
7054    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
7055 Constant *ShiftAmt =
7056 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
7057 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
7058
7059 // %res = trunc <4 x i32> %high to <4 x i16>
7060 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
7061 }
7062 case NEON::BI__builtin_neon_vtrn_v:
7063 case NEON::BI__builtin_neon_vtrnq_v: {
7064 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7065 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7066 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7067 Value *SV = nullptr;
7068
7069 for (unsigned vi = 0; vi != 2; ++vi) {
7070 SmallVector<int, 16> Indices;
7071 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7072 Indices.push_back(i+vi);
7073 Indices.push_back(i+e+vi);
7074 }
7075 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7076 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
7077 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7078 }
7079 return SV;
7080 }
7081 case NEON::BI__builtin_neon_vtst_v:
7082 case NEON::BI__builtin_neon_vtstq_v: {
7083 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7084 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7085 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
7086 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
7087 ConstantAggregateZero::get(Ty));
7088 return Builder.CreateSExt(Ops[0], Ty, "vtst");
7089 }
7090 case NEON::BI__builtin_neon_vuzp_v:
7091 case NEON::BI__builtin_neon_vuzpq_v: {
7092 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7093 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7094 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7095 Value *SV = nullptr;
7096
7097 for (unsigned vi = 0; vi != 2; ++vi) {
7098 SmallVector<int, 16> Indices;
7099 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
7100 Indices.push_back(2*i+vi);
7101
7102 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7103 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
7104 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7105 }
7106 return SV;
7107 }
7108 case NEON::BI__builtin_neon_vxarq_v: {
7109 Function *F = CGM.getIntrinsic(Int);
7110 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
7111 return EmitNeonCall(F, Ops, "");
7112 }
7113 case NEON::BI__builtin_neon_vzip_v:
7114 case NEON::BI__builtin_neon_vzipq_v: {
7115 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7116 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7117 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7118 Value *SV = nullptr;
7119
7120 for (unsigned vi = 0; vi != 2; ++vi) {
7121 SmallVector<int, 16> Indices;
7122 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7123 Indices.push_back((i + vi*e) >> 1);
7124 Indices.push_back(((i + vi*e) >> 1)+e);
7125 }
7126 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7127 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
7128 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7129 }
7130 return SV;
7131 }
7132 case NEON::BI__builtin_neon_vdot_v:
7133 case NEON::BI__builtin_neon_vdotq_v: {
7134 auto *InputTy =
7135 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7136 llvm::Type *Tys[2] = { Ty, InputTy };
7137 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7138 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
7139 }
7140 case NEON::BI__builtin_neon_vfmlal_low_v:
7141 case NEON::BI__builtin_neon_vfmlalq_low_v: {
7142 auto *InputTy =
7143 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7144 llvm::Type *Tys[2] = { Ty, InputTy };
7145 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
7146 }
7147 case NEON::BI__builtin_neon_vfmlsl_low_v:
7148 case NEON::BI__builtin_neon_vfmlslq_low_v: {
7149 auto *InputTy =
7150 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7151 llvm::Type *Tys[2] = { Ty, InputTy };
7152 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
7153 }
7154 case NEON::BI__builtin_neon_vfmlal_high_v:
7155 case NEON::BI__builtin_neon_vfmlalq_high_v: {
7156 auto *InputTy =
7157 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7158 llvm::Type *Tys[2] = { Ty, InputTy };
7159 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
7160 }
7161 case NEON::BI__builtin_neon_vfmlsl_high_v:
7162 case NEON::BI__builtin_neon_vfmlslq_high_v: {
7163 auto *InputTy =
7164 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7165 llvm::Type *Tys[2] = { Ty, InputTy };
7166 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
7167 }
7168 case NEON::BI__builtin_neon_vmmlaq_v: {
7169 auto *InputTy =
7170 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7171 llvm::Type *Tys[2] = { Ty, InputTy };
7172 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7173 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
7174 }
7175 case NEON::BI__builtin_neon_vusmmlaq_v: {
7176 auto *InputTy =
7177 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7178 llvm::Type *Tys[2] = { Ty, InputTy };
7179 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
7180 }
7181 case NEON::BI__builtin_neon_vusdot_v:
7182 case NEON::BI__builtin_neon_vusdotq_v: {
7183 auto *InputTy =
7184 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7185 llvm::Type *Tys[2] = { Ty, InputTy };
7186 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
7187 }
7188 case NEON::BI__builtin_neon_vbfdot_v:
7189 case NEON::BI__builtin_neon_vbfdotq_v: {
7190 llvm::Type *InputTy =
7191 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
7192 llvm::Type *Tys[2] = { Ty, InputTy };
7193 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
7194 }
7195 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
7196 llvm::Type *Tys[1] = { Ty };
7197 Function *F = CGM.getIntrinsic(Int, Tys);
7198 return EmitNeonCall(F, Ops, "vcvtfp2bf");
7199 }
7200
7201 }
7202
7203  assert(Int && "Expected valid intrinsic number");
7204
7205 // Determine the type(s) of this overloaded AArch64 intrinsic.
7206 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
7207
7208 Value *Result = EmitNeonCall(F, Ops, NameHint);
7209 llvm::Type *ResultType = ConvertType(E->getType());
7210 // AArch64 intrinsic one-element vector type cast to
7211 // scalar type expected by the builtin
7212 return Builder.CreateBitCast(Result, ResultType, NameHint);
7213}
7214
7215Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
7216 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
7217 const CmpInst::Predicate Ip, const Twine &Name) {
7218 llvm::Type *OTy = Op->getType();
7219
7220 // FIXME: this is utterly horrific. We should not be looking at previous
7221 // codegen context to find out what needs doing. Unfortunately TableGen
7222 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
7223 // (etc).
7224 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
7225 OTy = BI->getOperand(0)->getType();
7226
7227 Op = Builder.CreateBitCast(Op, OTy);
7228 if (OTy->getScalarType()->isFloatingPointTy()) {
7229 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
7230 } else {
7231 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
7232 }
7233 return Builder.CreateSExt(Op, Ty, Name);
7234}
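A note on the comparison lowering above: the FCmp/ICmp produces an i1 per lane and the sign extension widens it to an all-ones or all-zeros lane, which is how the NEON vceqz-style builtins return their masks. A minimal standalone C++ model of that per-lane semantics (the float lane type is illustrative, not taken from the code):

#include <cstdint>
#include <cstdio>

// Model of "compare against zero, then sign-extend the i1 result":
// true becomes an all-ones lane, false becomes an all-zeros lane.
static int32_t lane_cmp_eq_zero(float lane) {
  bool cmp = (lane == 0.0f);              // the FCmp step
  return cmp ? int32_t(-1) : int32_t(0);  // the SExt step: -1 is 0xFFFFFFFF
}

int main() {
  printf("%08x %08x\n", (uint32_t)lane_cmp_eq_zero(0.0f),   // ffffffff
                        (uint32_t)lane_cmp_eq_zero(1.5f));  // 00000000
}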
7235
7236static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
7237 Value *ExtOp, Value *IndexOp,
7238 llvm::Type *ResTy, unsigned IntID,
7239 const char *Name) {
7240 SmallVector<Value *, 2> TblOps;
7241 if (ExtOp)
7242 TblOps.push_back(ExtOp);
7243
7244 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
7245 SmallVector<int, 16> Indices;
7246 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
7247 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
7248 Indices.push_back(2*i);
7249 Indices.push_back(2*i+1);
7250 }
7251
7252 int PairPos = 0, End = Ops.size() - 1;
7253 while (PairPos < End) {
7254 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7255 Ops[PairPos+1], Indices,
7256 Name));
7257 PairPos += 2;
7258 }
7259
7260 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
7261 // of the last 128-bit lookup table with zero.
7262 if (PairPos == End) {
7263 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
7264 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7265 ZeroTbl, Indices, Name));
7266 }
7267
7268 Function *TblF;
7269 TblOps.push_back(IndexOp);
7270 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
7271
7272 return CGF.EmitNeonCall(TblF, TblOps, Name);
7273}
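The pairing loop above concatenates the 64-bit table operands two at a time into 128-bit tables and, when the operand count is odd, pads the last one with zeros. A standalone sketch of just that bookkeeping (the operand names are illustrative):

#include <cstdio>

int main() {
  unsigned NumOps = 3;                  // an odd number of 64-bit table operands
  int PairPos = 0, End = (int)NumOps - 1;

  // Pair consecutive operands into 128-bit tables.
  while (PairPos < End) {
    printf("table = concat(op%d, op%d)\n", PairPos, PairPos + 1);
    PairPos += 2;
  }
  // One operand left over: fill its upper 64 bits with zero.
  if (PairPos == End)
    printf("table = concat(op%d, zero)\n", PairPos);
}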
7274
7275Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
7276 unsigned Value;
7277 switch (BuiltinID) {
7278 default:
7279 return nullptr;
7280 case ARM::BI__builtin_arm_nop:
7281 Value = 0;
7282 break;
7283 case ARM::BI__builtin_arm_yield:
7284 case ARM::BI__yield:
7285 Value = 1;
7286 break;
7287 case ARM::BI__builtin_arm_wfe:
7288 case ARM::BI__wfe:
7289 Value = 2;
7290 break;
7291 case ARM::BI__builtin_arm_wfi:
7292 case ARM::BI__wfi:
7293 Value = 3;
7294 break;
7295 case ARM::BI__builtin_arm_sev:
7296 case ARM::BI__sev:
7297 Value = 4;
7298 break;
7299 case ARM::BI__builtin_arm_sevl:
7300 case ARM::BI__sevl:
7301 Value = 5;
7302 break;
7303 }
7304
7305 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
7306 llvm::ConstantInt::get(Int32Ty, Value));
7307}
7308
7309enum SpecialRegisterAccessKind {
7310 NormalRead,
7311 VolatileRead,
7312 Write,
7313};
7314
7315 // Generates the IR for the read/write special register builtin.
7316 // ValueType is the type of the value that is to be written or read;
7317 // RegisterType is the type of the register being written to or read from.
7318static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
7319 const CallExpr *E,
7320 llvm::Type *RegisterType,
7321 llvm::Type *ValueType,
7322 SpecialRegisterAccessKind AccessKind,
7323 StringRef SysReg = "") {
7324 // The read/write register intrinsics only support 32- and 64-bit operations.
7325 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
7326 && "Unsupported size for register.");
7327
7328 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7329 CodeGen::CodeGenModule &CGM = CGF.CGM;
7330 LLVMContext &Context = CGM.getLLVMContext();
7331
7332 if (SysReg.empty()) {
7333 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
7334 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
7335 }
7336
7337 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
7338 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7339 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7340
7341 llvm::Type *Types[] = { RegisterType };
7342
7343 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
7344 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7345 && "Can't fit 64-bit value in 32-bit register");
7346
7347 if (AccessKind != Write) {
7348 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7349 llvm::Function *F = CGM.getIntrinsic(
7350 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
7351 : llvm::Intrinsic::read_register,
7352 Types);
7353 llvm::Value *Call = Builder.CreateCall(F, Metadata);
7354
7355 if (MixedTypes)
7356 // Read into 64 bit register and then truncate result to 32 bit.
7357 return Builder.CreateTrunc(Call, ValueType);
7358
7359 if (ValueType->isPointerTy())
7360 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
7361 return Builder.CreateIntToPtr(Call, ValueType);
7362
7363 return Call;
7364 }
7365
7366 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7367 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
7368 if (MixedTypes) {
7369 // Extend 32 bit write value to 64 bit to pass to write.
7370 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
7371 return Builder.CreateCall(F, { Metadata, ArgValue });
7372 }
7373
7374 if (ValueType->isPointerTy()) {
7375 // Have VoidPtrTy ArgValue but want to return an i32/i64.
7376 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
7377 return Builder.CreateCall(F, { Metadata, ArgValue });
7378 }
7379
7380 return Builder.CreateCall(F, { Metadata, ArgValue });
7381}
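These helpers back the __builtin_arm_rsr/__builtin_arm_wsr family handled later in EmitARMBuiltinExpr, lowering to llvm.read_register/llvm.write_register on an i32 or i64. A hedged source-level usage sketch (assumes an AArch64 target; the register names and function names are purely illustrative):

#include <cstdint>

uint64_t read_virtual_counter() {
  return __builtin_arm_rsr64("cntvct_el0");   // illustrative register name
}

void set_thread_pointer(uint64_t v) {
  __builtin_arm_wsr64("tpidr_el0", v);        // illustrative register name
}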
7382
7383/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
7384/// argument that specifies the vector type.
7385static bool HasExtraNeonArgument(unsigned BuiltinID) {
7386 switch (BuiltinID) {
7387 default: break;
7388 case NEON::BI__builtin_neon_vget_lane_i8:
7389 case NEON::BI__builtin_neon_vget_lane_i16:
7390 case NEON::BI__builtin_neon_vget_lane_bf16:
7391 case NEON::BI__builtin_neon_vget_lane_i32:
7392 case NEON::BI__builtin_neon_vget_lane_i64:
7393 case NEON::BI__builtin_neon_vget_lane_f32:
7394 case NEON::BI__builtin_neon_vgetq_lane_i8:
7395 case NEON::BI__builtin_neon_vgetq_lane_i16:
7396 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7397 case NEON::BI__builtin_neon_vgetq_lane_i32:
7398 case NEON::BI__builtin_neon_vgetq_lane_i64:
7399 case NEON::BI__builtin_neon_vgetq_lane_f32:
7400 case NEON::BI__builtin_neon_vduph_lane_bf16:
7401 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7402 case NEON::BI__builtin_neon_vset_lane_i8:
7403 case NEON::BI__builtin_neon_vset_lane_i16:
7404 case NEON::BI__builtin_neon_vset_lane_bf16:
7405 case NEON::BI__builtin_neon_vset_lane_i32:
7406 case NEON::BI__builtin_neon_vset_lane_i64:
7407 case NEON::BI__builtin_neon_vset_lane_f32:
7408 case NEON::BI__builtin_neon_vsetq_lane_i8:
7409 case NEON::BI__builtin_neon_vsetq_lane_i16:
7410 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7411 case NEON::BI__builtin_neon_vsetq_lane_i32:
7412 case NEON::BI__builtin_neon_vsetq_lane_i64:
7413 case NEON::BI__builtin_neon_vsetq_lane_f32:
7414 case NEON::BI__builtin_neon_vsha1h_u32:
7415 case NEON::BI__builtin_neon_vsha1cq_u32:
7416 case NEON::BI__builtin_neon_vsha1pq_u32:
7417 case NEON::BI__builtin_neon_vsha1mq_u32:
7418 case NEON::BI__builtin_neon_vcvth_bf16_f32:
7419 case clang::ARM::BI_MoveToCoprocessor:
7420 case clang::ARM::BI_MoveToCoprocessor2:
7421 return false;
7422 }
7423 return true;
7424}
7425
7426Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
7427 const CallExpr *E,
7428 ReturnValueSlot ReturnValue,
7429 llvm::Triple::ArchType Arch) {
7430 if (auto Hint = GetValueForARMHint(BuiltinID))
7431 return Hint;
7432
7433 if (BuiltinID == ARM::BI__emit) {
7434 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
7435 llvm::FunctionType *FTy =
7436 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
7437
7438 Expr::EvalResult Result;
7439 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7440 llvm_unreachable("Sema will ensure that the parameter is constant");
7441
7442 llvm::APSInt Value = Result.Val.getInt();
7443 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
7444
7445 llvm::InlineAsm *Emit =
7446 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
7447 /*hasSideEffects=*/true)
7448 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
7449 /*hasSideEffects=*/true);
7450
7451 return Builder.CreateCall(Emit);
7452 }
7453
7454 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
7455 Value *Option = EmitScalarExpr(E->getArg(0));
7456 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
7457 }
7458
7459 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
7460 Value *Address = EmitScalarExpr(E->getArg(0));
7461 Value *RW = EmitScalarExpr(E->getArg(1));
7462 Value *IsData = EmitScalarExpr(E->getArg(2));
7463
7464 // Locality is not supported on ARM target
7465 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
7466
7467 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
7468 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7469 }
7470
7471 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
7472 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7473 return Builder.CreateCall(
7474 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7475 }
7476
7477 if (BuiltinID == ARM::BI__builtin_arm_cls) {
7478 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7479 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
7480 }
7481 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
7482 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7483 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
7484 "cls");
7485 }
7486
7487 if (BuiltinID == ARM::BI__clear_cache) {
7488 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7489 const FunctionDecl *FD = E->getDirectCallee();
7490 Value *Ops[2];
7491 for (unsigned i = 0; i < 2; i++)
7492 Ops[i] = EmitScalarExpr(E->getArg(i));
7493 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7494 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7495 StringRef Name = FD->getName();
7496 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7497 }
7498
7499 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
7500 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
7501 Function *F;
7502
7503 switch (BuiltinID) {
7504 default: llvm_unreachable("unexpected builtin");
7505 case ARM::BI__builtin_arm_mcrr:
7506 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
7507 break;
7508 case ARM::BI__builtin_arm_mcrr2:
7509 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
7510 break;
7511 }
7512
7513 // The MCRR{2} instruction has 5 operands, but
7514 // the intrinsic has 4 because Rt and Rt2
7515 // are represented as a single unsigned 64-bit
7516 // integer in the intrinsic definition;
7517 // internally they are represented as two
7518 // 32-bit integers.
7519
7520 Value *Coproc = EmitScalarExpr(E->getArg(0));
7521 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7522 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
7523 Value *CRm = EmitScalarExpr(E->getArg(3));
7524
7525 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7526 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
7527 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
7528 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
7529
7530 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
7531 }
7532
7533 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
7534 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
7535 Function *F;
7536
7537 switch (BuiltinID) {
7538 default: llvm_unreachable("unexpected builtin");
7539 case ARM::BI__builtin_arm_mrrc:
7540 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
7541 break;
7542 case ARM::BI__builtin_arm_mrrc2:
7543 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
7544 break;
7545 }
7546
7547 Value *Coproc = EmitScalarExpr(E->getArg(0));
7548 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7549 Value *CRm = EmitScalarExpr(E->getArg(2));
7550 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
7551
7552 // Returns an unsigned 64 bit integer, represented
7553 // as two 32 bit integers.
7554
7555 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
7556 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
7557 Rt = Builder.CreateZExt(Rt, Int64Ty);
7558 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
7559
7560 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
7561 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
7562 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
7563
7564 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
7565 }
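Both the MCRR and MRRC paths above move a 64-bit quantity through a pair of 32-bit registers: the split is a truncate plus a logical shift right by 32, and the recombination is a shift left by 32 plus an OR. A standalone C++ sketch of that packing (the coprocessor intrinsic calls themselves are omitted):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t RtAndRt2 = 0x1122334455667788ULL;

  // Split (mcrr/mcrr2): Rt is the low word, Rt2 the high word.
  uint32_t Rt  = (uint32_t)RtAndRt2;          // truncate
  uint32_t Rt2 = (uint32_t)(RtAndRt2 >> 32);  // lshr 32, then truncate

  // Recombine (mrrc/mrrc2): zero-extend, shift the high word up, OR them.
  uint64_t Joined = ((uint64_t)Rt2 << 32) | (uint64_t)Rt;

  printf("Rt=%08x Rt2=%08x joined=%016llx\n", Rt, Rt2,
         (unsigned long long)Joined);
  return Joined == RtAndRt2 ? 0 : 1;
}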
7566
7567 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
7568 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
7569 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
7570 getContext().getTypeSize(E->getType()) == 64) ||
7571 BuiltinID == ARM::BI__ldrexd) {
7572 Function *F;
7573
7574 switch (BuiltinID) {
7575 default: llvm_unreachable("unexpected builtin");
7576 case ARM::BI__builtin_arm_ldaex:
7577 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
7578 break;
7579 case ARM::BI__builtin_arm_ldrexd:
7580 case ARM::BI__builtin_arm_ldrex:
7581 case ARM::BI__ldrexd:
7582 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
7583 break;
7584 }
7585
7586 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7587 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7588 "ldrexd");
7589
7590 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7591 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7592 Val0 = Builder.CreateZExt(Val0, Int64Ty);
7593 Val1 = Builder.CreateZExt(Val1, Int64Ty);
7594
7595 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
7596 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7597 Val = Builder.CreateOr(Val, Val1);
7598 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7599 }
7600
7601 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
7602 BuiltinID == ARM::BI__builtin_arm_ldaex) {
7603 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7604
7605 QualType Ty = E->getType();
7606 llvm::Type *RealResTy = ConvertType(Ty);
7607 llvm::Type *PtrTy = llvm::IntegerType::get(
7608 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7609 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7610
7611 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
7612 ? Intrinsic::arm_ldaex
7613 : Intrinsic::arm_ldrex,
7614 PtrTy);
7615 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
7616
7617 if (RealResTy->isPointerTy())
7618 return Builder.CreateIntToPtr(Val, RealResTy);
7619 else {
7620 llvm::Type *IntResTy = llvm::IntegerType::get(
7621 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7622 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7623 return Builder.CreateBitCast(Val, RealResTy);
7624 }
7625 }
7626
7627 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
7628 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
7629 BuiltinID == ARM::BI__builtin_arm_strex) &&
7630 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
7631 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7632 ? Intrinsic::arm_stlexd
7633 : Intrinsic::arm_strexd);
7634 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
7635
7636 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7637 Value *Val = EmitScalarExpr(E->getArg(0));
7638 Builder.CreateStore(Val, Tmp);
7639
7640 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
7641 Val = Builder.CreateLoad(LdPtr);
7642
7643 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7644 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7645 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
7646 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
7647 }
7648
7649 if (BuiltinID == ARM::BI__builtin_arm_strex ||
7650 BuiltinID == ARM::BI__builtin_arm_stlex) {
7651 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7652 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7653
7654 QualType Ty = E->getArg(0)->getType();
7655 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7656 getContext().getTypeSize(Ty));
7657 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7658
7659 if (StoreVal->getType()->isPointerTy())
7660 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
7661 else {
7662 llvm::Type *IntTy = llvm::IntegerType::get(
7663 getLLVMContext(),
7664 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7665 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7666 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
7667 }
7668
7669 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7670 ? Intrinsic::arm_stlex
7671 : Intrinsic::arm_strex,
7672 StoreAddr->getType());
7673 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
7674 }
7675
7676 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
7677 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
7678 return Builder.CreateCall(F);
7679 }
7680
7681 // CRC32
7682 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7683 switch (BuiltinID) {
7684 case ARM::BI__builtin_arm_crc32b:
7685 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
7686 case ARM::BI__builtin_arm_crc32cb:
7687 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
7688 case ARM::BI__builtin_arm_crc32h:
7689 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
7690 case ARM::BI__builtin_arm_crc32ch:
7691 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
7692 case ARM::BI__builtin_arm_crc32w:
7693 case ARM::BI__builtin_arm_crc32d:
7694 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
7695 case ARM::BI__builtin_arm_crc32cw:
7696 case ARM::BI__builtin_arm_crc32cd:
7697 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
7698 }
7699
7700 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7701 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7702 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7703
7704 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
7705 // intrinsics, hence we need different codegen for these cases.
7706 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
7707 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
7708 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7709 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
7710 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
7711 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
7712
7713 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7714 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
7715 return Builder.CreateCall(F, {Res, Arg1b});
7716 } else {
7717 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
7718
7719 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7720 return Builder.CreateCall(F, {Arg0, Arg1});
7721 }
7722 }
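As the comment above says, the 64-bit CRC builtins are lowered to two 32-bit CRC steps over the low and then the high word. A source-level sketch of that equivalence (assumes an ARM target with the CRC extension enabled; the helper name is illustrative):

#include <cstdint>

// crc32d(crc, v) behaves like crc32w over the low word, then the high word.
uint32_t crc32d_via_words(uint32_t crc, uint64_t v) {
  crc = __builtin_arm_crc32w(crc, (uint32_t)v);           // low 32 bits
  return __builtin_arm_crc32w(crc, (uint32_t)(v >> 32));  // then high 32 bits
}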
7723
7724 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7725 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7726 BuiltinID == ARM::BI__builtin_arm_rsrp ||
7727 BuiltinID == ARM::BI__builtin_arm_wsr ||
7728 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
7729 BuiltinID == ARM::BI__builtin_arm_wsrp) {
7730
7731 SpecialRegisterAccessKind AccessKind = Write;
7732 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7733 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7734 BuiltinID == ARM::BI__builtin_arm_rsrp)
7735 AccessKind = VolatileRead;
7736
7737 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
7738 BuiltinID == ARM::BI__builtin_arm_wsrp;
7739
7740 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7741 BuiltinID == ARM::BI__builtin_arm_wsr64;
7742
7743 llvm::Type *ValueType;
7744 llvm::Type *RegisterType;
7745 if (IsPointerBuiltin) {
7746 ValueType = VoidPtrTy;
7747 RegisterType = Int32Ty;
7748 } else if (Is64Bit) {
7749 ValueType = RegisterType = Int64Ty;
7750 } else {
7751 ValueType = RegisterType = Int32Ty;
7752 }
7753
7754 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
7755 AccessKind);
7756 }
7757
7758 // Handle MSVC intrinsics before argument evaluation to prevent double
7759 // evaluation.
7760 if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
7761 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
7762
7763 // Deal with MVE builtins
7764 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7765 return Result;
7766 // Handle CDE builtins
7767 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7768 return Result;
7769
7770 // Find out if any arguments are required to be integer constant
7771 // expressions.
7772 unsigned ICEArguments = 0;
7773 ASTContext::GetBuiltinTypeError Error;
7774 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7775 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7776
7777 auto getAlignmentValue32 = [&](Address addr) -> Value* {
7778 return Builder.getInt32(addr.getAlignment().getQuantity());
7779 };
7780
7781 Address PtrOp0 = Address::invalid();
7782 Address PtrOp1 = Address::invalid();
7783 SmallVector<Value*, 4> Ops;
7784 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
7785 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
7786 for (unsigned i = 0, e = NumArgs; i != e; i++) {
7787 if (i == 0) {
7788 switch (BuiltinID) {
7789 case NEON::BI__builtin_neon_vld1_v:
7790 case NEON::BI__builtin_neon_vld1q_v:
7791 case NEON::BI__builtin_neon_vld1q_lane_v:
7792 case NEON::BI__builtin_neon_vld1_lane_v:
7793 case NEON::BI__builtin_neon_vld1_dup_v:
7794 case NEON::BI__builtin_neon_vld1q_dup_v:
7795 case NEON::BI__builtin_neon_vst1_v:
7796 case NEON::BI__builtin_neon_vst1q_v:
7797 case NEON::BI__builtin_neon_vst1q_lane_v:
7798 case NEON::BI__builtin_neon_vst1_lane_v:
7799 case NEON::BI__builtin_neon_vst2_v:
7800 case NEON::BI__builtin_neon_vst2q_v:
7801 case NEON::BI__builtin_neon_vst2_lane_v:
7802 case NEON::BI__builtin_neon_vst2q_lane_v:
7803 case NEON::BI__builtin_neon_vst3_v:
7804 case NEON::BI__builtin_neon_vst3q_v:
7805 case NEON::BI__builtin_neon_vst3_lane_v:
7806 case NEON::BI__builtin_neon_vst3q_lane_v:
7807 case NEON::BI__builtin_neon_vst4_v:
7808 case NEON::BI__builtin_neon_vst4q_v:
7809 case NEON::BI__builtin_neon_vst4_lane_v:
7810 case NEON::BI__builtin_neon_vst4q_lane_v:
7811 // Get the alignment for the argument in addition to the value;
7812 // we'll use it later.
7813 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
7814 Ops.push_back(PtrOp0.getPointer());
7815 continue;
7816 }
7817 }
7818 if (i == 1) {
7819 switch (BuiltinID) {
7820 case NEON::BI__builtin_neon_vld2_v:
7821 case NEON::BI__builtin_neon_vld2q_v:
7822 case NEON::BI__builtin_neon_vld3_v:
7823 case NEON::BI__builtin_neon_vld3q_v:
7824 case NEON::BI__builtin_neon_vld4_v:
7825 case NEON::BI__builtin_neon_vld4q_v:
7826 case NEON::BI__builtin_neon_vld2_lane_v:
7827 case NEON::BI__builtin_neon_vld2q_lane_v:
7828 case NEON::BI__builtin_neon_vld3_lane_v:
7829 case NEON::BI__builtin_neon_vld3q_lane_v:
7830 case NEON::BI__builtin_neon_vld4_lane_v:
7831 case NEON::BI__builtin_neon_vld4q_lane_v:
7832 case NEON::BI__builtin_neon_vld2_dup_v:
7833 case NEON::BI__builtin_neon_vld2q_dup_v:
7834 case NEON::BI__builtin_neon_vld3_dup_v:
7835 case NEON::BI__builtin_neon_vld3q_dup_v:
7836 case NEON::BI__builtin_neon_vld4_dup_v:
7837 case NEON::BI__builtin_neon_vld4q_dup_v:
7838 // Get the alignment for the argument in addition to the value;
7839 // we'll use it later.
7840 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
7841 Ops.push_back(PtrOp1.getPointer());
7842 continue;
7843 }
7844 }
7845
7846 if ((ICEArguments & (1 << i)) == 0) {
7847 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7848 } else {
7849 // If this is required to be a constant, constant fold it so that we know
7850 // that the generated intrinsic gets a ConstantInt.
7851 Ops.push_back(llvm::ConstantInt::get(
7852 getLLVMContext(),
7853 *E->getArg(i)->getIntegerConstantExpr(getContext())));
7854 }
7855 }
7856
7857 switch (BuiltinID) {
7858 default: break;
7859
7860 case NEON::BI__builtin_neon_vget_lane_i8:
7861 case NEON::BI__builtin_neon_vget_lane_i16:
7862 case NEON::BI__builtin_neon_vget_lane_i32:
7863 case NEON::BI__builtin_neon_vget_lane_i64:
7864 case NEON::BI__builtin_neon_vget_lane_bf16:
7865 case NEON::BI__builtin_neon_vget_lane_f32:
7866 case NEON::BI__builtin_neon_vgetq_lane_i8:
7867 case NEON::BI__builtin_neon_vgetq_lane_i16:
7868 case NEON::BI__builtin_neon_vgetq_lane_i32:
7869 case NEON::BI__builtin_neon_vgetq_lane_i64:
7870 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7871 case NEON::BI__builtin_neon_vgetq_lane_f32:
7872 case NEON::BI__builtin_neon_vduph_lane_bf16:
7873 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7874 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
7875
7876 case NEON::BI__builtin_neon_vrndns_f32: {
7877 Value *Arg = EmitScalarExpr(E->getArg(0));
7878 llvm::Type *Tys[] = {Arg->getType()};
7879 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
7880 return Builder.CreateCall(F, {Arg}, "vrndn"); }
7881
7882 case NEON::BI__builtin_neon_vset_lane_i8:
7883 case NEON::BI__builtin_neon_vset_lane_i16:
7884 case NEON::BI__builtin_neon_vset_lane_i32:
7885 case NEON::BI__builtin_neon_vset_lane_i64:
7886 case NEON::BI__builtin_neon_vset_lane_bf16:
7887 case NEON::BI__builtin_neon_vset_lane_f32:
7888 case NEON::BI__builtin_neon_vsetq_lane_i8:
7889 case NEON::BI__builtin_neon_vsetq_lane_i16:
7890 case NEON::BI__builtin_neon_vsetq_lane_i32:
7891 case NEON::BI__builtin_neon_vsetq_lane_i64:
7892 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7893 case NEON::BI__builtin_neon_vsetq_lane_f32:
7894 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7895
7896 case NEON::BI__builtin_neon_vsha1h_u32:
7897 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
7898 "vsha1h");
7899 case NEON::BI__builtin_neon_vsha1cq_u32:
7900 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
7901 "vsha1h");
7902 case NEON::BI__builtin_neon_vsha1pq_u32:
7903 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
7904 "vsha1h");
7905 case NEON::BI__builtin_neon_vsha1mq_u32:
7906 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
7907 "vsha1h");
7908
7909 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
7910 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
7911 "vcvtbfp2bf");
7912 }
7913
7914 // The ARM _MoveToCoprocessor builtins put the input register value as
7915 // the first argument, but the LLVM intrinsic expects it as the third one.
7916 case ARM::BI_MoveToCoprocessor:
7917 case ARM::BI_MoveToCoprocessor2: {
7918 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
7919 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
7920 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
7921 Ops[3], Ops[4], Ops[5]});
7922 }
7923 }
7924
7925 // Get the last argument, which specifies the vector type.
7926 assert(HasExtraArg);
7927 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7928 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7929 if (!Result)
7930 return nullptr;
7931
7932 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7933 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7934 // Determine the overloaded type of this builtin.
7935 llvm::Type *Ty;
7936 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7937 Ty = FloatTy;
7938 else
7939 Ty = DoubleTy;
7940
7941 // Determine whether this is an unsigned conversion or not.
7942 bool usgn = Result->getZExtValue() == 1;
7943 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7944
7945 // Call the appropriate intrinsic.
7946 Function *F = CGM.getIntrinsic(Int, Ty);
7947 return Builder.CreateCall(F, Ops, "vcvtr");
7948 }
7949
7950 // Determine the type of this overloaded NEON intrinsic.
7951 NeonTypeFlags Type = Result->getZExtValue();
7952 bool usgn = Type.isUnsigned();
7953 bool rightShift = false;
7954
7955 llvm::FixedVectorType *VTy =
7956 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7957 getTarget().hasBFloat16Type());
7958 llvm::Type *Ty = VTy;
7959 if (!Ty)
7960 return nullptr;
7961
7962 // Many NEON builtins have identical semantics and uses in ARM and
7963 // AArch64. Emit these in a single function.
7964 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7965 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7966 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7967 if (Builtin)
7968 return EmitCommonNeonBuiltinExpr(
7969 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7970 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7971
7972 unsigned Int;
7973 switch (BuiltinID) {
7974 default: return nullptr;
7975 case NEON::BI__builtin_neon_vld1q_lane_v:
7976 // Handle 64-bit integer elements as a special case. Use shuffles of
7977 // one-element vectors to avoid poor code for i64 in the backend.
7978 if (VTy->getElementType()->isIntegerTy(64)) {
7979 // Extract the other lane.
7980 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7981 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7982 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7983 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7984 // Load the value as a one-element vector.
7985 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7986 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7987 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7988 Value *Align = getAlignmentValue32(PtrOp0);
7989 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7990 // Combine them.
7991 int Indices[] = {1 - Lane, Lane};
7992 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7993 }
7994 LLVM_FALLTHROUGH;
7995 case NEON::BI__builtin_neon_vld1_lane_v: {
7996 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7997 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7998 Value *Ld = Builder.CreateLoad(PtrOp0);
7999 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
8000 }
8001 case NEON::BI__builtin_neon_vqrshrn_n_v:
8002 Int =
8003 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
8004 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
8005 1, true);
8006 case NEON::BI__builtin_neon_vqrshrun_n_v:
8007 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
8008 Ops, "vqrshrun_n", 1, true);
8009 case NEON::BI__builtin_neon_vqshrn_n_v:
8010 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
8011 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
8012 1, true);
8013 case NEON::BI__builtin_neon_vqshrun_n_v:
8014 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
8015 Ops, "vqshrun_n", 1, true);
8016 case NEON::BI__builtin_neon_vrecpe_v:
8017 case NEON::BI__builtin_neon_vrecpeq_v:
8018 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
8019 Ops, "vrecpe");
8020 case NEON::BI__builtin_neon_vrshrn_n_v:
8021 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
8022 Ops, "vrshrn_n", 1, true);
8023 case NEON::BI__builtin_neon_vrsra_n_v:
8024 case NEON::BI__builtin_neon_vrsraq_n_v:
8025 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8026 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8027 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
8028 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
8029 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
8030 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
8031 case NEON::BI__builtin_neon_vsri_n_v:
8032 case NEON::BI__builtin_neon_vsriq_n_v:
8033 rightShift = true;
8034 LLVM_FALLTHROUGH;
8035 case NEON::BI__builtin_neon_vsli_n_v:
8036 case NEON::BI__builtin_neon_vsliq_n_v:
8037 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
8038 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
8039 Ops, "vsli_n");
8040 case NEON::BI__builtin_neon_vsra_n_v:
8041 case NEON::BI__builtin_neon_vsraq_n_v:
8042 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8043 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
8044 return Builder.CreateAdd(Ops[0], Ops[1]);
8045 case NEON::BI__builtin_neon_vst1q_lane_v:
8046 // Handle 64-bit integer elements as a special case. Use a shuffle to get
8047 // a one-element vector and avoid poor code for i64 in the backend.
8048 if (VTy->getElementType()->isIntegerTy(64)) {
8049 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8050 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
8051 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
8052 Ops[2] = getAlignmentValue32(PtrOp0);
8053 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
8054 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
8055 Tys), Ops);
8056 }
8057 LLVM_FALLTHROUGH;
8058 case NEON::BI__builtin_neon_vst1_lane_v: {
8059 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8060 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
8061 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
8062 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
8063 return St;
8064 }
8065 case NEON::BI__builtin_neon_vtbl1_v:
8066 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
8067 Ops, "vtbl1");
8068 case NEON::BI__builtin_neon_vtbl2_v:
8069 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
8070 Ops, "vtbl2");
8071 case NEON::BI__builtin_neon_vtbl3_v:
8072 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
8073 Ops, "vtbl3");
8074 case NEON::BI__builtin_neon_vtbl4_v:
8075 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
8076 Ops, "vtbl4");
8077 case NEON::BI__builtin_neon_vtbx1_v:
8078 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
8079 Ops, "vtbx1");
8080 case NEON::BI__builtin_neon_vtbx2_v:
8081 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
8082 Ops, "vtbx2");
8083 case NEON::BI__builtin_neon_vtbx3_v:
8084 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
8085 Ops, "vtbx3");
8086 case NEON::BI__builtin_neon_vtbx4_v:
8087 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
8088 Ops, "vtbx4");
8089 }
8090}
8091
8092template<typename Integer>
8093static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
8094 return E->getIntegerConstantExpr(Context)->getExtValue();
8095}
8096
8097static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
8098 llvm::Type *T, bool Unsigned) {
8099 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
8100 // which finds it convenient to specify signed/unsigned as a boolean flag.
8101 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
8102}
8103
8104static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
8105 uint32_t Shift, bool Unsigned) {
8106 // MVE helper function for integer shift right. This must handle signed vs
8107 // unsigned, and also deal specially with the case where the shift count is
8108 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
8109 // undefined behavior, but in MVE it's legal, so we must convert it to code
8110 // that is not undefined in IR.
8111 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
8112 ->getElementType()
8113 ->getPrimitiveSizeInBits();
8114 if (Shift == LaneBits) {
8115 // An unsigned shift of the full lane size always generates zero, so we can
8116 // simply emit a zero vector. A signed shift of the full lane size does the
8117 // same thing as shifting by one bit fewer.
8118 if (Unsigned)
8119 return llvm::Constant::getNullValue(V->getType());
8120 else
8121 --Shift;
8122 }
8123 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
8124}
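A scalar model of the shift-equal-to-lane-size rule described above, on a single 32-bit lane (plain C++; the vector plumbing is left out):

#include <cstdint>
#include <cstdio>

// Shift right by Shift bits, where Shift may equal the lane width (32 here).
// Unsigned: the result is defined to be zero. Signed: same as shifting by 31.
static int32_t mve_shr(int32_t v, unsigned Shift, bool Unsigned) {
  const unsigned LaneBits = 32;
  if (Shift == LaneBits) {
    if (Unsigned)
      return 0;
    Shift = LaneBits - 1;
  }
  return Unsigned ? (int32_t)((uint32_t)v >> Shift) : (v >> Shift);
}

int main() {
  printf("%d %d\n", mve_shr(-8, 32, /*Unsigned=*/false),   // -1 (all sign bits)
                    mve_shr(-8, 32, /*Unsigned=*/true));   // 0
}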
8125
8126static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
8127 // MVE-specific helper function for a vector splat, which infers the element
8128 // count of the output vector by knowing that MVE vectors are all 128 bits
8129 // wide.
8130 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
8131 return Builder.CreateVectorSplat(Elements, V);
8132}
8133
8134static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
8135 CodeGenFunction *CGF,
8136 llvm::Value *V,
8137 llvm::Type *DestType) {
8138 // Convert one MVE vector type into another by reinterpreting its in-register
8139 // format.
8140 //
8141 // Little-endian, this is identical to a bitcast (which reinterprets the
8142 // memory format). But big-endian, they're not necessarily the same, because
8143 // the register and memory formats map to each other differently depending on
8144 // the lane size.
8145 //
8146 // We generate a bitcast whenever we can (if we're little-endian, or if the
8147 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
8148 // that performs the different kind of reinterpretation.
8149 if (CGF->getTarget().isBigEndian() &&
8150 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
8151 return Builder.CreateCall(
8152 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
8153 {DestType, V->getType()}),
8154 V);
8155 } else {
8156 return Builder.CreateBitCast(V, DestType);
8157 }
8158}
8159
8160static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
8161 // Make a shufflevector that extracts every other element of a vector (evens
8162 // or odds, as desired).
8163 SmallVector<int, 16> Indices;
8164 unsigned InputElements =
8165 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
8166 for (unsigned i = 0; i < InputElements; i += 2)
8167 Indices.push_back(i + Odd);
8168 return Builder.CreateShuffleVector(V, Indices);
8169}
8170
8171static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
8172 llvm::Value *V1) {
8173 // Make a shufflevector that interleaves two vectors element by element.
8174 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8175 SmallVector<int, 16> Indices;
8176 unsigned InputElements =
8177 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
8178 for (unsigned i = 0; i < InputElements; i++) {
8179 Indices.push_back(i);
8180 Indices.push_back(i + InputElements);
8181 }
8182 return Builder.CreateShuffleVector(V0, V1, Indices);
8183}
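VectorUnzip and VectorZip above are complementary shuffle-mask patterns: unzip keeps every other lane (evens or odds), while zip interleaves two inputs lane by lane, numbering the second input's lanes after the first. A small sketch that prints the masks they would build (the lane count is illustrative):

#include <cstdio>

int main() {
  const unsigned InputElements = 8;   // illustrative lane count

  // Unzip: lanes 0,2,4,... for Odd = 0, or 1,3,5,... for Odd = 1.
  for (unsigned Odd = 0; Odd <= 1; ++Odd) {
    printf("unzip(%u):", Odd);
    for (unsigned i = 0; i < InputElements; i += 2)
      printf(" %u", i + Odd);
    printf("\n");
  }

  // Zip: interleave V0[i] with V1[i]; V1's lanes come after V0's.
  printf("zip:");
  for (unsigned i = 0; i < InputElements; i++)
    printf(" %u %u", i, i + InputElements);
  printf("\n");
}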
8184
8185template<unsigned HighBit, unsigned OtherBits>
8186static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
8187 // MVE-specific helper function to make a vector splat of a constant such as
8188 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
8189 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
8190 unsigned LaneBits = T->getPrimitiveSizeInBits();
8191 uint32_t Value = HighBit << (LaneBits - 1);
8192 if (OtherBits)
8193 Value |= (1UL << (LaneBits - 1)) - 1;
8194 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
8195 return ARMMVEVectorSplat(Builder, Lane);
8196}
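The lane constant built above has a chosen top bit with all lower bits equal: HighBit=1, OtherBits=0 yields the INT_MIN pattern for the lane, HighBit=0, OtherBits=1 yields INT_MAX, and HighBit=1, OtherBits=1 yields UINT_MAX. A scalar sketch of the bit construction for 16-bit lanes:

#include <cstdint>
#include <cstdio>

// Chosen high bit, with every lower bit equal to OtherBits.
static uint32_t lane_constant(unsigned HighBit, unsigned OtherBits,
                              unsigned LaneBits) {
  uint32_t Value = HighBit << (LaneBits - 1);
  if (OtherBits)
    Value |= (1UL << (LaneBits - 1)) - 1;
  return Value;
}

int main() {
  printf("%04x %04x %04x\n",
         lane_constant(1, 0, 16),   // 8000: INT16_MIN bit pattern
         lane_constant(0, 1, 16),   // 7fff: INT16_MAX
         lane_constant(1, 1, 16));  // ffff: UINT16_MAX
}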
8197
8198static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
8199 llvm::Value *V,
8200 unsigned ReverseWidth) {
8201 // MVE-specific helper function which reverses the elements of a
8202 // vector within every (ReverseWidth)-bit collection of lanes.
8203 SmallVector<int, 16> Indices;
8204 unsigned LaneSize = V->getType()->getScalarSizeInBits();
8205 unsigned Elements = 128 / LaneSize;
8206 unsigned Mask = ReverseWidth / LaneSize - 1;
8207 for (unsigned i = 0; i < Elements; i++)
8208 Indices.push_back(i ^ Mask);
8209 return Builder.CreateShuffleVector(V, Indices);
8210}
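The index computation above reverses lanes within each ReverseWidth-bit group by XOR-ing the lane index with a mask: for 8-bit lanes and a 32-bit reverse width the mask is 3, so the shuffle becomes 3,2,1,0, 7,6,5,4, and so on. A sketch that prints the resulting mask (the lane and reverse widths are illustrative):

#include <cstdio>

int main() {
  const unsigned LaneSize = 8;        // bits per lane
  const unsigned ReverseWidth = 32;   // reverse within each 32-bit chunk
  const unsigned Elements = 128 / LaneSize;
  const unsigned Mask = ReverseWidth / LaneSize - 1;

  // Lane i moves to position i ^ Mask, reversing lanes within each group.
  for (unsigned i = 0; i < Elements; i++)
    printf("%u ", i ^ Mask);
  printf("\n");   // 3 2 1 0 7 6 5 4 ...
}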
8211
8212Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
8213 const CallExpr *E,
8214 ReturnValueSlot ReturnValue,
8215 llvm::Triple::ArchType Arch) {
8216 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
8217 Intrinsic::ID IRIntr;
8218 unsigned NumVectors;
8219
8220 // Code autogenerated by Tablegen will handle all the simple builtins.
8221 switch (BuiltinID) {
8222 #include "clang/Basic/arm_mve_builtin_cg.inc"
8223
8224 // If we didn't match an MVE builtin id at all, go back to the
8225 // main EmitARMBuiltinExpr.
8226 default:
8227 return nullptr;
8228 }
8229
8230 // Anything that breaks from that switch is an MVE builtin that
8231 // needs handwritten code to generate.
8232
8233 switch (CustomCodeGenType) {
8234
8235 case CustomCodeGen::VLD24: {
8236 llvm::SmallVector<Value *, 4> Ops;
8237 llvm::SmallVector<llvm::Type *, 4> Tys;
8238
8239 auto MvecCType = E->getType();
8240 auto MvecLType = ConvertType(MvecCType);
8241 assert(MvecLType->isStructTy() &&
8242 "Return type for vld[24]q should be a struct");
8243 assert(MvecLType->getStructNumElements() == 1 &&
8244 "Return-type struct for vld[24]q should have one element");
8245 auto MvecLTypeInner = MvecLType->getStructElementType(0);
8246 assert(MvecLTypeInner->isArrayTy() &&
8247 "Return-type struct for vld[24]q should contain an array");
8248 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8249 "Array member of return-type struct vld[24]q has wrong length");
8250 auto VecLType = MvecLTypeInner->getArrayElementType();
8251
8252 Tys.push_back(VecLType);
8253
8254 auto Addr = E->getArg(0);
8255 Ops.push_back(EmitScalarExpr(Addr));
8256 Tys.push_back(ConvertType(Addr->getType()));
8257
8258 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
8259 Value *LoadResult = Builder.CreateCall(F, Ops);
8260 Value *MvecOut = UndefValue::get(MvecLType);
8261 for (unsigned i = 0; i < NumVectors; ++i) {
8262 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
8263 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
8264 }
8265
8266 if (ReturnValue.isNull())
8267 return MvecOut;
8268 else
8269 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
8270 }
8271
8272 case CustomCodeGen::VST24: {
8273 llvm::SmallVector<Value *, 4> Ops;
8274 llvm::SmallVector<llvm::Type *, 4> Tys;
8275
8276 auto Addr = E->getArg(0);
8277 Ops.push_back(EmitScalarExpr(Addr));
8278 Tys.push_back(ConvertType(Addr->getType()));
8279
8280 auto MvecCType = E->getArg(1)->getType();
8281 auto MvecLType = ConvertType(MvecCType);
8282 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
8283 assert(MvecLType->getStructNumElements() == 1 &&
8284 "Data-type struct for vst2q should have one element");
8285 auto MvecLTypeInner = MvecLType->getStructElementType(0);
8286 assert(MvecLTypeInner->isArrayTy() &&
8287 "Data-type struct for vst2q should contain an array");
8288 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8289 "Array member of return-type struct vld[24]q has wrong length");
8290 auto VecLType = MvecLTypeInner->getArrayElementType();
8291
8292 Tys.push_back(VecLType);
8293
8294 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
8295 EmitAggExpr(E->getArg(1), MvecSlot);
8296 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
8297 for (unsigned i = 0; i < NumVectors; i++)
8298 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
8299
8300 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
8301 Value *ToReturn = nullptr;
8302 for (unsigned i = 0; i < NumVectors; i++) {
8303 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
8304 ToReturn = Builder.CreateCall(F, Ops);
8305 Ops.pop_back();
8306 }
8307 return ToReturn;
8308 }
8309 }
8310 llvm_unreachable("unknown custom codegen type.");
8311}
8312
8313Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
8314 const CallExpr *E,
8315 ReturnValueSlot ReturnValue,
8316 llvm::Triple::ArchType Arch) {
8317 switch (BuiltinID) {
8318 default:
8319 return nullptr;
8320#include "clang/Basic/arm_cde_builtin_cg.inc"
8321 }
8322}
8323
8324static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
8325 const CallExpr *E,
8326 SmallVectorImpl<Value *> &Ops,
8327 llvm::Triple::ArchType Arch) {
8328 unsigned int Int = 0;
8329 const char *s = nullptr;
8330
8331 switch (BuiltinID) {
8332 default:
8333 return nullptr;
8334 case NEON::BI__builtin_neon_vtbl1_v:
8335 case NEON::BI__builtin_neon_vqtbl1_v:
8336 case NEON::BI__builtin_neon_vqtbl1q_v:
8337 case NEON::BI__builtin_neon_vtbl2_v:
8338 case NEON::BI__builtin_neon_vqtbl2_v:
8339 case NEON::BI__builtin_neon_vqtbl2q_v:
8340 case NEON::BI__builtin_neon_vtbl3_v:
8341 case NEON::BI__builtin_neon_vqtbl3_v:
8342 case NEON::BI__builtin_neon_vqtbl3q_v:
8343 case NEON::BI__builtin_neon_vtbl4_v:
8344 case NEON::BI__builtin_neon_vqtbl4_v:
8345 case NEON::BI__builtin_neon_vqtbl4q_v:
8346 break;
8347 case NEON::BI__builtin_neon_vtbx1_v:
8348 case NEON::BI__builtin_neon_vqtbx1_v:
8349 case NEON::BI__builtin_neon_vqtbx1q_v:
8350 case NEON::BI__builtin_neon_vtbx2_v:
8351 case NEON::BI__builtin_neon_vqtbx2_v:
8352 case NEON::BI__builtin_neon_vqtbx2q_v:
8353 case NEON::BI__builtin_neon_vtbx3_v:
8354 case NEON::BI__builtin_neon_vqtbx3_v:
8355 case NEON::BI__builtin_neon_vqtbx3q_v:
8356 case NEON::BI__builtin_neon_vtbx4_v:
8357 case NEON::BI__builtin_neon_vqtbx4_v:
8358 case NEON::BI__builtin_neon_vqtbx4q_v:
8359 break;
8360 }
8361
8362 assert(E->getNumArgs() >= 3);
8363
8364 // Get the last argument, which specifies the vector type.
8365 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
8366 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
8367 if (!Result)
8368 return nullptr;
8369
8370 // Determine the type of this overloaded NEON intrinsic.
8371 NeonTypeFlags Type = Result->getZExtValue();
8372 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
8373 if (!Ty)
8374 return nullptr;
8375
8376 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8377
8378 // AArch64 scalar builtins are not overloaded; they do not have an extra
8379 // argument that specifies the vector type, so we need to handle each case.
8380 switch (BuiltinID) {
8381 case NEON::BI__builtin_neon_vtbl1_v: {
8382 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
8383 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
8384 "vtbl1");
8385 }
8386 case NEON::BI__builtin_neon_vtbl2_v: {
8387 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
8388 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
8389 "vtbl1");
8390 }
8391 case NEON::BI__builtin_neon_vtbl3_v: {
8392 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
8393 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
8394 "vtbl2");
8395 }
8396 case NEON::BI__builtin_neon_vtbl4_v: {
8397 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
8398 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
8399 "vtbl2");
8400 }
8401 case NEON::BI__builtin_neon_vtbx1_v: {
8402 Value *TblRes =
8403 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
8404 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
8405
8406 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
8407 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
8408 CmpRes = Builder.CreateSExt(CmpRes, Ty);
8409
8410 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8411 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8412 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8413 }
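  // Note: vtbx1 has no single AArch64 instruction, so the case above emulates
  // it: a one-register TBL is emitted first, then a lane-wise select keeps the
  // original destination element wherever the index is >= 8 (out of range for
  // an 8-byte table) and takes the TBL result everywhere else.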
8414 case NEON::BI__builtin_neon_vtbx2_v: {
8415 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
8416 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
8417 "vtbx1");
8418 }
8419 case NEON::BI__builtin_neon_vtbx3_v: {
8420 Value *TblRes =
8421 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
8422 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
8423
8424 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
8425 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
8426 TwentyFourV);
8427 CmpRes = Builder.CreateSExt(CmpRes, Ty);
8428
8429 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8430 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8431 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8432 }
8433 case NEON::BI__builtin_neon_vtbx4_v: {
8434 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
8435 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
8436 "vtbx2");
8437 }
8438 case NEON::BI__builtin_neon_vqtbl1_v:
8439 case NEON::BI__builtin_neon_vqtbl1q_v:
8440 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
8441 case NEON::BI__builtin_neon_vqtbl2_v:
8442 case NEON::BI__builtin_neon_vqtbl2q_v: {
8443 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
8444 case NEON::BI__builtin_neon_vqtbl3_v:
8445 case NEON::BI__builtin_neon_vqtbl3q_v:
8446 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
8447 case NEON::BI__builtin_neon_vqtbl4_v:
8448 case NEON::BI__builtin_neon_vqtbl4q_v:
8449 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
8450 case NEON::BI__builtin_neon_vqtbx1_v:
8451 case NEON::BI__builtin_neon_vqtbx1q_v:
8452 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
8453 case NEON::BI__builtin_neon_vqtbx2_v:
8454 case NEON::BI__builtin_neon_vqtbx2q_v:
8455 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
8456 case NEON::BI__builtin_neon_vqtbx3_v:
8457 case NEON::BI__builtin_neon_vqtbx3q_v:
8458 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
8459 case NEON::BI__builtin_neon_vqtbx4_v:
8460 case NEON::BI__builtin_neon_vqtbx4q_v:
8461 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
8462 }
8463 }
8464
8465 if (!Int)
8466 return nullptr;
8467
8468 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
8469 return CGF.EmitNeonCall(F, Ops, s);
8470}
8471
8472Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
8473 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
8474 Op = Builder.CreateBitCast(Op, Int16Ty);
8475 Value *V = UndefValue::get(VTy);
8476 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
8477 Op = Builder.CreateInsertElement(V, Op, CI);
8478 return Op;
8479}
8480
8481/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
8482/// access builtin. Only required if it can't be inferred from the base pointer
8483/// operand.
8484llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
8485 switch (TypeFlags.getMemEltType()) {
8486 case SVETypeFlags::MemEltTyDefault:
8487 return getEltType(TypeFlags);
8488 case SVETypeFlags::MemEltTyInt8:
8489 return Builder.getInt8Ty();
8490 case SVETypeFlags::MemEltTyInt16:
8491 return Builder.getInt16Ty();
8492 case SVETypeFlags::MemEltTyInt32:
8493 return Builder.getInt32Ty();
8494 case SVETypeFlags::MemEltTyInt64:
8495 return Builder.getInt64Ty();
8496 }
8497 llvm_unreachable("Unknown MemEltType")::llvm::llvm_unreachable_internal("Unknown MemEltType", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8497)
;
8498}
8499
8500llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
8501 switch (TypeFlags.getEltType()) {
8502 default:
8503 llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8503)
;
8504
8505 case SVETypeFlags::EltTyInt8:
8506 return Builder.getInt8Ty();
8507 case SVETypeFlags::EltTyInt16:
8508 return Builder.getInt16Ty();
8509 case SVETypeFlags::EltTyInt32:
8510 return Builder.getInt32Ty();
8511 case SVETypeFlags::EltTyInt64:
8512 return Builder.getInt64Ty();
8513
8514 case SVETypeFlags::EltTyFloat16:
8515 return Builder.getHalfTy();
8516 case SVETypeFlags::EltTyFloat32:
8517 return Builder.getFloatTy();
8518 case SVETypeFlags::EltTyFloat64:
8519 return Builder.getDoubleTy();
8520
8521 case SVETypeFlags::EltTyBFloat16:
8522 return Builder.getBFloatTy();
8523
8524 case SVETypeFlags::EltTyBool8:
8525 case SVETypeFlags::EltTyBool16:
8526 case SVETypeFlags::EltTyBool32:
8527 case SVETypeFlags::EltTyBool64:
8528 return Builder.getInt1Ty();
8529 }
8530}
8531
8532// Return the llvm predicate vector type corresponding to the specified element
8533// TypeFlags.
8534llvm::ScalableVectorType *
8535CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
8536 switch (TypeFlags.getEltType()) {
8537 default: llvm_unreachable("Unhandled SVETypeFlag!");
8538
8539 case SVETypeFlags::EltTyInt8:
8540 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8541 case SVETypeFlags::EltTyInt16:
8542 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8543 case SVETypeFlags::EltTyInt32:
8544 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8545 case SVETypeFlags::EltTyInt64:
8546 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8547
8548 case SVETypeFlags::EltTyBFloat16:
8549 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8550 case SVETypeFlags::EltTyFloat16:
8551 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8552 case SVETypeFlags::EltTyFloat32:
8553 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8554 case SVETypeFlags::EltTyFloat64:
8555 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8556
8557 case SVETypeFlags::EltTyBool8:
8558 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8559 case SVETypeFlags::EltTyBool16:
8560 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8561 case SVETypeFlags::EltTyBool32:
8562 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8563 case SVETypeFlags::EltTyBool64:
8564 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8565 }
8566}
8567
8568// Return the llvm vector type corresponding to the specified element TypeFlags.
8569llvm::ScalableVectorType *
8570CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
8571 switch (TypeFlags.getEltType()) {
8572 default:
8573 llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8573)
;
8574
8575 case SVETypeFlags::EltTyInt8:
8576 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
8577 case SVETypeFlags::EltTyInt16:
8578 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
8579 case SVETypeFlags::EltTyInt32:
8580 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
8581 case SVETypeFlags::EltTyInt64:
8582 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
8583
8584 case SVETypeFlags::EltTyFloat16:
8585 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
8586 case SVETypeFlags::EltTyBFloat16:
8587 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
8588 case SVETypeFlags::EltTyFloat32:
8589 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
8590 case SVETypeFlags::EltTyFloat64:
8591 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
8592
8593 case SVETypeFlags::EltTyBool8:
8594 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8595 case SVETypeFlags::EltTyBool16:
8596 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8597 case SVETypeFlags::EltTyBool32:
8598 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8599 case SVETypeFlags::EltTyBool64:
8600 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8601 }
8602}
8603
8604llvm::Value *
8605CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
8606 Function *Ptrue =
8607 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
8608 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
8609}
8610
8611constexpr unsigned SVEBitsPerBlock = 128;
8612
8613static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
8614 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
8615 return llvm::ScalableVectorType::get(EltTy, NumElts);
8616}
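// For example, with SVEBitsPerBlock = 128 this helper yields one 128-bit
// granule worth of lanes per element type: i8 gives <vscale x 16 x i8>
// (128 / 8 = 16), i32 gives <vscale x 4 x i32> (128 / 32 = 4), and i64 gives
// <vscale x 2 x i64> (128 / 64 = 2).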
8617
8618// Reinterpret the input predicate so that it can be used to correctly isolate
8619// the elements of the specified datatype.
8620Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
8621 llvm::ScalableVectorType *VTy) {
8622 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
8623 if (Pred->getType() == RTy)
8624 return Pred;
8625
8626 unsigned IntID;
8627 llvm::Type *IntrinsicTy;
8628 switch (VTy->getMinNumElements()) {
8629 default:
8630 llvm_unreachable("unsupported element count!")::llvm::llvm_unreachable_internal("unsupported element count!"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8630)
;
8631 case 2:
8632 case 4:
8633 case 8:
8634 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
8635 IntrinsicTy = RTy;
8636 break;
8637 case 16:
8638 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
8639 IntrinsicTy = Pred->getType();
8640 break;
8641 }
8642
8643 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
8644 Value *C = Builder.CreateCall(F, Pred);
8645 assert(C->getType() == RTy && "Unexpected return type!");
8646 return C;
8647}
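// For example, an ACLE svbool_t arrives here as <vscale x 16 x i1>. When the
// data being accessed is <vscale x 2 x i64>, the cast goes through
// aarch64.sve.convert.from.svbool to produce the matching <vscale x 2 x i1>;
// a 16-lane target instead uses aarch64.sve.convert.to.svbool.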
8648
8649Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
8650 SmallVectorImpl<Value *> &Ops,
8651 unsigned IntID) {
8652 auto *ResultTy = getSVEType(TypeFlags);
8653 auto *OverloadedTy =
8654 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
8655
8656 // At the ACLE level there's only one predicate type, svbool_t, which is
8657 // mapped to <n x 16 x i1>. However, this might be incompatible with the
8658 // actual type being loaded. For example, when loading doubles (i64) the
8659 // predicate should be <n x 2 x i1> instead. At the IR level the type of
8660 // the predicate and the data being loaded must match. Cast accordingly.
8661 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8662
8663 Function *F = nullptr;
8664 if (Ops[1]->getType()->isVectorTy())
8665 // This is the "vector base, scalar offset" case. In order to uniquely
8666 // map this built-in to an LLVM IR intrinsic, we need both the return type
8667 // and the type of the vector base.
8668 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
8669 else
8670 // This is the "scalar base, vector offset case". The type of the offset
8671 // is encoded in the name of the intrinsic. We only need to specify the
8672 // return type in order to uniquely map this built-in to an LLVM IR
8673 // intrinsic.
8674 F = CGM.getIntrinsic(IntID, OverloadedTy);
8675
8676 // Pass 0 when the offset is missing. This can only be applied when using
8677 // the "vector base" addressing mode for which ACLE allows no offset. The
8678 // corresponding LLVM IR always requires an offset.
8679 if (Ops.size() == 2) {
8680 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8681 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8682 }
8683
8684 // For "vector base, scalar index" scale the index so that it becomes a
8685 // scalar offset.
8686 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
8687 unsigned BytesPerElt =
8688 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8689 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8690 Ops[2] = Builder.CreateMul(Ops[2], Scale);
8691 }
8692
8693 Value *Call = Builder.CreateCall(F, Ops);
8694
8695 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
8696 // other cases it's folded into a nop.
8697 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
8698 : Builder.CreateSExt(Call, ResultTy);
8699}
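// For example, when an i64 gather uses the "vector base, scalar index" form,
// the index is multiplied by 8 above so that the LLVM intrinsic, which only
// accepts byte offsets for this addressing mode, addresses the right element.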
8700
8701Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
8702 SmallVectorImpl<Value *> &Ops,
8703 unsigned IntID) {
8704 auto *SrcDataTy = getSVEType(TypeFlags);
8705 auto *OverloadedTy =
8706 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
8707
8708 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
8709 // it's the first argument. Move it accordingly.
8710 Ops.insert(Ops.begin(), Ops.pop_back_val());
8711
8712 Function *F = nullptr;
8713 if (Ops[2]->getType()->isVectorTy())
8714 // This is the "vector base, scalar offset" case. In order to uniquely
8715 // map this built-in to an LLVM IR intrinsic, we need both the return type
8716 // and the type of the vector base.
8717 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
8718 else
8719 // This is the "scalar base, vector offset case". The type of the offset
8720 // is encoded in the name of the intrinsic. We only need to specify the
8721 // return type in order to uniquely map this built-in to an LLVM IR
8722 // intrinsic.
8723 F = CGM.getIntrinsic(IntID, OverloadedTy);
8724
8725 // Pass 0 when the offset is missing. This can only be applied when using
8726 // the "vector base" addressing mode for which ACLE allows no offset. The
8727 // corresponding LLVM IR always requires an offset.
8728 if (Ops.size() == 3) {
8729 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8730 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8731 }
8732
8733 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
8734 // folded into a nop.
8735 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
8736
8737 // At the ACLE level there's only one predicate type, svbool_t, which is
8738 // mapped to <n x 16 x i1>. However, this might be incompatible with the
8739 // actual type being stored. For example, when storing doubles (i64) the
8740 // predicate should be <n x 2 x i1> instead. At the IR level the type of
8741 // the predicate and the data being stored must match. Cast accordingly.
8742 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
8743
8744 // For "vector base, scalar index" scale the index so that it becomes a
8745 // scalar offset.
8746 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
8747 unsigned BytesPerElt =
8748 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8749 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8750 Ops[3] = Builder.CreateMul(Ops[3], Scale);
8751 }
8752
8753 return Builder.CreateCall(F, Ops);
8754}
8755
8756Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
8757 SmallVectorImpl<Value *> &Ops,
8758 unsigned IntID) {
8759 // The gather prefetches are overloaded on the vector input - this can either
8760 // be the vector of base addresses or vector of offsets.
8761 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
8762 if (!OverloadedTy)
8763 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
8764
8765 // Cast the predicate from svbool_t to the right number of elements.
8766 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8767
8768 // vector + imm addressing modes
8769 if (Ops[1]->getType()->isVectorTy()) {
8770 if (Ops.size() == 3) {
8771 // Pass 0 for 'vector+imm' when the index is omitted.
8772 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8773
8774 // The sv_prfop is the last operand in the builtin and IR intrinsic.
8775 std::swap(Ops[2], Ops[3]);
8776 } else {
8777 // Index needs to be passed as scaled offset.
8778 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8779 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
8780 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8781 Ops[2] = Builder.CreateMul(Ops[2], Scale);
8782 }
8783 }
8784
8785 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
8786 return Builder.CreateCall(F, Ops);
8787}
8788
8789Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
8790 SmallVectorImpl<Value*> &Ops,
8791 unsigned IntID) {
8792 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8793 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8794 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8795
8796 unsigned N;
8797 switch (IntID) {
8798 case Intrinsic::aarch64_sve_ld2:
8799 N = 2;
8800 break;
8801 case Intrinsic::aarch64_sve_ld3:
8802 N = 3;
8803 break;
8804 case Intrinsic::aarch64_sve_ld4:
8805 N = 4;
8806 break;
8807 default:
8808 llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8808)
;
8809 }
8810 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
8811 VTy->getElementCount() * N);
8812
8813 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8814 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8815 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8816 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8817 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8818
8819 Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8820 return Builder.CreateCall(F, { Predicate, BasePtr });
8821}
8822
8823Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
8824 SmallVectorImpl<Value*> &Ops,
8825 unsigned IntID) {
8826 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8827 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8828 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8829
8830 unsigned N;
8831 switch (IntID) {
8832 case Intrinsic::aarch64_sve_st2:
8833 N = 2;
8834 break;
8835 case Intrinsic::aarch64_sve_st3:
8836 N = 3;
8837 break;
8838 case Intrinsic::aarch64_sve_st4:
8839 N = 4;
8840 break;
8841 default:
8842 llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8842)
;
8843 }
8844 auto TupleTy =
8845 llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8846
8847 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8848 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8849 Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8850 Value *Val = Ops.back();
8851 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8852 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8853
8854 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8855 // need to break up the tuple vector.
8856 SmallVector<llvm::Value*, 5> Operands;
8857 Function *FExtr =
8858 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8859 for (unsigned I = 0; I < N; ++I)
8860 Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8861 Operands.append({Predicate, BasePtr});
8862
8863 Function *F = CGM.getIntrinsic(IntID, { VTy });
8864 return Builder.CreateCall(F, Operands);
8865}
8866
8867// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8868// svpmullt_pair intrinsics, with the exception that their results are bitcast
8869// to a wider type.
8870Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
8871 SmallVectorImpl<Value *> &Ops,
8872 unsigned BuiltinID) {
8873 // Splat scalar operand to vector (intrinsics with _n infix)
8874 if (TypeFlags.hasSplatOperand()) {
8875 unsigned OpNo = TypeFlags.getSplatOperand();
8876 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8877 }
8878
8879 // The pair-wise function has a narrower overloaded type.
8880 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8881 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8882
8883 // Now bitcast to the wider result type.
8884 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8885 return EmitSVEReinterpret(Call, Ty);
8886}
8887
8888Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
8889 ArrayRef<Value *> Ops, unsigned BuiltinID) {
8890 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8891 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8892 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8893}
8894
8895Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
8896 SmallVectorImpl<Value *> &Ops,
8897 unsigned BuiltinID) {
8898 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8899 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8900 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8901
8902 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8903 Value *BasePtr = Ops[1];
8904
8905 // Apply the index operand if it was not omitted.
8906 if (Ops.size() > 3) {
8907 BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8908 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8909 }
8910
8911 // Prefetch intrinsics always expect an i8*
8912 BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8913 Value *PrfOp = Ops.back();
8914
8915 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8916 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8917}
8918
8919Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8920 llvm::Type *ReturnTy,
8921 SmallVectorImpl<Value *> &Ops,
8922 unsigned BuiltinID,
8923 bool IsZExtReturn) {
8924 QualType LangPTy = E->getArg(1)->getType();
8925 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8926 LangPTy->castAs<PointerType>()->getPointeeType());
8927
8928 // The vector type that is returned may be different from the
8929 // eventual type loaded from memory.
8930 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8931 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8932
8933 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8934 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8935 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8936 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8937
8938 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8939 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8940 Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8941
8942 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
8943 : Builder.CreateSExt(Load, VectorTy);
8944}
8945
8946Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8947 SmallVectorImpl<Value *> &Ops,
8948 unsigned BuiltinID) {
8949 QualType LangPTy = E->getArg(1)->getType();
8950 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8951 LangPTy->castAs<PointerType>()->getPointeeType());
8952
8953 // The vector type that is stored may be different from the
8954 // eventual type stored to memory.
8955 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8956 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8957
8958 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8959 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8960 Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8961 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8962
8963 // Last value is always the data
8964 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8965
8966 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8967 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8968 return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8969}
8970
8971// Limit the usage of scalable llvm IR generated by the ACLE by using the
8972// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
8973Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8974 auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8975 return Builder.CreateCall(F, Scalar);
8976}
8977
8978Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
8979 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8980}
8981
8982Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8983 // FIXME: For big endian this needs an additional REV, or needs a separate
8984 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
8985 // instruction is defined as 'bitwise' equivalent from memory point of
8986 // view (when storing/reloading), whereas the svreinterpret builtin
8987 // implements bitwise equivalent cast from register point of view.
8988 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8989 return Builder.CreateBitCast(Val, Ty);
8990}
8991
8992static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8993 SmallVectorImpl<Value *> &Ops) {
8994 auto *SplatZero = Constant::getNullValue(Ty);
8995 Ops.insert(Ops.begin(), SplatZero);
8996}
8997
8998static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8999 SmallVectorImpl<Value *> &Ops) {
9000 auto *SplatUndef = UndefValue::get(Ty);
9001 Ops.insert(Ops.begin(), SplatUndef);
9002}
9003
9004SmallVector<llvm::Type *, 2>
9005CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
9006 llvm::Type *ResultType,
9007 ArrayRef<Value *> Ops) {
9008 if (TypeFlags.isOverloadNone())
9009 return {};
9010
9011 llvm::Type *DefaultType = getSVEType(TypeFlags);
9012
9013 if (TypeFlags.isOverloadWhile())
9014 return {DefaultType, Ops[1]->getType()};
9015
9016 if (TypeFlags.isOverloadWhileRW())
9017 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
9018
9019 if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
9020 return {Ops[0]->getType(), Ops.back()->getType()};
9021
9022 if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
9023 return {ResultType, Ops[0]->getType()};
9024
9025 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
9026 return {DefaultType};
9027}
9028
9029Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
9030 const CallExpr *E) {
9031 // Find out if any arguments are required to be integer constant expressions.
9032 unsigned ICEArguments = 0;
9033 ASTContext::GetBuiltinTypeError Error;
9034 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9035 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9036
9037 llvm::Type *Ty = ConvertType(E->getType());
9038 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
9039 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
9040 Value *Val = EmitScalarExpr(E->getArg(0));
9041 return EmitSVEReinterpret(Val, Ty);
9042 }
9043
9044 llvm::SmallVector<Value *, 4> Ops;
9045 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
9046 if ((ICEArguments & (1 << i)) == 0)
9047 Ops.push_back(EmitScalarExpr(E->getArg(i)));
9048 else {
9049 // If this is required to be a constant, constant fold it so that we know
9050 // that the generated intrinsic gets a ConstantInt.
9051 Optional<llvm::APSInt> Result =
9052 E->getArg(i)->getIntegerConstantExpr(getContext());
9053 assert(Result && "Expected argument to be a constant")(static_cast <bool> (Result && "Expected argument to be a constant"
) ? void (0) : __assert_fail ("Result && \"Expected argument to be a constant\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 9053, __extension__ __PRETTY_FUNCTION__))
;
9054
9055 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
9056 // truncate because the immediate has been range checked and no valid
9057 // immediate requires more than a handful of bits.
9058 *Result = Result->extOrTrunc(32);
9059 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
9060 }
9061 }
9062
9063 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
9064 AArch64SVEIntrinsicsProvenSorted);
9065 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9066 if (TypeFlags.isLoad())
9067 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
9068 TypeFlags.isZExtReturn());
9069 else if (TypeFlags.isStore())
9070 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
9071 else if (TypeFlags.isGatherLoad())
9072 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9073 else if (TypeFlags.isScatterStore())
9074 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9075 else if (TypeFlags.isPrefetch())
9076 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9077 else if (TypeFlags.isGatherPrefetch())
9078 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9079 else if (TypeFlags.isStructLoad())
9080 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9081 else if (TypeFlags.isStructStore())
9082 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9083 else if (TypeFlags.isUndef())
9084 return UndefValue::get(Ty);
9085 else if (Builtin->LLVMIntrinsic != 0) {
9086 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
9087 InsertExplicitZeroOperand(Builder, Ty, Ops);
9088
9089 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
9090 InsertExplicitUndefOperand(Builder, Ty, Ops);
9091
9092 // Some ACLE builtins leave out the argument to specify the predicate
9093 // pattern, which is expected to be expanded to an SV_ALL pattern.
9094 if (TypeFlags.isAppendSVALL())
9095 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
9096 if (TypeFlags.isInsertOp1SVALL())
9097 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
9098
9099 // Predicates must match the main datatype.
9100 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
9101 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
9102 if (PredTy->getElementType()->isIntegerTy(1))
9103 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
9104
9105 // Splat scalar operand to vector (intrinsics with _n infix)
9106 if (TypeFlags.hasSplatOperand()) {
9107 unsigned OpNo = TypeFlags.getSplatOperand();
9108 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
9109 }
9110
9111 if (TypeFlags.isReverseCompare())
9112 std::swap(Ops[1], Ops[2]);
9113
9114 if (TypeFlags.isReverseUSDOT())
9115 std::swap(Ops[1], Ops[2]);
9116
9117 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
9118 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
9119 llvm::Type *OpndTy = Ops[1]->getType();
9120 auto *SplatZero = Constant::getNullValue(OpndTy);
9121 Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
9122 Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
9123 }
9124
9125 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
9126 getSVEOverloadTypes(TypeFlags, Ty, Ops));
9127 Value *Call = Builder.CreateCall(F, Ops);
9128
9129 // Predicate results must be converted to svbool_t.
9130 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
9131 if (PredTy->getScalarType()->isIntegerTy(1))
9132 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9133
9134 return Call;
9135 }
9136
9137 switch (BuiltinID) {
9138 default:
9139 return nullptr;
9140
9141 case SVE::BI__builtin_sve_svmov_b_z: {
9142 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
9143 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9144 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
9145 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
9146 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
9147 }
9148
9149 case SVE::BI__builtin_sve_svnot_b_z: {
9150 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
9151 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9152 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
9153 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
9154 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
9155 }
9156
9157 case SVE::BI__builtin_sve_svmovlb_u16:
9158 case SVE::BI__builtin_sve_svmovlb_u32:
9159 case SVE::BI__builtin_sve_svmovlb_u64:
9160 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
9161
9162 case SVE::BI__builtin_sve_svmovlb_s16:
9163 case SVE::BI__builtin_sve_svmovlb_s32:
9164 case SVE::BI__builtin_sve_svmovlb_s64:
9165 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
9166
9167 case SVE::BI__builtin_sve_svmovlt_u16:
9168 case SVE::BI__builtin_sve_svmovlt_u32:
9169 case SVE::BI__builtin_sve_svmovlt_u64:
9170 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
9171
9172 case SVE::BI__builtin_sve_svmovlt_s16:
9173 case SVE::BI__builtin_sve_svmovlt_s32:
9174 case SVE::BI__builtin_sve_svmovlt_s64:
9175 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
9176
9177 case SVE::BI__builtin_sve_svpmullt_u16:
9178 case SVE::BI__builtin_sve_svpmullt_u64:
9179 case SVE::BI__builtin_sve_svpmullt_n_u16:
9180 case SVE::BI__builtin_sve_svpmullt_n_u64:
9181 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
9182
9183 case SVE::BI__builtin_sve_svpmullb_u16:
9184 case SVE::BI__builtin_sve_svpmullb_u64:
9185 case SVE::BI__builtin_sve_svpmullb_n_u16:
9186 case SVE::BI__builtin_sve_svpmullb_n_u64:
9187 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
9188
9189 case SVE::BI__builtin_sve_svdup_n_b8:
9190 case SVE::BI__builtin_sve_svdup_n_b16:
9191 case SVE::BI__builtin_sve_svdup_n_b32:
9192 case SVE::BI__builtin_sve_svdup_n_b64: {
9193 Value *CmpNE =
9194 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
9195 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
9196 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
9197 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
9198 }
9199
9200 case SVE::BI__builtin_sve_svdupq_n_b8:
9201 case SVE::BI__builtin_sve_svdupq_n_b16:
9202 case SVE::BI__builtin_sve_svdupq_n_b32:
9203 case SVE::BI__builtin_sve_svdupq_n_b64:
9204 case SVE::BI__builtin_sve_svdupq_n_u8:
9205 case SVE::BI__builtin_sve_svdupq_n_s8:
9206 case SVE::BI__builtin_sve_svdupq_n_u64:
9207 case SVE::BI__builtin_sve_svdupq_n_f64:
9208 case SVE::BI__builtin_sve_svdupq_n_s64:
9209 case SVE::BI__builtin_sve_svdupq_n_u16:
9210 case SVE::BI__builtin_sve_svdupq_n_f16:
9211 case SVE::BI__builtin_sve_svdupq_n_bf16:
9212 case SVE::BI__builtin_sve_svdupq_n_s16:
9213 case SVE::BI__builtin_sve_svdupq_n_u32:
9214 case SVE::BI__builtin_sve_svdupq_n_f32:
9215 case SVE::BI__builtin_sve_svdupq_n_s32: {
9216 // These builtins are implemented by building a fixed-length vector from the
9217 // operands, inserting it into a scalable vector, and splatting it with dupq_lane.
9218 unsigned NumOpnds = Ops.size();
9219
9220 bool IsBoolTy =
9221 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
9222
9223 // For svdupq_n_b* the element type is an integer of width 128/numelts,
9224 // so that the compare can use the width that is natural for the expected
9225 // number of predicate lanes.
9226 llvm::Type *EltTy = Ops[0]->getType();
9227 if (IsBoolTy)
9228 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
9229
9230 SmallVector<llvm::Value *, 16> VecOps;
9231 for (unsigned I = 0; I < NumOpnds; ++I)
9232 VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
9233 Value *Vec = BuildVector(VecOps);
9234
9235 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9236 Value *Pred = EmitSVEAllTruePred(TypeFlags);
9237
9238 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
9239 Value *InsertSubVec = Builder.CreateInsertVector(
9240 OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0));
9241
9242 Function *F =
9243 CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
9244 Value *DupQLane =
9245 Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
9246
9247 if (!IsBoolTy)
9248 return DupQLane;
9249
9250 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
9251 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
9252 : Intrinsic::aarch64_sve_cmpne_wide,
9253 OverloadedTy);
9254 Value *Call = Builder.CreateCall(
9255 F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
9256 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9257 }
9258
9259 case SVE::BI__builtin_sve_svpfalse_b:
9260 return ConstantInt::getFalse(Ty);
9261
9262 case SVE::BI__builtin_sve_svlen_bf16:
9263 case SVE::BI__builtin_sve_svlen_f16:
9264 case SVE::BI__builtin_sve_svlen_f32:
9265 case SVE::BI__builtin_sve_svlen_f64:
9266 case SVE::BI__builtin_sve_svlen_s8:
9267 case SVE::BI__builtin_sve_svlen_s16:
9268 case SVE::BI__builtin_sve_svlen_s32:
9269 case SVE::BI__builtin_sve_svlen_s64:
9270 case SVE::BI__builtin_sve_svlen_u8:
9271 case SVE::BI__builtin_sve_svlen_u16:
9272 case SVE::BI__builtin_sve_svlen_u32:
9273 case SVE::BI__builtin_sve_svlen_u64: {
9274 SVETypeFlags TF(Builtin->TypeModifier);
9275 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9276 auto *NumEls =
9277 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
9278
9279 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
9280 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
9281 }
9282
9283 case SVE::BI__builtin_sve_svtbl2_u8:
9284 case SVE::BI__builtin_sve_svtbl2_s8:
9285 case SVE::BI__builtin_sve_svtbl2_u16:
9286 case SVE::BI__builtin_sve_svtbl2_s16:
9287 case SVE::BI__builtin_sve_svtbl2_u32:
9288 case SVE::BI__builtin_sve_svtbl2_s32:
9289 case SVE::BI__builtin_sve_svtbl2_u64:
9290 case SVE::BI__builtin_sve_svtbl2_s64:
9291 case SVE::BI__builtin_sve_svtbl2_f16:
9292 case SVE::BI__builtin_sve_svtbl2_bf16:
9293 case SVE::BI__builtin_sve_svtbl2_f32:
9294 case SVE::BI__builtin_sve_svtbl2_f64: {
9295 SVETypeFlags TF(Builtin->TypeModifier);
9296 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9297 auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
9298 Function *FExtr =
9299 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
9300 Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
9301 Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
9302 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
9303 return Builder.CreateCall(F, {V0, V1, Ops[1]});
9304 }
9305 }
9306
9307 /// Should not happen
9308 return nullptr;
9309}
9310
9311Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
9312 const CallExpr *E,
9313 llvm::Triple::ArchType Arch) {
9314 if (BuiltinID >= AArch64::FirstSVEBuiltin &&
9315 BuiltinID <= AArch64::LastSVEBuiltin)
9316 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
9317
9318 unsigned HintID = static_cast<unsigned>(-1);
9319 switch (BuiltinID) {
9320 default: break;
9321 case AArch64::BI__builtin_arm_nop:
9322 HintID = 0;
9323 break;
9324 case AArch64::BI__builtin_arm_yield:
9325 case AArch64::BI__yield:
9326 HintID = 1;
9327 break;
9328 case AArch64::BI__builtin_arm_wfe:
9329 case AArch64::BI__wfe:
9330 HintID = 2;
9331 break;
9332 case AArch64::BI__builtin_arm_wfi:
9333 case AArch64::BI__wfi:
9334 HintID = 3;
9335 break;
9336 case AArch64::BI__builtin_arm_sev:
9337 case AArch64::BI__sev:
9338 HintID = 4;
9339 break;
9340 case AArch64::BI__builtin_arm_sevl:
9341 case AArch64::BI__sevl:
9342 HintID = 5;
9343 break;
9344 }
9345
9346 if (HintID != static_cast<unsigned>(-1)) {
9347 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
9348 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
9349 }
9350
9351 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
9352 Value *Address = EmitScalarExpr(E->getArg(0));
9353 Value *RW = EmitScalarExpr(E->getArg(1));
9354 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
9355 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
9356 Value *IsData = EmitScalarExpr(E->getArg(4));
9357
9358 Value *Locality = nullptr;
9359 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
9360 // Temporal fetch: convert the cache level to a locality value.
9361 Locality = llvm::ConstantInt::get(Int32Ty,
9362 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
9363 } else {
9364 // Streaming fetch.
9365 Locality = llvm::ConstantInt::get(Int32Ty, 0);
9366 }
9367
9368 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
9369 // PLDL3STRM or PLDL2STRM.
9370 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
9371 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
9372 }
9373
9374 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
9375 assert((getContext().getTypeSize(E->getType()) == 32) &&
9376        "rbit of unusual size!");
9377 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9378 return Builder.CreateCall(
9379 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
9380 }
9381 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
9382 assert((getContext().getTypeSize(E->getType()) == 64) &&
9383        "rbit of unusual size!");
9384 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9385 return Builder.CreateCall(
9386 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
9387 }
9388
9389 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
9390 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9391 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
9392 "cls");
9393 }
9394 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
9395 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9396 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
9397 "cls");
9398 }
9399
9400 if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
9401 BuiltinID == AArch64::BI__builtin_arm_frint32z) {
9402 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9403 llvm::Type *Ty = Arg->getType();
9404 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
9405 Arg, "frint32z");
9406 }
9407
9408 if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
9409 BuiltinID == AArch64::BI__builtin_arm_frint64z) {
9410 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9411 llvm::Type *Ty = Arg->getType();
9412 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
9413 Arg, "frint64z");
9414 }
9415
9416 if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
9417 BuiltinID == AArch64::BI__builtin_arm_frint32x) {
9418 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9419 llvm::Type *Ty = Arg->getType();
9420 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
9421 Arg, "frint32x");
9422 }
9423
9424 if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
9425 BuiltinID == AArch64::BI__builtin_arm_frint64x) {
9426 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9427 llvm::Type *Ty = Arg->getType();
9428 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
9429 Arg, "frint64x");
9430 }
9431
9432 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
9433 assert((getContext().getTypeSize(E->getType()) == 32) &&
9434        "__jcvt of unusual size!");
9435 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9436 return Builder.CreateCall(
9437 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
9438 }
9439
9440 if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
9441 BuiltinID == AArch64::BI__builtin_arm_st64b ||
9442 BuiltinID == AArch64::BI__builtin_arm_st64bv ||
9443 BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
9444 llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
9445 llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
9446
9447 if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
9448 // Load from the address via an LLVM intrinsic, receiving a
9449 // tuple of 8 i64 words, and store each one to ValPtr.
9450 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
9451 llvm::Value *Val = Builder.CreateCall(F, MemAddr);
9452 llvm::Value *ToRet;
9453 for (size_t i = 0; i < 8; i++) {
9454 llvm::Value *ValOffsetPtr =
9455 Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
9456 Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
9457 ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
9458 }
9459 return ToRet;
9460 } else {
9461 // Load 8 i64 words from ValPtr, and store them to the address
9462 // via an LLVM intrinsic.
9463 SmallVector<llvm::Value *, 9> Args;
9464 Args.push_back(MemAddr);
9465 for (size_t i = 0; i < 8; i++) {
9466 llvm::Value *ValOffsetPtr =
9467 Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
9468 Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
9469 Args.push_back(Builder.CreateLoad(Addr));
9470 }
9471
9472 auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
9473 ? Intrinsic::aarch64_st64b
9474 : BuiltinID == AArch64::BI__builtin_arm_st64bv
9475 ? Intrinsic::aarch64_st64bv
9476 : Intrinsic::aarch64_st64bv0);
9477 Function *F = CGM.getIntrinsic(Intr);
9478 return Builder.CreateCall(F, Args);
9479 }
9480 }
9481
9482 if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
9483 BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
9484
9485 auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
9486 ? Intrinsic::aarch64_rndr
9487 : Intrinsic::aarch64_rndrrs);
9488 Function *F = CGM.getIntrinsic(Intr);
9489 llvm::Value *Val = Builder.CreateCall(F);
9490 Value *RandomValue = Builder.CreateExtractValue(Val, 0);
9491 Value *Status = Builder.CreateExtractValue(Val, 1);
9492
9493 Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
9494 Builder.CreateStore(RandomValue, MemAddress);
9495 Status = Builder.CreateZExt(Status, Int32Ty);
9496 return Status;
9497 }
9498
9499 if (BuiltinID == AArch64::BI__clear_cache) {
9500 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
9501 const FunctionDecl *FD = E->getDirectCallee();
9502 Value *Ops[2];
9503 for (unsigned i = 0; i < 2; i++)
9504 Ops[i] = EmitScalarExpr(E->getArg(i));
9505 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
9506 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
9507 StringRef Name = FD->getName();
9508 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
9509 }
9510
9511 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
9512 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
9513 getContext().getTypeSize(E->getType()) == 128) {
9514 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
9515 ? Intrinsic::aarch64_ldaxp
9516 : Intrinsic::aarch64_ldxp);
9517
9518 Value *LdPtr = EmitScalarExpr(E->getArg(0));
9519 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
9520 "ldxp");
9521
9522 Value *Val0 = Builder.CreateExtractValue(Val, 1);
9523 Value *Val1 = Builder.CreateExtractValue(Val, 0);
9524 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
9525 Val0 = Builder.CreateZExt(Val0, Int128Ty);
9526 Val1 = Builder.CreateZExt(Val1, Int128Ty);
9527
9528 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
9529 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
9530 Val = Builder.CreateOr(Val, Val1);
9531 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
9532 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
9533 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
9534 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
9535
9536 QualType Ty = E->getType();
9537 llvm::Type *RealResTy = ConvertType(Ty);
9538 llvm::Type *PtrTy = llvm::IntegerType::get(
9539 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
9540 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
9541
9542 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
9543 ? Intrinsic::aarch64_ldaxr
9544 : Intrinsic::aarch64_ldxr,
9545 PtrTy);
9546 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
9547
9548 if (RealResTy->isPointerTy())
9549 return Builder.CreateIntToPtr(Val, RealResTy);
9550
9551 llvm::Type *IntResTy = llvm::IntegerType::get(
9552 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
9553 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
9554 return Builder.CreateBitCast(Val, RealResTy);
9555 }
9556
9557 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
9558 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
9559 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
9560 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9561 ? Intrinsic::aarch64_stlxp
9562 : Intrinsic::aarch64_stxp);
9563 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
9564
9565 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
9566 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
9567
9568 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
9569 llvm::Value *Val = Builder.CreateLoad(Tmp);
9570
9571 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
9572 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
9573 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
9574 Int8PtrTy);
9575 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
9576 }
9577
9578 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
9579 BuiltinID == AArch64::BI__builtin_arm_stlex) {
9580 Value *StoreVal = EmitScalarExpr(E->getArg(0));
9581 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
9582
9583 QualType Ty = E->getArg(0)->getType();
9584 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
9585 getContext().getTypeSize(Ty));
9586 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
9587
9588 if (StoreVal->getType()->isPointerTy())
9589 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
9590 else {
9591 llvm::Type *IntTy = llvm::IntegerType::get(
9592 getLLVMContext(),
9593 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
9594 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
9595 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
9596 }
9597
9598 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9599 ? Intrinsic::aarch64_stlxr
9600 : Intrinsic::aarch64_stxr,
9601 StoreAddr->getType());
9602 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
9603 }
9604
9605 if (BuiltinID == AArch64::BI__getReg) {
9606 Expr::EvalResult Result;
9607 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
9608 llvm_unreachable("Sema will ensure that the parameter is constant")::llvm::llvm_unreachable_internal("Sema will ensure that the parameter is constant"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/clang/lib/CodeGen/CGBuiltin.cpp"
, 9608)
;
9609
9610 llvm::APSInt Value = Result.Val.getInt();
9611 LLVMContext &Context = CGM.getLLVMContext();
9612 std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
9613
9614 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
9615 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9616 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9617
9618 llvm::Function *F =
9619 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
9620 return Builder.CreateCall(F, Metadata);
9621 }
9622
9623 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
9624 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
9625 return Builder.CreateCall(F);
9626 }
9627
9628 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
9629 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
9630 llvm::SyncScope::SingleThread);
9631
9632 // CRC32
9633 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
9634 switch (BuiltinID) {
9635 case AArch64::BI__builtin_arm_crc32b:
9636 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
9637 case AArch64::BI__builtin_arm_crc32cb:
9638 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
9639 case AArch64::BI__builtin_arm_crc32h:
9640 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
9641 case AArch64::BI__builtin_arm_crc32ch:
9642 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
9643 case AArch64::BI__builtin_arm_crc32w:
9644 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
9645 case AArch64::BI__builtin_arm_crc32cw:
9646 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
9647 case AArch64::BI__builtin_arm_crc32d:
9648 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
9649 case AArch64::BI__builtin_arm_crc32cd:
9650 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
9651 }
9652
9653 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
9654 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9655 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9656 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
9657
9658 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
9659 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
9660
9661 return Builder.CreateCall(F, {Arg0, Arg1});
9662 }
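// Illustrative note: the zero-extension above is needed because the narrower
// CRC intrinsics still take an i32 data operand; e.g. __builtin_arm_crc32b(crc, byte)
// is expected to become a call to @llvm.aarch64.crc32b with the byte argument
// zero-extended to i32.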
9663
9664 // Memory Tagging Extensions (MTE) Intrinsics
9665 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
9666 switch (BuiltinID) {
9667 case AArch64::BI__builtin_arm_irg:
9668 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
9669 case AArch64::BI__builtin_arm_addg:
9670 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
9671 case AArch64::BI__builtin_arm_gmi:
9672 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
9673 case AArch64::BI__builtin_arm_ldg:
9674 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
9675 case AArch64::BI__builtin_arm_stg:
9676 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
9677 case AArch64::BI__builtin_arm_subp:
9678 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
9679 }
9680
9681 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
9682 llvm::Type *T = ConvertType(E->getType());
9683
9684 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
9685 Value *Pointer = EmitScalarExpr(E->getArg(0));
9686 Value *Mask = EmitScalarExpr(E->getArg(1));
9687
9688 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9689 Mask = Builder.CreateZExt(Mask, Int64Ty);
9690 Value *RV = Builder.CreateCall(
9691 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
9692 return Builder.CreatePointerCast(RV, T);
9693 }
9694 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
9695 Value *Pointer = EmitScalarExpr(E->getArg(0));
9696 Value *TagOffset = EmitScalarExpr(E->getArg(1));
9697
9698 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9699 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
9700 Value *RV = Builder.CreateCall(
9701 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
9702 return Builder.CreatePointerCast(RV, T);
9703 }
9704 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
9705 Value *Pointer = EmitScalarExpr(E->getArg(0));
9706 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
9707
9708 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
9709 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9710 return Builder.CreateCall(
9711 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
9712 }
9713 // Although it is possible to supply a different return
9714 // address (first arg) to this intrinsic, for now we set the
9715 // return address to be the same as the input address.
9716 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
9717 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9718 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9719 Value *RV = Builder.CreateCall(
9720 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9721 return Builder.CreatePointerCast(RV, T);
9722 }
9723 // Although it is possible to supply a different tag (to set)
9724 // to this intrinsic (as the first arg), for now we supply
9725 // the tag that is in the input address arg (the common use case).
9726 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
9727 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9728 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9729 return Builder.CreateCall(
9730 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9731 }
9732 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
9733 Value *PointerA = EmitScalarExpr(E->getArg(0));
9734 Value *PointerB = EmitScalarExpr(E->getArg(1));
9735 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
9736 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
9737 return Builder.CreateCall(
9738 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
9739 }
9740 }
9741
9742 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9743 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9744 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9745 BuiltinID == AArch64::BI__builtin_arm_wsr ||
9746 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
9747 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
9748
9749 SpecialRegisterAccessKind AccessKind = Write;
9750 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9751 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9752 BuiltinID == AArch64::BI__builtin_arm_rsrp)
9753 AccessKind = VolatileRead;
9754
9755 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9756 BuiltinID == AArch64::BI__builtin_arm_wsrp;
9757
9758 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
9759 BuiltinID != AArch64::BI__builtin_arm_wsr;
9760
9761 llvm::Type *ValueType;
9762 llvm::Type *RegisterType = Int64Ty;
9763 if (IsPointerBuiltin) {
9764 ValueType = VoidPtrTy;
9765 } else if (Is64Bit) {
9766 ValueType = Int64Ty;
9767 } else {
9768 ValueType = Int32Ty;
9769 }
9770
9771 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
9772 AccessKind);
9773 }
9774
9775 if (BuiltinID == AArch64::BI_ReadStatusReg ||
9776 BuiltinID == AArch64::BI_WriteStatusReg) {
9777 LLVMContext &Context = CGM.getLLVMContext();
9778
9779 unsigned SysReg =
9780 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
9781
9782 std::string SysRegStr;
9783 llvm::raw_string_ostream(SysRegStr) <<
9784 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
9785 ((SysReg >> 11) & 7) << ":" <<
9786 ((SysReg >> 7) & 15) << ":" <<
9787 ((SysReg >> 3) & 15) << ":" <<
9788 ( SysReg & 7);
9789
9790 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
9791 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9792 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9793
9794 llvm::Type *RegisterType = Int64Ty;
9795 llvm::Type *Types[] = { RegisterType };
9796
9797 if (BuiltinID == AArch64::BI_ReadStatusReg) {
9798 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
9799
9800 return Builder.CreateCall(F, Metadata);
9801 }
9802
9803 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
9804 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
9805
9806 return Builder.CreateCall(F, { Metadata, ArgValue });
9807 }
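// Worked example (illustrative only, assuming the MSVC ARM64_SYSREG encoding):
// for TPIDR_EL0, i.e. op0=3, op1=3, CRn=13, CRm=0, op2=2, the field extraction
// above produces the register string "3:3:13:0:2", which is then passed as the
// metadata operand of llvm.read_register / llvm.write_register.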
9808
9809 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
9810 llvm::Function *F =
9811 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
9812 return Builder.CreateCall(F);
9813 }
9814
9815 if (BuiltinID == AArch64::BI__builtin_sponentry) {
9816 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
9817 return Builder.CreateCall(F);
9818 }
9819
9820 if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) {
9821 llvm::Type *ResType = ConvertType(E->getType());
9822 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
9823
9824 bool IsSigned = BuiltinID == AArch64::BI__mulh;
9825 Value *LHS =
9826 Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
9827 Value *RHS =
9828 Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);
9829
9830 Value *MulResult, *HigherBits;
9831 if (IsSigned) {
9832 MulResult = Builder.CreateNSWMul(LHS, RHS);
9833 HigherBits = Builder.CreateAShr(MulResult, 64);
9834 } else {
9835 MulResult = Builder.CreateNUWMul(LHS, RHS);
9836 HigherBits = Builder.CreateLShr(MulResult, 64);
9837 }
9838 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
9839
9840 return HigherBits;
9841 }
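// Worked example (illustrative only): __umulh(0x8000000000000000ULL, 2) forms
// the full 128-bit product 2^64 and returns its upper 64 bits, i.e. 1; __mulh
// performs the same computation with the operands sign-extended rather than
// zero-extended.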
9842
9843 // Handle MSVC intrinsics before argument evaluation to prevent double
9844 // evaluation.
9845 if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
9846 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
9847
9848 // Find out if any arguments are required to be integer constant
9849 // expressions.
9850 unsigned ICEArguments = 0;
9851 ASTContext::GetBuiltinTypeError Error;
9852 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9853 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9854
9855 llvm::SmallVector<Value*, 4> Ops;
9856 Address PtrOp0 = Address::invalid();
9857 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
9858 if (i == 0) {
9859 switch (BuiltinID) {
9860 case NEON::BI__builtin_neon_vld1_v:
9861 case NEON::BI__builtin_neon_vld1q_v:
9862 case NEON::BI__builtin_neon_vld1_dup_v:
9863 case NEON::BI__builtin_neon_vld1q_dup_v:
9864 case NEON::BI__builtin_neon_vld1_lane_v:
9865 case NEON::BI__builtin_neon_vld1q_lane_v:
9866 case NEON::BI__builtin_neon_vst1_v:
9867 case NEON::BI__builtin_neon_vst1q_v:
9868 case NEON::BI__builtin_neon_vst1_lane_v:
9869 case NEON::BI__builtin_neon_vst1q_lane_v:
9870 // Get the alignment for the argument in addition to the value;
9871 // we'll use it later.
9872 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
9873 Ops.push_back(PtrOp0.getPointer());
9874 continue;
9875 }
9876 }
9877 if ((ICEArguments & (1 << i)) == 0) {
9878 Ops.push_back(EmitScalarExpr(E->getArg(i)));
9879 } else {
9880 // If this is required to be a constant, constant fold it so that we know
9881 // that the generated intrinsic gets a ConstantInt.
9882 Ops.push_back(llvm::ConstantInt::get(
9883 getLLVMContext(),
9884 *E->getArg(i)->getIntegerConstantExpr(getContext())));
9885 }
9886 }
9887
9888 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
9889 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
9890 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
9891
9892 if (Builtin) {
9893 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
9894 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
9895 assert(Result && "SISD intrinsic should have been handled");
9896 return Result;
9897 }
9898
9899 const Expr *Arg = E->getArg(E->getNumArgs()-1);
9900 NeonTypeFlags Type(0);
9901 if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
9902 // Determine the type of this overloaded NEON intrinsic.
9903 Type = NeonTypeFlags(Result->getZExtValue());
9904
9905 bool usgn = Type.isUnsigned();
9906 bool quad = Type.isQuad();
9907
9908 // Handle non-overloaded intrinsics first.
9909 switch (BuiltinID) {
9910 default: break;
9911 case NEON::BI__builtin_neon_vabsh_f16:
9912 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9913 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
9914 case NEON::BI__builtin_neon_vaddq_p128: {
9915 llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
9916 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9917 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9918 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9919 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
9920 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9921 return Builder.CreateBitCast(Ops[0], Int128Ty);
9922 }
9923 case NEON::BI__builtin_neon_vldrq_p128: {
9924 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9925 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
9926 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
9927 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
9928 CharUnits::fromQuantity(16));
9929 }
9930 case NEON::BI__builtin_neon_vstrq_p128: {
9931 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
9932 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9933 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9934 }
9935 case NEON::BI__builtin_neon_vcvts_f32_u32:
9936 case NEON::BI__builtin_neon_vcvtd_f64_u64:
9937 usgn = true;
9938 LLVM_FALLTHROUGH;
9939 case NEON::BI__builtin_neon_vcvts_f32_s32:
9940 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9941 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9942 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9943 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9944 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9945 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9946 if (usgn)
9947 return Builder.CreateUIToFP(Ops[0], FTy);
9948 return Builder.CreateSIToFP(Ops[0], FTy);
9949 }
9950 case NEON::BI__builtin_neon_vcvth_f16_u16:
9951 case NEON::BI__builtin_neon_vcvth_f16_u32:
9952 case NEON::BI__builtin_neon_vcvth_f16_u64:
9953 usgn = true;
9954 LLVM_FALLTHROUGH;
9955 case NEON::BI__builtin_neon_vcvth_f16_s16:
9956 case NEON::BI__builtin_neon_vcvth_f16_s32:
9957 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9958 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9959 llvm::Type *FTy = HalfTy;
9960 llvm::Type *InTy;
9961 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9962 InTy = Int64Ty;
9963 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9964 InTy = Int32Ty;
9965 else
9966 InTy = Int16Ty;
9967 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9968 if (usgn)
9969 return Builder.CreateUIToFP(Ops[0], FTy);
9970 return Builder.CreateSIToFP(Ops[0], FTy);
9971 }
9972 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9973 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9974 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9975 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9976 case NEON::BI__builtin_neon_vcvth_u16_f16:
9977 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9978 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9979 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9980 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9981 case NEON::BI__builtin_neon_vcvth_s16_f16: {
9982 unsigned Int;
9983 llvm::Type* InTy = Int32Ty;
9984 llvm::Type* FTy = HalfTy;
9985 llvm::Type *Tys[2] = {InTy, FTy};
9986 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9987 switch (BuiltinID) {
9988 default: llvm_unreachable("missing builtin ID in switch!");
9989 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9990 Int = Intrinsic::aarch64_neon_fcvtau; break;
9991 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9992 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9993 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9994 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9995 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9996 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9997 case NEON::BI__builtin_neon_vcvth_u16_f16:
9998 Int = Intrinsic::aarch64_neon_fcvtzu; break;
9999 case NEON::BI__builtin_neon_vcvtah_s16_f16:
10000 Int = Intrinsic::aarch64_neon_fcvtas; break;
10001 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
10002 Int = Intrinsic::aarch64_neon_fcvtms; break;
10003 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
10004 Int = Intrinsic::aarch64_neon_fcvtns; break;
10005 case NEON::BI__builtin_neon_vcvtph_s16_f16:
10006 Int = Intrinsic::aarch64_neon_fcvtps; break;
10007 case NEON::BI__builtin_neon_vcvth_s16_f16:
10008 Int = Intrinsic::aarch64_neon_fcvtzs; break;
10009 }
10010 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
10011 return Builder.CreateTrunc(Ops[0], Int16Ty);
10012 }
10013 case NEON::BI__builtin_neon_vcaleh_f16:
10014 case NEON::BI__builtin_neon_vcalth_f16:
10015 case NEON::BI__builtin_neon_vcageh_f16:
10016 case NEON::BI__builtin_neon_vcagth_f16: {
10017 unsigned Int;
10018 llvm::Type* InTy = Int32Ty;
10019 llvm::Type* FTy = HalfTy;
10020 llvm::Type *Tys[2] = {InTy, FTy};
10021 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10022 switch (BuiltinID) {
10023 default: llvm_unreachable("missing builtin ID in switch!");
10024 case NEON::BI__builtin_neon_vcageh_f16:
10025 Int = Intrinsic::aarch64_neon_facge; break;
10026 case NEON::BI__builtin_neon_vcagth_f16:
10027 Int = Intrinsic::aarch64_neon_facgt; break;
10028 case NEON::BI__builtin_neon_vcaleh_f16:
10029 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
10030 case NEON::BI__builtin_neon_vcalth_f16:
10031 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
10032 }
10033 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
10034 return Builder.CreateTrunc(Ops[0], Int16Ty);
10035 }
10036 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
10037 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
10038 unsigned Int;
10039 llvm::Type* InTy = Int32Ty;
10040 llvm::Type* FTy = HalfTy;
10041 llvm::Type *Tys[2] = {InTy, FTy};
10042 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10043 switch (BuiltinID) {
10044 default: llvm_unreachable("missing builtin ID in switch!");
10045 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
10046 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
10047 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
10048 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
10049 }
10050 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
10051 return Builder.CreateTrunc(Ops[0], Int16Ty);
10052 }
10053 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
10054 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
10055 unsigned Int;
10056 llvm::Type* FTy = HalfTy;
10057 llvm::Type* InTy = Int32Ty;
10058 llvm::Type *Tys[2] = {FTy, InTy};
10059 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10060 switch (BuiltinID) {
10061 default: llvm_unreachable("missing builtin ID in switch!");
10062 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
10063 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
10064 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
10065 break;
10066 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
10067 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
10068 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
10069 break;
10070 }
10071 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
10072 }
10073 case NEON::BI__builtin_neon_vpaddd_s64: {
10074 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
10075 Value *Vec = EmitScalarExpr(E->getArg(0));
10076 // The vector is v2i64, so make sure it's bitcast to that.
10077 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
10078 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10079 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10080 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10081 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10082 // Pairwise addition of a v2i64 into a scalar i64.
10083 return Builder.CreateAdd(Op0, Op1, "vpaddd");
10084 }
10085 case NEON::BI__builtin_neon_vpaddd_f64: {
10086 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
10087 Value *Vec = EmitScalarExpr(E->getArg(0));
10088 // The vector is v2f64, so make sure it's bitcast to that.
10089 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
10090 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10091 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10092 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10093 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10094 // Pairwise addition of a v2f64 into a scalar f64.
10095 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
10096 }
10097 case NEON::BI__builtin_neon_vpadds_f32: {
10098 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
10099 Value *Vec = EmitScalarExpr(E->getArg(0));
10100 // The vector is v2f32, so make sure it's bitcast to that.
10101 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
10102 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10103 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10104 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10105 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10106 // Pairwise addition of a v2f32 into a scalar f32.
10107 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
10108 }
10109 case NEON::BI__builtin_neon_vceqzd_s64:
10110 case NEON::BI__builtin_neon_vceqzd_f64:
10111 case NEON::BI__builtin_neon_vceqzs_f32:
10112 case NEON::BI__builtin_neon_vceqzh_f16:
10113 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10114 return EmitAArch64CompareBuiltinExpr(
10115 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10116 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
10117 case NEON::BI__builtin_neon_vcgezd_s64:
10118 case NEON::BI__builtin_neon_vcgezd_f64:
10119 case NEON::BI__builtin_neon_vcgezs_f32:
10120 case NEON::BI__builtin_neon_vcgezh_f16:
10121 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10122 return EmitAArch64CompareBuiltinExpr(
10123 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10124 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
10125 case NEON::BI__builtin_neon_vclezd_s64:
10126 case NEON::BI__builtin_neon_vclezd_f64:
10127 case NEON::BI__builtin_neon_vclezs_f32:
10128 case NEON::BI__builtin_neon_vclezh_f16:
10129 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10130 return EmitAArch64CompareBuiltinExpr(
10131 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10132 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
10133 case NEON::BI__builtin_neon_vcgtzd_s64:
10134 case NEON::BI__builtin_neon_vcgtzd_f64:
10135 case NEON::BI__builtin_neon_vcgtzs_f32:
10136 case NEON::BI__builtin_neon_vcgtzh_f16:
10137 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10138 return EmitAArch64CompareBuiltinExpr(
10139 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10140 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
10141 case NEON::BI__builtin_neon_vcltzd_s64:
10142 case NEON::BI__builtin_neon_vcltzd_f64:
10143 case NEON::BI__builtin_neon_vcltzs_f32:
10144 case NEON::BI__builtin_neon_vcltzh_f16:
10145 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10146 return EmitAArch64CompareBuiltinExpr(
10147 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10148 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
10149
10150 case NEON::BI__builtin_neon_vceqzd_u64: {
10151 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10152 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10153 Ops[0] =
10154 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
10155 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
10156 }
10157 case NEON::BI__builtin_neon_vceqd_f64:
10158 case NEON::BI__builtin_neon_vcled_f64:
10159 case NEON::BI__builtin_neon_vcltd_f64:
10160 case NEON::BI__builtin_neon_vcged_f64:
10161 case NEON::BI__builtin_neon_vcgtd_f64: {
10162 llvm::CmpInst::Predicate P;
10163 switch (BuiltinID) {
10164 default: llvm_unreachable("missing builtin ID in switch!");
10165 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
10166 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
10167 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
10168 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
10169 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
10170 }
10171 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10172 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10173 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10174 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10175 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
10176 }
10177 case NEON::BI__builtin_neon_vceqs_f32:
10178 case NEON::BI__builtin_neon_vcles_f32:
10179 case NEON::BI__builtin_neon_vclts_f32:
10180 case NEON::BI__builtin_neon_vcges_f32:
10181 case NEON::BI__builtin_neon_vcgts_f32: {
10182 llvm::CmpInst::Predicate P;
10183 switch (BuiltinID) {
10184 default: llvm_unreachable("missing builtin ID in switch!");
10185 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
10186 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
10187 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
10188 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
10189 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
10190 }
10191 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10192 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
10193 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
10194 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10195 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
10196 }
10197 case NEON::BI__builtin_neon_vceqh_f16:
10198 case NEON::BI__builtin_neon_vcleh_f16:
10199 case NEON::BI__builtin_neon_vclth_f16:
10200 case NEON::BI__builtin_neon_vcgeh_f16:
10201 case NEON::BI__builtin_neon_vcgth_f16: {
10202 llvm::CmpInst::Predicate P;
10203 switch (BuiltinID) {
10204 default: llvm_unreachable("missing builtin ID in switch!");
10205 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
10206 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
10207 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
10208 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
10209 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
10210 }
10211 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10212 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
10213 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
10214 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10215 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
10216 }
10217 case NEON::BI__builtin_neon_vceqd_s64:
10218 case NEON::BI__builtin_neon_vceqd_u64:
10219 case NEON::BI__builtin_neon_vcgtd_s64:
10220 case NEON::BI__builtin_neon_vcgtd_u64:
10221 case NEON::BI__builtin_neon_vcltd_s64:
10222 case NEON::BI__builtin_neon_vcltd_u64:
10223 case NEON::BI__builtin_neon_vcged_u64:
10224 case NEON::BI__builtin_neon_vcged_s64:
10225 case NEON::BI__builtin_neon_vcled_u64:
10226 case NEON::BI__builtin_neon_vcled_s64: {
10227 llvm::CmpInst::Predicate P;
10228 switch (BuiltinID) {
10229 default: llvm_unreachable("missing builtin ID in switch!");
10230 case NEON::BI__builtin_neon_vceqd_s64:
10231 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
10232 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
10233 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
10234 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
10235 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
10236 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
10237 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
10238 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
10239 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
10240 }
10241 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10242 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10243 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10244 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
10245 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
10246 }
10247 case NEON::BI__builtin_neon_vtstd_s64:
10248 case NEON::BI__builtin_neon_vtstd_u64: {
10249 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10250 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10251 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10252 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
10253 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
10254 llvm::Constant::getNullValue(Int64Ty));
10255 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
10256 }
10257 case NEON::BI__builtin_neon_vset_lane_i8:
10258 case NEON::BI__builtin_neon_vset_lane_i16:
10259 case NEON::BI__builtin_neon_vset_lane_i32:
10260 case NEON::BI__builtin_neon_vset_lane_i64:
10261 case NEON::BI__builtin_neon_vset_lane_bf16:
10262 case NEON::BI__builtin_neon_vset_lane_f32:
10263 case NEON::BI__builtin_neon_vsetq_lane_i8:
10264 case NEON::BI__builtin_neon_vsetq_lane_i16:
10265 case NEON::BI__builtin_neon_vsetq_lane_i32:
10266 case NEON::BI__builtin_neon_vsetq_lane_i64:
10267 case NEON::BI__builtin_neon_vsetq_lane_bf16:
10268 case NEON::BI__builtin_neon_vsetq_lane_f32:
10269 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10270 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10271 case NEON::BI__builtin_neon_vset_lane_f64:
10272 // The vector type needs a cast for the v1f64 variant.
10273 Ops[1] =
10274 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
10275 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10276 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10277 case NEON::BI__builtin_neon_vsetq_lane_f64:
10278 // The vector type needs a cast for the v2f64 variant.
10279 Ops[1] =
10280 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
10281 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10282 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10283
10284 case NEON::BI__builtin_neon_vget_lane_i8:
10285 case NEON::BI__builtin_neon_vdupb_lane_i8:
10286 Ops[0] =
10287 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
10288 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10289 "vget_lane");
10290 case NEON::BI__builtin_neon_vgetq_lane_i8:
10291 case NEON::BI__builtin_neon_vdupb_laneq_i8:
10292 Ops[0] =
10293 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
10294 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10295 "vgetq_lane");
10296 case NEON::BI__builtin_neon_vget_lane_i16:
10297 case NEON::BI__builtin_neon_vduph_lane_i16:
10298 Ops[0] =
10299 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
10300 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10301 "vget_lane");
10302 case NEON::BI__builtin_neon_vgetq_lane_i16:
10303 case NEON::BI__builtin_neon_vduph_laneq_i16:
10304 Ops[0] =
10305 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
10306 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10307 "vgetq_lane");
10308 case NEON::BI__builtin_neon_vget_lane_i32:
10309 case NEON::BI__builtin_neon_vdups_lane_i32:
10310 Ops[0] =
10311 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
10312 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10313 "vget_lane");
10314 case NEON::BI__builtin_neon_vdups_lane_f32:
10315 Ops[0] =
10316 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
10317 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10318 "vdups_lane");
10319 case NEON::BI__builtin_neon_vgetq_lane_i32:
10320 case NEON::BI__builtin_neon_vdups_laneq_i32:
10321 Ops[0] =
10322 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
10323 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10324 "vgetq_lane");
10325 case NEON::BI__builtin_neon_vget_lane_i64:
10326 case NEON::BI__builtin_neon_vdupd_lane_i64:
10327 Ops[0] =
10328 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
10329 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10330 "vget_lane");
10331 case NEON::BI__builtin_neon_vdupd_lane_f64:
10332 Ops[0] =
10333 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
10334 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10335 "vdupd_lane");
10336 case NEON::BI__builtin_neon_vgetq_lane_i64:
10337 case NEON::BI__builtin_neon_vdupd_laneq_i64:
10338 Ops[0] =
10339 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
10340 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10341 "vgetq_lane");
10342 case NEON::BI__builtin_neon_vget_lane_f32:
10343 Ops[0] =
10344 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
10345 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10346 "vget_lane");
10347 case NEON::BI__builtin_neon_vget_lane_f64:
10348 Ops[0] =
10349 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
10350 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10351 "vget_lane");
10352 case NEON::BI__builtin_neon_vgetq_lane_f32:
10353 case NEON::BI__builtin_neon_vdups_laneq_f32:
10354 Ops[0] =
10355 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
10356 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10357 "vgetq_lane");
10358 case NEON::BI__builtin_neon_vgetq_lane_f64:
10359 case NEON::BI__builtin_neon_vdupd_laneq_f64:
10360 Ops[0] =
10361 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
10362 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10363 "vgetq_lane");
10364 case NEON::BI__builtin_neon_vaddh_f16:
10365 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10366 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
10367 case NEON::BI__builtin_neon_vsubh_f16:
10368 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10369 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
10370 case NEON::BI__builtin_neon_vmulh_f16:
10371 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10372 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
10373 case NEON::BI__builtin_neon_vdivh_f16:
10374 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10375 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
10376 case NEON::BI__builtin_neon_vfmah_f16:
10377 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
10378 return emitCallMaybeConstrainedFPBuiltin(
10379 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
10380 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
10381 case NEON::BI__builtin_neon_vfmsh_f16: {
10382 // FIXME: This should be an fneg instruction:
10383 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
10384 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
10385
10386 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
10387 return emitCallMaybeConstrainedFPBuiltin(
10388 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
10389 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
10390 }
10391 case NEON::BI__builtin_neon_vaddd_s64:
10392 case NEON::BI__builtin_neon_vaddd_u64:
10393 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
10394 case NEON::BI__builtin_neon_vsubd_s64:
10395 case NEON::BI__builtin_neon_vsubd_u64:
10396 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
10397 case NEON::BI__builtin_neon_vqdmlalh_s16:
10398 case NEON::BI__builtin_neon_vqdmlslh_s16: {
10399 SmallVector<Value *, 2> ProductOps;
10400 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10401 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
10402 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10403 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10404 ProductOps, "vqdmlXl");
10405 Constant *CI = ConstantInt::get(SizeTy, 0);
10406 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10407
10408 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
10409 ? Intrinsic::aarch64_neon_sqadd
10410 : Intrinsic::aarch64_neon_sqsub;
10411 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
10412 }
10413 case NEON::BI__builtin_neon_vqshlud_n_s64: {
10414 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10415 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
10416 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
10417 Ops, "vqshlu_n");
10418 }
10419 case NEON::BI__builtin_neon_vqshld_n_u64:
10420 case NEON::BI__builtin_neon_vqshld_n_s64: {
10421 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
10422 ? Intrinsic::aarch64_neon_uqshl
10423 : Intrinsic::aarch64_neon_sqshl;
10424 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10425 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
10426 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
10427 }
10428 case NEON::BI__builtin_neon_vrshrd_n_u64:
10429 case NEON::BI__builtin_neon_vrshrd_n_s64: {
10430 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
10431 ? Intrinsic::aarch64_neon_urshl
10432 : Intrinsic::aarch64_neon_srshl;
10433 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10434 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
10435 Ops[1] = ConstantInt::get(Int64Ty, -SV);
10436 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
10437 }
10438 case NEON::BI__builtin_neon_vrsrad_n_u64:
10439 case NEON::BI__builtin_neon_vrsrad_n_s64: {
10440 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
10441 ? Intrinsic::aarch64_neon_urshl
10442 : Intrinsic::aarch64_neon_srshl;
10443 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10444 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
10445 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
10446 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
10447 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
10448 }
10449 case NEON::BI__builtin_neon_vshld_n_s64:
10450 case NEON::BI__builtin_neon_vshld_n_u64: {
10451 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10452 return Builder.CreateShl(
10453 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
10454 }
10455 case NEON::BI__builtin_neon_vshrd_n_s64: {
10456 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10457 return Builder.CreateAShr(
10458 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10459 Amt->getZExtValue())),
10460 "shrd_n");
10461 }
10462 case NEON::BI__builtin_neon_vshrd_n_u64: {
10463 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10464 uint64_t ShiftAmt = Amt->getZExtValue();
10465 // Right-shifting an unsigned value by its size yields 0.
10466 if (ShiftAmt == 64)
10467 return ConstantInt::get(Int64Ty, 0);
10468 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
10469 "shrd_n");
10470 }
10471 case NEON::BI__builtin_neon_vsrad_n_s64: {
10472 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10473 Ops[1] = Builder.CreateAShr(
10474 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10475 Amt->getZExtValue())),
10476 "shrd_n");
10477 return Builder.CreateAdd(Ops[0], Ops[1]);
10478 }
10479 case NEON::BI__builtin_neon_vsrad_n_u64: {
10480 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10481 uint64_t ShiftAmt = Amt->getZExtValue();
10482 // Right-shifting an unsigned value by its size yields 0.
10483 // As Op + 0 = Op, return Ops[0] directly.
10484 if (ShiftAmt == 64)
10485 return Ops[0];
10486 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
10487 "shrd_n");
10488 return Builder.CreateAdd(Ops[0], Ops[1]);
10489 }
10490 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
10491 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
10492 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
10493 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
10494 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10495 "lane");
10496 SmallVector<Value *, 2> ProductOps;
10497 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10498 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
10499 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10500 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10501 ProductOps, "vqdmlXl");
10502 Constant *CI = ConstantInt::get(SizeTy, 0);
10503 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10504 Ops.pop_back();
10505
10506 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
10507 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
10508 ? Intrinsic::aarch64_neon_sqadd
10509 : Intrinsic::aarch64_neon_sqsub;
10510 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
10511 }
10512 case NEON::BI__builtin_neon_vqdmlals_s32:
10513 case NEON::BI__builtin_neon_vqdmlsls_s32: {
10514 SmallVector<Value *, 2> ProductOps;
10515 ProductOps.push_back(Ops[1]);
10516 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
10517 Ops[1] =
10518 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10519 ProductOps, "vqdmlXl");
10520
10521 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
10522 ? Intrinsic::aarch64_neon_sqadd
10523 : Intrinsic::aarch64_neon_sqsub;
10524 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
10525 }
10526 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
10527 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
10528 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
10529 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
10530 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10531 "lane");
10532 SmallVector<Value *, 2> ProductOps;
10533 ProductOps.push_back(Ops[1]);
10534 ProductOps.push_back(Ops[2]);
10535 Ops[1] =
10536 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10537 ProductOps, "vqdmlXl");
10538 Ops.pop_back();
10539
10540 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
10541 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
10542 ? Intrinsic::aarch64_neon_sqadd
10543 : Intrinsic::aarch64_neon_sqsub;
10544 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
10545 }
10546 case NEON::BI__builtin_neon_vget_lane_bf16:
10547 case NEON::BI__builtin_neon_vduph_lane_bf16:
10548 case NEON::BI__builtin_neon_vduph_lane_f16: {
10549 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10550 "vget_lane");
10551 }
10552 case NEON::BI__builtin_neon_vgetq_lane_bf16:
10553 case NEON::BI__builtin_neon_vduph_laneq_bf16:
10554 case NEON::BI__builtin_neon_vduph_laneq_f16: {
10555 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10556 "vgetq_lane");
10557 }
10558
10559 case AArch64::BI_InterlockedAdd: {
10560 Value *Arg0 = EmitScalarExpr(E->getArg(0));
10561 Value *Arg1 = EmitScalarExpr(E->getArg(1));
10562 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
10563 AtomicRMWInst::Add, Arg0, Arg1,
10564 llvm::AtomicOrdering::SequentiallyConsistent);
10565 return Builder.CreateAdd(RMWI, Arg1);
10566 }
10567 }
10568
10569 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
10570 llvm::Type *Ty = VTy;
10571 if (!Ty)
10572 return nullptr;
10573
10574 // Not all intrinsics handled by the common case work for AArch64 yet, so only
10575 // defer to common code if it's been added to our special map.
10576 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
10577 AArch64SIMDIntrinsicsProvenSorted);
10578
10579 if (Builtin)
10580 return EmitCommonNeonBuiltinExpr(
10581 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
10582 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
10583 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
10584
10585 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
10586 return V;
10587
10588 unsigned Int;
10589 switch (BuiltinID) {
10590 default: return nullptr;
10591 case NEON::BI__builtin_neon_vbsl_v:
10592 case NEON::BI__builtin_neon_vbslq_v: {
10593 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
10594 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
10595 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
10596 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
10597
10598 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
10599 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
10600 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
10601 return Builder.CreateBitCast(Ops[0], Ty);
10602 }
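// Note: the expansion above implements the bitwise select semantics of vbsl,
// i.e. result = (mask & Ops[1]) | (~mask & Ops[2]) with Ops[0] acting as the
// per-bit mask, all computed on the integer view of the vectors.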
10603 case NEON::BI__builtin_neon_vfma_lane_v:
10604 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
10605 // The ARM builtins (and instructions) have the addend as the first
10606 // operand, but the 'fma' intrinsics have it last. Swap it around here.
10607 Value *Addend = Ops[0];
10608 Value *Multiplicand = Ops[1];
10609 Value *LaneSource = Ops[2];
10610 Ops[0] = Multiplicand;
10611 Ops[1] = LaneSource;
10612 Ops[2] = Addend;
10613
10614 // Now adjust things to handle the lane access.
10615 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
10616 ? llvm::FixedVectorType::get(VTy->getElementType(),
10617 VTy->getNumElements() / 2)
10618 : VTy;
10619 llvm::Constant *cst = cast<Constant>(Ops[3]);
10620 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
10621 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
10622 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
10623
10624 Ops.pop_back();
10625 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
10626 : Intrinsic::fma;
10627 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
10628 }
10629 case NEON::BI__builtin_neon_vfma_laneq_v: {
10630 auto *VTy = cast<llvm::FixedVectorType>(Ty);
10631 // v1f64 fma should be mapped to Neon scalar f64 fma
10632 if (VTy && VTy->getElementType() == DoubleTy) {
10633 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10634 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10635 llvm::FixedVectorType *VTy =
10636 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
10637 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
10638 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10639 Value *Result;
10640 Result = emitCallMaybeConstrainedFPBuiltin(
10641 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
10642 DoubleTy, {Ops[1], Ops[2], Ops[0]});
10643 return Builder.CreateBitCast(Result, Ty);
10644 }
10645 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10646 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10647
10648 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
10649 VTy->getNumElements() * 2);
10650 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
10651 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
10652 cast<ConstantInt>(Ops[3]));
10653 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
10654
10655 return emitCallMaybeConstrainedFPBuiltin(
10656 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10657 {Ops[2], Ops[1], Ops[0]});
10658 }
10659 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
10660 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10661 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10662
10663 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10664 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
10665 return emitCallMaybeConstrainedFPBuiltin(
10666 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10667 {Ops[2], Ops[1], Ops[0]});
10668 }
10669 case NEON::BI__builtin_neon_vfmah_lane_f16:
10670 case NEON::BI__builtin_neon_vfmas_lane_f32:
10671 case NEON::BI__builtin_neon_vfmah_laneq_f16:
10672 case NEON::BI__builtin_neon_vfmas_laneq_f32:
10673 case NEON::BI__builtin_neon_vfmad_lane_f64:
10674 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
10675 Ops.push_back(EmitScalarExpr(E->getArg(3)));
10676 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
10677 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10678 return emitCallMaybeConstrainedFPBuiltin(
10679 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10680 {Ops[1], Ops[2], Ops[0]});
10681 }
10682 case NEON::BI__builtin_neon_vmull_v:
10683 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10684 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
10685 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
10686 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
10687 case NEON::BI__builtin_neon_vmax_v:
10688 case NEON::BI__builtin_neon_vmaxq_v:
10689 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10690 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
10691 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
10692 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
10693 case NEON::BI__builtin_neon_vmaxh_f16: {
10694 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10695 Int = Intrinsic::aarch64_neon_fmax;
10696 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
10697 }
10698 case NEON::BI__builtin_neon_vmin_v:
10699 case NEON::BI__builtin_neon_vminq_v:
10700 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10701 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
10702 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
10703 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
10704 case NEON::BI__builtin_neon_vminh_f16: {
10705 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10706 Int = Intrinsic::aarch64_neon_fmin;
10707 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
10708 }
10709 case NEON::BI__builtin_neon_vabd_v:
10710 case NEON::BI__builtin_neon_vabdq_v:
10711 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10712 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
10713 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
10714 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
10715 case NEON::BI__builtin_neon_vpadal_v:
10716 case NEON::BI__builtin_neon_vpadalq_v: {
10717 unsigned ArgElts = VTy->getNumElements();
10718 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
10719 unsigned BitWidth = EltTy->getBitWidth();
10720 auto *ArgTy = llvm::FixedVectorType::get(
10721 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
10722 llvm::Type* Tys[2] = { VTy, ArgTy };
10723 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
10724 SmallVector<llvm::Value*, 1> TmpOps;
10725 TmpOps.push_back(Ops[1]);
10726 Function *F = CGM.getIntrinsic(Int, Tys);
10727 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
10728 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
10729 return Builder.CreateAdd(tmp, addend);
10730 }
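// Note: vpadal(acc, v) is emitted here as a pairwise widening add of v
// (llvm.aarch64.neon.saddlp / uaddlp) followed by a plain vector add with the
// accumulator, rather than as a single accumulating intrinsic.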
10731 case NEON::BI__builtin_neon_vpmin_v:
10732 case NEON::BI__builtin_neon_vpminq_v:
10733 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10734 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
10735 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
10736 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
10737 case NEON::BI__builtin_neon_vpmax_v:
10738 case NEON::BI__builtin_neon_vpmaxq_v:
10739 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10740 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
10741 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
10742 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
10743 case NEON::BI__builtin_neon_vminnm_v:
10744 case NEON::BI__builtin_neon_vminnmq_v:
10745 Int = Intrinsic::aarch64_neon_fminnm;
10746 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
10747 case NEON::BI__builtin_neon_vminnmh_f16:
10748 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10749 Int = Intrinsic::aarch64_neon_fminnm;
10750 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
10751 case NEON::BI__builtin_neon_vmaxnm_v:
10752 case NEON::BI__builtin_neon_vmaxnmq_v:
10753 Int = Intrinsic::aarch64_neon_fmaxnm;
10754 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
10755 case NEON::BI__builtin_neon_vmaxnmh_f16:
10756 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10757 Int = Intrinsic::aarch64_neon_fmaxnm;
10758 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
10759 case NEON::BI__builtin_neon_vrecpss_f32: {
10760 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10761 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
10762 Ops, "vrecps");
10763 }
10764 case NEON::BI__builtin_neon_vrecpsd_f64:
10765 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10766 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
10767 Ops, "vrecps");
10768 case NEON::BI__builtin_neon_vrecpsh_f16:
10769 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10770 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
10771 Ops, "vrecps");
10772 case NEON::BI__builtin_neon_vqshrun_n_v:
10773 Int = Intrinsic::aarch64_neon_sqshrun;
10774 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
10775 case NEON::BI__builtin_neon_vqrshrun_n_v:
10776 Int = Intrinsic::aarch64_neon_sqrshrun;
10777 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
10778 case NEON::BI__builtin_neon_vqshrn_n_v:
10779 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
10780 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
10781 case NEON::BI__builtin_neon_vrshrn_n_v:
10782 Int = Intrinsic::aarch64_neon_rshrn;
10783 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
10784 case NEON::BI__builtin_neon_vqrshrn_n_v:
10785 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
10786 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
10787 case NEON::BI__builtin_neon_vrndah_f16: {
10788 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10789 Int = Builder.getIsFPConstrained()
10790 ? Intrinsic::experimental_constrained_round
10791 : Intrinsic::round;
10792 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10793 }
10794 case NEON::BI__builtin_neon_vrnda_v:
10795 case NEON::BI__builtin_neon_vrndaq_v: {
10796 Int = Builder.getIsFPConstrained()
10797 ? Intrinsic::experimental_constrained_round
10798 : Intrinsic::round;
10799 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10800 }
10801 case NEON::BI__builtin_neon_vrndih_f16: {
10802 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10803 Int = Builder.getIsFPConstrained()
10804 ? Intrinsic::experimental_constrained_nearbyint
10805 : Intrinsic::nearbyint;
10806 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10807 }
10808 case NEON::BI__builtin_neon_vrndmh_f16: {
10809 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10810 Int = Builder.getIsFPConstrained()
10811 ? Intrinsic::experimental_constrained_floor
10812 : Intrinsic::floor;
10813 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10814 }
10815 case NEON::BI__builtin_neon_vrndm_v:
10816 case NEON::BI__builtin_neon_vrndmq_v: {
10817 Int = Builder.getIsFPConstrained()
10818 ? Intrinsic::experimental_constrained_floor
10819 : Intrinsic::floor;
10820 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10821 }
10822 case NEON::BI__builtin_neon_vrndnh_f16: {
10823 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10824 Int = Builder.getIsFPConstrained()
10825 ? Intrinsic::experimental_constrained_roundeven
10826 : Intrinsic::roundeven;
10827 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10828 }
10829 case NEON::BI__builtin_neon_vrndn_v:
10830 case NEON::BI__builtin_neon_vrndnq_v: {
10831 Int = Builder.getIsFPConstrained()
10832 ? Intrinsic::experimental_constrained_roundeven
10833 : Intrinsic::roundeven;
10834 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10835 }
10836 case NEON::BI__builtin_neon_vrndns_f32: {
10837 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10838 Int = Builder.getIsFPConstrained()
10839 ? Intrinsic::experimental_constrained_roundeven
10840 : Intrinsic::roundeven;
10841 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10842 }
10843 case NEON::BI__builtin_neon_vrndph_f16: {
10844 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10845 Int = Builder.getIsFPConstrained()
10846 ? Intrinsic::experimental_constrained_ceil
10847 : Intrinsic::ceil;
10848 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10849 }
10850 case NEON::BI__builtin_neon_vrndp_v:
10851 case NEON::BI__builtin_neon_vrndpq_v: {
10852 Int = Builder.getIsFPConstrained()
10853 ? Intrinsic::experimental_constrained_ceil
10854 : Intrinsic::ceil;
10855 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10856 }
10857 case NEON::BI__builtin_neon_vrndxh_f16: {
10858 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10859 Int = Builder.getIsFPConstrained()
10860 ? Intrinsic::experimental_constrained_rint
10861 : Intrinsic::rint;
10862 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10863 }
10864 case NEON::BI__builtin_neon_vrndx_v:
10865 case NEON::BI__builtin_neon_vrndxq_v: {
10866 Int = Builder.getIsFPConstrained()
10867 ? Intrinsic::experimental_constrained_rint
10868 : Intrinsic::rint;
10869 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10870 }
10871 case NEON::BI__builtin_neon_vrndh_f16: {
10872 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10873 Int = Builder.getIsFPConstrained()
10874 ? Intrinsic::experimental_constrained_trunc
10875 : Intrinsic::trunc;
10876 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10877 }
10878 case NEON::BI__builtin_neon_vrnd32x_v:
10879 case NEON::BI__builtin_neon_vrnd32xq_v: {
10880 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10881 Int = Intrinsic::aarch64_neon_frint32x;
10882 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
10883 }
10884 case NEON::BI__builtin_neon_vrnd32z_v:
10885 case NEON::BI__builtin_neon_vrnd32zq_v: {
10886 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10887 Int = Intrinsic::aarch64_neon_frint32z;
10888 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
10889 }
10890 case NEON::BI__builtin_neon_vrnd64x_v:
10891 case NEON::BI__builtin_neon_vrnd64xq_v: {
10892 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10893 Int = Intrinsic::aarch64_neon_frint64x;
10894 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
10895 }
10896 case NEON::BI__builtin_neon_vrnd64z_v:
10897 case NEON::BI__builtin_neon_vrnd64zq_v: {
10898 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10899 Int = Intrinsic::aarch64_neon_frint64z;
10900 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
10901 }
10902 case NEON::BI__builtin_neon_vrnd_v:
10903 case NEON::BI__builtin_neon_vrndq_v: {
10904 Int = Builder.getIsFPConstrained()
10905 ? Intrinsic::experimental_constrained_trunc
10906 : Intrinsic::trunc;
10907 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10908 }
10909 case NEON::BI__builtin_neon_vcvt_f64_v:
10910 case NEON::BI__builtin_neon_vcvtq_f64_v:
10911 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10912 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10913 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10914 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10915 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10916 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10917 "unexpected vcvt_f64_f32 builtin");
10918 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10919 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10920
10921 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10922 }
10923 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10924 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10925 "unexpected vcvt_f32_f64 builtin");
10926 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10927 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10928
10929 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10930 }
10931 case NEON::BI__builtin_neon_vcvt_s32_v:
10932 case NEON::BI__builtin_neon_vcvt_u32_v:
10933 case NEON::BI__builtin_neon_vcvt_s64_v:
10934 case NEON::BI__builtin_neon_vcvt_u64_v:
10935 case NEON::BI__builtin_neon_vcvt_s16_v:
10936 case NEON::BI__builtin_neon_vcvt_u16_v:
10937 case NEON::BI__builtin_neon_vcvtq_s32_v:
10938 case NEON::BI__builtin_neon_vcvtq_u32_v:
10939 case NEON::BI__builtin_neon_vcvtq_s64_v:
10940 case NEON::BI__builtin_neon_vcvtq_u64_v:
10941 case NEON::BI__builtin_neon_vcvtq_s16_v:
10942 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10943 Int =
10944 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10945 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10946 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10947 }
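// The vcvta/vcvtm/vcvtn/vcvtp cases below all follow one pattern: choose the
// unsigned or signed aarch64_neon_fcvt{a,m,n,p}{u,s} intrinsic based on 'usgn'
// and overload it on { integer result type, floating-point source type }. The
// letter encodes the rounding direction: ties-away, toward -inf, to nearest
// even, and toward +inf, respectively.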
10948 case NEON::BI__builtin_neon_vcvta_s16_v:
10949 case NEON::BI__builtin_neon_vcvta_u16_v:
10950 case NEON::BI__builtin_neon_vcvta_s32_v:
10951 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10952 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10953 case NEON::BI__builtin_neon_vcvta_u32_v:
10954 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10955 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10956 case NEON::BI__builtin_neon_vcvta_s64_v:
10957 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10958 case NEON::BI__builtin_neon_vcvta_u64_v:
10959 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10960 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10961 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10962 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10963 }
10964 case NEON::BI__builtin_neon_vcvtm_s16_v:
10965 case NEON::BI__builtin_neon_vcvtm_s32_v:
10966 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10967 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10968 case NEON::BI__builtin_neon_vcvtm_u16_v:
10969 case NEON::BI__builtin_neon_vcvtm_u32_v:
10970 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10971 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10972 case NEON::BI__builtin_neon_vcvtm_s64_v:
10973 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10974 case NEON::BI__builtin_neon_vcvtm_u64_v:
10975 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10976 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10977 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10978 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10979 }
10980 case NEON::BI__builtin_neon_vcvtn_s16_v:
10981 case NEON::BI__builtin_neon_vcvtn_s32_v:
10982 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10983 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10984 case NEON::BI__builtin_neon_vcvtn_u16_v:
10985 case NEON::BI__builtin_neon_vcvtn_u32_v:
10986 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10987 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10988 case NEON::BI__builtin_neon_vcvtn_s64_v:
10989 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10990 case NEON::BI__builtin_neon_vcvtn_u64_v:
10991 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10992 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10993 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10994 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10995 }
10996 case NEON::BI__builtin_neon_vcvtp_s16_v:
10997 case NEON::BI__builtin_neon_vcvtp_s32_v:
10998 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10999 case NEON::BI__builtin_neon_vcvtpq_s32_v:
11000 case NEON::BI__builtin_neon_vcvtp_u16_v:
11001 case NEON::BI__builtin_neon_vcvtp_u32_v:
11002 case NEON::BI__builtin_neon_vcvtpq_u16_v:
11003 case NEON::BI__builtin_neon_vcvtpq_u32_v:
11004 case NEON::BI__builtin_neon_vcvtp_s64_v:
11005 case NEON::BI__builtin_neon_vcvtpq_s64_v:
11006 case NEON::BI__builtin_neon_vcvtp_u64_v:
11007 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
11008 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
11009 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
11010 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
11011 }
11012 case NEON::BI__builtin_neon_vmulx_v:
11013 case NEON::BI__builtin_neon_vmulxq_v: {
11014 Int = Intrinsic::aarch64_neon_fmulx;
11015 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
11016 }
11017 case NEON::BI__builtin_neon_vmulxh_lane_f16:
11018 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
11019 // vmulx_lane should be mapped to Neon scalar mulx after
11020 // extracting the scalar element
11021 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11022 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
11023 Ops.pop_back();
11024 Int = Intrinsic::aarch64_neon_fmulx;
11025 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
11026 }
11027 case NEON::BI__builtin_neon_vmul_lane_v:
11028 case NEON::BI__builtin_neon_vmul_laneq_v: {
11029 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
11030 bool Quad = false;
11031 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
11032 Quad = true;
11033 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11034 llvm::FixedVectorType *VTy =
11035 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
11036 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
11037 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
11038 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
11039 return Builder.CreateBitCast(Result, Ty);
11040 }
11041 case NEON::BI__builtin_neon_vnegd_s64:
11042 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
11043 case NEON::BI__builtin_neon_vnegh_f16:
11044 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
11045 case NEON::BI__builtin_neon_vpmaxnm_v:
11046 case NEON::BI__builtin_neon_vpmaxnmq_v: {
11047 Int = Intrinsic::aarch64_neon_fmaxnmp;
11048 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
11049 }
11050 case NEON::BI__builtin_neon_vpminnm_v:
11051 case NEON::BI__builtin_neon_vpminnmq_v: {
11052 Int = Intrinsic::aarch64_neon_fminnmp;
11053 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
11054 }
11055 case NEON::BI__builtin_neon_vsqrth_f16: {
11056 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11057 Int = Builder.getIsFPConstrained()
11058 ? Intrinsic::experimental_constrained_sqrt
11059 : Intrinsic::sqrt;
11060 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
11061 }
11062 case NEON::BI__builtin_neon_vsqrt_v:
11063 case NEON::BI__builtin_neon_vsqrtq_v: {
11064 Int = Builder.getIsFPConstrained()
11065 ? Intrinsic::experimental_constrained_sqrt
11066 : Intrinsic::sqrt;
11067 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11068 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
11069 }
11070 case NEON::BI__builtin_neon_vrbit_v:
11071 case NEON::BI__builtin_neon_vrbitq_v: {
11072 Int = Intrinsic::bitreverse;
11073 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
11074 }
11075 case NEON::BI__builtin_neon_vaddv_u8:
11076 // FIXME: These are handled by the AArch64 scalar code.
11077 usgn = true;
11078 LLVM_FALLTHROUGH;
11079 case NEON::BI__builtin_neon_vaddv_s8: {
11080 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11081 Ty = Int32Ty;
11082 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11083 llvm::Type *Tys[2] = { Ty, VTy };
11084 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11085 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11086 return Builder.CreateTrunc(Ops[0], Int8Ty);
11087 }
11088 case NEON::BI__builtin_neon_vaddv_u16:
11089 usgn = true;
11090 LLVM_FALLTHROUGH;
11091 case NEON::BI__builtin_neon_vaddv_s16: {
11092 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11093 Ty = Int32Ty;
11094 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11095 llvm::Type *Tys[2] = { Ty, VTy };
11096 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11097 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11098 return Builder.CreateTrunc(Ops[0], Int16Ty);
11099 }
11100 case NEON::BI__builtin_neon_vaddvq_u8:
11101 usgn = true;
11102 LLVM_FALLTHROUGH;
11103 case NEON::BI__builtin_neon_vaddvq_s8: {
11104 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11105 Ty = Int32Ty;
11106 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11107 llvm::Type *Tys[2] = { Ty, VTy };
11108 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11109 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11110 return Builder.CreateTrunc(Ops[0], Int8Ty);
11111 }
11112 case NEON::BI__builtin_neon_vaddvq_u16:
11113 usgn = true;
11114 LLVM_FALLTHROUGH;
11115 case NEON::BI__builtin_neon_vaddvq_s16: {
11116 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11117 Ty = Int32Ty;
11118 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11119 llvm::Type *Tys[2] = { Ty, VTy };
11120 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11121 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11122 return Builder.CreateTrunc(Ops[0], Int16Ty);
11123 }
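// The across-vector max/min reductions below share one shape: call the
// aarch64_neon_{u,s}{max,min}v (or f{max,min}v for f16) intrinsic with a
// scalar result type, then truncate back to the element width of the source
// vector.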
11124 case NEON::BI__builtin_neon_vmaxv_u8: {
11125 Int = Intrinsic::aarch64_neon_umaxv;
11126 Ty = Int32Ty;
11127 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11128 llvm::Type *Tys[2] = { Ty, VTy };
11129 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11130 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11131 return Builder.CreateTrunc(Ops[0], Int8Ty);
11132 }
11133 case NEON::BI__builtin_neon_vmaxv_u16: {
11134 Int = Intrinsic::aarch64_neon_umaxv;
11135 Ty = Int32Ty;
11136 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11137 llvm::Type *Tys[2] = { Ty, VTy };
11138 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11139 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11140 return Builder.CreateTrunc(Ops[0], Int16Ty);
11141 }
11142 case NEON::BI__builtin_neon_vmaxvq_u8: {
11143 Int = Intrinsic::aarch64_neon_umaxv;
11144 Ty = Int32Ty;
11145 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11146 llvm::Type *Tys[2] = { Ty, VTy };
11147 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11148 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11149 return Builder.CreateTrunc(Ops[0], Int8Ty);
11150 }
11151 case NEON::BI__builtin_neon_vmaxvq_u16: {
11152 Int = Intrinsic::aarch64_neon_umaxv;
11153 Ty = Int32Ty;
11154 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11155 llvm::Type *Tys[2] = { Ty, VTy };
11156 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11157 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11158 return Builder.CreateTrunc(Ops[0], Int16Ty);
11159 }
11160 case NEON::BI__builtin_neon_vmaxv_s8: {
11161 Int = Intrinsic::aarch64_neon_smaxv;
11162 Ty = Int32Ty;
11163 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11164 llvm::Type *Tys[2] = { Ty, VTy };
11165 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11166 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11167 return Builder.CreateTrunc(Ops[0], Int8Ty);
11168 }
11169 case NEON::BI__builtin_neon_vmaxv_s16: {
11170 Int = Intrinsic::aarch64_neon_smaxv;
11171 Ty = Int32Ty;
11172 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11173 llvm::Type *Tys[2] = { Ty, VTy };
11174 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11175 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11176 return Builder.CreateTrunc(Ops[0], Int16Ty);
11177 }
11178 case NEON::BI__builtin_neon_vmaxvq_s8: {
11179 Int = Intrinsic::aarch64_neon_smaxv;
11180 Ty = Int32Ty;
11181 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11182 llvm::Type *Tys[2] = { Ty, VTy };
11183 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11184 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11185 return Builder.CreateTrunc(Ops[0], Int8Ty);
11186 }
11187 case NEON::BI__builtin_neon_vmaxvq_s16: {
11188 Int = Intrinsic::aarch64_neon_smaxv;
11189 Ty = Int32Ty;
11190 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11191 llvm::Type *Tys[2] = { Ty, VTy };
11192 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11193 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11194 return Builder.CreateTrunc(Ops[0], Int16Ty);
11195 }
11196 case NEON::BI__builtin_neon_vmaxv_f16: {
11197 Int = Intrinsic::aarch64_neon_fmaxv;
11198 Ty = HalfTy;
11199 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11200 llvm::Type *Tys[2] = { Ty, VTy };
11201 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11202 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11203 return Builder.CreateTrunc(Ops[0], HalfTy);
11204 }
11205 case NEON::BI__builtin_neon_vmaxvq_f16: {
11206 Int = Intrinsic::aarch64_neon_fmaxv;
11207 Ty = HalfTy;
11208 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11209 llvm::Type *Tys[2] = { Ty, VTy };
11210 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11211 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11212 return Builder.CreateTrunc(Ops[0], HalfTy);
11213 }
11214 case NEON::BI__builtin_neon_vminv_u8: {
11215 Int = Intrinsic::aarch64_neon_uminv;
11216 Ty = Int32Ty;
11217 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11218 llvm::Type *Tys[2] = { Ty, VTy };
11219 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11220 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11221 return Builder.CreateTrunc(Ops[0], Int8Ty);
11222 }
11223 case NEON::BI__builtin_neon_vminv_u16: {
11224 Int = Intrinsic::aarch64_neon_uminv;
11225 Ty = Int32Ty;
11226 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11227 llvm::Type *Tys[2] = { Ty, VTy };
11228 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11229 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11230 return Builder.CreateTrunc(Ops[0], Int16Ty);
11231 }
11232 case NEON::BI__builtin_neon_vminvq_u8: {
11233 Int = Intrinsic::aarch64_neon_uminv;
11234 Ty = Int32Ty;
11235 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11236 llvm::Type *Tys[2] = { Ty, VTy };
11237 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11238 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11239 return Builder.CreateTrunc(Ops[0], Int8Ty);
11240 }
11241 case NEON::BI__builtin_neon_vminvq_u16: {
11242 Int = Intrinsic::aarch64_neon_uminv;
11243 Ty = Int32Ty;
11244 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11245 llvm::Type *Tys[2] = { Ty, VTy };
11246 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11247 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11248 return Builder.CreateTrunc(Ops[0], Int16Ty);
11249 }
11250 case NEON::BI__builtin_neon_vminv_s8: {
11251 Int = Intrinsic::aarch64_neon_sminv;
11252 Ty = Int32Ty;
11253 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11254 llvm::Type *Tys[2] = { Ty, VTy };
11255 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11256 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11257 return Builder.CreateTrunc(Ops[0], Int8Ty);
11258 }
11259 case NEON::BI__builtin_neon_vminv_s16: {
11260 Int = Intrinsic::aarch64_neon_sminv;
11261 Ty = Int32Ty;
11262 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11263 llvm::Type *Tys[2] = { Ty, VTy };
11264 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11265 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11266 return Builder.CreateTrunc(Ops[0], Int16Ty);
11267 }
11268 case NEON::BI__builtin_neon_vminvq_s8: {
11269 Int = Intrinsic::aarch64_neon_sminv;
11270 Ty = Int32Ty;
11271 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11272 llvm::Type *Tys[2] = { Ty, VTy };
11273 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11274 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11275 return Builder.CreateTrunc(Ops[0], Int8Ty);
11276 }
11277 case NEON::BI__builtin_neon_vminvq_s16: {
11278 Int = Intrinsic::aarch64_neon_sminv;
11279 Ty = Int32Ty;
11280 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11281 llvm::Type *Tys[2] = { Ty, VTy };
11282 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11283 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11284 return Builder.CreateTrunc(Ops[0], Int16Ty);
11285 }
11286 case NEON::BI__builtin_neon_vminv_f16: {
11287 Int = Intrinsic::aarch64_neon_fminv;
11288 Ty = HalfTy;
11289 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11290 llvm::Type *Tys[2] = { Ty, VTy };
11291 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11292 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11293 return Builder.CreateTrunc(Ops[0], HalfTy);
11294 }
11295 case NEON::BI__builtin_neon_vminvq_f16: {
11296 Int = Intrinsic::aarch64_neon_fminv;
11297 Ty = HalfTy;
11298 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11299 llvm::Type *Tys[2] = { Ty, VTy };
11300 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11301 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11302 return Builder.CreateTrunc(Ops[0], HalfTy);
11303 }
11304 case NEON::BI__builtin_neon_vmaxnmv_f16: {
11305 Int = Intrinsic::aarch64_neon_fmaxnmv;
11306 Ty = HalfTy;
11307 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11308 llvm::Type *Tys[2] = { Ty, VTy };
11309 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11310 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
11311 return Builder.CreateTrunc(Ops[0], HalfTy);
11312 }
11313 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
11314 Int = Intrinsic::aarch64_neon_fmaxnmv;
11315 Ty = HalfTy;
11316 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11317 llvm::Type *Tys[2] = { Ty, VTy };
11318 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11319 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
11320 return Builder.CreateTrunc(Ops[0], HalfTy);
11321 }
11322 case NEON::BI__builtin_neon_vminnmv_f16: {
11323 Int = Intrinsic::aarch64_neon_fminnmv;
11324 Ty = HalfTy;
11325 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11326 llvm::Type *Tys[2] = { Ty, VTy };
11327 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11328 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
11329 return Builder.CreateTrunc(Ops[0], HalfTy);
11330 }
11331 case NEON::BI__builtin_neon_vminnmvq_f16: {
11332 Int = Intrinsic::aarch64_neon_fminnmv;
11333 Ty = HalfTy;
11334 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11335 llvm::Type *Tys[2] = { Ty, VTy };
11336 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11337 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
11338 return Builder.CreateTrunc(Ops[0], HalfTy);
11339 }
11340 case NEON::BI__builtin_neon_vmul_n_f64: {
11341 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11342 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
11343 return Builder.CreateFMul(Ops[0], RHS);
11344 }
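// vaddlv/vaddlvq: widening add across the vector. The aarch64_neon_{u,s}addlv
// intrinsic is emitted with an i32 result; the 8-bit-element forms are then
// truncated to i16 to match the builtin's 16-bit return type.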
11345 case NEON::BI__builtin_neon_vaddlv_u8: {
11346 Int = Intrinsic::aarch64_neon_uaddlv;
11347 Ty = Int32Ty;
11348 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11349 llvm::Type *Tys[2] = { Ty, VTy };
11350 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11351 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11352 return Builder.CreateTrunc(Ops[0], Int16Ty);
11353 }
11354 case NEON::BI__builtin_neon_vaddlv_u16: {
11355 Int = Intrinsic::aarch64_neon_uaddlv;
11356 Ty = Int32Ty;
11357 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11358 llvm::Type *Tys[2] = { Ty, VTy };
11359 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11360 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11361 }
11362 case NEON::BI__builtin_neon_vaddlvq_u8: {
11363 Int = Intrinsic::aarch64_neon_uaddlv;
11364 Ty = Int32Ty;
11365 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11366 llvm::Type *Tys[2] = { Ty, VTy };
11367 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11368 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11369 return Builder.CreateTrunc(Ops[0], Int16Ty);
11370 }
11371 case NEON::BI__builtin_neon_vaddlvq_u16: {
11372 Int = Intrinsic::aarch64_neon_uaddlv;
11373 Ty = Int32Ty;
11374 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11375 llvm::Type *Tys[2] = { Ty, VTy };
11376 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11377 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11378 }
11379 case NEON::BI__builtin_neon_vaddlv_s8: {
11380 Int = Intrinsic::aarch64_neon_saddlv;
11381 Ty = Int32Ty;
11382 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11383 llvm::Type *Tys[2] = { Ty, VTy };
11384 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11385 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11386 return Builder.CreateTrunc(Ops[0], Int16Ty);
11387 }
11388 case NEON::BI__builtin_neon_vaddlv_s16: {
11389 Int = Intrinsic::aarch64_neon_saddlv;
11390 Ty = Int32Ty;
11391 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11392 llvm::Type *Tys[2] = { Ty, VTy };
11393 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11394 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11395 }
11396 case NEON::BI__builtin_neon_vaddlvq_s8: {
11397 Int = Intrinsic::aarch64_neon_saddlv;
11398 Ty = Int32Ty;
11399 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11400 llvm::Type *Tys[2] = { Ty, VTy };
11401 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11402 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11403 return Builder.CreateTrunc(Ops[0], Int16Ty);
11404 }
11405 case NEON::BI__builtin_neon_vaddlvq_s16: {
11406 Int = Intrinsic::aarch64_neon_saddlv;
11407 Ty = Int32Ty;
11408 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11409 llvm::Type *Tys[2] = { Ty, VTy };
11410 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11411 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11412 }
11413 case NEON::BI__builtin_neon_vsri_n_v:
11414 case NEON::BI__builtin_neon_vsriq_n_v: {
11415 Int = Intrinsic::aarch64_neon_vsri;
11416 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
11417 return EmitNeonCall(Intrin, Ops, "vsri_n");
11418 }
11419 case NEON::BI__builtin_neon_vsli_n_v:
11420 case NEON::BI__builtin_neon_vsliq_n_v: {
11421 Int = Intrinsic::aarch64_neon_vsli;
11422 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
11423 return EmitNeonCall(Intrin, Ops, "vsli_n");
11424 }
11425 case NEON::BI__builtin_neon_vsra_n_v:
11426 case NEON::BI__builtin_neon_vsraq_n_v:
11427 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11428 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
11429 return Builder.CreateAdd(Ops[0], Ops[1]);
11430 case NEON::BI__builtin_neon_vrsra_n_v:
11431 case NEON::BI__builtin_neon_vrsraq_n_v: {
11432 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
11433 SmallVector<llvm::Value*,2> TmpOps;
11434 TmpOps.push_back(Ops[1]);
11435 TmpOps.push_back(Ops[2]);
11436 Function* F = CGM.getIntrinsic(Int, Ty);
11437 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
11438 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
11439 return Builder.CreateAdd(Ops[0], tmp);
11440 }
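// The plain vld1/vst1 cases below bitcast the pointer operand to a pointer to
// the vector type and use the alignment recorded for the original pointer
// argument (PtrOp0) rather than assuming the vector's natural alignment.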
11441 case NEON::BI__builtin_neon_vld1_v:
11442 case NEON::BI__builtin_neon_vld1q_v: {
11443 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
11444 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
11445 }
11446 case NEON::BI__builtin_neon_vst1_v:
11447 case NEON::BI__builtin_neon_vst1q_v:
11448 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
11449 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
11450 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
11451 case NEON::BI__builtin_neon_vld1_lane_v:
11452 case NEON::BI__builtin_neon_vld1q_lane_v: {
11453 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11454 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11455 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11456 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11457 PtrOp0.getAlignment());
11458 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
11459 }
11460 case NEON::BI__builtin_neon_vld1_dup_v:
11461 case NEON::BI__builtin_neon_vld1q_dup_v: {
11462 Value *V = UndefValue::get(Ty);
11463 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11464 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11465 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11466 PtrOp0.getAlignment());
11467 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
11468 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
11469 return EmitNeonSplat(Ops[0], CI);
11470 }
11471 case NEON::BI__builtin_neon_vst1_lane_v:
11472 case NEON::BI__builtin_neon_vst1q_lane_v:
11473 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11474 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
11475 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11476 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
11477 PtrOp0.getAlignment());
11478 case NEON::BI__builtin_neon_vld2_v:
11479 case NEON::BI__builtin_neon_vld2q_v: {
11480 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11481 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11482 llvm::Type *Tys[2] = { VTy, PTy };
11483 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
11484 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11485 Ops[0] = Builder.CreateBitCast(Ops[0],
11486 llvm::PointerType::getUnqual(Ops[1]->getType()));
11487 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11488 }
11489 case NEON::BI__builtin_neon_vld3_v:
11490 case NEON::BI__builtin_neon_vld3q_v: {
11491 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11492 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11493 llvm::Type *Tys[2] = { VTy, PTy };
11494 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
11495 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11496 Ops[0] = Builder.CreateBitCast(Ops[0],
11497 llvm::PointerType::getUnqual(Ops[1]->getType()));
11498 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11499 }
11500 case NEON::BI__builtin_neon_vld4_v:
11501 case NEON::BI__builtin_neon_vld4q_v: {
11502 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11503 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11504 llvm::Type *Tys[2] = { VTy, PTy };
11505 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
11506 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11507 Ops[0] = Builder.CreateBitCast(Ops[0],
11508 llvm::PointerType::getUnqual(Ops[1]->getType()));
11509 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11510 }
11511 case NEON::BI__builtin_neon_vld2_dup_v:
11512 case NEON::BI__builtin_neon_vld2q_dup_v: {
11513 llvm::Type *PTy =
11514 llvm::PointerType::getUnqual(VTy->getElementType());
11515 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11516 llvm::Type *Tys[2] = { VTy, PTy };
11517 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
11518 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11519 Ops[0] = Builder.CreateBitCast(Ops[0],
11520 llvm::PointerType::getUnqual(Ops[1]->getType()));
11521 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11522 }
11523 case NEON::BI__builtin_neon_vld3_dup_v:
11524 case NEON::BI__builtin_neon_vld3q_dup_v: {
11525 llvm::Type *PTy =
11526 llvm::PointerType::getUnqual(VTy->getElementType());
11527 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11528 llvm::Type *Tys[2] = { VTy, PTy };
11529 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
11530 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11531 Ops[0] = Builder.CreateBitCast(Ops[0],
11532 llvm::PointerType::getUnqual(Ops[1]->getType()));
11533 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11534 }
11535 case NEON::BI__builtin_neon_vld4_dup_v:
11536 case NEON::BI__builtin_neon_vld4q_dup_v: {
11537 llvm::Type *PTy =
11538 llvm::PointerType::getUnqual(VTy->getElementType());
11539 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11540 llvm::Type *Tys[2] = { VTy, PTy };
11541 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
11542 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11543 Ops[0] = Builder.CreateBitCast(Ops[0],
11544 llvm::PointerType::getUnqual(Ops[1]->getType()));
11545 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11546 }
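// For the vldN_lane forms below, the operand list is rotated into the order
// the aarch64_neon_ldNlane intrinsic expects, the lane index is zero-extended
// to i64, and the aggregate result is stored through the pointer in Ops[0].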
11547 case NEON::BI__builtin_neon_vld2_lane_v:
11548 case NEON::BI__builtin_neon_vld2q_lane_v: {
11549 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11550 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
11551 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11552 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11553 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11554 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11555 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
11556 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11557 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11558 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11559 }
11560 case NEON::BI__builtin_neon_vld3_lane_v:
11561 case NEON::BI__builtin_neon_vld3q_lane_v: {
11562 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11563 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
11564 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11565 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11566 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11567 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11568 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11569 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
11570 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11571 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11572 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11573 }
11574 case NEON::BI__builtin_neon_vld4_lane_v:
11575 case NEON::BI__builtin_neon_vld4q_lane_v: {
11576 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11577 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
11578 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11579 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11580 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11581 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11582 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
11583 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
11584 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
11585 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11586 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11587 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11588 }
11589 case NEON::BI__builtin_neon_vst2_v:
11590 case NEON::BI__builtin_neon_vst2q_v: {
11591 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11592 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
11593 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
11594 Ops, "");
11595 }
11596 case NEON::BI__builtin_neon_vst2_lane_v:
11597 case NEON::BI__builtin_neon_vst2q_lane_v: {
11598 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11599 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
11600 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11601 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
11602 Ops, "");
11603 }
11604 case NEON::BI__builtin_neon_vst3_v:
11605 case NEON::BI__builtin_neon_vst3q_v: {
11606 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11607 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11608 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
11609 Ops, "");
11610 }
11611 case NEON::BI__builtin_neon_vst3_lane_v:
11612 case NEON::BI__builtin_neon_vst3q_lane_v: {
11613 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11614 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11615 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11616 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
11617 Ops, "");
11618 }
11619 case NEON::BI__builtin_neon_vst4_v:
11620 case NEON::BI__builtin_neon_vst4q_v: {
11621 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11622 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11623 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
11624 Ops, "");
11625 }
11626 case NEON::BI__builtin_neon_vst4_lane_v:
11627 case NEON::BI__builtin_neon_vst4q_lane_v: {
11628 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11629 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11630 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
11631 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
11632 Ops, "");
11633 }
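// vtrn/vuzp/vzip are lowered without target intrinsics: each half of the
// result is built with a shufflevector over the two inputs and stored to
// consecutive vector-sized slots of the result pointer.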
11634 case NEON::BI__builtin_neon_vtrn_v:
11635 case NEON::BI__builtin_neon_vtrnq_v: {
11636 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11637 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11638 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11639 Value *SV = nullptr;
11640
11641 for (unsigned vi = 0; vi != 2; ++vi) {
11642 SmallVector<int, 16> Indices;
11643 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11644 Indices.push_back(i+vi);
11645 Indices.push_back(i+e+vi);
11646 }
11647 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11648 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
11649 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11650 }
11651 return SV;
11652 }
11653 case NEON::BI__builtin_neon_vuzp_v:
11654 case NEON::BI__builtin_neon_vuzpq_v: {
11655 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11656 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11657 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11658 Value *SV = nullptr;
11659
11660 for (unsigned vi = 0; vi != 2; ++vi) {
11661 SmallVector<int, 16> Indices;
11662 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
11663 Indices.push_back(2*i+vi);
11664
11665 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11666 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
11667 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11668 }
11669 return SV;
11670 }
11671 case NEON::BI__builtin_neon_vzip_v:
11672 case NEON::BI__builtin_neon_vzipq_v: {
11673 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11674 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11675 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11676 Value *SV = nullptr;
11677
11678 for (unsigned vi = 0; vi != 2; ++vi) {
11679 SmallVector<int, 16> Indices;
11680 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11681 Indices.push_back((i + vi*e) >> 1);
11682 Indices.push_back(((i + vi*e) >> 1)+e);
11683 }
11684 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11685 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
11686 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11687 }
11688 return SV;
11689 }
11690 case NEON::BI__builtin_neon_vqtbl1q_v: {
11691 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
11692 Ops, "vtbl1");
11693 }
11694 case NEON::BI__builtin_neon_vqtbl2q_v: {
11695 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
11696 Ops, "vtbl2");
11697 }
11698 case NEON::BI__builtin_neon_vqtbl3q_v: {
11699 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
11700 Ops, "vtbl3");
11701 }
11702 case NEON::BI__builtin_neon_vqtbl4q_v: {
11703 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
11704 Ops, "vtbl4");
11705 }
11706 case NEON::BI__builtin_neon_vqtbx1q_v: {
11707 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
11708 Ops, "vtbx1");
11709 }
11710 case NEON::BI__builtin_neon_vqtbx2q_v: {
11711 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
11712 Ops, "vtbx2");
11713 }
11714 case NEON::BI__builtin_neon_vqtbx3q_v: {
11715 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
11716 Ops, "vtbx3");
11717 }
11718 case NEON::BI__builtin_neon_vqtbx4q_v: {
11719 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
11720 Ops, "vtbx4");
11721 }
11722 case NEON::BI__builtin_neon_vsqadd_v:
11723 case NEON::BI__builtin_neon_vsqaddq_v: {
11724 Int = Intrinsic::aarch64_neon_usqadd;
11725 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
11726 }
11727 case NEON::BI__builtin_neon_vuqadd_v:
11728 case NEON::BI__builtin_neon_vuqaddq_v: {
11729 Int = Intrinsic::aarch64_neon_suqadd;
11730 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
11731 }
11732 }
11733}
11734
11735Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
11736 const CallExpr *E) {
11737 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
11738 BuiltinID == BPF::BI__builtin_btf_type_id ||
11739 BuiltinID == BPF::BI__builtin_preserve_type_info ||
11740 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
11741 "unexpected BPF builtin");
11742
11743 // A sequence number, injected into IR builtin functions, to
11744 // prevent CSE, given that the only difference between the functions
11745 // may just be the debuginfo metadata.
11746 static uint32_t BuiltinSeqNum;
11747
11748 switch (BuiltinID) {
11749 default:
11750 llvm_unreachable("Unexpected BPF builtin");
11751 case BPF::BI__builtin_preserve_field_info: {
11752 const Expr *Arg = E->getArg(0);
11753 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
11754
11755 if (!getDebugInfo()) {
11756 CGM.Error(E->getExprLoc(),
11757 "using __builtin_preserve_field_info() without -g");
11758 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11759 : EmitLValue(Arg).getPointer(*this);
11760 }
11761
11762 // Enable underlying preserve_*_access_index() generation.
11763 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
11764 IsInPreservedAIRegion = true;
11765 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11766 : EmitLValue(Arg).getPointer(*this);
11767 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
11768
11769 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11770 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
11771
11772 // Build the IR for the preserve_field_info intrinsic.
11773 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
11774 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
11775 {FieldAddr->getType()});
11776 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
11777 }
11778 case BPF::BI__builtin_btf_type_id:
11779 case BPF::BI__builtin_preserve_type_info: {
11780 if (!getDebugInfo()) {
11781 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11782 return nullptr;
11783 }
11784
11785 const Expr *Arg0 = E->getArg(0);
11786 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11787 Arg0->getType(), Arg0->getExprLoc());
11788
11789 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11790 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11791 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11792
11793 llvm::Function *FnDecl;
11794 if (BuiltinID == BPF::BI__builtin_btf_type_id)
11795 FnDecl = llvm::Intrinsic::getDeclaration(
11796 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
11797 else
11798 FnDecl = llvm::Intrinsic::getDeclaration(
11799 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
11800 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
11801 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11802 return Fn;
11803 }
11804 case BPF::BI__builtin_preserve_enum_value: {
11805 if (!getDebugInfo()) {
11806 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11807 return nullptr;
11808 }
11809
11810 const Expr *Arg0 = E->getArg(0);
11811 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11812 Arg0->getType(), Arg0->getExprLoc());
11813
11814 // Find enumerator
11815 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
11816 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
11817 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
11818 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
11819
11820 auto &InitVal = Enumerator->getInitVal();
11821 std::string InitValStr;
11822 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
11823 InitValStr = std::to_string(InitVal.getSExtValue());
11824 else
11825 InitValStr = std::to_string(InitVal.getZExtValue());
11826 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
11827 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11828
11829 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11830 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11831 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11832
11833 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11834 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11835 CallInst *Fn =
11836 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11837 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11838 return Fn;
11839 }
11840 }
11841}
11842
11843llvm::Value *CodeGenFunction::
11844BuildVector(ArrayRef<llvm::Value*> Ops) {
11845 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11846 "Not a power-of-two sized vector!");
11847 bool AllConstants = true;
11848 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11849 AllConstants &= isa<Constant>(Ops[i]);
11850
11851 // If this is a constant vector, create a ConstantVector.
11852 if (AllConstants) {
11853 SmallVector<llvm::Constant*, 16> CstOps;
11854 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11855 CstOps.push_back(cast<Constant>(Ops[i]));
11856 return llvm::ConstantVector::get(CstOps);
11857 }
11858
11859 // Otherwise, insertelement the values to build the vector.
11860 Value *Result = llvm::UndefValue::get(
11861 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11862
11863 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11864 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11865
11866 return Result;
11867}
11868
11869// Convert the mask from an integer type to a vector of i1.
11870static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11871 unsigned NumElts) {
11872
11873 auto *MaskTy = llvm::FixedVectorType::get(
11874 CGF.Builder.getInt1Ty(),
11875 cast<IntegerType>(Mask->getType())->getBitWidth());
11876 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11877
11878 // If we have less than 8 elements, then the starting mask was an i8 and
11879 // we need to extract down to the right number of elements.
11880 if (NumElts < 8) {
11881 int Indices[4];
11882 for (unsigned i = 0; i != NumElts; ++i)
11883 Indices[i] = i;
11884 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11885 makeArrayRef(Indices, NumElts),
11886 "extract");
11887 }
11888 return MaskVec;
11889}
11890
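// Masked store helper: bitcast the pointer to a pointer to the stored vector
// type, turn the integer mask into an <N x i1> vector, and emit a masked
// store with the requested alignment.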
11891static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11892 Align Alignment) {
11893 // Cast the pointer to the right type.
11894 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11895 llvm::PointerType::getUnqual(Ops[1]->getType()));
11896
11897 Value *MaskVec = getMaskVecValue(
11898 CGF, Ops[2],
11899 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11900
11901 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11902}
11903
11904static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11905 Align Alignment) {
11906 // Cast the pointer to the right type.
11907 llvm::Type *Ty = Ops[1]->getType();
11908 Value *Ptr =
11909 CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11910
11911 Value *MaskVec = getMaskVecValue(
11912 CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
11913
11914 return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
11915}
11916
11917static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11918 ArrayRef<Value *> Ops) {
11919 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11920 llvm::Type *PtrTy = ResultTy->getElementType();
11921
11922 // Cast the pointer to the element type.
11923 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11924 llvm::PointerType::getUnqual(PtrTy));
11925
11926 Value *MaskVec = getMaskVecValue(
11927 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11928
11929 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11930 ResultTy);
11931 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11932}
11933
11934static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11935 ArrayRef<Value *> Ops,
11936 bool IsCompress) {
11937 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11938
11939 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11940
11941 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11942 : Intrinsic::x86_avx512_mask_expand;
11943 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11944 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11945}
11946
11947static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11948 ArrayRef<Value *> Ops) {
11949 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11950 llvm::Type *PtrTy = ResultTy->getElementType();
11951
11952 // Cast the pointer to the element type.
11953 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11954 llvm::PointerType::getUnqual(PtrTy));
11955
11956 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11957
11958 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11959 ResultTy);
11960 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11961}
11962
11963static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11964 ArrayRef<Value *> Ops,
11965 bool InvertLHS = false) {
11966 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11967 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11968 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11969
11970 if (InvertLHS)
11971 LHS = CGF.Builder.CreateNot(LHS);
11972
11973 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11974 Ops[0]->getType());
11975}
11976
11977static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11978 Value *Amt, bool IsRight) {
11979 llvm::Type *Ty = Op0->getType();
11980
11981 // Amount may be scalar immediate, in which case create a splat vector.
11982 // Funnel shift amounts are treated as modulo and types are all power-of-2 so
11983 // we only care about the lowest log2 bits anyway.
11984 if (Amt->getType() != Ty) {
11985 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11986 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11987 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11988 }
11989
11990 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11991 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11992 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11993}
11994
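// XOP vpcom/vpcomu: the low 3 bits of the immediate select the comparison
// predicate (lt, le, gt, ge, eq, ne); values 6 and 7 fold directly to
// all-zeros and all-ones. The i1 compare result is sign-extended back to the
// operand type.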
11995static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11996 bool IsSigned) {
11997 Value *Op0 = Ops[0];
11998 Value *Op1 = Ops[1];
11999 llvm::Type *Ty = Op0->getType();
12000 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
12001
12002 CmpInst::Predicate Pred;
12003 switch (Imm) {
12004 case 0x0:
12005 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
12006 break;
12007 case 0x1:
12008 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
12009 break;
12010 case 0x2:
12011 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
12012 break;
12013 case 0x3:
12014 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
12015 break;
12016 case 0x4:
12017 Pred = ICmpInst::ICMP_EQ;
12018 break;
12019 case 0x5:
12020 Pred = ICmpInst::ICMP_NE;
12021 break;
12022 case 0x6:
12023 return llvm::Constant::getNullValue(Ty); // FALSE
12024 case 0x7:
12025 return llvm::Constant::getAllOnesValue(Ty); // TRUE
12026 default:
12027    llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
12028 }
12029
12030 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
12031 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
12032 return Res;
12033}
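// A standalone sketch (illustrative only) of the per-lane value the sign-extended
// compare above yields, using the signed "less than" encoding (immediate 0x0). The
// helper name vpcom_lt_lane is invented for this example.
#include <cstdint>
static int32_t vpcom_lt_lane(int32_t A, int32_t B) {
  return A < B ? -1 : 0;   // the i1 compare result is sign-extended to all-ones or zero
}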
12034
12035static Value *EmitX86Select(CodeGenFunction &CGF,
12036 Value *Mask, Value *Op0, Value *Op1) {
12037
12038  // If the mask is all ones, just return the first argument.
12039 if (const auto *C = dyn_cast<Constant>(Mask))
12040 if (C->isAllOnesValue())
12041 return Op0;
12042
12043 Mask = getMaskVecValue(
12044 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
12045
12046 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
12047}
12048
12049static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
12050 Value *Mask, Value *Op0, Value *Op1) {
12051  // If the mask is all ones, just return the first argument.
12052 if (const auto *C = dyn_cast<Constant>(Mask))
12053 if (C->isAllOnesValue())
12054 return Op0;
12055
12056 auto *MaskTy = llvm::FixedVectorType::get(
12057 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
12058 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
12059 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
12060 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
12061}
12062
12063static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
12064 unsigned NumElts, Value *MaskIn) {
12065 if (MaskIn) {
12066 const auto *C = dyn_cast<Constant>(MaskIn);
12067 if (!C || !C->isAllOnesValue())
12068 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
12069 }
12070
12071 if (NumElts < 8) {
12072 int Indices[8];
12073 for (unsigned i = 0; i != NumElts; ++i)
12074 Indices[i] = i;
12075 for (unsigned i = NumElts; i != 8; ++i)
12076 Indices[i] = i % NumElts + NumElts;
12077 Cmp = CGF.Builder.CreateShuffleVector(
12078 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
12079 }
12080
12081 return CGF.Builder.CreateBitCast(Cmp,
12082 IntegerType::get(CGF.getLLVMContext(),
12083 std::max(NumElts, 8U)));
12084}
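// A standalone sketch (illustrative only) of the integer value the final bitcast
// produces for a 4-lane compare: lane i becomes bit i, and the shuffle above pads
// the upper bits with zeros so the result is at least 8 bits wide. The helper name
// compare_result_mask4 is invented for this example.
#include <cstdint>
static uint8_t compare_result_mask4(const bool Lanes[4]) {
  uint8_t Mask = 0;
  for (unsigned I = 0; I != 4; ++I)
    if (Lanes[I])
      Mask |= uint8_t(1u << I);   // bits 4..7 stay zero, mirroring the zero-padding shuffle
  return Mask;
}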
12085
12086static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
12087 bool Signed, ArrayRef<Value *> Ops) {
12088  assert((Ops.size() == 2 || Ops.size() == 4) &&
12089         "Unexpected number of arguments");
12090 unsigned NumElts =
12091 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12092 Value *Cmp;
12093
12094 if (CC == 3) {
12095 Cmp = Constant::getNullValue(
12096 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
12097 } else if (CC == 7) {
12098 Cmp = Constant::getAllOnesValue(
12099 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
12100 } else {
12101 ICmpInst::Predicate Pred;
12102 switch (CC) {
12103    default: llvm_unreachable("Unknown condition code");
12104 case 0: Pred = ICmpInst::ICMP_EQ; break;
12105 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
12106 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
12107 case 4: Pred = ICmpInst::ICMP_NE; break;
12108 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
12109 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
12110 }
12111 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
12112 }
12113
12114 Value *MaskIn = nullptr;
12115 if (Ops.size() == 4)
12116 MaskIn = Ops[3];
12117
12118 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
12119}
12120
12121static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
12122 Value *Zero = Constant::getNullValue(In->getType());
12123 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
12124}
12125
12126static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
12127 ArrayRef<Value *> Ops, bool IsSigned) {
12128 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
12129 llvm::Type *Ty = Ops[1]->getType();
12130
12131 Value *Res;
12132 if (Rnd != 4) {
12133 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
12134 : Intrinsic::x86_avx512_uitofp_round;
12135 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
12136 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
12137 } else {
12138 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12139 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
12140 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
12141 }
12142
12143 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
12144}
12145
12146// Lowers X86 FMA intrinsics to IR.
12147static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
12148 ArrayRef<Value *> Ops, unsigned BuiltinID,
12149 bool IsAddSub) {
12150
12151 bool Subtract = false;
12152 Intrinsic::ID IID = Intrinsic::not_intrinsic;
12153 switch (BuiltinID) {
12154 default: break;
12155 case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
12156 Subtract = true;
12157    LLVM_FALLTHROUGH;
12158 case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
12159 case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
12160 case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
12161 IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
12162 break;
12163 case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12164 Subtract = true;
12165    LLVM_FALLTHROUGH;
12166 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
12167 case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
12168 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
12169 IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
12170 break;
12171 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
12172 Subtract = true;
12173    LLVM_FALLTHROUGH;
12174 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
12175 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
12176 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
12177 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
12178 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
12179 Subtract = true;
12180    LLVM_FALLTHROUGH;
12181 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
12182 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
12183 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
12184 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
12185 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12186 Subtract = true;
12187    LLVM_FALLTHROUGH;
12188 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
12189 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12190 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12191 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
12192 break;
12193 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12194 Subtract = true;
12195    LLVM_FALLTHROUGH;
12196 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12197 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12198 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12199 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
12200 break;
12201 }
12202
12203 Value *A = Ops[0];
12204 Value *B = Ops[1];
12205 Value *C = Ops[2];
12206
12207 if (Subtract)
12208 C = CGF.Builder.CreateFNeg(C);
12209
12210 Value *Res;
12211
12212 // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
12213 if (IID != Intrinsic::not_intrinsic &&
12214 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
12215 IsAddSub)) {
12216 Function *Intr = CGF.CGM.getIntrinsic(IID);
12217 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
12218 } else {
12219 llvm::Type *Ty = A->getType();
12220 Function *FMA;
12221 if (CGF.Builder.getIsFPConstrained()) {
12222 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12223 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
12224 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
12225 } else {
12226 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
12227 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
12228 }
12229 }
12230
12231 // Handle any required masking.
12232 Value *MaskFalseVal = nullptr;
12233 switch (BuiltinID) {
12234 case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
12235 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
12236 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
12237 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
12238 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
12239 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12240 MaskFalseVal = Ops[0];
12241 break;
12242 case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
12243 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
12244 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
12245 case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
12246 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12247 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12248 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
12249 break;
12250 case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
12251 case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
12252 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
12253 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
12254 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
12255 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
12256 case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12257 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
12258 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12259 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12260 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12261 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12262 MaskFalseVal = Ops[2];
12263 break;
12264 }
12265
12266 if (MaskFalseVal)
12267 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
12268
12269 return Res;
12270}
12271
12272static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
12273 MutableArrayRef<Value *> Ops, Value *Upper,
12274 bool ZeroMask = false, unsigned PTIdx = 0,
12275 bool NegAcc = false) {
12276 unsigned Rnd = 4;
12277 if (Ops.size() > 4)
12278 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
12279
12280 if (NegAcc)
12281 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
12282
12283 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
12284 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
12285 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
12286 Value *Res;
12287 if (Rnd != 4) {
12288 Intrinsic::ID IID;
12289
12290 switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
12291 case 16:
12292 IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
12293 break;
12294 case 32:
12295 IID = Intrinsic::x86_avx512_vfmadd_f32;
12296 break;
12297 case 64:
12298 IID = Intrinsic::x86_avx512_vfmadd_f64;
12299 break;
12300 default:
12301      llvm_unreachable("Unexpected size");
12302 }
12303 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
12304 {Ops[0], Ops[1], Ops[2], Ops[4]});
12305 } else if (CGF.Builder.getIsFPConstrained()) {
12306 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12307 Function *FMA = CGF.CGM.getIntrinsic(
12308 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
12309 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
12310 } else {
12311 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
12312 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
12313 }
12314 // If we have more than 3 arguments, we need to do masking.
12315 if (Ops.size() > 3) {
12316 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
12317 : Ops[PTIdx];
12318
12319    // If we negated the accumulator and it is the PassThru value, we need to
12320    // bypass the negate. Conveniently, Upper should be the same value in this
12321    // case.
12322 if (NegAcc && PTIdx == 2)
12323 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
12324
12325 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
12326 }
12327 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
12328}
12329
12330static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
12331 ArrayRef<Value *> Ops) {
12332 llvm::Type *Ty = Ops[0]->getType();
12333 // Arguments have a vXi32 type so cast to vXi64.
12334 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
12335 Ty->getPrimitiveSizeInBits() / 64);
12336 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
12337 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
12338
12339 if (IsSigned) {
12340 // Shift left then arithmetic shift right.
12341 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
12342 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
12343 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
12344 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
12345 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
12346 } else {
12347 // Clear the upper bits.
12348 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
12349 LHS = CGF.Builder.CreateAnd(LHS, Mask);
12350 RHS = CGF.Builder.CreateAnd(RHS, Mask);
12351 }
12352
12353 return CGF.Builder.CreateMul(LHS, RHS);
12354}
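// A standalone sketch (illustrative only) of the per-lane arithmetic the shift/mask
// sequence above implements: multiply the low 32 bits of each 64-bit lane, either
// sign- or zero-extended. The helper names are invented for this example.
#include <cstdint>
static int64_t pmuldq_lane(int64_t A, int64_t B) {
  int64_t LoA = static_cast<int32_t>(A);   // keep only the low 32 bits, sign-extended
  int64_t LoB = static_cast<int32_t>(B);
  return LoA * LoB;
}
static uint64_t pmuludq_lane(uint64_t A, uint64_t B) {
  return (A & 0xffffffffu) * (B & 0xffffffffu);   // clear the upper bits instead
}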
12355
12356// Emit a masked pternlog intrinsic. This only exists because the header has to
12357// use a macro and we aren't able to pass the input argument to a pternlog
12358// builtin and a select builtin without evaluating it twice.
12359static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
12360 ArrayRef<Value *> Ops) {
12361 llvm::Type *Ty = Ops[0]->getType();
12362
12363 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
12364 unsigned EltWidth = Ty->getScalarSizeInBits();
12365 Intrinsic::ID IID;
12366 if (VecWidth == 128 && EltWidth == 32)
12367 IID = Intrinsic::x86_avx512_pternlog_d_128;
12368 else if (VecWidth == 256 && EltWidth == 32)
12369 IID = Intrinsic::x86_avx512_pternlog_d_256;
12370 else if (VecWidth == 512 && EltWidth == 32)
12371 IID = Intrinsic::x86_avx512_pternlog_d_512;
12372 else if (VecWidth == 128 && EltWidth == 64)
12373 IID = Intrinsic::x86_avx512_pternlog_q_128;
12374 else if (VecWidth == 256 && EltWidth == 64)
12375 IID = Intrinsic::x86_avx512_pternlog_q_256;
12376 else if (VecWidth == 512 && EltWidth == 64)
12377 IID = Intrinsic::x86_avx512_pternlog_q_512;
12378 else
12379    llvm_unreachable("Unexpected intrinsic");
12380
12381 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
12382 Ops.drop_back());
12383 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
12384 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
12385}
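// A standalone sketch (illustrative only) of pternlog's per-bit semantics: at every
// bit position, the bits of A, B and C form a 3-bit index into the 8-bit immediate,
// which acts as a truth table. The helper name ternlog64 is invented for this example.
#include <cstdint>
static uint64_t ternlog64(uint64_t A, uint64_t B, uint64_t C, uint8_t Imm) {
  uint64_t Result = 0;
  for (unsigned Bit = 0; Bit != 64; ++Bit) {
    unsigned Idx =
        (((A >> Bit) & 1) << 2) | (((B >> Bit) & 1) << 1) | ((C >> Bit) & 1);
    Result |= static_cast<uint64_t>((Imm >> Idx) & 1) << Bit;   // truth-table lookup
  }
  return Result;
}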
12386
12387static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
12388 llvm::Type *DstTy) {
12389 unsigned NumberOfElements =
12390 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
12391 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
12392 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
12393}
12394
12395// Emit binary intrinsic with the same type used in result/args.
12396static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
12397 ArrayRef<Value *> Ops, Intrinsic::ID IID) {
12398 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
12399 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
12400}
12401
12402Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
12403 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
12404 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
12405 return EmitX86CpuIs(CPUStr);
12406}
12407
12408// Convert F16 halves to floats.
12409static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
12410 ArrayRef<Value *> Ops,
12411 llvm::Type *DstTy) {
12412  assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
12413         "Unknown cvtph2ps intrinsic");
12414
12415 // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
12416 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
12417 Function *F =
12418 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
12419 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
12420 }
12421
12422 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
12423 Value *Src = Ops[0];
12424
12425 // Extract the subvector.
12426 if (NumDstElts !=
12427 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
12428    assert(NumDstElts == 4 && "Unexpected vector size");
12429 Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
12430 }
12431
12432 // Bitcast from vXi16 to vXf16.
12433 auto *HalfTy = llvm::FixedVectorType::get(
12434 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
12435 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
12436
12437 // Perform the fp-extension.
12438 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
12439
12440 if (Ops.size() >= 3)
12441 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
12442 return Res;
12443}
12444
12445// Convert a BF16 to a float.
12446static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
12447 const CallExpr *E,
12448 ArrayRef<Value *> Ops) {
12449 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
12450 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
12451 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
12452 llvm::Type *ResultType = CGF.ConvertType(E->getType());
12453 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
12454 return BitCast;
12455}
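// A standalone sketch (illustrative only) of the scalar conversion above: a bfloat16
// value is the high 16 bits of an IEEE-754 single, so widening is a zero-extend, a
// 16-bit left shift, and a bit-preserving cast. The helper name is invented here.
#include <cstdint>
#include <cstring>
static float bf16_to_float(uint16_t BF16) {
  uint32_t Bits = static_cast<uint32_t>(BF16) << 16;   // place the payload in the high half
  float F;
  std::memcpy(&F, &Bits, sizeof(F));                   // bitcast without violating aliasing rules
  return F;
}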
12456
12457Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
12458
12459 llvm::Type *Int32Ty = Builder.getInt32Ty();
12460
12461 // Matching the struct layout from the compiler-rt/libgcc structure that is
12462 // filled in:
12463 // unsigned int __cpu_vendor;
12464 // unsigned int __cpu_type;
12465 // unsigned int __cpu_subtype;
12466 // unsigned int __cpu_features[1];
12467 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12468 llvm::ArrayType::get(Int32Ty, 1));
12469
12470 // Grab the global __cpu_model.
12471 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12472 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12473
12474 // Calculate the index needed to access the correct field based on the
12475 // range. Also adjust the expected value.
12476 unsigned Index;
12477 unsigned Value;
12478 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
12479#define X86_VENDOR(ENUM, STRING) \
12480 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
12481#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
12482 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
12483#define X86_CPU_TYPE(ENUM, STR) \
12484 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
12485#define X86_CPU_SUBTYPE(ENUM, STR) \
12486 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
12487#include "llvm/Support/X86TargetParser.def"
12488 .Default({0, 0});
12489  assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
12490
12491 // Grab the appropriate field from __cpu_model.
12492 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
12493 ConstantInt::get(Int32Ty, Index)};
12494 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
12495 CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
12496 CharUnits::fromQuantity(4));
12497
12498 // Check the value of the field against the requested value.
12499 return Builder.CreateICmpEQ(CpuValue,
12500 llvm::ConstantInt::get(Int32Ty, Value));
12501}
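// A standalone sketch (illustrative only) of the check the builtin lowers to, against
// the libgcc/compiler-rt __cpu_model layout described above. The struct and function
// names here are invented for this example.
struct CpuModelLayout {
  unsigned Vendor;       // __cpu_vendor   (field index 0)
  unsigned Type;         // __cpu_type     (field index 1)
  unsigned Subtype;      // __cpu_subtype  (field index 2)
  unsigned Features[1];  // __cpu_features (field index 3)
};
static bool CpuIs(const CpuModelLayout &M, unsigned Index, unsigned Value) {
  unsigned Field = Index == 0 ? M.Vendor
                 : Index == 1 ? M.Type
                              : M.Subtype;   // Index/Value come from the X86TargetParser tables
  return Field == Value;                     // same compare the emitted ICmpEQ performs
}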
12502
12503Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
12504 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
12505 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
12506 return EmitX86CpuSupports(FeatureStr);
12507}
12508
12509Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
12510 return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs));
12511}
12512
12513llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
12514 uint32_t Features1 = Lo_32(FeaturesMask);
12515 uint32_t Features2 = Hi_32(FeaturesMask);
12516
12517 Value *Result = Builder.getTrue();
12518
12519 if (Features1 != 0) {
12520 // Matching the struct layout from the compiler-rt/libgcc structure that is
12521 // filled in:
12522 // unsigned int __cpu_vendor;
12523 // unsigned int __cpu_type;
12524 // unsigned int __cpu_subtype;
12525 // unsigned int __cpu_features[1];
12526 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12527 llvm::ArrayType::get(Int32Ty, 1));
12528
12529 // Grab the global __cpu_model.
12530 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12531 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12532
12533    // Grab the first (0th) element of the __cpu_features field from the
12534    // __cpu_model global, using the struct type STy.
12535 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
12536 Builder.getInt32(0)};
12537 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
12538 Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
12539 CharUnits::fromQuantity(4));
12540
12541 // Check the value of the bit corresponding to the feature requested.
12542 Value *Mask = Builder.getInt32(Features1);
12543 Value *Bitset = Builder.CreateAnd(Features, Mask);
12544 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12545 Result = Builder.CreateAnd(Result, Cmp);
12546 }
12547
12548 if (Features2 != 0) {
12549 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
12550 "__cpu_features2");
12551 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
12552
12553 Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2,
12554 CharUnits::fromQuantity(4));
12555
12556 // Check the value of the bit corresponding to the feature requested.
12557 Value *Mask = Builder.getInt32(Features2);
12558 Value *Bitset = Builder.CreateAnd(Features, Mask);
12559 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12560 Result = Builder.CreateAnd(Result, Cmp);
12561 }
12562
12563 return Result;
12564}
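// A standalone sketch (illustrative only) of the per-word feature test emitted above:
// the builtin succeeds only if every requested feature bit is set in the corresponding
// __cpu_features word. The helper name HasAllFeatures is invented for this example.
#include <cstdint>
static bool HasAllFeatures(uint32_t FeaturesWord, uint32_t WantedMask) {
  return (FeaturesWord & WantedMask) == WantedMask;   // and-then-compare, as in the IR above
}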
12565
12566Value *CodeGenFunction::EmitX86CpuInit() {
12567 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
12568 /*Variadic*/ false);
12569 llvm::FunctionCallee Func =
12570 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
12571 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
12572 cast<llvm::GlobalValue>(Func.getCallee())
12573 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
12574 return Builder.CreateCall(Func);
12575}
12576
12577Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
12578 const CallExpr *E) {
12579 if (BuiltinID == X86::BI__builtin_cpu_is)
12580 return EmitX86CpuIs(E);
12581 if (BuiltinID == X86::BI__builtin_cpu_supports)
12582 return EmitX86CpuSupports(E);
12583 if (BuiltinID == X86::BI__builtin_cpu_init)
12584 return EmitX86CpuInit();
12585
12586 // Handle MSVC intrinsics before argument evaluation to prevent double
12587 // evaluation.
12588 if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
12589 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
12590
12591 SmallVector<Value*, 4> Ops;
12592 bool IsMaskFCmp = false;
12593 bool IsConjFMA = false;
12594
12595 // Find out if any arguments are required to be integer constant expressions.
12596 unsigned ICEArguments = 0;
12597 ASTContext::GetBuiltinTypeError Error;
12598 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
12599  assert(Error == ASTContext::GE_None && "Should not codegen an error");
12600
12601 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
12602 // If this is a normal argument, just emit it as a scalar.
12603 if ((ICEArguments & (1 << i)) == 0) {
12604 Ops.push_back(EmitScalarExpr(E->getArg(i)));
12605 continue;
12606 }
12607
12608 // If this is required to be a constant, constant fold it so that we know
12609 // that the generated intrinsic gets a ConstantInt.
12610 Ops.push_back(llvm::ConstantInt::get(
12611 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
12612 }
12613
12614 // These exist so that the builtin that takes an immediate can be bounds
12615 // checked by clang to avoid passing bad immediates to the backend. Since
12616  // AVX has a larger immediate than SSE, we would need separate builtins to
12617  // do the different bounds checking. Rather than create a clang-specific,
12618  // SSE-only builtin, this implements eight separate builtins to match the
12619  // gcc implementation.
12620 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
12621 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
12622 llvm::Function *F = CGM.getIntrinsic(ID);
12623 return Builder.CreateCall(F, Ops);
12624 };
12625
12626 // For the vector forms of FP comparisons, translate the builtins directly to
12627 // IR.
12628 // TODO: The builtins could be removed if the SSE header files used vector
12629 // extension comparisons directly (vector ordered/unordered may need
12630 // additional support via __builtin_isnan()).
12631 auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
12632 bool IsSignaling) {
12633 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
12634 Value *Cmp;
12635 if (IsSignaling)
12636 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
12637 else
12638 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
12639 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
12640 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
12641 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
12642 return Builder.CreateBitCast(Sext, FPVecTy);
12643 };
12644
12645 switch (BuiltinID) {
12646 default: return nullptr;
12647 case X86::BI_mm_prefetch: {
12648 Value *Address = Ops[0];
12649 ConstantInt *C = cast<ConstantInt>(Ops[1]);
12650 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
12651 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
12652 Value *Data = ConstantInt::get(Int32Ty, 1);
12653 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
12654 return Builder.CreateCall(F, {Address, RW, Locality, Data});
12655 }
12656 case X86::BI_mm_clflush: {
12657 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
12658 Ops[0]);
12659 }
12660 case X86::BI_mm_lfence: {
12661 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
12662 }
12663 case X86::BI_mm_mfence: {
12664 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
12665 }
12666 case X86::BI_mm_sfence: {
12667 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
12668 }
12669 case X86::BI_mm_pause: {
12670 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
12671 }
12672 case X86::BI__rdtsc: {
12673 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
12674 }
12675 case X86::BI__builtin_ia32_rdtscp: {
12676 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
12677 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
12678 Ops[0]);
12679 return Builder.CreateExtractValue(Call, 0);
12680 }
12681 case X86::BI__builtin_ia32_lzcnt_u16:
12682 case X86::BI__builtin_ia32_lzcnt_u32:
12683 case X86::BI__builtin_ia32_lzcnt_u64: {
12684 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
12685 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12686 }
12687 case X86::BI__builtin_ia32_tzcnt_u16:
12688 case X86::BI__builtin_ia32_tzcnt_u32:
12689 case X86::BI__builtin_ia32_tzcnt_u64: {
12690 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
12691 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12692 }
12693 case X86::BI__builtin_ia32_undef128:
12694 case X86::BI__builtin_ia32_undef256:
12695 case X86::BI__builtin_ia32_undef512:
12696 // The x86 definition of "undef" is not the same as the LLVM definition
12697 // (PR32176). We leave optimizing away an unnecessary zero constant to the
12698 // IR optimizer and backend.
12699 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
12700 // value, we should use that here instead of a zero.
12701 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12702 case X86::BI__builtin_ia32_vec_init_v8qi:
12703 case X86::BI__builtin_ia32_vec_init_v4hi:
12704 case X86::BI__builtin_ia32_vec_init_v2si:
12705 return Builder.CreateBitCast(BuildVector(Ops),
12706 llvm::Type::getX86_MMXTy(getLLVMContext()));
12707 case X86::BI__builtin_ia32_vec_ext_v2si:
12708 case X86::BI__builtin_ia32_vec_ext_v16qi:
12709 case X86::BI__builtin_ia32_vec_ext_v8hi:
12710 case X86::BI__builtin_ia32_vec_ext_v4si:
12711 case X86::BI__builtin_ia32_vec_ext_v4sf:
12712 case X86::BI__builtin_ia32_vec_ext_v2di:
12713 case X86::BI__builtin_ia32_vec_ext_v32qi:
12714 case X86::BI__builtin_ia32_vec_ext_v16hi:
12715 case X86::BI__builtin_ia32_vec_ext_v8si:
12716 case X86::BI__builtin_ia32_vec_ext_v4di: {
12717 unsigned NumElts =
12718 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12719 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12720 Index &= NumElts - 1;
12721 // These builtins exist so we can ensure the index is an ICE and in range.
12722 // Otherwise we could just do this in the header file.
12723 return Builder.CreateExtractElement(Ops[0], Index);
12724 }
12725 case X86::BI__builtin_ia32_vec_set_v16qi:
12726 case X86::BI__builtin_ia32_vec_set_v8hi:
12727 case X86::BI__builtin_ia32_vec_set_v4si:
12728 case X86::BI__builtin_ia32_vec_set_v2di:
12729 case X86::BI__builtin_ia32_vec_set_v32qi:
12730 case X86::BI__builtin_ia32_vec_set_v16hi:
12731 case X86::BI__builtin_ia32_vec_set_v8si:
12732 case X86::BI__builtin_ia32_vec_set_v4di: {
12733 unsigned NumElts =
12734 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12735 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12736 Index &= NumElts - 1;
12737 // These builtins exist so we can ensure the index is an ICE and in range.
12738 // Otherwise we could just do this in the header file.
12739 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
12740 }
12741 case X86::BI_mm_setcsr:
12742 case X86::BI__builtin_ia32_ldmxcsr: {
12743 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
12744 Builder.CreateStore(Ops[0], Tmp);
12745 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
12746 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12747 }
12748 case X86::BI_mm_getcsr:
12749 case X86::BI__builtin_ia32_stmxcsr: {
12750 Address Tmp = CreateMemTemp(E->getType());
12751 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
12752 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12753 return Builder.CreateLoad(Tmp, "stmxcsr");
12754 }
12755 case X86::BI__builtin_ia32_xsave:
12756 case X86::BI__builtin_ia32_xsave64:
12757 case X86::BI__builtin_ia32_xrstor:
12758 case X86::BI__builtin_ia32_xrstor64:
12759 case X86::BI__builtin_ia32_xsaveopt:
12760 case X86::BI__builtin_ia32_xsaveopt64:
12761 case X86::BI__builtin_ia32_xrstors:
12762 case X86::BI__builtin_ia32_xrstors64:
12763 case X86::BI__builtin_ia32_xsavec:
12764 case X86::BI__builtin_ia32_xsavec64:
12765 case X86::BI__builtin_ia32_xsaves:
12766 case X86::BI__builtin_ia32_xsaves64:
12767 case X86::BI__builtin_ia32_xsetbv:
12768 case X86::BI_xsetbv: {
12769 Intrinsic::ID ID;
12770#define INTRINSIC_X86_XSAVE_ID(NAME) \
12771 case X86::BI__builtin_ia32_##NAME: \
12772 ID = Intrinsic::x86_##NAME; \
12773 break
12774 switch (BuiltinID) {
12775    default: llvm_unreachable("Unsupported intrinsic!");
12776 INTRINSIC_X86_XSAVE_ID(xsave);
12777 INTRINSIC_X86_XSAVE_ID(xsave64);
12778 INTRINSIC_X86_XSAVE_ID(xrstor);
12779 INTRINSIC_X86_XSAVE_ID(xrstor64);
12780 INTRINSIC_X86_XSAVE_ID(xsaveopt);
12781 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
12782 INTRINSIC_X86_XSAVE_ID(xrstors);
12783 INTRINSIC_X86_XSAVE_ID(xrstors64);
12784 INTRINSIC_X86_XSAVE_ID(xsavec);
12785 INTRINSIC_X86_XSAVE_ID(xsavec64);
12786 INTRINSIC_X86_XSAVE_ID(xsaves);
12787 INTRINSIC_X86_XSAVE_ID(xsaves64);
12788 INTRINSIC_X86_XSAVE_ID(xsetbv);
12789 case X86::BI_xsetbv:
12790 ID = Intrinsic::x86_xsetbv;
12791 break;
12792 }
12793#undef INTRINSIC_X86_XSAVE_ID
12794 Value *Mhi = Builder.CreateTrunc(
12795 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
12796 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
12797 Ops[1] = Mhi;
12798 Ops.push_back(Mlo);
12799 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12800 }
12801 case X86::BI__builtin_ia32_xgetbv:
12802 case X86::BI_xgetbv:
12803 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
12804 case X86::BI__builtin_ia32_storedqudi128_mask:
12805 case X86::BI__builtin_ia32_storedqusi128_mask:
12806 case X86::BI__builtin_ia32_storedquhi128_mask:
12807 case X86::BI__builtin_ia32_storedquqi128_mask:
12808 case X86::BI__builtin_ia32_storeupd128_mask:
12809 case X86::BI__builtin_ia32_storeups128_mask:
12810 case X86::BI__builtin_ia32_storedqudi256_mask:
12811 case X86::BI__builtin_ia32_storedqusi256_mask:
12812 case X86::BI__builtin_ia32_storedquhi256_mask:
12813 case X86::BI__builtin_ia32_storedquqi256_mask:
12814 case X86::BI__builtin_ia32_storeupd256_mask:
12815 case X86::BI__builtin_ia32_storeups256_mask:
12816 case X86::BI__builtin_ia32_storedqudi512_mask:
12817 case X86::BI__builtin_ia32_storedqusi512_mask:
12818 case X86::BI__builtin_ia32_storedquhi512_mask:
12819 case X86::BI__builtin_ia32_storedquqi512_mask:
12820 case X86::BI__builtin_ia32_storeupd512_mask:
12821 case X86::BI__builtin_ia32_storeups512_mask:
12822 return EmitX86MaskedStore(*this, Ops, Align(1));
12823
12824 case X86::BI__builtin_ia32_storesh128_mask:
12825 case X86::BI__builtin_ia32_storess128_mask:
12826 case X86::BI__builtin_ia32_storesd128_mask:
12827 return EmitX86MaskedStore(*this, Ops, Align(1));
12828
12829 case X86::BI__builtin_ia32_vpopcntb_128:
12830 case X86::BI__builtin_ia32_vpopcntd_128:
12831 case X86::BI__builtin_ia32_vpopcntq_128:
12832 case X86::BI__builtin_ia32_vpopcntw_128:
12833 case X86::BI__builtin_ia32_vpopcntb_256:
12834 case X86::BI__builtin_ia32_vpopcntd_256:
12835 case X86::BI__builtin_ia32_vpopcntq_256:
12836 case X86::BI__builtin_ia32_vpopcntw_256:
12837 case X86::BI__builtin_ia32_vpopcntb_512:
12838 case X86::BI__builtin_ia32_vpopcntd_512:
12839 case X86::BI__builtin_ia32_vpopcntq_512:
12840 case X86::BI__builtin_ia32_vpopcntw_512: {
12841 llvm::Type *ResultType = ConvertType(E->getType());
12842 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12843 return Builder.CreateCall(F, Ops);
12844 }
12845 case X86::BI__builtin_ia32_cvtmask2b128:
12846 case X86::BI__builtin_ia32_cvtmask2b256:
12847 case X86::BI__builtin_ia32_cvtmask2b512:
12848 case X86::BI__builtin_ia32_cvtmask2w128:
12849 case X86::BI__builtin_ia32_cvtmask2w256:
12850 case X86::BI__builtin_ia32_cvtmask2w512:
12851 case X86::BI__builtin_ia32_cvtmask2d128:
12852 case X86::BI__builtin_ia32_cvtmask2d256:
12853 case X86::BI__builtin_ia32_cvtmask2d512:
12854 case X86::BI__builtin_ia32_cvtmask2q128:
12855 case X86::BI__builtin_ia32_cvtmask2q256:
12856 case X86::BI__builtin_ia32_cvtmask2q512:
12857 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12858
12859 case X86::BI__builtin_ia32_cvtb2mask128:
12860 case X86::BI__builtin_ia32_cvtb2mask256:
12861 case X86::BI__builtin_ia32_cvtb2mask512:
12862 case X86::BI__builtin_ia32_cvtw2mask128:
12863 case X86::BI__builtin_ia32_cvtw2mask256:
12864 case X86::BI__builtin_ia32_cvtw2mask512:
12865 case X86::BI__builtin_ia32_cvtd2mask128:
12866 case X86::BI__builtin_ia32_cvtd2mask256:
12867 case X86::BI__builtin_ia32_cvtd2mask512:
12868 case X86::BI__builtin_ia32_cvtq2mask128:
12869 case X86::BI__builtin_ia32_cvtq2mask256:
12870 case X86::BI__builtin_ia32_cvtq2mask512:
12871 return EmitX86ConvertToMask(*this, Ops[0]);
12872
12873 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12874 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12875 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12876 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
12877 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
12878 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
12879 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
12880 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12881 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12882 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12883 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
12884 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
12885 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
12886 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
12887
12888 case X86::BI__builtin_ia32_vfmaddss3:
12889 case X86::BI__builtin_ia32_vfmaddsd3:
12890 case X86::BI__builtin_ia32_vfmaddsh3_mask:
12891 case X86::BI__builtin_ia32_vfmaddss3_mask:
12892 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12893 return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
12894 case X86::BI__builtin_ia32_vfmaddss:
12895 case X86::BI__builtin_ia32_vfmaddsd:
12896 return EmitScalarFMAExpr(*this, E, Ops,
12897 Constant::getNullValue(Ops[0]->getType()));
12898 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
12899 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12900 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12901 return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
12902 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
12903 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12904 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12905 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
12906 case X86::BI__builtin_ia32_vfmsubsh3_mask3:
12907 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12908 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12909 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
12910 /*NegAcc*/ true);
12911 case X86::BI__builtin_ia32_vfmaddph:
12912 case X86::BI__builtin_ia32_vfmaddps:
12913 case X86::BI__builtin_ia32_vfmaddpd:
12914 case X86::BI__builtin_ia32_vfmaddph256:
12915 case X86::BI__builtin_ia32_vfmaddps256:
12916 case X86::BI__builtin_ia32_vfmaddpd256:
12917 case X86::BI__builtin_ia32_vfmaddph512_mask:
12918 case X86::BI__builtin_ia32_vfmaddph512_maskz:
12919 case X86::BI__builtin_ia32_vfmaddph512_mask3:
12920 case X86::BI__builtin_ia32_vfmaddps512_mask:
12921 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12922 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12923 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12924 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12925 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12926 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12927 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12928 case X86::BI__builtin_ia32_vfmsubph512_mask3:
12929 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
12930 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
12931 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
12932 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
12933 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12934 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12935 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12936 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12937 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12938 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12939 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12940 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12941 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12942 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
12943
12944 case X86::BI__builtin_ia32_movdqa32store128_mask:
12945 case X86::BI__builtin_ia32_movdqa64store128_mask:
12946 case X86::BI__builtin_ia32_storeaps128_mask:
12947 case X86::BI__builtin_ia32_storeapd128_mask:
12948 case X86::BI__builtin_ia32_movdqa32store256_mask:
12949 case X86::BI__builtin_ia32_movdqa64store256_mask:
12950 case X86::BI__builtin_ia32_storeaps256_mask:
12951 case X86::BI__builtin_ia32_storeapd256_mask:
12952 case X86::BI__builtin_ia32_movdqa32store512_mask:
12953 case X86::BI__builtin_ia32_movdqa64store512_mask:
12954 case X86::BI__builtin_ia32_storeaps512_mask:
12955 case X86::BI__builtin_ia32_storeapd512_mask:
12956 return EmitX86MaskedStore(
12957 *this, Ops,
12958 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12959
12960 case X86::BI__builtin_ia32_loadups128_mask:
12961 case X86::BI__builtin_ia32_loadups256_mask:
12962 case X86::BI__builtin_ia32_loadups512_mask:
12963 case X86::BI__builtin_ia32_loadupd128_mask:
12964 case X86::BI__builtin_ia32_loadupd256_mask:
12965 case X86::BI__builtin_ia32_loadupd512_mask:
12966 case X86::BI__builtin_ia32_loaddquqi128_mask:
12967 case X86::BI__builtin_ia32_loaddquqi256_mask:
12968 case X86::BI__builtin_ia32_loaddquqi512_mask:
12969 case X86::BI__builtin_ia32_loaddquhi128_mask:
12970 case X86::BI__builtin_ia32_loaddquhi256_mask:
12971 case X86::BI__builtin_ia32_loaddquhi512_mask:
12972 case X86::BI__builtin_ia32_loaddqusi128_mask:
12973 case X86::BI__builtin_ia32_loaddqusi256_mask:
12974 case X86::BI__builtin_ia32_loaddqusi512_mask:
12975 case X86::BI__builtin_ia32_loaddqudi128_mask:
12976 case X86::BI__builtin_ia32_loaddqudi256_mask:
12977 case X86::BI__builtin_ia32_loaddqudi512_mask:
12978 return EmitX86MaskedLoad(*this, Ops, Align(1));
12979
12980 case X86::BI__builtin_ia32_loadsh128_mask:
12981 case X86::BI__builtin_ia32_loadss128_mask:
12982 case X86::BI__builtin_ia32_loadsd128_mask:
12983 return EmitX86MaskedLoad(*this, Ops, Align(1));
12984
12985 case X86::BI__builtin_ia32_loadaps128_mask:
12986 case X86::BI__builtin_ia32_loadaps256_mask:
12987 case X86::BI__builtin_ia32_loadaps512_mask:
12988 case X86::BI__builtin_ia32_loadapd128_mask:
12989 case X86::BI__builtin_ia32_loadapd256_mask:
12990 case X86::BI__builtin_ia32_loadapd512_mask:
12991 case X86::BI__builtin_ia32_movdqa32load128_mask:
12992 case X86::BI__builtin_ia32_movdqa32load256_mask:
12993 case X86::BI__builtin_ia32_movdqa32load512_mask:
12994 case X86::BI__builtin_ia32_movdqa64load128_mask:
12995 case X86::BI__builtin_ia32_movdqa64load256_mask:
12996 case X86::BI__builtin_ia32_movdqa64load512_mask:
12997 return EmitX86MaskedLoad(
12998 *this, Ops,
12999 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
13000
13001 case X86::BI__builtin_ia32_expandloaddf128_mask:
13002 case X86::BI__builtin_ia32_expandloaddf256_mask:
13003 case X86::BI__builtin_ia32_expandloaddf512_mask:
13004 case X86::BI__builtin_ia32_expandloadsf128_mask:
13005 case X86::BI__builtin_ia32_expandloadsf256_mask:
13006 case X86::BI__builtin_ia32_expandloadsf512_mask:
13007 case X86::BI__builtin_ia32_expandloaddi128_mask:
13008 case X86::BI__builtin_ia32_expandloaddi256_mask:
13009 case X86::BI__builtin_ia32_expandloaddi512_mask:
13010 case X86::BI__builtin_ia32_expandloadsi128_mask:
13011 case X86::BI__builtin_ia32_expandloadsi256_mask:
13012 case X86::BI__builtin_ia32_expandloadsi512_mask:
13013 case X86::BI__builtin_ia32_expandloadhi128_mask:
13014 case X86::BI__builtin_ia32_expandloadhi256_mask:
13015 case X86::BI__builtin_ia32_expandloadhi512_mask:
13016 case X86::BI__builtin_ia32_expandloadqi128_mask:
13017 case X86::BI__builtin_ia32_expandloadqi256_mask:
13018 case X86::BI__builtin_ia32_expandloadqi512_mask:
13019 return EmitX86ExpandLoad(*this, Ops);
13020
13021 case X86::BI__builtin_ia32_compressstoredf128_mask:
13022 case X86::BI__builtin_ia32_compressstoredf256_mask:
13023 case X86::BI__builtin_ia32_compressstoredf512_mask:
13024 case X86::BI__builtin_ia32_compressstoresf128_mask:
13025 case X86::BI__builtin_ia32_compressstoresf256_mask:
13026 case X86::BI__builtin_ia32_compressstoresf512_mask:
13027 case X86::BI__builtin_ia32_compressstoredi128_mask:
13028 case X86::BI__builtin_ia32_compressstoredi256_mask:
13029 case X86::BI__builtin_ia32_compressstoredi512_mask:
13030 case X86::BI__builtin_ia32_compressstoresi128_mask:
13031 case X86::BI__builtin_ia32_compressstoresi256_mask:
13032 case X86::BI__builtin_ia32_compressstoresi512_mask:
13033 case X86::BI__builtin_ia32_compressstorehi128_mask:
13034 case X86::BI__builtin_ia32_compressstorehi256_mask:
13035 case X86::BI__builtin_ia32_compressstorehi512_mask:
13036 case X86::BI__builtin_ia32_compressstoreqi128_mask:
13037 case X86::BI__builtin_ia32_compressstoreqi256_mask:
13038 case X86::BI__builtin_ia32_compressstoreqi512_mask:
13039 return EmitX86CompressStore(*this, Ops);
13040
13041 case X86::BI__builtin_ia32_expanddf128_mask:
13042 case X86::BI__builtin_ia32_expanddf256_mask:
13043 case X86::BI__builtin_ia32_expanddf512_mask:
13044 case X86::BI__builtin_ia32_expandsf128_mask:
13045 case X86::BI__builtin_ia32_expandsf256_mask:
13046 case X86::BI__builtin_ia32_expandsf512_mask:
13047 case X86::BI__builtin_ia32_expanddi128_mask:
13048 case X86::BI__builtin_ia32_expanddi256_mask:
13049 case X86::BI__builtin_ia32_expanddi512_mask:
13050 case X86::BI__builtin_ia32_expandsi128_mask:
13051 case X86::BI__builtin_ia32_expandsi256_mask:
13052 case X86::BI__builtin_ia32_expandsi512_mask:
13053 case X86::BI__builtin_ia32_expandhi128_mask:
13054 case X86::BI__builtin_ia32_expandhi256_mask:
13055 case X86::BI__builtin_ia32_expandhi512_mask:
13056 case X86::BI__builtin_ia32_expandqi128_mask:
13057 case X86::BI__builtin_ia32_expandqi256_mask:
13058 case X86::BI__builtin_ia32_expandqi512_mask:
13059 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
13060
13061 case X86::BI__builtin_ia32_compressdf128_mask:
13062 case X86::BI__builtin_ia32_compressdf256_mask:
13063 case X86::BI__builtin_ia32_compressdf512_mask:
13064 case X86::BI__builtin_ia32_compresssf128_mask:
13065 case X86::BI__builtin_ia32_compresssf256_mask:
13066 case X86::BI__builtin_ia32_compresssf512_mask:
13067 case X86::BI__builtin_ia32_compressdi128_mask:
13068 case X86::BI__builtin_ia32_compressdi256_mask:
13069 case X86::BI__builtin_ia32_compressdi512_mask:
13070 case X86::BI__builtin_ia32_compresssi128_mask:
13071 case X86::BI__builtin_ia32_compresssi256_mask:
13072 case X86::BI__builtin_ia32_compresssi512_mask:
13073 case X86::BI__builtin_ia32_compresshi128_mask:
13074 case X86::BI__builtin_ia32_compresshi256_mask:
13075 case X86::BI__builtin_ia32_compresshi512_mask:
13076 case X86::BI__builtin_ia32_compressqi128_mask:
13077 case X86::BI__builtin_ia32_compressqi256_mask:
13078 case X86::BI__builtin_ia32_compressqi512_mask:
13079 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
13080
13081 case X86::BI__builtin_ia32_gather3div2df:
13082 case X86::BI__builtin_ia32_gather3div2di:
13083 case X86::BI__builtin_ia32_gather3div4df:
13084 case X86::BI__builtin_ia32_gather3div4di:
13085 case X86::BI__builtin_ia32_gather3div4sf:
13086 case X86::BI__builtin_ia32_gather3div4si:
13087 case X86::BI__builtin_ia32_gather3div8sf:
13088 case X86::BI__builtin_ia32_gather3div8si:
13089 case X86::BI__builtin_ia32_gather3siv2df:
13090 case X86::BI__builtin_ia32_gather3siv2di:
13091 case X86::BI__builtin_ia32_gather3siv4df:
13092 case X86::BI__builtin_ia32_gather3siv4di:
13093 case X86::BI__builtin_ia32_gather3siv4sf:
13094 case X86::BI__builtin_ia32_gather3siv4si:
13095 case X86::BI__builtin_ia32_gather3siv8sf:
13096 case X86::BI__builtin_ia32_gather3siv8si:
13097 case X86::BI__builtin_ia32_gathersiv8df:
13098 case X86::BI__builtin_ia32_gathersiv16sf:
13099 case X86::BI__builtin_ia32_gatherdiv8df:
13100 case X86::BI__builtin_ia32_gatherdiv16sf:
13101 case X86::BI__builtin_ia32_gathersiv8di:
13102 case X86::BI__builtin_ia32_gathersiv16si:
13103 case X86::BI__builtin_ia32_gatherdiv8di:
13104 case X86::BI__builtin_ia32_gatherdiv16si: {
13105 Intrinsic::ID IID;
13106 switch (BuiltinID) {
13107    default: llvm_unreachable("Unexpected builtin");
13108 case X86::BI__builtin_ia32_gather3div2df:
13109 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
13110 break;
13111 case X86::BI__builtin_ia32_gather3div2di:
13112 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
13113 break;
13114 case X86::BI__builtin_ia32_gather3div4df:
13115 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
13116 break;
13117 case X86::BI__builtin_ia32_gather3div4di:
13118 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
13119 break;
13120 case X86::BI__builtin_ia32_gather3div4sf:
13121 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
13122 break;
13123 case X86::BI__builtin_ia32_gather3div4si:
13124 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
13125 break;
13126 case X86::BI__builtin_ia32_gather3div8sf:
13127 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
13128 break;
13129 case X86::BI__builtin_ia32_gather3div8si:
13130 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
13131 break;
13132 case X86::BI__builtin_ia32_gather3siv2df:
13133 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
13134 break;
13135 case X86::BI__builtin_ia32_gather3siv2di:
13136 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
13137 break;
13138 case X86::BI__builtin_ia32_gather3siv4df:
13139 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
13140 break;
13141 case X86::BI__builtin_ia32_gather3siv4di:
13142 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
13143 break;
13144 case X86::BI__builtin_ia32_gather3siv4sf:
13145 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
13146 break;
13147 case X86::BI__builtin_ia32_gather3siv4si:
13148 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
13149 break;
13150 case X86::BI__builtin_ia32_gather3siv8sf:
13151 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
13152 break;
13153 case X86::BI__builtin_ia32_gather3siv8si:
13154 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
13155 break;
13156 case X86::BI__builtin_ia32_gathersiv8df:
13157 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
13158 break;
13159 case X86::BI__builtin_ia32_gathersiv16sf:
13160 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
13161 break;
13162 case X86::BI__builtin_ia32_gatherdiv8df:
13163 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
13164 break;
13165 case X86::BI__builtin_ia32_gatherdiv16sf:
13166 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
13167 break;
13168 case X86::BI__builtin_ia32_gathersiv8di:
13169 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
13170 break;
13171 case X86::BI__builtin_ia32_gathersiv16si:
13172 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
13173 break;
13174 case X86::BI__builtin_ia32_gatherdiv8di:
13175 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
13176 break;
13177 case X86::BI__builtin_ia32_gatherdiv16si:
13178 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
13179 break;
13180 }
13181
13182 unsigned MinElts = std::min(
13183 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
13184 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
13185 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
13186 Function *Intr = CGM.getIntrinsic(IID);
13187 return Builder.CreateCall(Intr, Ops);
13188 }
13189
13190 case X86::BI__builtin_ia32_scattersiv8df:
13191 case X86::BI__builtin_ia32_scattersiv16sf:
13192 case X86::BI__builtin_ia32_scatterdiv8df:
13193 case X86::BI__builtin_ia32_scatterdiv16sf:
13194 case X86::BI__builtin_ia32_scattersiv8di:
13195 case X86::BI__builtin_ia32_scattersiv16si:
13196 case X86::BI__builtin_ia32_scatterdiv8di:
13197 case X86::BI__builtin_ia32_scatterdiv16si:
13198 case X86::BI__builtin_ia32_scatterdiv2df:
13199 case X86::BI__builtin_ia32_scatterdiv2di:
13200 case X86::BI__builtin_ia32_scatterdiv4df:
13201 case X86::BI__builtin_ia32_scatterdiv4di:
13202 case X86::BI__builtin_ia32_scatterdiv4sf:
13203 case X86::BI__builtin_ia32_scatterdiv4si:
13204 case X86::BI__builtin_ia32_scatterdiv8sf:
13205 case X86::BI__builtin_ia32_scatterdiv8si:
13206 case X86::BI__builtin_ia32_scattersiv2df:
13207 case X86::BI__builtin_ia32_scattersiv2di:
13208 case X86::BI__builtin_ia32_scattersiv4df:
13209 case X86::BI__builtin_ia32_scattersiv4di:
13210 case X86::BI__builtin_ia32_scattersiv4sf:
13211 case X86::BI__builtin_ia32_scattersiv4si:
13212 case X86::BI__builtin_ia32_scattersiv8sf:
13213 case X86::BI__builtin_ia32_scattersiv8si: {
13214 Intrinsic::ID IID;
13215 switch (BuiltinID) {
13216    default: llvm_unreachable("Unexpected builtin");
13217 case X86::BI__builtin_ia32_scattersiv8df:
13218 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
13219 break;
13220 case X86::BI__builtin_ia32_scattersiv16sf:
13221 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
13222 break;
13223 case X86::BI__builtin_ia32_scatterdiv8df:
13224 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
13225 break;
13226 case X86::BI__builtin_ia32_scatterdiv16sf:
13227 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
13228 break;
13229 case X86::BI__builtin_ia32_scattersiv8di:
13230 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
13231 break;
13232 case X86::BI__builtin_ia32_scattersiv16si:
13233 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
13234 break;
13235 case X86::BI__builtin_ia32_scatterdiv8di:
13236 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
13237 break;
13238 case X86::BI__builtin_ia32_scatterdiv16si:
13239 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
13240 break;
13241 case X86::BI__builtin_ia32_scatterdiv2df:
13242 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
13243 break;
13244 case X86::BI__builtin_ia32_scatterdiv2di:
13245 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
13246 break;
13247 case X86::BI__builtin_ia32_scatterdiv4df:
13248 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
13249 break;
13250 case X86::BI__builtin_ia32_scatterdiv4di:
13251 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
13252 break;
13253 case X86::BI__builtin_ia32_scatterdiv4sf:
13254 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
13255 break;
13256 case X86::BI__builtin_ia32_scatterdiv4si:
13257 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
13258 break;
13259 case X86::BI__builtin_ia32_scatterdiv8sf:
13260 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
13261 break;
13262 case X86::BI__builtin_ia32_scatterdiv8si:
13263 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
13264 break;
13265 case X86::BI__builtin_ia32_scattersiv2df:
13266 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
13267 break;
13268 case X86::BI__builtin_ia32_scattersiv2di:
13269 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
13270 break;
13271 case X86::BI__builtin_ia32_scattersiv4df:
13272 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
13273 break;
13274 case X86::BI__builtin_ia32_scattersiv4di:
13275 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
13276 break;
13277 case X86::BI__builtin_ia32_scattersiv4sf:
13278 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
13279 break;
13280 case X86::BI__builtin_ia32_scattersiv4si:
13281 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
13282 break;
13283 case X86::BI__builtin_ia32_scattersiv8sf:
13284 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
13285 break;
13286 case X86::BI__builtin_ia32_scattersiv8si:
13287 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
13288 break;
13289 }
13290
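// As with the gathers above, the mask is rebuilt as a vXi1 vector sized to the
// narrower of the index vector (Ops[2]) and the source-data vector (Ops[3]).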
13291 unsigned MinElts = std::min(
13292 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
13293 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
13294 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
13295 Function *Intr = CGM.getIntrinsic(IID);
13296 return Builder.CreateCall(Intr, Ops);
13297 }
13298
13299 case X86::BI__builtin_ia32_vextractf128_pd256:
13300 case X86::BI__builtin_ia32_vextractf128_ps256:
13301 case X86::BI__builtin_ia32_vextractf128_si256:
13302 case X86::BI__builtin_ia32_extract128i256:
13303 case X86::BI__builtin_ia32_extractf64x4_mask:
13304 case X86::BI__builtin_ia32_extractf32x4_mask:
13305 case X86::BI__builtin_ia32_extracti64x4_mask:
13306 case X86::BI__builtin_ia32_extracti32x4_mask:
13307 case X86::BI__builtin_ia32_extractf32x8_mask:
13308 case X86::BI__builtin_ia32_extracti32x8_mask:
13309 case X86::BI__builtin_ia32_extractf32x4_256_mask:
13310 case X86::BI__builtin_ia32_extracti32x4_256_mask:
13311 case X86::BI__builtin_ia32_extractf64x2_256_mask:
13312 case X86::BI__builtin_ia32_extracti64x2_256_mask:
13313 case X86::BI__builtin_ia32_extractf64x2_512_mask:
13314 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
13315 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
13316 unsigned NumElts = DstTy->getNumElements();
13317 unsigned SrcNumElts =
13318 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13319 unsigned SubVectors = SrcNumElts / NumElts;
13320 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
13321 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13322 Index &= SubVectors - 1; // Remove any extra bits.
13323 Index *= NumElts;
13324
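// Extraction is lowered as a shuffle that copies the selected NumElts-wide
// subvector of Ops[0] into the destination.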
13325 int Indices[16];
13326 for (unsigned i = 0; i != NumElts; ++i)
13327 Indices[i] = i + Index;
13328
13329 Value *Res = Builder.CreateShuffleVector(Ops[0],
13330 makeArrayRef(Indices, NumElts),
13331 "extract");
13332
13333 if (Ops.size() == 4)
13334 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
13335
13336 return Res;
13337 }
13338 case X86::BI__builtin_ia32_vinsertf128_pd256:
13339 case X86::BI__builtin_ia32_vinsertf128_ps256:
13340 case X86::BI__builtin_ia32_vinsertf128_si256:
13341 case X86::BI__builtin_ia32_insert128i256:
13342 case X86::BI__builtin_ia32_insertf64x4:
13343 case X86::BI__builtin_ia32_insertf32x4:
13344 case X86::BI__builtin_ia32_inserti64x4:
13345 case X86::BI__builtin_ia32_inserti32x4:
13346 case X86::BI__builtin_ia32_insertf32x8:
13347 case X86::BI__builtin_ia32_inserti32x8:
13348 case X86::BI__builtin_ia32_insertf32x4_256:
13349 case X86::BI__builtin_ia32_inserti32x4_256:
13350 case X86::BI__builtin_ia32_insertf64x2_256:
13351 case X86::BI__builtin_ia32_inserti64x2_256:
13352 case X86::BI__builtin_ia32_insertf64x2_512:
13353 case X86::BI__builtin_ia32_inserti64x2_512: {
13354 unsigned DstNumElts =
13355 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13356 unsigned SrcNumElts =
13357 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
13358 unsigned SubVectors = DstNumElts / SrcNumElts;
13359 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
13360 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13361 Index &= SubVectors - 1; // Remove any extra bits.
13362 Index *= SrcNumElts;
13363
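// Insertion is done in two shuffles: first widen the smaller Ops[1] to the
// destination width, then blend it over Ops[0] at the chosen subvector slot.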
13364 int Indices[16];
13365 for (unsigned i = 0; i != DstNumElts; ++i)
13366 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
13367
13368 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
13369 makeArrayRef(Indices, DstNumElts),
13370 "widen");
13371
13372 for (unsigned i = 0; i != DstNumElts; ++i) {
13373 if (i >= Index && i < (Index + SrcNumElts))
13374 Indices[i] = (i - Index) + DstNumElts;
13375 else
13376 Indices[i] = i;
13377 }
13378
13379 return Builder.CreateShuffleVector(Ops[0], Op1,
13380 makeArrayRef(Indices, DstNumElts),
13381 "insert");
13382 }
13383 case X86::BI__builtin_ia32_pmovqd512_mask:
13384 case X86::BI__builtin_ia32_pmovwb512_mask: {
13385 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
13386 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
13387 }
13388 case X86::BI__builtin_ia32_pmovdb512_mask:
13389 case X86::BI__builtin_ia32_pmovdw512_mask:
13390 case X86::BI__builtin_ia32_pmovqw512_mask: {
13391 if (const auto *C = dyn_cast<Constant>(Ops[2]))
13392 if (C->isAllOnesValue())
13393 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
13394
13395 Intrinsic::ID IID;
13396 switch (BuiltinID) {
13397 default: llvm_unreachable("Unsupported intrinsic!");
13398 case X86::BI__builtin_ia32_pmovdb512_mask:
13399 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
13400 break;
13401 case X86::BI__builtin_ia32_pmovdw512_mask:
13402 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
13403 break;
13404 case X86::BI__builtin_ia32_pmovqw512_mask:
13405 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
13406 break;
13407 }
13408
13409 Function *Intr = CGM.getIntrinsic(IID);
13410 return Builder.CreateCall(Intr, Ops);
13411 }
13412 case X86::BI__builtin_ia32_pblendw128:
13413 case X86::BI__builtin_ia32_blendpd:
13414 case X86::BI__builtin_ia32_blendps:
13415 case X86::BI__builtin_ia32_blendpd256:
13416 case X86::BI__builtin_ia32_blendps256:
13417 case X86::BI__builtin_ia32_pblendw256:
13418 case X86::BI__builtin_ia32_pblendd128:
13419 case X86::BI__builtin_ia32_pblendd256: {
13420 unsigned NumElts =
13421 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13422 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13423
13424 int Indices[16];
13425 // If there are more than 8 elements, the immediate is used twice so make
13426 // sure we handle that.
13427 for (unsigned i = 0; i != NumElts; ++i)
13428 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
13429
13430 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13431 makeArrayRef(Indices, NumElts),
13432 "blend");
13433 }
13434 case X86::BI__builtin_ia32_pshuflw:
13435 case X86::BI__builtin_ia32_pshuflw256:
13436 case X86::BI__builtin_ia32_pshuflw512: {
13437 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13438 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13439 unsigned NumElts = Ty->getNumElements();
13440
13441 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
13442 Imm = (Imm & 0xff) * 0x01010101;
13443
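// Only the low four 16-bit elements of each 128-bit lane are permuted by the
// immediate; the high four elements are passed through unchanged.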
13444 int Indices[32];
13445 for (unsigned l = 0; l != NumElts; l += 8) {
13446 for (unsigned i = 0; i != 4; ++i) {
13447 Indices[l + i] = l + (Imm & 3);
13448 Imm >>= 2;
13449 }
13450 for (unsigned i = 4; i != 8; ++i)
13451 Indices[l + i] = l + i;
13452 }
13453
13454 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13455 "pshuflw");
13456 }
13457 case X86::BI__builtin_ia32_pshufhw:
13458 case X86::BI__builtin_ia32_pshufhw256:
13459 case X86::BI__builtin_ia32_pshufhw512: {
13460 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13461 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13462 unsigned NumElts = Ty->getNumElements();
13463
13464 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
13465 Imm = (Imm & 0xff) * 0x01010101;
13466
13467 int Indices[32];
13468 for (unsigned l = 0; l != NumElts; l += 8) {
13469 for (unsigned i = 0; i != 4; ++i)
13470 Indices[l + i] = l + i;
13471 for (unsigned i = 4; i != 8; ++i) {
13472 Indices[l + i] = l + 4 + (Imm & 3);
13473 Imm >>= 2;
13474 }
13475 }
13476
13477 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13478 "pshufhw");
13479 }
13480 case X86::BI__builtin_ia32_pshufd:
13481 case X86::BI__builtin_ia32_pshufd256:
13482 case X86::BI__builtin_ia32_pshufd512:
13483 case X86::BI__builtin_ia32_vpermilpd:
13484 case X86::BI__builtin_ia32_vpermilps:
13485 case X86::BI__builtin_ia32_vpermilpd256:
13486 case X86::BI__builtin_ia32_vpermilps256:
13487 case X86::BI__builtin_ia32_vpermilpd512:
13488 case X86::BI__builtin_ia32_vpermilps512: {
13489 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13490 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13491 unsigned NumElts = Ty->getNumElements();
13492 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13493 unsigned NumLaneElts = NumElts / NumLanes;
13494
13495 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
13496 Imm = (Imm & 0xff) * 0x01010101;
13497
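// Each field of the replicated immediate picks a source element from within
// the same 128-bit lane, so the shuffle never crosses lanes.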
13498 int Indices[16];
13499 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13500 for (unsigned i = 0; i != NumLaneElts; ++i) {
13501 Indices[i + l] = (Imm % NumLaneElts) + l;
13502 Imm /= NumLaneElts;
13503 }
13504 }
13505
13506 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13507 "permil");
13508 }
13509 case X86::BI__builtin_ia32_shufpd:
13510 case X86::BI__builtin_ia32_shufpd256:
13511 case X86::BI__builtin_ia32_shufpd512:
13512 case X86::BI__builtin_ia32_shufps:
13513 case X86::BI__builtin_ia32_shufps256:
13514 case X86::BI__builtin_ia32_shufps512: {
13515 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13516 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13517 unsigned NumElts = Ty->getNumElements();
13518 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13519 unsigned NumLaneElts = NumElts / NumLanes;
13520
13521 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
13522 Imm = (Imm & 0xff) * 0x01010101;
13523
13524 int Indices[16];
13525 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13526 for (unsigned i = 0; i != NumLaneElts; ++i) {
13527 unsigned Index = Imm % NumLaneElts;
13528 Imm /= NumLaneElts;
13529 if (i >= (NumLaneElts / 2))
13530 Index += NumElts;
13531 Indices[l + i] = l + Index;
13532 }
13533 }
13534
13535 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13536 makeArrayRef(Indices, NumElts),
13537 "shufp");
13538 }
13539 case X86::BI__builtin_ia32_permdi256:
13540 case X86::BI__builtin_ia32_permdf256:
13541 case X86::BI__builtin_ia32_permdi512:
13542 case X86::BI__builtin_ia32_permdf512: {
13543 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13544 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13545 unsigned NumElts = Ty->getNumElements();
13546
13547 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
13548 int Indices[8];
13549 for (unsigned l = 0; l != NumElts; l += 4)
13550 for (unsigned i = 0; i != 4; ++i)
13551 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
13552
13553 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13554 "perm");
13555 }
13556 case X86::BI__builtin_ia32_palignr128:
13557 case X86::BI__builtin_ia32_palignr256:
13558 case X86::BI__builtin_ia32_palignr512: {
13559 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13560
13561 unsigned NumElts =
13562 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13563 assert(NumElts % 16 == 0);
13564
13565 // If palignr is shifting the pair of vectors more than the size of two
13566 // lanes, emit zero.
13567 if (ShiftVal >= 32)
13568 return llvm::Constant::getNullValue(ConvertType(E->getType()));
13569
13570 // If palignr is shifting the pair of input vectors more than one lane,
13571 // but less than two lanes, convert to shifting in zeroes.
13572 if (ShiftVal > 16) {
13573 ShiftVal -= 16;
13574 Ops[1] = Ops[0];
13575 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
13576 }
13577
13578 int Indices[64];
13579 // 256-bit palignr operates on 128-bit lanes so we need to handle that
13580 for (unsigned l = 0; l != NumElts; l += 16) {
13581 for (unsigned i = 0; i != 16; ++i) {
13582 unsigned Idx = ShiftVal + i;
13583 if (Idx >= 16)
13584 Idx += NumElts - 16; // End of lane, switch operand.
13585 Indices[l + i] = Idx + l;
13586 }
13587 }
13588
13589 return Builder.CreateShuffleVector(Ops[1], Ops[0],
13590 makeArrayRef(Indices, NumElts),
13591 "palignr");
13592 }
13593 case X86::BI__builtin_ia32_alignd128:
13594 case X86::BI__builtin_ia32_alignd256:
13595 case X86::BI__builtin_ia32_alignd512:
13596 case X86::BI__builtin_ia32_alignq128:
13597 case X86::BI__builtin_ia32_alignq256:
13598 case X86::BI__builtin_ia32_alignq512: {
13599 unsigned NumElts =
13600 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13601 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13602
13603 // Mask the shift amount to width of a vector.
13604 ShiftVal &= NumElts - 1;
13605
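// The shuffle reads the concatenation {Ops[1], Ops[0]} starting at element
// ShiftVal, i.e. the pair is shifted right by ShiftVal elements and the low
// NumElts elements are kept.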
13606 int Indices[16];
13607 for (unsigned i = 0; i != NumElts; ++i)
13608 Indices[i] = i + ShiftVal;
13609
13610 return Builder.CreateShuffleVector(Ops[1], Ops[0],
13611 makeArrayRef(Indices, NumElts),
13612 "valign");
13613 }
13614 case X86::BI__builtin_ia32_shuf_f32x4_256:
13615 case X86::BI__builtin_ia32_shuf_f64x2_256:
13616 case X86::BI__builtin_ia32_shuf_i32x4_256:
13617 case X86::BI__builtin_ia32_shuf_i64x2_256:
13618 case X86::BI__builtin_ia32_shuf_f32x4:
13619 case X86::BI__builtin_ia32_shuf_f64x2:
13620 case X86::BI__builtin_ia32_shuf_i32x4:
13621 case X86::BI__builtin_ia32_shuf_i64x2: {
13622 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13623 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13624 unsigned NumElts = Ty->getNumElements();
13625 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
13626 unsigned NumLaneElts = NumElts / NumLanes;
13627
13628 int Indices[16];
13629 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13630 unsigned Index = (Imm % NumLanes) * NumLaneElts;
13631 Imm /= NumLanes; // Discard the bits we just used.
13632 if (l >= (NumElts / 2))
13633 Index += NumElts; // Switch to other source.
13634 for (unsigned i = 0; i != NumLaneElts; ++i) {
13635 Indices[l + i] = Index + i;
13636 }
13637 }
13638
13639 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13640 makeArrayRef(Indices, NumElts),
13641 "shuf");
13642 }
13643
13644 case X86::BI__builtin_ia32_vperm2f128_pd256:
13645 case X86::BI__builtin_ia32_vperm2f128_ps256:
13646 case X86::BI__builtin_ia32_vperm2f128_si256:
13647 case X86::BI__builtin_ia32_permti256: {
13648 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13649 unsigned NumElts =
13650 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13651
13652 // This takes a very simple approach since there are two lanes and a
13653 // shuffle can have 2 inputs. So we reserve the first input for the first
13654 // lane and the second input for the second lane. This may result in
13655 // duplicate sources, but this can be dealt with in the backend.
13656
13657 Value *OutOps[2];
13658 int Indices[8];
13659 for (unsigned l = 0; l != 2; ++l) {
13660 // Determine the source for this lane.
13661 if (Imm & (1 << ((l * 4) + 3)))
13662 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
13663 else if (Imm & (1 << ((l * 4) + 1)))
13664 OutOps[l] = Ops[1];
13665 else
13666 OutOps[l] = Ops[0];
13667
13668 for (unsigned i = 0; i != NumElts/2; ++i) {
13669 // Start with ith element of the source for this lane.
13670 unsigned Idx = (l * NumElts) + i;
13671 // If bit 0 of the immediate half is set, switch to the high half of
13672 // the source.
13673 if (Imm & (1 << (l * 4)))
13674 Idx += NumElts/2;
13675 Indices[(l * (NumElts/2)) + i] = Idx;
13676 }
13677 }
13678
13679 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
13680 makeArrayRef(Indices, NumElts),
13681 "vperm");
13682 }
13683
13684 case X86::BI__builtin_ia32_pslldqi128_byteshift:
13685 case X86::BI__builtin_ia32_pslldqi256_byteshift:
13686 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
13687 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13688 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13689 // Builtin type is vXi64 so multiply by 8 to get bytes.
13690 unsigned NumElts = ResultType->getNumElements() * 8;
13691
13692 // If pslldq is shifting the vector more than 15 bytes, emit zero.
13693 if (ShiftVal >= 16)
13694 return llvm::Constant::getNullValue(ResultType);
13695
13696 int Indices[64];
13697 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
13698 for (unsigned l = 0; l != NumElts; l += 16) {
13699 for (unsigned i = 0; i != 16; ++i) {
13700 unsigned Idx = NumElts + i - ShiftVal;
13701 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
13702 Indices[l + i] = Idx + l;
13703 }
13704 }
13705
13706 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13707 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13708 Value *Zero = llvm::Constant::getNullValue(VecTy);
13709 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
13710 makeArrayRef(Indices, NumElts),
13711 "pslldq");
13712 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
13713 }
13714 case X86::BI__builtin_ia32_psrldqi128_byteshift:
13715 case X86::BI__builtin_ia32_psrldqi256_byteshift:
13716 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
13717 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13718 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13719 // Builtin type is vXi64 so multiply by 8 to get bytes.
13720 unsigned NumElts = ResultType->getNumElements() * 8;
13721
13722 // If psrldq is shifting the vector more than 15 bytes, emit zero.
13723 if (ShiftVal >= 16)
13724 return llvm::Constant::getNullValue(ResultType);
13725
13726 int Indices[64];
13727 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
13728 for (unsigned l = 0; l != NumElts; l += 16) {
13729 for (unsigned i = 0; i != 16; ++i) {
13730 unsigned Idx = i + ShiftVal;
13731 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
13732 Indices[l + i] = Idx + l;
13733 }
13734 }
13735
13736 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13737 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13738 Value *Zero = llvm::Constant::getNullValue(VecTy);
13739 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
13740 makeArrayRef(Indices, NumElts),
13741 "psrldq");
13742 return Builder.CreateBitCast(SV, ResultType, "cast");
13743 }
13744 case X86::BI__builtin_ia32_kshiftliqi:
13745 case X86::BI__builtin_ia32_kshiftlihi:
13746 case X86::BI__builtin_ia32_kshiftlisi:
13747 case X86::BI__builtin_ia32_kshiftlidi: {
13748 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13749 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13750
13751 if (ShiftVal >= NumElts)
13752 return llvm::Constant::getNullValue(Ops[0]->getType());
13753
13754 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13755
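// Shuffling {Zero, In} with these indices shifts the mask bits left by
// ShiftVal, filling the vacated low elements from the zero vector.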
13756 int Indices[64];
13757 for (unsigned i = 0; i != NumElts; ++i)
13758 Indices[i] = NumElts + i - ShiftVal;
13759
13760 Value *Zero = llvm::Constant::getNullValue(In->getType());
13761 Value *SV = Builder.CreateShuffleVector(Zero, In,
13762 makeArrayRef(Indices, NumElts),
13763 "kshiftl");
13764 return Builder.CreateBitCast(SV, Ops[0]->getType());
13765 }
13766 case X86::BI__builtin_ia32_kshiftriqi:
13767 case X86::BI__builtin_ia32_kshiftrihi:
13768 case X86::BI__builtin_ia32_kshiftrisi:
13769 case X86::BI__builtin_ia32_kshiftridi: {
13770 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13771 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13772
13773 if (ShiftVal >= NumElts)
13774 return llvm::Constant::getNullValue(Ops[0]->getType());
13775
13776 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13777
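// Shuffling {In, Zero} with these indices shifts the mask bits right by
// ShiftVal, filling the vacated high elements from the zero vector.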
13778 int Indices[64];
13779 for (unsigned i = 0; i != NumElts; ++i)
13780 Indices[i] = i + ShiftVal;
13781
13782 Value *Zero = llvm::Constant::getNullValue(In->getType());
13783 Value *SV = Builder.CreateShuffleVector(In, Zero,
13784 makeArrayRef(Indices, NumElts),
13785 "kshiftr");
13786 return Builder.CreateBitCast(SV, Ops[0]->getType());
13787 }
13788 case X86::BI__builtin_ia32_movnti:
13789 case X86::BI__builtin_ia32_movnti64:
13790 case X86::BI__builtin_ia32_movntsd:
13791 case X86::BI__builtin_ia32_movntss: {
13792 llvm::MDNode *Node = llvm::MDNode::get(
13793 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
13794
13795 Value *Ptr = Ops[0];
13796 Value *Src = Ops[1];
13797
13798 // Extract the 0'th element of the source vector.
13799 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
13800 BuiltinID == X86::BI__builtin_ia32_movntss)
13801 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
13802
13803 // Convert the type of the pointer to a pointer to the stored type.
13804 Value *BC = Builder.CreateBitCast(
13805 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
13806
13807 // Unaligned nontemporal store of the scalar value.
13808 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
13809 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
13810 SI->setAlignment(llvm::Align(1));
13811 return SI;
13812 }
13813 // Rotate is a special case of funnel shift - 1st 2 args are the same.
13814 case X86::BI__builtin_ia32_vprotb:
13815 case X86::BI__builtin_ia32_vprotw:
13816 case X86::BI__builtin_ia32_vprotd:
13817 case X86::BI__builtin_ia32_vprotq:
13818 case X86::BI__builtin_ia32_vprotbi:
13819 case X86::BI__builtin_ia32_vprotwi:
13820 case X86::BI__builtin_ia32_vprotdi:
13821 case X86::BI__builtin_ia32_vprotqi:
13822 case X86::BI__builtin_ia32_prold128:
13823 case X86::BI__builtin_ia32_prold256:
13824 case X86::BI__builtin_ia32_prold512:
13825 case X86::BI__builtin_ia32_prolq128:
13826 case X86::BI__builtin_ia32_prolq256:
13827 case X86::BI__builtin_ia32_prolq512:
13828 case X86::BI__builtin_ia32_prolvd128:
13829 case X86::BI__builtin_ia32_prolvd256:
13830 case X86::BI__builtin_ia32_prolvd512:
13831 case X86::BI__builtin_ia32_prolvq128:
13832 case X86::BI__builtin_ia32_prolvq256:
13833 case X86::BI__builtin_ia32_prolvq512:
13834 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
13835 case X86::BI__builtin_ia32_prord128:
13836 case X86::BI__builtin_ia32_prord256:
13837 case X86::BI__builtin_ia32_prord512:
13838 case X86::BI__builtin_ia32_prorq128:
13839 case X86::BI__builtin_ia32_prorq256:
13840 case X86::BI__builtin_ia32_prorq512:
13841 case X86::BI__builtin_ia32_prorvd128:
13842 case X86::BI__builtin_ia32_prorvd256:
13843 case X86::BI__builtin_ia32_prorvd512:
13844 case X86::BI__builtin_ia32_prorvq128:
13845 case X86::BI__builtin_ia32_prorvq256:
13846 case X86::BI__builtin_ia32_prorvq512:
13847 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
13848 case X86::BI__builtin_ia32_selectb_128:
13849 case X86::BI__builtin_ia32_selectb_256:
13850 case X86::BI__builtin_ia32_selectb_512:
13851 case X86::BI__builtin_ia32_selectw_128:
13852 case X86::BI__builtin_ia32_selectw_256:
13853 case X86::BI__builtin_ia32_selectw_512:
13854 case X86::BI__builtin_ia32_selectd_128:
13855 case X86::BI__builtin_ia32_selectd_256:
13856 case X86::BI__builtin_ia32_selectd_512:
13857 case X86::BI__builtin_ia32_selectq_128:
13858 case X86::BI__builtin_ia32_selectq_256:
13859 case X86::BI__builtin_ia32_selectq_512:
13860 case X86::BI__builtin_ia32_selectph_128:
13861 case X86::BI__builtin_ia32_selectph_256:
13862 case X86::BI__builtin_ia32_selectph_512:
13863 case X86::BI__builtin_ia32_selectps_128:
13864 case X86::BI__builtin_ia32_selectps_256:
13865 case X86::BI__builtin_ia32_selectps_512:
13866 case X86::BI__builtin_ia32_selectpd_128:
13867 case X86::BI__builtin_ia32_selectpd_256:
13868 case X86::BI__builtin_ia32_selectpd_512:
13869 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13870 case X86::BI__builtin_ia32_selectsh_128:
13871 case X86::BI__builtin_ia32_selectss_128:
13872 case X86::BI__builtin_ia32_selectsd_128: {
13873 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13874 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13875 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13876 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13877 }
13878 case X86::BI__builtin_ia32_cmpb128_mask:
13879 case X86::BI__builtin_ia32_cmpb256_mask:
13880 case X86::BI__builtin_ia32_cmpb512_mask:
13881 case X86::BI__builtin_ia32_cmpw128_mask:
13882 case X86::BI__builtin_ia32_cmpw256_mask:
13883 case X86::BI__builtin_ia32_cmpw512_mask:
13884 case X86::BI__builtin_ia32_cmpd128_mask:
13885 case X86::BI__builtin_ia32_cmpd256_mask:
13886 case X86::BI__builtin_ia32_cmpd512_mask:
13887 case X86::BI__builtin_ia32_cmpq128_mask:
13888 case X86::BI__builtin_ia32_cmpq256_mask:
13889 case X86::BI__builtin_ia32_cmpq512_mask: {
13890 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13891 return EmitX86MaskedCompare(*this, CC, true, Ops);
13892 }
13893 case X86::BI__builtin_ia32_ucmpb128_mask:
13894 case X86::BI__builtin_ia32_ucmpb256_mask:
13895 case X86::BI__builtin_ia32_ucmpb512_mask:
13896 case X86::BI__builtin_ia32_ucmpw128_mask:
13897 case X86::BI__builtin_ia32_ucmpw256_mask:
13898 case X86::BI__builtin_ia32_ucmpw512_mask:
13899 case X86::BI__builtin_ia32_ucmpd128_mask:
13900 case X86::BI__builtin_ia32_ucmpd256_mask:
13901 case X86::BI__builtin_ia32_ucmpd512_mask:
13902 case X86::BI__builtin_ia32_ucmpq128_mask:
13903 case X86::BI__builtin_ia32_ucmpq256_mask:
13904 case X86::BI__builtin_ia32_ucmpq512_mask: {
13905 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13906 return EmitX86MaskedCompare(*this, CC, false, Ops);
13907 }
13908 case X86::BI__builtin_ia32_vpcomb:
13909 case X86::BI__builtin_ia32_vpcomw:
13910 case X86::BI__builtin_ia32_vpcomd:
13911 case X86::BI__builtin_ia32_vpcomq:
13912 return EmitX86vpcom(*this, Ops, true);
13913 case X86::BI__builtin_ia32_vpcomub:
13914 case X86::BI__builtin_ia32_vpcomuw:
13915 case X86::BI__builtin_ia32_vpcomud:
13916 case X86::BI__builtin_ia32_vpcomuq:
13917 return EmitX86vpcom(*this, Ops, false);
13918
13919 case X86::BI__builtin_ia32_kortestcqi:
13920 case X86::BI__builtin_ia32_kortestchi:
13921 case X86::BI__builtin_ia32_kortestcsi:
13922 case X86::BI__builtin_ia32_kortestcdi: {
13923 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13924 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13925 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13926 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13927 }
13928 case X86::BI__builtin_ia32_kortestzqi:
13929 case X86::BI__builtin_ia32_kortestzhi:
13930 case X86::BI__builtin_ia32_kortestzsi:
13931 case X86::BI__builtin_ia32_kortestzdi: {
13932 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13933 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13934 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13935 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13936 }
13937
13938 case X86::BI__builtin_ia32_ktestcqi:
13939 case X86::BI__builtin_ia32_ktestzqi:
13940 case X86::BI__builtin_ia32_ktestchi:
13941 case X86::BI__builtin_ia32_ktestzhi:
13942 case X86::BI__builtin_ia32_ktestcsi:
13943 case X86::BI__builtin_ia32_ktestzsi:
13944 case X86::BI__builtin_ia32_ktestcdi:
13945 case X86::BI__builtin_ia32_ktestzdi: {
13946 Intrinsic::ID IID;
13947 switch (BuiltinID) {
13948 default: llvm_unreachable("Unsupported intrinsic!");
13949 case X86::BI__builtin_ia32_ktestcqi:
13950 IID = Intrinsic::x86_avx512_ktestc_b;
13951 break;
13952 case X86::BI__builtin_ia32_ktestzqi:
13953 IID = Intrinsic::x86_avx512_ktestz_b;
13954 break;
13955 case X86::BI__builtin_ia32_ktestchi:
13956 IID = Intrinsic::x86_avx512_ktestc_w;
13957 break;
13958 case X86::BI__builtin_ia32_ktestzhi:
13959 IID = Intrinsic::x86_avx512_ktestz_w;
13960 break;
13961 case X86::BI__builtin_ia32_ktestcsi:
13962 IID = Intrinsic::x86_avx512_ktestc_d;
13963 break;
13964 case X86::BI__builtin_ia32_ktestzsi:
13965 IID = Intrinsic::x86_avx512_ktestz_d;
13966 break;
13967 case X86::BI__builtin_ia32_ktestcdi:
13968 IID = Intrinsic::x86_avx512_ktestc_q;
13969 break;
13970 case X86::BI__builtin_ia32_ktestzdi:
13971 IID = Intrinsic::x86_avx512_ktestz_q;
13972 break;
13973 }
13974
13975 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13976 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13977 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13978 Function *Intr = CGM.getIntrinsic(IID);
13979 return Builder.CreateCall(Intr, {LHS, RHS});
13980 }
13981
13982 case X86::BI__builtin_ia32_kaddqi:
13983 case X86::BI__builtin_ia32_kaddhi:
13984 case X86::BI__builtin_ia32_kaddsi:
13985 case X86::BI__builtin_ia32_kadddi: {
13986 Intrinsic::ID IID;
13987 switch (BuiltinID) {
13988 default: llvm_unreachable("Unsupported intrinsic!");
13989 case X86::BI__builtin_ia32_kaddqi:
13990 IID = Intrinsic::x86_avx512_kadd_b;
13991 break;
13992 case X86::BI__builtin_ia32_kaddhi:
13993 IID = Intrinsic::x86_avx512_kadd_w;
13994 break;
13995 case X86::BI__builtin_ia32_kaddsi:
13996 IID = Intrinsic::x86_avx512_kadd_d;
13997 break;
13998 case X86::BI__builtin_ia32_kadddi:
13999 IID = Intrinsic::x86_avx512_kadd_q;
14000 break;
14001 }
14002
14003 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14004 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14005 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14006 Function *Intr = CGM.getIntrinsic(IID);
14007 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
14008 return Builder.CreateBitCast(Res, Ops[0]->getType());
14009 }
14010 case X86::BI__builtin_ia32_kandqi:
14011 case X86::BI__builtin_ia32_kandhi:
14012 case X86::BI__builtin_ia32_kandsi:
14013 case X86::BI__builtin_ia32_kanddi:
14014 return EmitX86MaskLogic(*this, Instruction::And, Ops);
14015 case X86::BI__builtin_ia32_kandnqi:
14016 case X86::BI__builtin_ia32_kandnhi:
14017 case X86::BI__builtin_ia32_kandnsi:
14018 case X86::BI__builtin_ia32_kandndi:
14019 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
14020 case X86::BI__builtin_ia32_korqi:
14021 case X86::BI__builtin_ia32_korhi:
14022 case X86::BI__builtin_ia32_korsi:
14023 case X86::BI__builtin_ia32_kordi:
14024 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
14025 case X86::BI__builtin_ia32_kxnorqi:
14026 case X86::BI__builtin_ia32_kxnorhi:
14027 case X86::BI__builtin_ia32_kxnorsi:
14028 case X86::BI__builtin_ia32_kxnordi:
14029 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
14030 case X86::BI__builtin_ia32_kxorqi:
14031 case X86::BI__builtin_ia32_kxorhi:
14032 case X86::BI__builtin_ia32_kxorsi:
14033 case X86::BI__builtin_ia32_kxordi:
14034 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
14035 case X86::BI__builtin_ia32_knotqi:
14036 case X86::BI__builtin_ia32_knothi:
14037 case X86::BI__builtin_ia32_knotsi:
14038 case X86::BI__builtin_ia32_knotdi: {
14039 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14040 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
14041 return Builder.CreateBitCast(Builder.CreateNot(Res),
14042 Ops[0]->getType());
14043 }
14044 case X86::BI__builtin_ia32_kmovb:
14045 case X86::BI__builtin_ia32_kmovw:
14046 case X86::BI__builtin_ia32_kmovd:
14047 case X86::BI__builtin_ia32_kmovq: {
14048 // Bitcast to vXi1 type and then back to integer. This gets the mask
14049 // register type into the IR, but might be optimized out depending on
14050 // what's around it.
14051 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14052 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
14053 return Builder.CreateBitCast(Res, Ops[0]->getType());
14054 }
14055
14056 case X86::BI__builtin_ia32_kunpckdi:
14057 case X86::BI__builtin_ia32_kunpcksi:
14058 case X86::BI__builtin_ia32_kunpckhi: {
14059 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14060 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14061 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14062 int Indices[64];
14063 for (unsigned i = 0; i != NumElts; ++i)
14064 Indices[i] = i;
14065
14066 // First extract half of each vector. This gives better codegen than
14067 // doing it in a single shuffle.
14068 LHS = Builder.CreateShuffleVector(LHS, LHS,
14069 makeArrayRef(Indices, NumElts / 2));
14070 RHS = Builder.CreateShuffleVector(RHS, RHS,
14071 makeArrayRef(Indices, NumElts / 2));
14072 // Concat the vectors.
14073 // NOTE: Operands are swapped to match the intrinsic definition.
14074 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
14075 makeArrayRef(Indices, NumElts));
14076 return Builder.CreateBitCast(Res, Ops[0]->getType());
14077 }
14078
14079 case X86::BI__builtin_ia32_vplzcntd_128:
14080 case X86::BI__builtin_ia32_vplzcntd_256:
14081 case X86::BI__builtin_ia32_vplzcntd_512:
14082 case X86::BI__builtin_ia32_vplzcntq_128:
14083 case X86::BI__builtin_ia32_vplzcntq_256:
14084 case X86::BI__builtin_ia32_vplzcntq_512: {
14085 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
14086 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
14087 }
14088 case X86::BI__builtin_ia32_sqrtss:
14089 case X86::BI__builtin_ia32_sqrtsd: {
14090 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
14091 Function *F;
14092 if (Builder.getIsFPConstrained()) {
14093 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14094 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14095 A->getType());
14096 A = Builder.CreateConstrainedFPCall(F, {A});
14097 } else {
14098 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
14099 A = Builder.CreateCall(F, {A});
14100 }
14101 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
14102 }
14103 case X86::BI__builtin_ia32_sqrtsh_round_mask:
14104 case X86::BI__builtin_ia32_sqrtsd_round_mask:
14105 case X86::BI__builtin_ia32_sqrtss_round_mask: {
14106 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
14107 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
14108 // otherwise keep the intrinsic.
14109 if (CC != 4) {
14110 Intrinsic::ID IID;
14111
14112 switch (BuiltinID) {
14113 default:
14114 llvm_unreachable("Unsupported intrinsic!");
14115 case X86::BI__builtin_ia32_sqrtsh_round_mask:
14116 IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh;
14117 break;
14118 case X86::BI__builtin_ia32_sqrtsd_round_mask:
14119 IID = Intrinsic::x86_avx512_mask_sqrt_sd;
14120 break;
14121 case X86::BI__builtin_ia32_sqrtss_round_mask:
14122 IID = Intrinsic::x86_avx512_mask_sqrt_ss;
14123 break;
14124 }
14125 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14126 }
14127 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
14128 Function *F;
14129 if (Builder.getIsFPConstrained()) {
14130 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14131 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14132 A->getType());
14133 A = Builder.CreateConstrainedFPCall(F, A);
14134 } else {
14135 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
14136 A = Builder.CreateCall(F, A);
14137 }
14138 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
14139 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
14140 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
14141 }
14142 case X86::BI__builtin_ia32_sqrtpd256:
14143 case X86::BI__builtin_ia32_sqrtpd:
14144 case X86::BI__builtin_ia32_sqrtps256:
14145 case X86::BI__builtin_ia32_sqrtps:
14146 case X86::BI__builtin_ia32_sqrtph256:
14147 case X86::BI__builtin_ia32_sqrtph:
14148 case X86::BI__builtin_ia32_sqrtph512:
14149 case X86::BI__builtin_ia32_sqrtps512:
14150 case X86::BI__builtin_ia32_sqrtpd512: {
14151 if (Ops.size() == 2) {
14152 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14153 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
14154 // otherwise keep the intrinsic.
14155 if (CC != 4) {
14156 Intrinsic::ID IID;
14157
14158 switch (BuiltinID) {
14159 default:
14160 llvm_unreachable("Unsupported intrinsic!");
14161 case X86::BI__builtin_ia32_sqrtph512:
14162 IID = Intrinsic::x86_avx512fp16_sqrt_ph_512;
14163 break;
14164 case X86::BI__builtin_ia32_sqrtps512:
14165 IID = Intrinsic::x86_avx512_sqrt_ps_512;
14166 break;
14167 case X86::BI__builtin_ia32_sqrtpd512:
14168 IID = Intrinsic::x86_avx512_sqrt_pd_512;
14169 break;
14170 }
14171 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14172 }
14173 }
14174 if (Builder.getIsFPConstrained()) {
14175 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14176 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14177 Ops[0]->getType());
14178 return Builder.CreateConstrainedFPCall(F, Ops[0]);
14179 } else {
14180 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
14181 return Builder.CreateCall(F, Ops[0]);
14182 }
14183 }
14184 case X86::BI__builtin_ia32_pabsb128:
14185 case X86::BI__builtin_ia32_pabsw128:
14186 case X86::BI__builtin_ia32_pabsd128:
14187 case X86::BI__builtin_ia32_pabsb256:
14188 case X86::BI__builtin_ia32_pabsw256:
14189 case X86::BI__builtin_ia32_pabsd256:
14190 case X86::BI__builtin_ia32_pabsq128:
14191 case X86::BI__builtin_ia32_pabsq256:
14192 case X86::BI__builtin_ia32_pabsb512:
14193 case X86::BI__builtin_ia32_pabsw512:
14194 case X86::BI__builtin_ia32_pabsd512:
14195 case X86::BI__builtin_ia32_pabsq512: {
14196 Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
14197 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
14198 }
14199 case X86::BI__builtin_ia32_pmaxsb128:
14200 case X86::BI__builtin_ia32_pmaxsw128:
14201 case X86::BI__builtin_ia32_pmaxsd128:
14202 case X86::BI__builtin_ia32_pmaxsq128:
14203 case X86::BI__builtin_ia32_pmaxsb256:
14204 case X86::BI__builtin_ia32_pmaxsw256:
14205 case X86::BI__builtin_ia32_pmaxsd256:
14206 case X86::BI__builtin_ia32_pmaxsq256:
14207 case X86::BI__builtin_ia32_pmaxsb512:
14208 case X86::BI__builtin_ia32_pmaxsw512:
14209 case X86::BI__builtin_ia32_pmaxsd512:
14210 case X86::BI__builtin_ia32_pmaxsq512:
14211 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
14212 case X86::BI__builtin_ia32_pmaxub128:
14213 case X86::BI__builtin_ia32_pmaxuw128:
14214 case X86::BI__builtin_ia32_pmaxud128:
14215 case X86::BI__builtin_ia32_pmaxuq128:
14216 case X86::BI__builtin_ia32_pmaxub256:
14217 case X86::BI__builtin_ia32_pmaxuw256:
14218 case X86::BI__builtin_ia32_pmaxud256:
14219 case X86::BI__builtin_ia32_pmaxuq256:
14220 case X86::BI__builtin_ia32_pmaxub512:
14221 case X86::BI__builtin_ia32_pmaxuw512:
14222 case X86::BI__builtin_ia32_pmaxud512:
14223 case X86::BI__builtin_ia32_pmaxuq512:
14224 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
14225 case X86::BI__builtin_ia32_pminsb128:
14226 case X86::BI__builtin_ia32_pminsw128:
14227 case X86::BI__builtin_ia32_pminsd128:
14228 case X86::BI__builtin_ia32_pminsq128:
14229 case X86::BI__builtin_ia32_pminsb256:
14230 case X86::BI__builtin_ia32_pminsw256:
14231 case X86::BI__builtin_ia32_pminsd256:
14232 case X86::BI__builtin_ia32_pminsq256:
14233 case X86::BI__builtin_ia32_pminsb512:
14234 case X86::BI__builtin_ia32_pminsw512:
14235 case X86::BI__builtin_ia32_pminsd512:
14236 case X86::BI__builtin_ia32_pminsq512:
14237 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
14238 case X86::BI__builtin_ia32_pminub128:
14239 case X86::BI__builtin_ia32_pminuw128:
14240 case X86::BI__builtin_ia32_pminud128:
14241 case X86::BI__builtin_ia32_pminuq128:
14242 case X86::BI__builtin_ia32_pminub256:
14243 case X86::BI__builtin_ia32_pminuw256:
14244 case X86::BI__builtin_ia32_pminud256:
14245 case X86::BI__builtin_ia32_pminuq256:
14246 case X86::BI__builtin_ia32_pminub512:
14247 case X86::BI__builtin_ia32_pminuw512:
14248 case X86::BI__builtin_ia32_pminud512:
14249 case X86::BI__builtin_ia32_pminuq512:
14250 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
14251
14252 case X86::BI__builtin_ia32_pmuludq128:
14253 case X86::BI__builtin_ia32_pmuludq256:
14254 case X86::BI__builtin_ia32_pmuludq512:
14255 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
14256
14257 case X86::BI__builtin_ia32_pmuldq128:
14258 case X86::BI__builtin_ia32_pmuldq256:
14259 case X86::BI__builtin_ia32_pmuldq512:
14260 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
14261
14262 case X86::BI__builtin_ia32_pternlogd512_mask:
14263 case X86::BI__builtin_ia32_pternlogq512_mask:
14264 case X86::BI__builtin_ia32_pternlogd128_mask:
14265 case X86::BI__builtin_ia32_pternlogd256_mask:
14266 case X86::BI__builtin_ia32_pternlogq128_mask:
14267 case X86::BI__builtin_ia32_pternlogq256_mask:
14268 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
14269
14270 case X86::BI__builtin_ia32_pternlogd512_maskz:
14271 case X86::BI__builtin_ia32_pternlogq512_maskz:
14272 case X86::BI__builtin_ia32_pternlogd128_maskz:
14273 case X86::BI__builtin_ia32_pternlogd256_maskz:
14274 case X86::BI__builtin_ia32_pternlogq128_maskz:
14275 case X86::BI__builtin_ia32_pternlogq256_maskz:
14276 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
14277
14278 case X86::BI__builtin_ia32_vpshldd128:
14279 case X86::BI__builtin_ia32_vpshldd256:
14280 case X86::BI__builtin_ia32_vpshldd512:
14281 case X86::BI__builtin_ia32_vpshldq128:
14282 case X86::BI__builtin_ia32_vpshldq256:
14283 case X86::BI__builtin_ia32_vpshldq512:
14284 case X86::BI__builtin_ia32_vpshldw128:
14285 case X86::BI__builtin_ia32_vpshldw256:
14286 case X86::BI__builtin_ia32_vpshldw512:
14287 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
14288
14289 case X86::BI__builtin_ia32_vpshrdd128:
14290 case X86::BI__builtin_ia32_vpshrdd256:
14291 case X86::BI__builtin_ia32_vpshrdd512:
14292 case X86::BI__builtin_ia32_vpshrdq128:
14293 case X86::BI__builtin_ia32_vpshrdq256:
14294 case X86::BI__builtin_ia32_vpshrdq512:
14295 case X86::BI__builtin_ia32_vpshrdw128:
14296 case X86::BI__builtin_ia32_vpshrdw256:
14297 case X86::BI__builtin_ia32_vpshrdw512:
14298 // Ops 0 and 1 are swapped.
14299 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
14300
14301 case X86::BI__builtin_ia32_vpshldvd128:
14302 case X86::BI__builtin_ia32_vpshldvd256:
14303 case X86::BI__builtin_ia32_vpshldvd512:
14304 case X86::BI__builtin_ia32_vpshldvq128:
14305 case X86::BI__builtin_ia32_vpshldvq256:
14306 case X86::BI__builtin_ia32_vpshldvq512:
14307 case X86::BI__builtin_ia32_vpshldvw128:
14308 case X86::BI__builtin_ia32_vpshldvw256:
14309 case X86::BI__builtin_ia32_vpshldvw512:
14310 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
14311
14312 case X86::BI__builtin_ia32_vpshrdvd128:
14313 case X86::BI__builtin_ia32_vpshrdvd256:
14314 case X86::BI__builtin_ia32_vpshrdvd512:
14315 case X86::BI__builtin_ia32_vpshrdvq128:
14316 case X86::BI__builtin_ia32_vpshrdvq256:
14317 case X86::BI__builtin_ia32_vpshrdvq512:
14318 case X86::BI__builtin_ia32_vpshrdvw128:
14319 case X86::BI__builtin_ia32_vpshrdvw256:
14320 case X86::BI__builtin_ia32_vpshrdvw512:
14321 // Ops 0 and 1 are swapped.
14322 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
14323
14324 // Reductions
14325 case X86::BI__builtin_ia32_reduce_add_d512:
14326 case X86::BI__builtin_ia32_reduce_add_q512: {
14327 Function *F =
14328 CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
14329 return Builder.CreateCall(F, {Ops[0]});
14330 }
14331 case X86::BI__builtin_ia32_reduce_and_d512:
14332 case X86::BI__builtin_ia32_reduce_and_q512: {
14333 Function *F =
14334 CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
14335 return Builder.CreateCall(F, {Ops[0]});
14336 }
14337 case X86::BI__builtin_ia32_reduce_fadd_pd512:
14338 case X86::BI__builtin_ia32_reduce_fadd_ps512:
14339 case X86::BI__builtin_ia32_reduce_fadd_ph512:
14340 case X86::BI__builtin_ia32_reduce_fadd_ph256:
14341 case X86::BI__builtin_ia32_reduce_fadd_ph128: {
14342 Function *F =
14343 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
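// vector_reduce_fadd is a strictly ordered reduction unless reassociation is
// allowed, so the flag is set before emitting the call.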
14344 Builder.getFastMathFlags().setAllowReassoc();
14345 return Builder.CreateCall(F, {Ops[0], Ops[1]});
14346 }
14347 case X86::BI__builtin_ia32_reduce_fmul_pd512:
14348 case X86::BI__builtin_ia32_reduce_fmul_ps512:
14349 case X86::BI__builtin_ia32_reduce_fmul_ph512:
14350 case X86::BI__builtin_ia32_reduce_fmul_ph256:
14351 case X86::BI__builtin_ia32_reduce_fmul_ph128: {
14352 Function *F =
14353 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
14354 Builder.getFastMathFlags().setAllowReassoc();
14355 return Builder.CreateCall(F, {Ops[0], Ops[1]});
14356 }
14357 case X86::BI__builtin_ia32_reduce_fmax_pd512:
14358 case X86::BI__builtin_ia32_reduce_fmax_ps512:
14359 case X86::BI__builtin_ia32_reduce_fmax_ph512:
14360 case X86::BI__builtin_ia32_reduce_fmax_ph256:
14361 case X86::BI__builtin_ia32_reduce_fmax_ph128: {
14362 Function *F =
14363 CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
14364 Builder.getFastMathFlags().setNoNaNs();
14365 return Builder.CreateCall(F, {Ops[0]});
14366 }
14367 case X86::BI__builtin_ia32_reduce_fmin_pd512:
14368 case X86::BI__builtin_ia32_reduce_fmin_ps512:
14369 case X86::BI__builtin_ia32_reduce_fmin_ph512:
14370 case X86::BI__builtin_ia32_reduce_fmin_ph256:
14371 case X86::BI__builtin_ia32_reduce_fmin_ph128: {
14372 Function *F =
14373 CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
14374 Builder.getFastMathFlags().setNoNaNs();
14375 return Builder.CreateCall(F, {Ops[0]});
14376 }
14377 case X86::BI__builtin_ia32_reduce_mul_d512:
14378 case X86::BI__builtin_ia32_reduce_mul_q512: {
14379 Function *F =
14380 CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
14381 return Builder.CreateCall(F, {Ops[0]});
14382 }
14383 case X86::BI__builtin_ia32_reduce_or_d512:
14384 case X86::BI__builtin_ia32_reduce_or_q512: {
14385 Function *F =
14386 CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
14387 return Builder.CreateCall(F, {Ops[0]});
14388 }
14389 case X86::BI__builtin_ia32_reduce_smax_d512:
14390 case X86::BI__builtin_ia32_reduce_smax_q512: {
14391 Function *F =
14392 CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
14393 return Builder.CreateCall(F, {Ops[0]});
14394 }
14395 case X86::BI__builtin_ia32_reduce_smin_d512:
14396 case X86::BI__builtin_ia32_reduce_smin_q512: {
14397 Function *F =
14398 CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
14399 return Builder.CreateCall(F, {Ops[0]});
14400 }
14401 case X86::BI__builtin_ia32_reduce_umax_d512:
14402 case X86::BI__builtin_ia32_reduce_umax_q512: {
14403 Function *F =
14404 CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
14405 return Builder.CreateCall(F, {Ops[0]});
14406 }
14407 case X86::BI__builtin_ia32_reduce_umin_d512:
14408 case X86::BI__builtin_ia32_reduce_umin_q512: {
14409 Function *F =
14410 CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
14411 return Builder.CreateCall(F, {Ops[0]});
14412 }
14413
14414 // 3DNow!
14415 case X86::BI__builtin_ia32_pswapdsf:
14416 case X86::BI__builtin_ia32_pswapdsi: {
14417 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
14418 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
14419 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
14420 return Builder.CreateCall(F, Ops, "pswapd");
14421 }
14422 case X86::BI__builtin_ia32_rdrand16_step:
14423 case X86::BI__builtin_ia32_rdrand32_step:
14424 case X86::BI__builtin_ia32_rdrand64_step:
14425 case X86::BI__builtin_ia32_rdseed16_step:
14426 case X86::BI__builtin_ia32_rdseed32_step:
14427 case X86::BI__builtin_ia32_rdseed64_step: {
14428 Intrinsic::ID ID;
14429 switch (BuiltinID) {
14430 default: llvm_unreachable("Unsupported intrinsic!");
14431 case X86::BI__builtin_ia32_rdrand16_step:
14432 ID = Intrinsic::x86_rdrand_16;
14433 break;
14434 case X86::BI__builtin_ia32_rdrand32_step:
14435 ID = Intrinsic::x86_rdrand_32;
14436 break;
14437 case X86::BI__builtin_ia32_rdrand64_step:
14438 ID = Intrinsic::x86_rdrand_64;
14439 break;
14440 case X86::BI__builtin_ia32_rdseed16_step:
14441 ID = Intrinsic::x86_rdseed_16;
14442 break;
14443 case X86::BI__builtin_ia32_rdseed32_step:
14444 ID = Intrinsic::x86_rdseed_32;
14445 break;
14446 case X86::BI__builtin_ia32_rdseed64_step:
14447 ID = Intrinsic::x86_rdseed_64;
14448 break;
14449 }
14450
14451 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
14452 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
14453 Ops[0]);
14454 return Builder.CreateExtractValue(Call, 1);
14455 }
14456 case X86::BI__builtin_ia32_addcarryx_u32:
14457 case X86::BI__builtin_ia32_addcarryx_u64:
14458 case X86::BI__builtin_ia32_subborrow_u32:
14459 case X86::BI__builtin_ia32_subborrow_u64: {
14460 Intrinsic::ID IID;
14461 switch (BuiltinID) {
14462 default: llvm_unreachable("Unsupported intrinsic!");
14463 case X86::BI__builtin_ia32_addcarryx_u32:
14464 IID = Intrinsic::x86_addcarry_32;
14465 break;
14466 case X86::BI__builtin_ia32_addcarryx_u64:
14467 IID = Intrinsic::x86_addcarry_64;
14468 break;
14469 case X86::BI__builtin_ia32_subborrow_u32:
14470 IID = Intrinsic::x86_subborrow_32;
14471 break;
14472 case X86::BI__builtin_ia32_subborrow_u64:
14473 IID = Intrinsic::x86_subborrow_64;
14474 break;
14475 }
14476
14477 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
14478 { Ops[0], Ops[1], Ops[2] });
14479 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14480 Ops[3]);
14481 return Builder.CreateExtractValue(Call, 0);
14482 }
14483
14484 case X86::BI__builtin_ia32_fpclassps128_mask:
14485 case X86::BI__builtin_ia32_fpclassps256_mask:
14486 case X86::BI__builtin_ia32_fpclassps512_mask:
14487 case X86::BI__builtin_ia32_fpclassph128_mask:
14488 case X86::BI__builtin_ia32_fpclassph256_mask:
14489 case X86::BI__builtin_ia32_fpclassph512_mask:
14490 case X86::BI__builtin_ia32_fpclasspd128_mask:
14491 case X86::BI__builtin_ia32_fpclasspd256_mask:
14492 case X86::BI__builtin_ia32_fpclasspd512_mask: {
14493 unsigned NumElts =
14494 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14495 Value *MaskIn = Ops[2];
14496 Ops.erase(&Ops[2]);
14497
14498 Intrinsic::ID ID;
14499 switch (BuiltinID) {
14500 default: llvm_unreachable("Unsupported intrinsic!");
14501 case X86::BI__builtin_ia32_fpclassph128_mask:
14502 ID = Intrinsic::x86_avx512fp16_fpclass_ph_128;
14503 break;
14504 case X86::BI__builtin_ia32_fpclassph256_mask:
14505 ID = Intrinsic::x86_avx512fp16_fpclass_ph_256;
14506 break;
14507 case X86::BI__builtin_ia32_fpclassph512_mask:
14508 ID = Intrinsic::x86_avx512fp16_fpclass_ph_512;
14509 break;
14510 case X86::BI__builtin_ia32_fpclassps128_mask:
14511 ID = Intrinsic::x86_avx512_fpclass_ps_128;
14512 break;
14513 case X86::BI__builtin_ia32_fpclassps256_mask:
14514 ID = Intrinsic::x86_avx512_fpclass_ps_256;
14515 break;
14516 case X86::BI__builtin_ia32_fpclassps512_mask:
14517 ID = Intrinsic::x86_avx512_fpclass_ps_512;
14518 break;
14519 case X86::BI__builtin_ia32_fpclasspd128_mask:
14520 ID = Intrinsic::x86_avx512_fpclass_pd_128;
14521 break;
14522 case X86::BI__builtin_ia32_fpclasspd256_mask:
14523 ID = Intrinsic::x86_avx512_fpclass_pd_256;
14524 break;
14525 case X86::BI__builtin_ia32_fpclasspd512_mask:
14526 ID = Intrinsic::x86_avx512_fpclass_pd_512;
14527 break;
14528 }
14529
14530 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14531 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
14532 }
14533
14534 case X86::BI__builtin_ia32_vp2intersect_q_512:
14535 case X86::BI__builtin_ia32_vp2intersect_q_256:
14536 case X86::BI__builtin_ia32_vp2intersect_q_128:
14537 case X86::BI__builtin_ia32_vp2intersect_d_512:
14538 case X86::BI__builtin_ia32_vp2intersect_d_256:
14539 case X86::BI__builtin_ia32_vp2intersect_d_128: {
14540 unsigned NumElts =
14541 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14542 Intrinsic::ID ID;
14543
14544 switch (BuiltinID) {
14545 default: llvm_unreachable("Unsupported intrinsic!");
14546 case X86::BI__builtin_ia32_vp2intersect_q_512:
14547 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
14548 break;
14549 case X86::BI__builtin_ia32_vp2intersect_q_256:
14550 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
14551 break;
14552 case X86::BI__builtin_ia32_vp2intersect_q_128:
14553 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
14554 break;
14555 case X86::BI__builtin_ia32_vp2intersect_d_512:
14556 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
14557 break;
14558 case X86::BI__builtin_ia32_vp2intersect_d_256:
14559 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
14560 break;
14561 case X86::BI__builtin_ia32_vp2intersect_d_128:
14562 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
14563 break;
14564 }
14565
14566 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
14567 Value *Result = Builder.CreateExtractValue(Call, 0);
14568 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
14569 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
14570
14571 Result = Builder.CreateExtractValue(Call, 1);
14572 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
14573 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
14574 }
14575
14576 case X86::BI__builtin_ia32_vpmultishiftqb128:
14577 case X86::BI__builtin_ia32_vpmultishiftqb256:
14578 case X86::BI__builtin_ia32_vpmultishiftqb512: {
14579 Intrinsic::ID ID;
14580 switch (BuiltinID) {
14581 default: llvm_unreachable("Unsupported intrinsic!");
14582 case X86::BI__builtin_ia32_vpmultishiftqb128:
14583 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
14584 break;
14585 case X86::BI__builtin_ia32_vpmultishiftqb256:
14586 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
14587 break;
14588 case X86::BI__builtin_ia32_vpmultishiftqb512:
14589 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
14590 break;
14591 }
14592
14593 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14594 }
14595
14596 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14597 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14598 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
14599 unsigned NumElts =
14600 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14601 Value *MaskIn = Ops[2];
14602 Ops.erase(&Ops[2]);
14603
14604 Intrinsic::ID ID;
14605 switch (BuiltinID) {
14606 default: llvm_unreachable("Unsupported intrinsic!");
14607 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14608 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
14609 break;
14610 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14611 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
14612 break;
14613 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
14614 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
14615 break;
14616 }
14617
14618 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14619 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
14620 }
14621
14622 // packed comparison intrinsics
14623 case X86::BI__builtin_ia32_cmpeqps:
14624 case X86::BI__builtin_ia32_cmpeqpd:
14625 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
14626 case X86::BI__builtin_ia32_cmpltps:
14627 case X86::BI__builtin_ia32_cmpltpd:
14628 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
14629 case X86::BI__builtin_ia32_cmpleps:
14630 case X86::BI__builtin_ia32_cmplepd:
14631 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
14632 case X86::BI__builtin_ia32_cmpunordps:
14633 case X86::BI__builtin_ia32_cmpunordpd:
14634 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
14635 case X86::BI__builtin_ia32_cmpneqps:
14636 case X86::BI__builtin_ia32_cmpneqpd:
14637 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
14638 case X86::BI__builtin_ia32_cmpnltps:
14639 case X86::BI__builtin_ia32_cmpnltpd:
14640 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
14641 case X86::BI__builtin_ia32_cmpnleps:
14642 case X86::BI__builtin_ia32_cmpnlepd:
14643 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
14644 case X86::BI__builtin_ia32_cmpordps:
14645 case X86::BI__builtin_ia32_cmpordpd:
14646 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
14647 case X86::BI__builtin_ia32_cmpph128_mask:
14648 case X86::BI__builtin_ia32_cmpph256_mask:
14649 case X86::BI__builtin_ia32_cmpph512_mask:
14650 case X86::BI__builtin_ia32_cmpps128_mask:
14651 case X86::BI__builtin_ia32_cmpps256_mask:
14652 case X86::BI__builtin_ia32_cmpps512_mask:
14653 case X86::BI__builtin_ia32_cmppd128_mask:
14654 case X86::BI__builtin_ia32_cmppd256_mask:
14655 case X86::BI__builtin_ia32_cmppd512_mask:
14656 IsMaskFCmp = true;
14657 LLVM_FALLTHROUGH;
14658 case X86::BI__builtin_ia32_cmpps:
14659 case X86::BI__builtin_ia32_cmpps256:
14660 case X86::BI__builtin_ia32_cmppd:
14661 case X86::BI__builtin_ia32_cmppd256: {
14662 // Lowering vector comparisons to fcmp instructions, while
14663 // ignoring the requested signalling behaviour and
14664 // the requested rounding mode.
14665 // This is only possible if fp-model is not strict and FENV_ACCESS is off.
14666
14667 // The third argument is the comparison condition, an integer in the
14668 // range [0, 31].
14669 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
14670
14671 // Lowering to IR fcmp instruction.
14672 // Ignoring requested signaling behaviour,
14673 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
14674 FCmpInst::Predicate Pred;
14675 bool IsSignaling;
14676 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
14677 // behavior is inverted. We'll handle that after the switch.
14678 switch (CC & 0xf) {
14679 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
14680 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
14681 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
14682 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
14683 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
14684 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
14685 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
14686 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
14687 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
14688 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
14689 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
14690 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
14691 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
14692 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
14693 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
14694 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
14695 default: llvm_unreachable("Unhandled CC");
14696 }
14697
14698 // Invert the signalling behavior for 16-31.
14699 if (CC & 0x10)
14700 IsSignaling = !IsSignaling;
14701
14702 // If the predicate is true or false and we're using constrained intrinsics,
14703 // we don't have a compare intrinsic we can use. Just use the legacy X86
14704 // specific intrinsic.
14705 // If the intrinsic is mask enabled and we're using constrained intrinsics,
14706 // use the legacy X86 specific intrinsic.
14707 if (Builder.getIsFPConstrained() &&
14708 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
14709 IsMaskFCmp)) {
14710
14711 Intrinsic::ID IID;
14712 switch (BuiltinID) {
14713 default: llvm_unreachable("Unexpected builtin");
14714 case X86::BI__builtin_ia32_cmpps:
14715 IID = Intrinsic::x86_sse_cmp_ps;
14716 break;
14717 case X86::BI__builtin_ia32_cmpps256:
14718 IID = Intrinsic::x86_avx_cmp_ps_256;
14719 break;
14720 case X86::BI__builtin_ia32_cmppd:
14721 IID = Intrinsic::x86_sse2_cmp_pd;
14722 break;
14723 case X86::BI__builtin_ia32_cmppd256:
14724 IID = Intrinsic::x86_avx_cmp_pd_256;
14725 break;
14726 case X86::BI__builtin_ia32_cmpps512_mask:
14727 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
14728 break;
14729 case X86::BI__builtin_ia32_cmppd512_mask:
14730 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
14731 break;
14732 case X86::BI__builtin_ia32_cmpps128_mask:
14733 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
14734 break;
14735 case X86::BI__builtin_ia32_cmpps256_mask:
14736 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
14737 break;
14738 case X86::BI__builtin_ia32_cmppd128_mask:
14739 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
14740 break;
14741 case X86::BI__builtin_ia32_cmppd256_mask:
14742 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
14743 break;
14744 }
14745
14746 Function *Intr = CGM.getIntrinsic(IID);
14747 if (IsMaskFCmp) {
14748 unsigned NumElts =
14749 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14750 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
14751 Value *Cmp = Builder.CreateCall(Intr, Ops);
14752 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
14753 }
14754
14755 return Builder.CreateCall(Intr, Ops);
14756 }
14757
14758 // Builtins without the _mask suffix return a vector of integers
14759 // of the same width as the input vectors
14760 if (IsMaskFCmp) {
14761 // We ignore SAE if strict FP is disabled. We only keep precise
14762 // exception behavior under strict FP.
14763 // NOTE: If strict FP does ever go through here a CGFPOptionsRAII
14764 // object will be required.
14765 unsigned NumElts =
14766 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14767 Value *Cmp;
14768 if (IsSignaling)
14769 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
14770 else
14771 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
14772 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
14773 }
14774
14775 return getVectorFCmpIR(Pred, IsSignaling);
14776 }
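Note: a minimal standalone sketch of the condition-code decoding performed in the case above, assuming the same [0, 31] immediate encoding; the DecodeCmpCC helper and the Pred enum below are illustrative and not part of CGBuiltin.cpp.

  #include <utility>

  enum class Pred { OEQ, OLT, OLE, UNO, UNE, UGE, UGT, ORD,
                    UEQ, ULT, ULE, FALSE_, ONE, OGE, OGT, TRUE_ };

  // Bits 0-3 select the base predicate; bit 4 only flips the signaling
  // behaviour, e.g. 0x01 (_CMP_LT_OS, signaling) vs. 0x11 (_CMP_LT_OQ, quiet).
  std::pair<Pred, bool> DecodeCmpCC(unsigned CC) {
    static const Pred Base[16] = {Pred::OEQ, Pred::OLT, Pred::OLE, Pred::UNO,
                                  Pred::UNE, Pred::UGE, Pred::UGT, Pred::ORD,
                                  Pred::UEQ, Pred::ULT, Pred::ULE, Pred::FALSE_,
                                  Pred::ONE, Pred::OGE, Pred::OGT, Pred::TRUE_};
    static const bool Sig[16] = {false, true, true, false, false, true, true, false,
                                 false, true, true, false, false, true, true, false};
    bool IsSignaling = Sig[CC & 0xf];
    if (CC & 0x10)
      IsSignaling = !IsSignaling;
    return {Base[CC & 0xf], IsSignaling};
  }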
14777
14778 // SSE scalar comparison intrinsics
14779 case X86::BI__builtin_ia32_cmpeqss:
14780 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
14781 case X86::BI__builtin_ia32_cmpltss:
14782 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
14783 case X86::BI__builtin_ia32_cmpless:
14784 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
14785 case X86::BI__builtin_ia32_cmpunordss:
14786 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
14787 case X86::BI__builtin_ia32_cmpneqss:
14788 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
14789 case X86::BI__builtin_ia32_cmpnltss:
14790 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
14791 case X86::BI__builtin_ia32_cmpnless:
14792 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
14793 case X86::BI__builtin_ia32_cmpordss:
14794 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
14795 case X86::BI__builtin_ia32_cmpeqsd:
14796 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
14797 case X86::BI__builtin_ia32_cmpltsd:
14798 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
14799 case X86::BI__builtin_ia32_cmplesd:
14800 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
14801 case X86::BI__builtin_ia32_cmpunordsd:
14802 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
14803 case X86::BI__builtin_ia32_cmpneqsd:
14804 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
14805 case X86::BI__builtin_ia32_cmpnltsd:
14806 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
14807 case X86::BI__builtin_ia32_cmpnlesd:
14808 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
14809 case X86::BI__builtin_ia32_cmpordsd:
14810 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
14811
14812 // f16c half2float intrinsics
14813 case X86::BI__builtin_ia32_vcvtph2ps:
14814 case X86::BI__builtin_ia32_vcvtph2ps256:
14815 case X86::BI__builtin_ia32_vcvtph2ps_mask:
14816 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
14817 case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
14818 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14819 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
14820 }
14821
14822 // AVX512 bf16 intrinsics
14823 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
14824 Ops[2] = getMaskVecValue(
14825 *this, Ops[2],
14826 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
14827 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
14828 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14829 }
14830 case X86::BI__builtin_ia32_cvtsbf162ss_32:
14831 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
14832
14833 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14834 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
14835 Intrinsic::ID IID;
14836 switch (BuiltinID) {
14837 default: llvm_unreachable("Unsupported intrinsic!");
14838 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14839 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
14840 break;
14841 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
14842 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
14843 break;
14844 }
14845 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
14846 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14847 }
14848
14849 case X86::BI__emul:
14850 case X86::BI__emulu: {
14851 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
14852 bool isSigned = (BuiltinID == X86::BI__emul);
14853 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
14854 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
14855 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
14856 }
14857 case X86::BI__mulh:
14858 case X86::BI__umulh:
14859 case X86::BI_mul128:
14860 case X86::BI_umul128: {
14861 llvm::Type *ResType = ConvertType(E->getType());
14862 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
14863
14864 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
14865 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
14866 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
14867
14868 Value *MulResult, *HigherBits;
14869 if (IsSigned) {
14870 MulResult = Builder.CreateNSWMul(LHS, RHS);
14871 HigherBits = Builder.CreateAShr(MulResult, 64);
14872 } else {
14873 MulResult = Builder.CreateNUWMul(LHS, RHS);
14874 HigherBits = Builder.CreateLShr(MulResult, 64);
14875 }
14876 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
14877
14878 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
14879 return HigherBits;
14880
14881 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
14882 Builder.CreateStore(HigherBits, HighBitsAddress);
14883 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
14884 }
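Note: a scalar reference for what the unsigned variants of this lowering compute, assuming a host compiler with unsigned __int128 support; the *_ref helper names are illustrative.

  #include <cstdint>

  // __umulh(a, b): the high 64 bits of the full 128-bit product.
  uint64_t umulh_ref(uint64_t a, uint64_t b) {
    return (uint64_t)(((unsigned __int128)a * b) >> 64);
  }

  // _umul128(a, b, &hi): returns the low 64 bits and stores the high 64 bits.
  uint64_t umul128_ref(uint64_t a, uint64_t b, uint64_t *hi) {
    unsigned __int128 p = (unsigned __int128)a * b;
    *hi = (uint64_t)(p >> 64);
    return (uint64_t)p;
  }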
14885
14886 case X86::BI__faststorefence: {
14887 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14888 llvm::SyncScope::System);
14889 }
14890 case X86::BI__shiftleft128:
14891 case X86::BI__shiftright128: {
14892 llvm::Function *F = CGM.getIntrinsic(
14893 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
14894 Int64Ty);
14895 // Flip low/high ops and zero-extend amount to matching type.
14896 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
14897 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
14898 std::swap(Ops[0], Ops[1]);
14899 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
14900 return Builder.CreateCall(F, Ops);
14901 }
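Note: a scalar reference for the funnel-shift mapping used above, assuming the shift amount is taken modulo 64 as in the fshl/fshr intrinsics; shiftleft128_ref is an illustrative helper, not the emitted IR.

  #include <cstdint>

  // __shiftleft128(Low, High, Amt) == fshl(High, Low, Amt):
  // the high 64 bits of the 128-bit value (High:Low) shifted left by Amt % 64.
  uint64_t shiftleft128_ref(uint64_t Low, uint64_t High, unsigned char Amt) {
    unsigned __int128 Wide = ((unsigned __int128)High << 64) | Low;
    return (uint64_t)((Wide << (Amt & 63)) >> 64);
  }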
14902 case X86::BI_ReadWriteBarrier:
14903 case X86::BI_ReadBarrier:
14904 case X86::BI_WriteBarrier: {
14905 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14906 llvm::SyncScope::SingleThread);
14907 }
14908
14909 case X86::BI_AddressOfReturnAddress: {
14910 Function *F =
14911 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14912 return Builder.CreateCall(F);
14913 }
14914 case X86::BI__stosb: {
14915 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
14916 // instruction, but it will create a memset that won't be optimized away.
14917 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14918 }
14919 case X86::BI__ud2:
14920 // llvm.trap makes a ud2a instruction on x86.
14921 return EmitTrapCall(Intrinsic::trap);
14922 case X86::BI__int2c: {
14923 // This syscall signals a driver assertion failure in x86 NT kernels.
14924 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14925 llvm::InlineAsm *IA =
14926 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14927 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14928 getLLVMContext(), llvm::AttributeList::FunctionIndex,
14929 llvm::Attribute::NoReturn);
14930 llvm::CallInst *CI = Builder.CreateCall(IA);
14931 CI->setAttributes(NoReturnAttr);
14932 return CI;
14933 }
14934 case X86::BI__readfsbyte:
14935 case X86::BI__readfsword:
14936 case X86::BI__readfsdword:
14937 case X86::BI__readfsqword: {
14938 llvm::Type *IntTy = ConvertType(E->getType());
14939 Value *Ptr =
14940 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14941 LoadInst *Load = Builder.CreateAlignedLoad(
14942 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14943 Load->setVolatile(true);
14944 return Load;
14945 }
14946 case X86::BI__readgsbyte:
14947 case X86::BI__readgsword:
14948 case X86::BI__readgsdword:
14949 case X86::BI__readgsqword: {
14950 llvm::Type *IntTy = ConvertType(E->getType());
14951 Value *Ptr =
14952 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14953 LoadInst *Load = Builder.CreateAlignedLoad(
14954 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14955 Load->setVolatile(true);
14956 return Load;
14957 }
14958 case X86::BI__builtin_ia32_paddsb512:
14959 case X86::BI__builtin_ia32_paddsw512:
14960 case X86::BI__builtin_ia32_paddsb256:
14961 case X86::BI__builtin_ia32_paddsw256:
14962 case X86::BI__builtin_ia32_paddsb128:
14963 case X86::BI__builtin_ia32_paddsw128:
14964 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14965 case X86::BI__builtin_ia32_paddusb512:
14966 case X86::BI__builtin_ia32_paddusw512:
14967 case X86::BI__builtin_ia32_paddusb256:
14968 case X86::BI__builtin_ia32_paddusw256:
14969 case X86::BI__builtin_ia32_paddusb128:
14970 case X86::BI__builtin_ia32_paddusw128:
14971 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14972 case X86::BI__builtin_ia32_psubsb512:
14973 case X86::BI__builtin_ia32_psubsw512:
14974 case X86::BI__builtin_ia32_psubsb256:
14975 case X86::BI__builtin_ia32_psubsw256:
14976 case X86::BI__builtin_ia32_psubsb128:
14977 case X86::BI__builtin_ia32_psubsw128:
14978 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14979 case X86::BI__builtin_ia32_psubusb512:
14980 case X86::BI__builtin_ia32_psubusw512:
14981 case X86::BI__builtin_ia32_psubusb256:
14982 case X86::BI__builtin_ia32_psubusw256:
14983 case X86::BI__builtin_ia32_psubusb128:
14984 case X86::BI__builtin_ia32_psubusw128:
14985 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
14986 case X86::BI__builtin_ia32_encodekey128_u32: {
14987 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
14988
14989 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
14990
14991 for (int i = 0; i < 3; ++i) {
14992 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14993 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
14994 Ptr = Builder.CreateBitCast(
14995 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14996 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14997 }
14998
14999 return Builder.CreateExtractValue(Call, 0);
15000 }
15001 case X86::BI__builtin_ia32_encodekey256_u32: {
15002 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
15003
15004 Value *Call =
15005 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
15006
15007 for (int i = 0; i < 4; ++i) {
15008 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15009 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
15010 Ptr = Builder.CreateBitCast(
15011 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
15012 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
15013 }
15014
15015 return Builder.CreateExtractValue(Call, 0);
15016 }
15017 case X86::BI__builtin_ia32_aesenc128kl_u8:
15018 case X86::BI__builtin_ia32_aesdec128kl_u8:
15019 case X86::BI__builtin_ia32_aesenc256kl_u8:
15020 case X86::BI__builtin_ia32_aesdec256kl_u8: {
15021 Intrinsic::ID IID;
15022 StringRef BlockName;
15023 switch (BuiltinID) {
15024 default:
15025 llvm_unreachable("Unexpected builtin");
15026 case X86::BI__builtin_ia32_aesenc128kl_u8:
15027 IID = Intrinsic::x86_aesenc128kl;
15028 BlockName = "aesenc128kl";
15029 break;
15030 case X86::BI__builtin_ia32_aesdec128kl_u8:
15031 IID = Intrinsic::x86_aesdec128kl;
15032 BlockName = "aesdec128kl";
15033 break;
15034 case X86::BI__builtin_ia32_aesenc256kl_u8:
15035 IID = Intrinsic::x86_aesenc256kl;
15036 BlockName = "aesenc256kl";
15037 break;
15038 case X86::BI__builtin_ia32_aesdec256kl_u8:
15039 IID = Intrinsic::x86_aesdec256kl;
15040 BlockName = "aesdec256kl";
15041 break;
15042 }
15043
15044 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
15045
15046 BasicBlock *NoError =
15047 createBasicBlock(BlockName + "_no_error", this->CurFn);
15048 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
15049 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
15050
15051 Value *Ret = Builder.CreateExtractValue(Call, 0);
15052 Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
15053 Value *Out = Builder.CreateExtractValue(Call, 1);
15054 Builder.CreateCondBr(Succ, NoError, Error);
15055
15056 Builder.SetInsertPoint(NoError);
15057 Builder.CreateDefaultAlignedStore(Out, Ops[0]);
15058 Builder.CreateBr(End);
15059
15060 Builder.SetInsertPoint(Error);
15061 Constant *Zero = llvm::Constant::getNullValue(Out->getType());
15062 Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
15063 Builder.CreateBr(End);
15064
15065 Builder.SetInsertPoint(End);
15066 return Builder.CreateExtractValue(Call, 0);
15067 }
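Note: the basic blocks created above implement, per key-locker call, roughly the following pattern; this is a sketch of the intent (success_flag corresponds to extractvalue 0, data to extractvalue 1), not the emitted IR.

  #include <immintrin.h>

  // On success store the hardware result, otherwise store zeroes; either way
  // the status byte is returned to the caller.
  unsigned char aes_kl_pattern(bool success_flag, __m128i data, __m128i *out) {
    *out = success_flag ? data : _mm_setzero_si128();
    return success_flag;
  }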
15068 case X86::BI__builtin_ia32_aesencwide128kl_u8:
15069 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
15070 case X86::BI__builtin_ia32_aesencwide256kl_u8:
15071 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
15072 Intrinsic::ID IID;
15073 StringRef BlockName;
15074 switch (BuiltinID) {
15075 case X86::BI__builtin_ia32_aesencwide128kl_u8:
15076 IID = Intrinsic::x86_aesencwide128kl;
15077 BlockName = "aesencwide128kl";
15078 break;
15079 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
15080 IID = Intrinsic::x86_aesdecwide128kl;
15081 BlockName = "aesdecwide128kl";
15082 break;
15083 case X86::BI__builtin_ia32_aesencwide256kl_u8:
15084 IID = Intrinsic::x86_aesencwide256kl;
15085 BlockName = "aesencwide256kl";
15086 break;
15087 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
15088 IID = Intrinsic::x86_aesdecwide256kl;
15089 BlockName = "aesdecwide256kl";
15090 break;
15091 }
15092
15093 llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
15094 Value *InOps[9];
15095 InOps[0] = Ops[2];
15096 for (int i = 0; i != 8; ++i) {
15097 Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
15098 InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
15099 }
15100
15101 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
15102
15103 BasicBlock *NoError =
15104 createBasicBlock(BlockName + "_no_error", this->CurFn);
15105 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
15106 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
15107
15108 Value *Ret = Builder.CreateExtractValue(Call, 0);
15109 Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
15110 Builder.CreateCondBr(Succ, NoError, Error);
15111
15112 Builder.SetInsertPoint(NoError);
15113 for (int i = 0; i != 8; ++i) {
15114 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15115 Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
15116 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
15117 }
15118 Builder.CreateBr(End);
15119
15120 Builder.SetInsertPoint(Error);
15121 for (int i = 0; i != 8; ++i) {
15122 Value *Out = Builder.CreateExtractValue(Call, i + 1);
15123 Constant *Zero = llvm::Constant::getNullValue(Out->getType());
15124 Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
15125 Builder.CreateAlignedStore(Zero, Ptr, Align(16));
15126 }
15127 Builder.CreateBr(End);
15128
15129 Builder.SetInsertPoint(End);
15130 return Builder.CreateExtractValue(Call, 0);
15131 }
15132 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
15133 IsConjFMA = true;
15134 LLVM_FALLTHROUGH;
15135 case X86::BI__builtin_ia32_vfmaddcph512_mask: {
15136 Intrinsic::ID IID = IsConjFMA
15137 ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
15138 : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512;
15139 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15140 return EmitX86Select(*this, Ops[3], Call, Ops[0]);
15141 }
15142 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
15143 IsConjFMA = true;
15144 LLVM_FALLTHROUGH;
15145 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: {
15146 Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
15147 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
15148 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15149 Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
15150 return EmitX86Select(*this, And, Call, Ops[0]);
15151 }
15152 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
15153 IsConjFMA = true;
15154 LLVM_FALLTHROUGH;
15155 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: {
15156 Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
15157 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
15158 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15159 static constexpr int Mask[] = {0, 5, 6, 7};
15160 return Builder.CreateShuffleVector(Call, Ops[2], Mask);
15161 }
15162 }
15163}
15164
15165Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
15166 const CallExpr *E) {
15167 SmallVector<Value*, 4> Ops;
15168
15169 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
1
Assuming 'i' is equal to 'e'
2
Loop condition is false. Execution continues on line 15172
15170 Ops.push_back(EmitScalarExpr(E->getArg(i)));
15171
15172 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15173
15174 switch (BuiltinID) {
3
Control jumps to 'case BI__builtin_vsx_strmb:' at line 15369
15175 default: return nullptr;
15176
15177 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
15178 // call __builtin_readcyclecounter.
15179 case PPC::BI__builtin_ppc_get_timebase:
15180 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
15181
15182 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
15183 case PPC::BI__builtin_altivec_lvx:
15184 case PPC::BI__builtin_altivec_lvxl:
15185 case PPC::BI__builtin_altivec_lvebx:
15186 case PPC::BI__builtin_altivec_lvehx:
15187 case PPC::BI__builtin_altivec_lvewx:
15188 case PPC::BI__builtin_altivec_lvsl:
15189 case PPC::BI__builtin_altivec_lvsr:
15190 case PPC::BI__builtin_vsx_lxvd2x:
15191 case PPC::BI__builtin_vsx_lxvw4x:
15192 case PPC::BI__builtin_vsx_lxvd2x_be:
15193 case PPC::BI__builtin_vsx_lxvw4x_be:
15194 case PPC::BI__builtin_vsx_lxvl:
15195 case PPC::BI__builtin_vsx_lxvll:
15196 {
15197 if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
15198 BuiltinID == PPC::BI__builtin_vsx_lxvll) {
15199 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
15200 } else {
15201 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
15202 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
15203 Ops.pop_back();
15204 }
15205
15206 switch (BuiltinID) {
15207 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
15208 case PPC::BI__builtin_altivec_lvx:
15209 ID = Intrinsic::ppc_altivec_lvx;
15210 break;
15211 case PPC::BI__builtin_altivec_lvxl:
15212 ID = Intrinsic::ppc_altivec_lvxl;
15213 break;
15214 case PPC::BI__builtin_altivec_lvebx:
15215 ID = Intrinsic::ppc_altivec_lvebx;
15216 break;
15217 case PPC::BI__builtin_altivec_lvehx:
15218 ID = Intrinsic::ppc_altivec_lvehx;
15219 break;
15220 case PPC::BI__builtin_altivec_lvewx:
15221 ID = Intrinsic::ppc_altivec_lvewx;
15222 break;
15223 case PPC::BI__builtin_altivec_lvsl:
15224 ID = Intrinsic::ppc_altivec_lvsl;
15225 break;
15226 case PPC::BI__builtin_altivec_lvsr:
15227 ID = Intrinsic::ppc_altivec_lvsr;
15228 break;
15229 case PPC::BI__builtin_vsx_lxvd2x:
15230 ID = Intrinsic::ppc_vsx_lxvd2x;
15231 break;
15232 case PPC::BI__builtin_vsx_lxvw4x:
15233 ID = Intrinsic::ppc_vsx_lxvw4x;
15234 break;
15235 case PPC::BI__builtin_vsx_lxvd2x_be:
15236 ID = Intrinsic::ppc_vsx_lxvd2x_be;
15237 break;
15238 case PPC::BI__builtin_vsx_lxvw4x_be:
15239 ID = Intrinsic::ppc_vsx_lxvw4x_be;
15240 break;
15241 case PPC::BI__builtin_vsx_lxvl:
15242 ID = Intrinsic::ppc_vsx_lxvl;
15243 break;
15244 case PPC::BI__builtin_vsx_lxvll:
15245 ID = Intrinsic::ppc_vsx_lxvll;
15246 break;
15247 }
15248 llvm::Function *F = CGM.getIntrinsic(ID);
15249 return Builder.CreateCall(F, Ops, "");
15250 }
15251
15252 // vec_st, vec_xst_be
15253 case PPC::BI__builtin_altivec_stvx:
15254 case PPC::BI__builtin_altivec_stvxl:
15255 case PPC::BI__builtin_altivec_stvebx:
15256 case PPC::BI__builtin_altivec_stvehx:
15257 case PPC::BI__builtin_altivec_stvewx:
15258 case PPC::BI__builtin_vsx_stxvd2x:
15259 case PPC::BI__builtin_vsx_stxvw4x:
15260 case PPC::BI__builtin_vsx_stxvd2x_be:
15261 case PPC::BI__builtin_vsx_stxvw4x_be:
15262 case PPC::BI__builtin_vsx_stxvl:
15263 case PPC::BI__builtin_vsx_stxvll:
15264 {
15265 if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
15266 BuiltinID == PPC::BI__builtin_vsx_stxvll) {
15267 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
15268 } else {
15269 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
15270 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
15271 Ops.pop_back();
15272 }
15273
15274 switch (BuiltinID) {
15275 default: llvm_unreachable("Unsupported st intrinsic!");
15276 case PPC::BI__builtin_altivec_stvx:
15277 ID = Intrinsic::ppc_altivec_stvx;
15278 break;
15279 case PPC::BI__builtin_altivec_stvxl:
15280 ID = Intrinsic::ppc_altivec_stvxl;
15281 break;
15282 case PPC::BI__builtin_altivec_stvebx:
15283 ID = Intrinsic::ppc_altivec_stvebx;
15284 break;
15285 case PPC::BI__builtin_altivec_stvehx:
15286 ID = Intrinsic::ppc_altivec_stvehx;
15287 break;
15288 case PPC::BI__builtin_altivec_stvewx:
15289 ID = Intrinsic::ppc_altivec_stvewx;
15290 break;
15291 case PPC::BI__builtin_vsx_stxvd2x:
15292 ID = Intrinsic::ppc_vsx_stxvd2x;
15293 break;
15294 case PPC::BI__builtin_vsx_stxvw4x:
15295 ID = Intrinsic::ppc_vsx_stxvw4x;
15296 break;
15297 case PPC::BI__builtin_vsx_stxvd2x_be:
15298 ID = Intrinsic::ppc_vsx_stxvd2x_be;
15299 break;
15300 case PPC::BI__builtin_vsx_stxvw4x_be:
15301 ID = Intrinsic::ppc_vsx_stxvw4x_be;
15302 break;
15303 case PPC::BI__builtin_vsx_stxvl:
15304 ID = Intrinsic::ppc_vsx_stxvl;
15305 break;
15306 case PPC::BI__builtin_vsx_stxvll:
15307 ID = Intrinsic::ppc_vsx_stxvll;
15308 break;
15309 }
15310 llvm::Function *F = CGM.getIntrinsic(ID);
15311 return Builder.CreateCall(F, Ops, "");
15312 }
15313 case PPC::BI__builtin_vsx_ldrmb: {
15314 // This essentially boils down to performing an unaligned VMX load sequence
15315 // that avoids crossing a page boundary, and then shuffling the elements
15316 // into the right side of the vector register.
15317 int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
15318 llvm::Type *ResTy = ConvertType(E->getType());
15319 bool IsLE = getTarget().isLittleEndian();
15320
15321 // If the user wants the entire vector, just load the entire vector.
15322 if (NumBytes == 16) {
15323 Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
15324 Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
15325 if (!IsLE)
15326 return LD;
15327
15328 // Reverse the bytes on LE.
15329 SmallVector<int, 16> RevMask;
15330 for (int Idx = 0; Idx < 16; Idx++)
15331 RevMask.push_back(15 - Idx);
15332 return Builder.CreateShuffleVector(LD, LD, RevMask);
15333 }
15334
15335 llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
15336 llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
15337 : Intrinsic::ppc_altivec_lvsl);
15338 llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
15339 Value *HiMem = Builder.CreateGEP(
15340 Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1));
15341 Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo");
15342 Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
15343 Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1");
15344
15345 Ops.clear();
15346 Ops.push_back(IsLE ? HiLd : LoLd);
15347 Ops.push_back(IsLE ? LoLd : HiLd);
15348 Ops.push_back(Mask1);
15349 Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1");
15350 Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
15351
15352 if (IsLE) {
15353 SmallVector<int, 16> Consts;
15354 for (int Idx = 0; Idx < 16; Idx++) {
15355 int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
15356 : 16 - (NumBytes - Idx);
15357 Consts.push_back(Val);
15358 }
15359 return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
15360 Zero, Consts);
15361 }
15362 SmallVector<Constant *, 16> Consts;
15363 for (int Idx = 0; Idx < 16; Idx++)
15364 Consts.push_back(Builder.getInt8(NumBytes + Idx));
15365 Value *Mask2 = ConstantVector::get(Consts);
15366 return Builder.CreateBitCast(
15367 Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
15368 }
15369 case PPC::BI__builtin_vsx_strmb: {
15370 int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
4
The object is a 'ConstantInt'
15371 bool IsLE = getTarget().isLittleEndian();
15372 auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
15373 // When storing the whole vector, simply store it on BE; on LE, reverse
15374 // the bytes first and then store.
15375 if (Width == 16) {
15376 Value *BC =
15377 Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo());
15378 Value *StVec = Ops[2];
15379 if (IsLE) {
15380 SmallVector<int, 16> RevMask;
15381 for (int Idx = 0; Idx < 16; Idx++)
15382 RevMask.push_back(15 - Idx);
15383 StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
15384 }
15385 return Builder.CreateStore(StVec,
15386 Address(BC, CharUnits::fromQuantity(1)));
15387 }
15388 auto *ConvTy = Int64Ty;
15389 unsigned NumElts = 0;
15390 switch (Width) {
15391 default:
15392 llvm_unreachable("width for stores must be a power of 2");
15393 case 8:
15394 ConvTy = Int64Ty;
15395 NumElts = 2;
15396 break;
15397 case 4:
15398 ConvTy = Int32Ty;
15399 NumElts = 4;
15400 break;
15401 case 2:
15402 ConvTy = Int16Ty;
15403 NumElts = 8;
15404 break;
15405 case 1:
15406 ConvTy = Int8Ty;
15407 NumElts = 16;
15408 break;
15409 }
15410 Value *Vec = Builder.CreateBitCast(
15411 Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts));
15412 Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0],
15413 ConstantInt::get(Int64Ty, Offset));
15414 Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
15415 Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
15416 if (IsLE && Width > 1) {
15417 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
15418 Elt = Builder.CreateCall(F, Elt);
15419 }
15420 return Builder.CreateStore(Elt,
15421 Address(PtrBC, CharUnits::fromQuantity(1)));
15422 };
15423 unsigned Stored = 0;
15424 unsigned RemainingBytes = NumBytes;
15425 Value *Result;
5
'Result' declared without an initial value
15426 if (NumBytes == 16)
6
Assuming 'NumBytes' is not equal to 16
7
Taking false branch
15427 return StoreSubVec(16, 0, 0);
15428 if (NumBytes >= 8) {
8
Assuming 'NumBytes' is < 8
9
Taking false branch
15429 Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
15430 RemainingBytes -= 8;
15431 Stored += 8;
15432 }
15433 if (RemainingBytes >= 4) {
10
Assuming 'RemainingBytes' is < 4
11
Taking false branch
15434 Result = StoreSubVec(4, NumBytes - Stored - 4,
15435 IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
15436 RemainingBytes -= 4;
15437 Stored += 4;
15438 }
15439 if (RemainingBytes >= 2) {
12
Assuming 'RemainingBytes' is < 2
13
Taking false branch
15440 Result = StoreSubVec(2, NumBytes - Stored - 2,
15441 IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
15442 RemainingBytes -= 2;
15443 Stored += 2;
15444 }
15445 if (RemainingBytes)
14
Assuming 'RemainingBytes' is 0
15
Taking false branch
15446 Result =
15447 StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
15448 return Result;
16
Undefined or garbage value returned to caller
15449 }
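Note: this is the defect summarized at the top of the report. When NumBytes is 0 (and therefore neither 16 nor large enough for any of the conditional stores), every StoreSubVec call is skipped and the uninitialized Result is returned at line 15448. One possible way to make the invariant explicit and avoid returning garbage, assuming a zero-length __builtin_vsx_strmb is not meant to be supported here, is sketched below (a fragment, not the upstream fix; the intervening StoreSubVec logic is unchanged and elided):

  unsigned Stored = 0;
  unsigned RemainingBytes = NumBytes;
  Value *Result = nullptr;          // initialize so the failure path is well-defined
  if (NumBytes == 16)
    return StoreSubVec(16, 0, 0);
  // ... existing 8/4/2/1-byte StoreSubVec calls ...
  assert(Result && "__builtin_vsx_strmb must store at least one byte");
  return Result;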
15450 // Square root
15451 case PPC::BI__builtin_vsx_xvsqrtsp:
15452 case PPC::BI__builtin_vsx_xvsqrtdp: {
15453 llvm::Type *ResultType = ConvertType(E->getType());
15454 Value *X = EmitScalarExpr(E->getArg(0));
15455 if (Builder.getIsFPConstrained()) {
15456 llvm::Function *F = CGM.getIntrinsic(
15457 Intrinsic::experimental_constrained_sqrt, ResultType);
15458 return Builder.CreateConstrainedFPCall(F, X);
15459 } else {
15460 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15461 return Builder.CreateCall(F, X);
15462 }
15463 }
15464 // Count leading zeros
15465 case PPC::BI__builtin_altivec_vclzb:
15466 case PPC::BI__builtin_altivec_vclzh:
15467 case PPC::BI__builtin_altivec_vclzw:
15468 case PPC::BI__builtin_altivec_vclzd: {
15469 llvm::Type *ResultType = ConvertType(E->getType());
15470 Value *X = EmitScalarExpr(E->getArg(0));
15471 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15472 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
15473 return Builder.CreateCall(F, {X, Undef});
15474 }
15475 case PPC::BI__builtin_altivec_vctzb:
15476 case PPC::BI__builtin_altivec_vctzh:
15477 case PPC::BI__builtin_altivec_vctzw:
15478 case PPC::BI__builtin_altivec_vctzd: {
15479 llvm::Type *ResultType = ConvertType(E->getType());
15480 Value *X = EmitScalarExpr(E->getArg(0));
15481 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15482 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15483 return Builder.CreateCall(F, {X, Undef});
15484 }
15485 case PPC::BI__builtin_altivec_vec_replace_elt:
15486 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
15487 // The third argument of vec_replace_elt and vec_replace_unaligned must
15488 // be a compile time constant and will be emitted either to the vinsw
15489 // or vinsd instruction.
15490 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15491 assert(ArgCI &&
15492        "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
15493 llvm::Type *ResultType = ConvertType(E->getType());
15494 llvm::Function *F = nullptr;
15495 Value *Call = nullptr;
15496 int64_t ConstArg = ArgCI->getSExtValue();
15497 unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
15498 bool Is32Bit = false;
15499 assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
15500 // The input to vec_replace_elt is an element index, not a byte index.
15501 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
15502 ConstArg *= ArgWidth / 8;
15503 if (ArgWidth == 32) {
15504 Is32Bit = true;
15505 // When the second argument is 32 bits, it can either be an integer or
15506 // a float. The vinsw intrinsic is used in this case.
15507 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
15509 // Fix the constant according to endianness.
15509 if (getTarget().isLittleEndian())
15510 ConstArg = 12 - ConstArg;
15511 } else {
15512 // When the second argument is 64 bits, it can either be a long long or
15513 // a double. The vinsd intrinsic is used in this case.
15514 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
15515 // Fix the constant for little endian.
15516 if (getTarget().isLittleEndian())
15517 ConstArg = 8 - ConstArg;
15518 }
15519 Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
15520 // Depending on ArgWidth, the input vector could be a float or a double.
15521 // If the input vector is a float type, bitcast the inputs to integers. Or,
15522 // if the input vector is a double, bitcast the inputs to 64-bit integers.
15523 if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
15524 Ops[0] = Builder.CreateBitCast(
15525 Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
15526 : llvm::FixedVectorType::get(Int64Ty, 2));
15527 Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
15528 }
15529 // Emit the call to vinsw or vinsd.
15530 Call = Builder.CreateCall(F, Ops);
15531 // Depending on the builtin, bitcast to the appropriate result type.
15532 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
15533 !Ops[1]->getType()->isIntegerTy())
15534 return Builder.CreateBitCast(Call, ResultType);
15535 else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
15536 Ops[1]->getType()->isIntegerTy())
15537 return Call;
15538 else
15539 return Builder.CreateBitCast(Call,
15540 llvm::FixedVectorType::get(Int8Ty, 16));
15541 }
15542 case PPC::BI__builtin_altivec_vpopcntb:
15543 case PPC::BI__builtin_altivec_vpopcnth:
15544 case PPC::BI__builtin_altivec_vpopcntw:
15545 case PPC::BI__builtin_altivec_vpopcntd: {
15546 llvm::Type *ResultType = ConvertType(E->getType());
15547 Value *X = EmitScalarExpr(E->getArg(0));
15548 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15549 return Builder.CreateCall(F, X);
15550 }
15551 case PPC::BI__builtin_altivec_vadduqm:
15552 case PPC::BI__builtin_altivec_vsubuqm: {
15553 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
15554 Ops[0] =
15555 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
15556 Ops[1] =
15557 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
15558 if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
15559 return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
15560 else
15561 return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
15562 }
15563 // Rotate and insert under mask operation.
15564 // __rldimi(rs, is, shift, mask)
15565 // (rotl64(rs, shift) & mask) | (is & ~mask)
15566 // __rlwimi(rs, is, shift, mask)
15567 // (rotl(rs, shift) & mask) | (is & ~mask)
15568 case PPC::BI__builtin_ppc_rldimi:
15569 case PPC::BI__builtin_ppc_rlwimi: {
15570 llvm::Type *Ty = Ops[0]->getType();
15571 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
15572 if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
15573 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
15574 Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]});
15575 Value *X = Builder.CreateAnd(Shift, Ops[3]);
15576 Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3]));
15577 return Builder.CreateOr(X, Y);
15578 }
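Note: a scalar reference for the rotate-and-insert-under-mask expansion above, shown for the 64-bit __rldimi form; rldimi_ref is an illustrative helper, not the emitted IR.

  #include <cstdint>

  uint64_t rldimi_ref(uint64_t rs, uint64_t is, unsigned shift, uint64_t mask) {
    // rotl64(rs, shift), written to avoid an undefined shift by 64
    uint64_t rot = (rs << (shift & 63)) | (rs >> ((64 - shift) & 63));
    return (rot & mask) | (is & ~mask);
  }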
15579 // Rotate and insert under mask operation.
15580 // __rlwnm(rs, shift, mask)
15581 // rotl(rs, shift) & mask
15582 case PPC::BI__builtin_ppc_rlwnm: {
15583 llvm::Type *Ty = Ops[0]->getType();
15584 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
15585 Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]});
15586 return Builder.CreateAnd(Shift, Ops[2]);
15587 }
15588 case PPC::BI__builtin_ppc_poppar4:
15589 case PPC::BI__builtin_ppc_poppar8: {
15590 llvm::Type *ArgType = Ops[0]->getType();
15591 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
15592 Value *Tmp = Builder.CreateCall(F, Ops[0]);
15593
15594 llvm::Type *ResultType = ConvertType(E->getType());
15595 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
15596 if (Result->getType() != ResultType)
15597 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
15598 "cast");
15599 return Result;
15600 }
15601 case PPC::BI__builtin_ppc_cmpb: {
15602 if (getTarget().getTriple().isPPC64()) {
15603 Function *F =
15604 CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
15605 return Builder.CreateCall(F, Ops, "cmpb");
15606 }
15607 // For 32 bit, emit the code as below:
15608 // %conv = trunc i64 %a to i32
15609 // %conv1 = trunc i64 %b to i32
15610 // %shr = lshr i64 %a, 32
15611 // %conv2 = trunc i64 %shr to i32
15612 // %shr3 = lshr i64 %b, 32
15613 // %conv4 = trunc i64 %shr3 to i32
15614 // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
15615 // %conv5 = zext i32 %0 to i64
15616 // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
15617 // %conv614 = zext i32 %1 to i64
15618 // %shl = shl nuw i64 %conv614, 32
15619 // %or = or i64 %shl, %conv5
15620 // ret i64 %or
15621 Function *F =
15622 CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
15623 Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty);
15624 Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty);
15625 Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
15626 Value *ArgOneHi =
15627 Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty);
15628 Value *ArgTwoHi =
15629 Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty);
15630 Value *ResLo = Builder.CreateZExt(
15631 Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
15632 Value *ResHiShift = Builder.CreateZExt(
15633 Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
15634 Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
15635 return Builder.CreateOr(ResLo, ResHi);
15636 }
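Note: for context, the cmpb operation emitted here works byte-wise: each result byte is 0xFF where the corresponding bytes of the two inputs are equal and 0x00 otherwise (semantics taken from the PowerPC cmpb definition; cmpb_ref is an illustrative helper).

  #include <cstdint>

  uint64_t cmpb_ref(uint64_t a, uint64_t b) {
    uint64_t r = 0;
    for (int i = 0; i < 8; ++i) {
      uint64_t mask = 0xFFull << (8 * i);
      if ((a & mask) == (b & mask))
        r |= mask;
    }
    return r;
  }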
15637 // Copy sign
15638 case PPC::BI__builtin_vsx_xvcpsgnsp:
15639 case PPC::BI__builtin_vsx_xvcpsgndp: {
15640 llvm::Type *ResultType = ConvertType(E->getType());
15641 Value *X = EmitScalarExpr(E->getArg(0));
15642 Value *Y = EmitScalarExpr(E->getArg(1));
15643 ID = Intrinsic::copysign;
15644 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
15645 return Builder.CreateCall(F, {X, Y});
15646 }
15647 // Rounding/truncation
15648 case PPC::BI__builtin_vsx_xvrspip:
15649 case PPC::BI__builtin_vsx_xvrdpip:
15650 case PPC::BI__builtin_vsx_xvrdpim:
15651 case PPC::BI__builtin_vsx_xvrspim:
15652 case PPC::BI__builtin_vsx_xvrdpi:
15653 case PPC::BI__builtin_vsx_xvrspi:
15654 case PPC::BI__builtin_vsx_xvrdpic:
15655 case PPC::BI__builtin_vsx_xvrspic:
15656 case PPC::BI__builtin_vsx_xvrdpiz:
15657 case PPC::BI__builtin_vsx_xvrspiz: {
15658 llvm::Type *ResultType = ConvertType(E->getType());
15659 Value *X = EmitScalarExpr(E->getArg(0));
15660 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
15661 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
15662 ID = Builder.getIsFPConstrained()
15663 ? Intrinsic::experimental_constrained_floor
15664 : Intrinsic::floor;
15665 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
15666 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
15667 ID = Builder.getIsFPConstrained()
15668 ? Intrinsic::experimental_constrained_round
15669 : Intrinsic::round;
15670 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
15671 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
15672 ID = Builder.getIsFPConstrained()
15673 ? Intrinsic::experimental_constrained_rint
15674 : Intrinsic::rint;
15675 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
15676 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
15677 ID = Builder.getIsFPConstrained()
15678 ? Intrinsic::experimental_constrained_ceil
15679 : Intrinsic::ceil;
15680 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
15681 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
15682 ID = Builder.getIsFPConstrained()
15683 ? Intrinsic::experimental_constrained_trunc
15684 : Intrinsic::trunc;
15685 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
15686 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
15687 : Builder.CreateCall(F, X);
15688 }
15689
15690 // Absolute value
15691 case PPC::BI__builtin_vsx_xvabsdp:
15692 case PPC::BI__builtin_vsx_xvabssp: {
15693 llvm::Type *ResultType = ConvertType(E->getType());
15694 Value *X = EmitScalarExpr(E->getArg(0));
15695 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15696 return Builder.CreateCall(F, X);
15697 }
15698
15699 // Fastmath by default
15700 case PPC::BI__builtin_ppc_recipdivf:
15701 case PPC::BI__builtin_ppc_recipdivd:
15702 case PPC::BI__builtin_ppc_rsqrtf:
15703 case PPC::BI__builtin_ppc_rsqrtd: {
15704 FastMathFlags FMF = Builder.getFastMathFlags();
15705 Builder.getFastMathFlags().setFast();
15706 llvm::Type *ResultType = ConvertType(E->getType());
15707 Value *X = EmitScalarExpr(E->getArg(0));
15708
15709 if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
15710 BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
15711 Value *Y = EmitScalarExpr(E->getArg(1));
15712 Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
15713 Builder.getFastMathFlags() &= (FMF);
15714 return FDiv;
15715 }
15716 auto *One = ConstantFP::get(ResultType, 1.0);
15717 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15718 Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
15719 Builder.getFastMathFlags() &= (FMF);
15720 return FDiv;
15721 }
15722 case PPC::BI__builtin_ppc_alignx: {
15723 ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]);
15724 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
15725 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
15726 llvm::Value::MaximumAlignment);
15727
15728 emitAlignmentAssumption(Ops[1], E->getArg(1),
15729 /*The expr loc is sufficient.*/ SourceLocation(),
15730 AlignmentCI, nullptr);
15731 return Ops[1];
15732 }
15733 case PPC::BI__builtin_ppc_rdlam: {
15734 llvm::Type *Ty = Ops[0]->getType();
15735 Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false);
15736 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
15737 Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt});
15738 return Builder.CreateAnd(Rotate, Ops[2]);
15739 }
15740 case PPC::BI__builtin_ppc_load2r: {
15741 Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
15742 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
15743 Value *LoadIntrinsic = Builder.CreateCall(F, Ops);
15744 return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
15745 }
15746 // FMA variations
15747 case PPC::BI__builtin_vsx_xvmaddadp:
15748 case PPC::BI__builtin_vsx_xvmaddasp:
15749 case PPC::BI__builtin_vsx_xvnmaddadp:
15750 case PPC::BI__builtin_vsx_xvnmaddasp:
15751 case PPC::BI__builtin_vsx_xvmsubadp:
15752 case PPC::BI__builtin_vsx_xvmsubasp:
15753 case PPC::BI__builtin_vsx_xvnmsubadp:
15754 case PPC::BI__builtin_vsx_xvnmsubasp: {
15755 llvm::Type *ResultType = ConvertType(E->getType());
15756 Value *X = EmitScalarExpr(E->getArg(0));
15757 Value *Y = EmitScalarExpr(E->getArg(1));
15758 Value *Z = EmitScalarExpr(E->getArg(2));
15759 llvm::Function *F;
15760 if (Builder.getIsFPConstrained())
15761 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15762 else
15763 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15764 switch (BuiltinID) {
15765 case PPC::BI__builtin_vsx_xvmaddadp:
15766 case PPC::BI__builtin_vsx_xvmaddasp:
15767 if (Builder.getIsFPConstrained())
15768 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15769 else
15770 return Builder.CreateCall(F, {X, Y, Z});
15771 case PPC::BI__builtin_vsx_xvnmaddadp:
15772 case PPC::BI__builtin_vsx_xvnmaddasp:
15773 if (Builder.getIsFPConstrained())
15774 return Builder.CreateFNeg(
15775 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15776 else
15777 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15778 case PPC::BI__builtin_vsx_xvmsubadp:
15779 case PPC::BI__builtin_vsx_xvmsubasp:
15780 if (Builder.getIsFPConstrained())
15781 return Builder.CreateConstrainedFPCall(
15782 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15783 else
15784 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15785 case PPC::BI__builtin_vsx_xvnmsubadp:
15786 case PPC::BI__builtin_vsx_xvnmsubasp:
15787 if (Builder.getIsFPConstrained())
15788 return Builder.CreateFNeg(
15789 Builder.CreateConstrainedFPCall(
15790 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
15791 "neg");
15792 else
15793 return Builder.CreateFNeg(
15794 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
15795 "neg");
15796 }
15797 llvm_unreachable("Unknown FMA operation");
15798 return nullptr; // Suppress no-return warning
15799 }
15800
15801 case PPC::BI__builtin_vsx_insertword: {
15802 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
15803
15804 // Third argument is a compile time constant int. It must be clamped
15805 // to the range [0, 12].
15806 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15807 assert(ArgCI &&
15808        "Third arg to xxinsertw intrinsic must be constant integer");
15809 const int64_t MaxIndex = 12;
15810 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
15811
15812 // The builtin semantics don't exactly match the xxinsertw instruction's
15813 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
15814 // word from the first argument, and inserts it in the second argument. The
15815 // instruction extracts the word from its second input register and inserts
15816 // it into its first input register, so swap the first and second arguments.
15817 std::swap(Ops[0], Ops[1]);
15818
15819 // Need to cast the second argument from a vector of unsigned int to a
15820 // vector of long long.
15821 Ops[1] =
15822 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
15823
15824 if (getTarget().isLittleEndian()) {
15825 // Reverse the double words in the vector we will extract from.
15826 Ops[0] =
15827 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15828 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
15829
15830 // Reverse the index.
15831 Index = MaxIndex - Index;
15832 }
15833
15834 // Intrinsic expects the first arg to be a vector of int.
15835 Ops[0] =
15836 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
15837 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
15838 return Builder.CreateCall(F, Ops);
15839 }
15840
15841 case PPC::BI__builtin_vsx_extractuword: {
15842 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
15843
15844 // Intrinsic expects the first argument to be a vector of doublewords.
15845 Ops[0] =
15846 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15847
15848 // The second argument is a compile time constant int that needs to
15849 // be clamped to the range [0, 12].
15850 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
15851 assert(ArgCI &&
15852        "Second Arg to xxextractuw intrinsic must be a constant integer!");
15853 const int64_t MaxIndex = 12;
15854 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
15855
15856 if (getTarget().isLittleEndian()) {
15857 // Reverse the index.
15858 Index = MaxIndex - Index;
15859 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
15860
15861 // Emit the call, then reverse the double words of the results vector.
15862 Value *Call = Builder.CreateCall(F, Ops);
15863
15864 Value *ShuffleCall =
15865 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
15866 return ShuffleCall;
15867 } else {
15868 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
15869 return Builder.CreateCall(F, Ops);
15870 }
15871 }
15872
15873 case PPC::BI__builtin_vsx_xxpermdi: {
15874 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15875 assert(ArgCI && "Third arg must be constant integer!");
15876
15877 unsigned Index = ArgCI->getZExtValue();
15878 Ops[0] =
15879 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15880 Ops[1] =
15881 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
15882
15883 // Account for endianness by treating this as just a shuffle. So we use the
15884 // same indices for both LE and BE in order to produce expected results in
15885 // both cases.
15886 int ElemIdx0 = (Index & 2) >> 1;
15887 int ElemIdx1 = 2 + (Index & 1);
15888
15889 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
15890 Value *ShuffleCall =
15891 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
15892 QualType BIRetType = E->getType();
15893 auto RetTy = ConvertType(BIRetType);
15894 return Builder.CreateBitCast(ShuffleCall, RetTy);
15895 }
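  // Worked example (illustrative, not part of the original source): for
  // Index = 3, ElemIdx0 = (3 & 2) >> 1 = 1 and ElemIdx1 = 2 + (3 & 1) = 3,
  // so the shuffle takes doubleword 1 of Ops[0] and doubleword 1 of Ops[1],
  // the same selection xxpermdi makes for DM = 3 on either endianness.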
15896
15897 case PPC::BI__builtin_vsx_xxsldwi: {
15898 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15899 assert(ArgCI && "Third argument must be a compile time constant");
15900 unsigned Index = ArgCI->getZExtValue() & 0x3;
15901 Ops[0] =
15902 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
15903 Ops[1] =
15904 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
15905
15906 // Create a shuffle mask
15907 int ElemIdx0;
15908 int ElemIdx1;
15909 int ElemIdx2;
15910 int ElemIdx3;
15911 if (getTarget().isLittleEndian()) {
15912 // Little endian element N comes from element 8+N-Index of the
15913 // concatenated wide vector (of course, using modulo arithmetic on
15914 // the total number of elements).
15915 ElemIdx0 = (8 - Index) % 8;
15916 ElemIdx1 = (9 - Index) % 8;
15917 ElemIdx2 = (10 - Index) % 8;
15918 ElemIdx3 = (11 - Index) % 8;
15919 } else {
15920 // Big endian ElemIdx<N> = Index + N
15921 ElemIdx0 = Index;
15922 ElemIdx1 = Index + 1;
15923 ElemIdx2 = Index + 2;
15924 ElemIdx3 = Index + 3;
15925 }
15926
15927 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
15928 Value *ShuffleCall =
15929 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
15930 QualType BIRetType = E->getType();
15931 auto RetTy = ConvertType(BIRetType);
15932 return Builder.CreateBitCast(ShuffleCall, RetTy);
15933 }
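  // Worked example (illustrative, not part of the original source): for
  // Index = 1 the big-endian mask is {1, 2, 3, 4}, while the little-endian
  // mask is {(8-1)%8, (9-1)%8, (10-1)%8, (11-1)%8} = {7, 0, 1, 2}, both
  // indexing into the eight-element concatenation of Ops[0] and Ops[1].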
15934
15935 case PPC::BI__builtin_pack_vector_int128: {
15936 bool isLittleEndian = getTarget().isLittleEndian();
15937 Value *UndefValue =
15938 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
15939 Value *Res = Builder.CreateInsertElement(
15940 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
15941 Res = Builder.CreateInsertElement(Res, Ops[1],
15942 (uint64_t)(isLittleEndian ? 0 : 1));
15943 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
15944 }
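  // Illustrative note (not part of the original source): on little-endian
  // targets Ops[0] is inserted at element 1 and Ops[1] at element 0, so the
  // packed __int128 has the same logical layout regardless of endianness.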
15945
15946 case PPC::BI__builtin_unpack_vector_int128: {
15947 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
15948 Value *Unpacked = Builder.CreateBitCast(
15949 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
15950
15951 if (getTarget().isLittleEndian())
15952 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
15953
15954 return Builder.CreateExtractElement(Unpacked, Index);
15955 }
15956
15957 case PPC::BI__builtin_ppc_sthcx: {
15958 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
15959 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
15960 Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty);
15961 return Builder.CreateCall(F, Ops);
15962 }
15963
15964 // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
15965 // Some of the MMA instructions accumulate their result into an existing
15966 // accumulator whereas the others generate a new accumulator. So we need
15967 // custom code generation to expand such a builtin call into a load of the
15968 // accumulator (if the corresponding instruction accumulates its result),
15969 // followed by the call to the intrinsic and a store of the result.
15970#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
15971 case PPC::BI__builtin_##Name:
15972#include "clang/Basic/BuiltinsPPC.def"
15973 {
15974 // The first argument of these builtins is a pointer used to store their
15975 // result. However, the LLVM intrinsics return their result in multiple
15976 // return values, so here we emit code extracting those values from the
15977 // intrinsic results and storing them through that pointer.
15978 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
15979 BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
15980 BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
15981 unsigned NumVecs = 2;
15982 auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
15983 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
15984 NumVecs = 4;
15985 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
15986 }
15987 llvm::Function *F = CGM.getIntrinsic(Intrinsic);
15988 Address Addr = EmitPointerWithAlignment(E->getArg(1));
15989 Value *Vec = Builder.CreateLoad(Addr);
15990 Value *Call = Builder.CreateCall(F, {Vec});
15991 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
15992 Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
15993 for (unsigned i=0; i<NumVecs; i++) {
15994 Value *Vec = Builder.CreateExtractValue(Call, i);
15995 llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
15996 Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
15997 Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
15998 }
15999 return Call;
16000 }
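      // Illustrative sketch (not part of the original source): for
      // __builtin_mma_disassemble_acc, NumVecs is 4 and the extracted
      // <16 x i8> values are stored at byte offsets 0, 16, 32 and 48 from
      // Ops[0], i.e. the accumulator is spilled as four consecutive quadwords.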
16001 if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
16002 BuiltinID == PPC::BI__builtin_mma_build_acc) {
16003 // Reverse the order of the operands for LE, so the
16004 // same builtin call can be used on both LE and BE
16005 // without the need for the programmer to swap operands.
16006 // The operands are reversed starting from the second argument;
16007 // the first operand is the pointer to the pair/accumulator
16008 // that is being built.
16009 if (getTarget().isLittleEndian())
16010 std::reverse(Ops.begin() + 1, Ops.end());
16011 }
16012 bool Accumulate;
16013 switch (BuiltinID) {
16014 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
16015 case PPC::BI__builtin_##Name: \
16016 ID = Intrinsic::ppc_##Intr; \
16017 Accumulate = Acc; \
16018 break;
16019 #include "clang/Basic/BuiltinsPPC.def"
16020 }
16021 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
16022 BuiltinID == PPC::BI__builtin_vsx_stxvp ||
16023 BuiltinID == PPC::BI__builtin_mma_lxvp ||
16024 BuiltinID == PPC::BI__builtin_mma_stxvp) {
16025 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
16026 BuiltinID == PPC::BI__builtin_mma_lxvp) {
16027 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
16028 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
16029 } else {
16030 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
16031 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
16032 }
16033 Ops.pop_back();
16034 llvm::Function *F = CGM.getIntrinsic(ID);
16035 return Builder.CreateCall(F, Ops, "");
16036 }
16037 SmallVector<Value*, 4> CallOps;
16038 if (Accumulate) {
16039 Address Addr = EmitPointerWithAlignment(E->getArg(0));
16040 Value *Acc = Builder.CreateLoad(Addr);
16041 CallOps.push_back(Acc);
16042 }
16043 for (unsigned i=1; i<Ops.size(); i++)
16044 CallOps.push_back(Ops[i]);
16045 llvm::Function *F = CGM.getIntrinsic(ID);
16046 Value *Call = Builder.CreateCall(F, CallOps);
16047 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
16048 }
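  // Illustrative IR sketch (assumed shape, not part of the original source):
  // an accumulating builtin such as __builtin_mma_xvf32gerpp expands roughly to
  //   %acc = load <512 x i1>, ptr %quad
  //   %res = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %acc, ...)
  //   store <512 x i1> %res, ptr %quad, align 64
  // i.e. load the accumulator, call the intrinsic, store the result back.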
16049
16050 case PPC::BI__builtin_ppc_compare_and_swap:
16051 case PPC::BI__builtin_ppc_compare_and_swaplp: {
16052 Address Addr = EmitPointerWithAlignment(E->getArg(0));
16053 Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
16054 Value *OldVal = Builder.CreateLoad(OldValAddr);
16055 QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
16056 LValue LV = MakeAddrLValue(Addr, AtomicTy);
16057 auto Pair = EmitAtomicCompareExchange(
16058 LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(),
16059 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
16060 // Unlike C11's atomic_compare_exchange, the XL documentation at
16061 // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
16062 // states:
16063 // > In either case, the contents of the memory location specified by addr
16064 // > are copied into the memory location specified by old_val_addr.
16065 // It does not specify whether the store to OldValAddr is atomic or which
16066 // ordering to use, so following XL's codegen we treat it as a normal store.
16067 Value *LoadedVal = Pair.first.getScalarVal();
16068 Builder.CreateStore(LoadedVal, OldValAddr);
16069 return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
16070 }
16071 case PPC::BI__builtin_ppc_fetch_and_add:
16072 case PPC::BI__builtin_ppc_fetch_and_addlp: {
16073 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
16074 llvm::AtomicOrdering::Monotonic);
16075 }
16076 case PPC::BI__builtin_ppc_fetch_and_and:
16077 case PPC::BI__builtin_ppc_fetch_and_andlp: {
16078 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
16079 llvm::AtomicOrdering::Monotonic);
16080 }
16081
16082 case PPC::BI__builtin_ppc_fetch_and_or:
16083 case PPC::BI__builtin_ppc_fetch_and_orlp: {
16084 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
16085 llvm::AtomicOrdering::Monotonic);
16086 }
16087 case PPC::BI__builtin_ppc_fetch_and_swap:
16088 case PPC::BI__builtin_ppc_fetch_and_swaplp: {
16089 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
16090 llvm::AtomicOrdering::Monotonic);
16091 }
16092 case PPC::BI__builtin_ppc_ldarx:
16093 case PPC::BI__builtin_ppc_lwarx:
16094 case PPC::BI__builtin_ppc_lharx:
16095 case PPC::BI__builtin_ppc_lbarx:
16096 return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
16097 case PPC::BI__builtin_ppc_mfspr: {
16098 llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
16099 ? Int32Ty
16100 : Int64Ty;
16101 Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
16102 return Builder.CreateCall(F, Ops);
16103 }
16104 case PPC::BI__builtin_ppc_mtspr: {
16105 llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
16106 ? Int32Ty
16107 : Int64Ty;
16108 Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
16109 return Builder.CreateCall(F, Ops);
16110 }
16111 case PPC::BI__builtin_ppc_popcntb: {
16112 Value *ArgValue = EmitScalarExpr(E->getArg(0));
16113 llvm::Type *ArgType = ArgValue->getType();
16114 Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
16115 return Builder.CreateCall(F, Ops, "popcntb");
16116 }
16117 case PPC::BI__builtin_ppc_mtfsf: {
16118 // The builtin takes a uint32 that needs to be cast to an
16119 // f64 to be passed to the intrinsic.
16120 Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy);
16121 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
16122 return Builder.CreateCall(F, {Ops[0], Cast}, "");
16123 }
16124
16125 case PPC::BI__builtin_ppc_swdiv_nochk:
16126 case PPC::BI__builtin_ppc_swdivs_nochk: {
16127 FastMathFlags FMF = Builder.getFastMathFlags();
16128 Builder.getFastMathFlags().setFast();
16129 Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk");
16130 Builder.getFastMathFlags() &= (FMF);
16131 return FDiv;
16132 }
16133 case PPC::BI__builtin_ppc_fric:
16134 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16135 *this, E, Intrinsic::rint,
16136 Intrinsic::experimental_constrained_rint))
16137 .getScalarVal();
16138 case PPC::BI__builtin_ppc_frim:
16139 case PPC::BI__builtin_ppc_frims:
16140 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16141 *this, E, Intrinsic::floor,
16142 Intrinsic::experimental_constrained_floor))
16143 .getScalarVal();
16144 case PPC::BI__builtin_ppc_frin:
16145 case PPC::BI__builtin_ppc_frins:
16146 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16147 *this, E, Intrinsic::round,
16148 Intrinsic::experimental_constrained_round))
16149 .getScalarVal();
16150 case PPC::BI__builtin_ppc_frip:
16151 case PPC::BI__builtin_ppc_frips:
16152 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16153 *this, E, Intrinsic::ceil,
16154 Intrinsic::experimental_constrained_ceil))
16155 .getScalarVal();
16156 case PPC::BI__builtin_ppc_friz:
16157 case PPC::BI__builtin_ppc_frizs:
16158 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16159 *this, E, Intrinsic::trunc,
16160 Intrinsic::experimental_constrained_trunc))
16161 .getScalarVal();
16162 case PPC::BI__builtin_ppc_fsqrt:
16163 case PPC::BI__builtin_ppc_fsqrts:
16164 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16165 *this, E, Intrinsic::sqrt,
16166 Intrinsic::experimental_constrained_sqrt))
16167 .getScalarVal();
16168 case PPC::BI__builtin_ppc_test_data_class: {
16169 llvm::Type *ArgType = EmitScalarExpr(E->getArg(0))->getType();
16170 unsigned IntrinsicID;
16171 if (ArgType->isDoubleTy())
16172 IntrinsicID = Intrinsic::ppc_test_data_class_d;
16173 else if (ArgType->isFloatTy())
16174 IntrinsicID = Intrinsic::ppc_test_data_class_f;
16175 else
16176 llvm_unreachable("Invalid Argument Type");
16177 return Builder.CreateCall(CGM.getIntrinsic(IntrinsicID), Ops,
16178 "test_data_class");
16179 }
16180 case PPC::BI__builtin_ppc_swdiv:
16181 case PPC::BI__builtin_ppc_swdivs:
16182 return Builder.CreateFDiv(Ops[0], Ops[1], "swdiv");
16183 }
16184}
16185
16186namespace {
16187// If \p E is not a null pointer, insert an address space cast to match the
16188// return type of \p E if necessary.
16189Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
16190 const CallExpr *E = nullptr) {
16191 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
16192 auto *Call = CGF.Builder.CreateCall(F);
16193 Call->addRetAttr(
16194 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
16195 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
16196 if (!E)
16197 return Call;
16198 QualType BuiltinRetType = E->getType();
16199 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
16200 if (RetTy == Call->getType())
16201 return Call;
16202 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
16203}
16204
16205// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
16206Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
16207 const unsigned XOffset = 4;
16208 auto *DP = EmitAMDGPUDispatchPtr(CGF);
16209 // Indexing the HSA kernel_dispatch_packet struct.
16210 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
16211 auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
16212 auto *DstTy =
16213 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
16214 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
16215 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
16216 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
16217 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
16218 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
16219 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
16220 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
16221 llvm::MDNode::get(CGF.getLLVMContext(), None));
16222 return LD;
16223}
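// Worked example (illustrative, not part of the original source): for
// Index = 1 (the y dimension) the load reads the i16 at byte offset
// 4 + 1 * 2 = 6 of the dispatch packet, i.e. the workgroup_size_y field of
// hsa_kernel_dispatch_packet_t, annotated with the range [1, MaxOpenCLWorkGroupSize].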
16224
16225// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
16226Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
16227 const unsigned XOffset = 12;
16228 auto *DP = EmitAMDGPUDispatchPtr(CGF);
16229 // Indexing the HSA kernel_dispatch_packet struct.
16230 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
16231 auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
16232 auto *DstTy =
16233 CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
16234 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
16235 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
16236 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
16237 llvm::MDNode::get(CGF.getLLVMContext(), None));
16238 return LD;
16239}
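// Worked example (illustrative, not part of the original source): for
// Index = 2 (the z dimension) the load reads the i32 at byte offset
// 12 + 2 * 4 = 20 of the dispatch packet, i.e. the grid_size_z field of
// hsa_kernel_dispatch_packet_t.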
16240} // namespace
16241
16242// For processing memory ordering and memory scope arguments of various
16243// amdgcn builtins.
16244 // \p Order takes a C++11-compatible memory-ordering specifier and converts
16245 // it into LLVM's memory ordering using the atomic C ABI, writing the result
16246 // to \p AO. \p Scope takes a const char * and converts it into an
16247 // AMDGCN-specific SyncScopeID, which is written to \p SSID.
16248bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
16249 llvm::AtomicOrdering &AO,
16250 llvm::SyncScope::ID &SSID) {
16251 if (isa<llvm::ConstantInt>(Order)) {
16252 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
16253
16254 // Map C11/C++11 memory ordering to LLVM memory ordering
16255 assert(llvm::isValidAtomicOrderingCABI(ord));
16256 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
16257 case llvm::AtomicOrderingCABI::acquire:
16258 case llvm::AtomicOrderingCABI::consume:
16259 AO = llvm::AtomicOrdering::Acquire;
16260 break;
16261 case llvm::AtomicOrderingCABI::release:
16262 AO = llvm::AtomicOrdering::Release;
16263 break;
16264 case llvm::AtomicOrderingCABI::acq_rel:
16265 AO = llvm::AtomicOrdering::AcquireRelease;
16266 break;
16267 case llvm::AtomicOrderingCABI::seq_cst:
16268 AO = llvm::AtomicOrdering::SequentiallyConsistent;
16269 break;
16270 case llvm::AtomicOrderingCABI::relaxed:
16271 AO = llvm::AtomicOrdering::Monotonic;
16272 break;
16273 }
16274
16275 StringRef scp;
16276 llvm::getConstantStringInfo(Scope, scp);
16277 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
16278 return true;
16279 }
16280 return false;
16281}
16282
16283Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
16284 const CallExpr *E) {
16285 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
16286 llvm::SyncScope::ID SSID;
16287 switch (BuiltinID) {
16288 case AMDGPU::BI__builtin_amdgcn_div_scale:
16289 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
16290 // Translate from the intrinsics's struct return to the builtin's out
16291 // argument.
16292
16293 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
16294
16295 llvm::Value *X = EmitScalarExpr(E->getArg(0));
16296 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
16297 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
16298
16299 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
16300 X->getType());
16301
16302 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
16303
16304 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
16305 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
16306
16307 llvm::Type *RealFlagType
16308 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
16309
16310 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
16311 Builder.CreateStore(FlagExt, FlagOutPtr);
16312 return Result;
16313 }
16314 case AMDGPU::BI__builtin_amdgcn_div_fmas:
16315 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
16316 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16317 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16318 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16319 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
16320
16321 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
16322 Src0->getType());
16323 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
16324 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
16325 }
16326
16327 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
16328 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
16329 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
16330 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
16331 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
16332 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
16333 llvm::SmallVector<llvm::Value *, 6> Args;
16334 for (unsigned I = 0; I != E->getNumArgs(); ++I)
16335 Args.push_back(EmitScalarExpr(E->getArg(I)));
16336 assert(Args.size() == 5 || Args.size() == 6);
16337 if (Args.size() == 5)
16338 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
16339 Function *F =
16340 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
16341 return Builder.CreateCall(F, Args);
16342 }
16343 case AMDGPU::BI__builtin_amdgcn_div_fixup:
16344 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
16345 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
16346 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
16347 case AMDGPU::BI__builtin_amdgcn_trig_preop:
16348 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
16349 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
16350 case AMDGPU::BI__builtin_amdgcn_rcp:
16351 case AMDGPU::BI__builtin_amdgcn_rcpf:
16352 case AMDGPU::BI__builtin_amdgcn_rcph:
16353 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
16354 case AMDGPU::BI__builtin_amdgcn_sqrt:
16355 case AMDGPU::BI__builtin_amdgcn_sqrtf:
16356 case AMDGPU::BI__builtin_amdgcn_sqrth:
16357 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
16358 case AMDGPU::BI__builtin_amdgcn_rsq:
16359 case AMDGPU::BI__builtin_amdgcn_rsqf:
16360 case AMDGPU::BI__builtin_amdgcn_rsqh:
16361 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
16362 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
16363 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
16364 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
16365 case AMDGPU::BI__builtin_amdgcn_sinf:
16366 case AMDGPU::BI__builtin_amdgcn_sinh:
16367 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
16368 case AMDGPU::BI__builtin_amdgcn_cosf:
16369 case AMDGPU::BI__builtin_amdgcn_cosh:
16370 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
16371 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
16372 return EmitAMDGPUDispatchPtr(*this, E);
16373 case AMDGPU::BI__builtin_amdgcn_log_clampf:
16374 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
16375 case AMDGPU::BI__builtin_amdgcn_ldexp:
16376 case AMDGPU::BI__builtin_amdgcn_ldexpf:
16377 case AMDGPU::BI__builtin_amdgcn_ldexph:
16378 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
16379 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
16380 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
16381 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
16382 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
16383 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
16384 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
16385 Value *Src0 = EmitScalarExpr(E->getArg(0));
16386 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
16387 { Builder.getInt32Ty(), Src0->getType() });
16388 return Builder.CreateCall(F, Src0);
16389 }
16390 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
16391 Value *Src0 = EmitScalarExpr(E->getArg(0));
16392 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
16393 { Builder.getInt16Ty(), Src0->getType() });
16394 return Builder.CreateCall(F, Src0);
16395 }
16396 case AMDGPU::BI__builtin_amdgcn_fract:
16397 case AMDGPU::BI__builtin_amdgcn_fractf:
16398 case AMDGPU::BI__builtin_amdgcn_fracth:
16399 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
16400 case AMDGPU::BI__builtin_amdgcn_lerp:
16401 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
16402 case AMDGPU::BI__builtin_amdgcn_ubfe:
16403 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
16404 case AMDGPU::BI__builtin_amdgcn_sbfe:
16405 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
16406 case AMDGPU::BI__builtin_amdgcn_uicmp:
16407 case AMDGPU::BI__builtin_amdgcn_uicmpl:
16408 case AMDGPU::BI__builtin_amdgcn_sicmp:
16409 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
16410 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16411 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16412 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16413
16414 // FIXME-GFX10: How should 32 bit mask be handled?
16415 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
16416 { Builder.getInt64Ty(), Src0->getType() });
16417 return Builder.CreateCall(F, { Src0, Src1, Src2 });
16418 }
16419 case AMDGPU::BI__builtin_amdgcn_fcmp:
16420 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
16421 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16422 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16423 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16424
16425 // FIXME-GFX10: How should 32 bit mask be handled?
16426 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
16427 { Builder.getInt64Ty(), Src0->getType() });
16428 return Builder.CreateCall(F, { Src0, Src1, Src2 });
16429 }
16430 case AMDGPU::BI__builtin_amdgcn_class:
16431 case AMDGPU::BI__builtin_amdgcn_classf:
16432 case AMDGPU::BI__builtin_amdgcn_classh:
16433 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
16434 case AMDGPU::BI__builtin_amdgcn_fmed3f:
16435 case AMDGPU::BI__builtin_amdgcn_fmed3h:
16436 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
16437 case AMDGPU::BI__builtin_amdgcn_ds_append:
16438 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
16439 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
16440 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
16441 Value *Src0 = EmitScalarExpr(E->getArg(0));
16442 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
16443 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
16444 }
16445 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
16446 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
16447 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
16448 Intrinsic::ID Intrin;
16449 switch (BuiltinID) {
16450 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
16451 Intrin = Intrinsic::amdgcn_ds_fadd;
16452 break;
16453 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
16454 Intrin = Intrinsic::amdgcn_ds_fmin;
16455 break;
16456 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
16457 Intrin = Intrinsic::amdgcn_ds_fmax;
16458 break;
16459 }
16460 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16461 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16462 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16463 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
16464 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
16465 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
16466 llvm::FunctionType *FTy = F->getFunctionType();
16467 llvm::Type *PTy = FTy->getParamType(0);
16468 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
16469 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
16470 }
16471 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
16472 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
16473 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
16474 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
16475 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
16476 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
16477 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
16478 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: {
16479 Intrinsic::ID IID;
16480 llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
16481 switch (BuiltinID) {
16482 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
16483 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
16484 IID = Intrinsic::amdgcn_global_atomic_fadd;
16485 break;
16486 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
16487 ArgTy = llvm::FixedVectorType::get(
16488 llvm::Type::getHalfTy(getLLVMContext()), 2);
16489 IID = Intrinsic::amdgcn_global_atomic_fadd;
16490 break;
16491 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
16492 IID = Intrinsic::amdgcn_global_atomic_fadd;
16493 break;
16494 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
16495 IID = Intrinsic::amdgcn_global_atomic_fmin;
16496 break;
16497 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
16498 IID = Intrinsic::amdgcn_global_atomic_fmax;
16499 break;
16500 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
16501 IID = Intrinsic::amdgcn_flat_atomic_fadd;
16502 break;
16503 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
16504 IID = Intrinsic::amdgcn_flat_atomic_fmin;
16505 break;
16506 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
16507 IID = Intrinsic::amdgcn_flat_atomic_fmax;
16508 break;
16509 }
16510 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
16511 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
16512 llvm::Function *F =
16513 CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
16514 return Builder.CreateCall(F, {Addr, Val});
16515 }
16516 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
16517 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: {
16518 Intrinsic::ID IID;
16519 llvm::Type *ArgTy;
16520 switch (BuiltinID) {
16521 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
16522 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
16523 IID = Intrinsic::amdgcn_ds_fadd;
16524 break;
16525 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
16526 ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
16527 IID = Intrinsic::amdgcn_ds_fadd;
16528 break;
16529 }
16530 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
16531 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
16532 llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
16533 llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
16534 llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
16535 llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
16536 llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
16537 return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
16538 }
16539 case AMDGPU::BI__builtin_amdgcn_read_exec: {
16540 CallInst *CI = cast<CallInst>(
16541 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
16542 CI->setConvergent();
16543 return CI;
16544 }
16545 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
16546 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
16547 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
16548 "exec_lo" : "exec_hi";
16549 CallInst *CI = cast<CallInst>(
16550 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
16551 CI->setConvergent();
16552 return CI;
16553 }
16554 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
16555 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
16556 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
16557 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
16558 llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
16559 llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
16560 llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
16561 llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
16562 llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
16563 llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
16564
16565 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
16566 {NodePtr->getType(), RayDir->getType()});
16567 return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
16568 RayInverseDir, TextureDescr});
16569 }
16570
16571 // amdgcn workitem
16572 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
16573 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
16574 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
16575 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
16576 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
16577 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
16578
16579 // amdgcn workgroup size
16580 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
16581 return EmitAMDGPUWorkGroupSize(*this, 0);
16582 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
16583 return EmitAMDGPUWorkGroupSize(*this, 1);
16584 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
16585 return EmitAMDGPUWorkGroupSize(*this, 2);
16586
16587 // amdgcn grid size
16588 case AMDGPU::BI__builtin_amdgcn_grid_size_x:
16589 return EmitAMDGPUGridSize(*this, 0);
16590 case AMDGPU::BI__builtin_amdgcn_grid_size_y:
16591 return EmitAMDGPUGridSize(*this, 1);
16592 case AMDGPU::BI__builtin_amdgcn_grid_size_z:
16593 return EmitAMDGPUGridSize(*this, 2);
16594
16595 // r600 intrinsics
16596 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
16597 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
16598 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
16599 case AMDGPU::BI__builtin_r600_read_tidig_x:
16600 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
16601 case AMDGPU::BI__builtin_r600_read_tidig_y:
16602 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
16603 case AMDGPU::BI__builtin_r600_read_tidig_z:
16604 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
16605 case AMDGPU::BI__builtin_amdgcn_alignbit: {
16606 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16607 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16608 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16609 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
16610 return Builder.CreateCall(F, { Src0, Src1, Src2 });
16611 }
16612
16613 case AMDGPU::BI__builtin_amdgcn_fence: {
16614 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
16615 EmitScalarExpr(E->getArg(1)), AO, SSID))
16616 return Builder.CreateFence(AO, SSID);
16617 LLVM_FALLTHROUGH;
16618 }
16619 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
16620 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
16621 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
16622 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
16623 unsigned BuiltinAtomicOp;
16624 llvm::Type *ResultType = ConvertType(E->getType());
16625
16626 switch (BuiltinID) {
16627 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
16628 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
16629 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
16630 break;
16631 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
16632 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
16633 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
16634 break;
16635 }
16636
16637 Value *Ptr = EmitScalarExpr(E->getArg(0));
16638 Value *Val = EmitScalarExpr(E->getArg(1));
16639
16640 llvm::Function *F =
16641 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
16642
16643 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
16644 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
16645
16646 // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
16647 // scope as unsigned values
16648 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
16649 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
16650
16651 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
16652 bool Volatile =
16653 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
16654 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
16655
16656 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
16657 }
16658 LLVM_FALLTHROUGH;
16659 }
16660 default:
16661 return nullptr;
16662 }
16663}
16664
16665/// Handle a SystemZ function in which the final argument is a pointer
16666/// to an int that receives the post-instruction CC value. At the LLVM level
16667/// this is represented as a function that returns a {result, cc} pair.
16668static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
16669 unsigned IntrinsicID,
16670 const CallExpr *E) {
16671 unsigned NumArgs = E->getNumArgs() - 1;
16672 SmallVector<Value *, 8> Args(NumArgs);
16673 for (unsigned I = 0; I < NumArgs; ++I)
16674 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
16675 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
16676 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
16677 Value *Call = CGF.Builder.CreateCall(F, Args);
16678 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
16679 CGF.Builder.CreateStore(CC, CCPtr);
16680 return CGF.Builder.CreateExtractValue(Call, 0);
16681}
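// Illustrative IR sketch (assumed shape, not part of the original source):
// a call like __builtin_s390_vceqbs(a, b, &cc) expands roughly to
//   %pair = call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %a, <16 x i8> %b)
//   %cc   = extractvalue { <16 x i8>, i32 } %pair, 1
//   store i32 %cc, ptr %ccptr
//   %res  = extractvalue { <16 x i8>, i32 } %pair, 0
// so the builtin's result is field 0 and the CC out-parameter receives field 1.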
16682
16683Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
16684 const CallExpr *E) {
16685 switch (BuiltinID) {
16686 case SystemZ::BI__builtin_tbegin: {
16687 Value *TDB = EmitScalarExpr(E->getArg(0));
16688 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
16689 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
16690 return Builder.CreateCall(F, {TDB, Control});
16691 }
16692 case SystemZ::BI__builtin_tbegin_nofloat: {
16693 Value *TDB = EmitScalarExpr(E->getArg(0));
16694 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
16695 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
16696 return Builder.CreateCall(F, {TDB, Control});
16697 }
16698 case SystemZ::BI__builtin_tbeginc: {
16699 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
16700 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
16701 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
16702 return Builder.CreateCall(F, {TDB, Control});
16703 }
16704 case SystemZ::BI__builtin_tabort: {
16705 Value *Data = EmitScalarExpr(E->getArg(0));
16706 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
16707 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
16708 }
16709 case SystemZ::BI__builtin_non_tx_store: {
16710 Value *Address = EmitScalarExpr(E->getArg(0));
16711 Value *Data = EmitScalarExpr(E->getArg(1));
16712 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
16713 return Builder.CreateCall(F, {Data, Address});
16714 }
16715
16716 // Vector builtins. Note that most vector builtins are mapped automatically
16717 // to target-specific LLVM intrinsics. The ones handled specially here can
16718 // be represented via standard LLVM IR, which is preferable to enable common
16719 // LLVM optimizations.
16720
16721 case SystemZ::BI__builtin_s390_vpopctb:
16722 case SystemZ::BI__builtin_s390_vpopcth:
16723 case SystemZ::BI__builtin_s390_vpopctf:
16724 case SystemZ::BI__builtin_s390_vpopctg: {
16725 llvm::Type *ResultType = ConvertType(E->getType());
16726 Value *X = EmitScalarExpr(E->getArg(0));
16727 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
16728 return Builder.CreateCall(F, X);
16729 }
16730
16731 case SystemZ::BI__builtin_s390_vclzb:
16732 case SystemZ::BI__builtin_s390_vclzh:
16733 case SystemZ::BI__builtin_s390_vclzf:
16734 case SystemZ::BI__builtin_s390_vclzg: {
16735 llvm::Type *ResultType = ConvertType(E->getType());
16736 Value *X = EmitScalarExpr(E->getArg(0));
16737 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16738 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
16739 return Builder.CreateCall(F, {X, Undef});
16740 }
16741
16742 case SystemZ::BI__builtin_s390_vctzb:
16743 case SystemZ::BI__builtin_s390_vctzh:
16744 case SystemZ::BI__builtin_s390_vctzf:
16745 case SystemZ::BI__builtin_s390_vctzg: {
16746 llvm::Type *ResultType = ConvertType(E->getType());
16747 Value *X = EmitScalarExpr(E->getArg(0));
16748 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16749 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
16750 return Builder.CreateCall(F, {X, Undef});
16751 }
16752
16753 case SystemZ::BI__builtin_s390_vfsqsb:
16754 case SystemZ::BI__builtin_s390_vfsqdb: {
16755 llvm::Type *ResultType = ConvertType(E->getType());
16756 Value *X = EmitScalarExpr(E->getArg(0));
16757 if (Builder.getIsFPConstrained()) {
16758 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
16759 return Builder.CreateConstrainedFPCall(F, { X });
16760 } else {
16761 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
16762 return Builder.CreateCall(F, X);
16763 }
16764 }
16765 case SystemZ::BI__builtin_s390_vfmasb:
16766 case SystemZ::BI__builtin_s390_vfmadb: {
16767 llvm::Type *ResultType = ConvertType(E->getType());
16768 Value *X = EmitScalarExpr(E->getArg(0));
16769 Value *Y = EmitScalarExpr(E->getArg(1));
16770 Value *Z = EmitScalarExpr(E->getArg(2));
16771 if (Builder.getIsFPConstrained()) {
16772 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
16773 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
16774 } else {
16775 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16776 return Builder.CreateCall(F, {X, Y, Z});
16777 }
16778 }
16779 case SystemZ::BI__builtin_s390_vfmssb:
16780 case SystemZ::BI__builtin_s390_vfmsdb: {
16781 llvm::Type *ResultType = ConvertType(E->getType());
16782 Value *X = EmitScalarExpr(E->getArg(0));
16783 Value *Y = EmitScalarExpr(E->getArg(1));
16784 Value *Z = EmitScalarExpr(E->getArg(2));
16785 if (Builder.getIsFPConstrained()) {
16786 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
16787 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16788 } else {
16789 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16790 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16791 }
16792 }
16793 case SystemZ::BI__builtin_s390_vfnmasb:
16794 case SystemZ::BI__builtin_s390_vfnmadb: {
16795 llvm::Type *ResultType = ConvertType(E->getType());
16796 Value *X = EmitScalarExpr(E->getArg(0));
16797 Value *Y = EmitScalarExpr(E->getArg(1));
16798 Value *Z = EmitScalarExpr(E->getArg(2));
16799 if (Builder.getIsFPConstrained()) {
16800 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
16801 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
16802 } else {
16803 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16804 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
16805 }
16806 }
16807 case SystemZ::BI__builtin_s390_vfnmssb:
16808 case SystemZ::BI__builtin_s390_vfnmsdb: {
16809 llvm::Type *ResultType = ConvertType(E->getType());
16810 Value *X = EmitScalarExpr(E->getArg(0));
16811 Value *Y = EmitScalarExpr(E->getArg(1));
16812 Value *Z = EmitScalarExpr(E->getArg(2));
16813 if (Builder.getIsFPConstrained()) {
16814 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
16815 Value *NegZ = Builder.CreateFNeg(Z, "sub");
16816 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
16817 } else {
16818 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16819 Value *NegZ = Builder.CreateFNeg(Z, "neg");
16820 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
16821 }
16822 }
16823 case SystemZ::BI__builtin_s390_vflpsb:
16824 case SystemZ::BI__builtin_s390_vflpdb: {
16825 llvm::Type *ResultType = ConvertType(E->getType());
16826 Value *X = EmitScalarExpr(E->getArg(0));
16827 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
16828 return Builder.CreateCall(F, X);
16829 }
16830 case SystemZ::BI__builtin_s390_vflnsb:
16831 case SystemZ::BI__builtin_s390_vflndb: {
16832 llvm::Type *ResultType = ConvertType(E->getType());
16833 Value *X = EmitScalarExpr(E->getArg(0));
16834 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
16835 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
16836 }
16837 case SystemZ::BI__builtin_s390_vfisb:
16838 case SystemZ::BI__builtin_s390_vfidb: {
16839 llvm::Type *ResultType = ConvertType(E->getType());
16840 Value *X = EmitScalarExpr(E->getArg(0));
16841 // Constant-fold the M4 and M5 mask arguments.
16842 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
16843 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
16844 // Check whether this instance can be represented via a LLVM standard
16845 // intrinsic. We only support some combinations of M4 and M5.
16846 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16847 Intrinsic::ID CI;
16848 switch (M4.getZExtValue()) {
16849 default: break;
16850 case 0: // IEEE-inexact exception allowed
16851 switch (M5.getZExtValue()) {
16852 default: break;
16853 case 0: ID = Intrinsic::rint;
16854 CI = Intrinsic::experimental_constrained_rint; break;
16855 }
16856 break;
16857 case 4: // IEEE-inexact exception suppressed
16858 switch (M5.getZExtValue()) {
16859 default: break;
16860 case 0: ID = Intrinsic::nearbyint;
16861 CI = Intrinsic::experimental_constrained_nearbyint; break;
16862 case 1: ID = Intrinsic::round;
16863 CI = Intrinsic::experimental_constrained_round; break;
16864 case 5: ID = Intrinsic::trunc;
16865 CI = Intrinsic::experimental_constrained_trunc; break;
16866 case 6: ID = Intrinsic::ceil;
16867 CI = Intrinsic::experimental_constrained_ceil; break;
16868 case 7: ID = Intrinsic::floor;
16869 CI = Intrinsic::experimental_constrained_floor; break;
16870 }
16871 break;
16872 }
16873 if (ID != Intrinsic::not_intrinsic) {
16874 if (Builder.getIsFPConstrained()) {
16875 Function *F = CGM.getIntrinsic(CI, ResultType);
16876 return Builder.CreateConstrainedFPCall(F, X);
16877 } else {
16878 Function *F = CGM.getIntrinsic(ID, ResultType);
16879 return Builder.CreateCall(F, X);
16880 }
16881 }
16882 switch (BuiltinID) { // FIXME: constrained version?
16883 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
16884 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
16885 default: llvm_unreachable("Unknown BuiltinID");
16887 Function *F = CGM.getIntrinsic(ID);
16888 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16889 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
16890 return Builder.CreateCall(F, {X, M4Value, M5Value});
16891 }
16892 case SystemZ::BI__builtin_s390_vfmaxsb:
16893 case SystemZ::BI__builtin_s390_vfmaxdb: {
16894 llvm::Type *ResultType = ConvertType(E->getType());
16895 Value *X = EmitScalarExpr(E->getArg(0));
16896 Value *Y = EmitScalarExpr(E->getArg(1));
16897 // Constant-fold the M4 mask argument.
16898 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
16899 // Check whether this instance can be represented via a LLVM standard
16900 // intrinsic. We only support some values of M4.
16901 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16902 Intrinsic::ID CI;
16903 switch (M4.getZExtValue()) {
16904 default: break;
16905 case 4: ID = Intrinsic::maxnum;
16906 CI = Intrinsic::experimental_constrained_maxnum; break;
16907 }
16908 if (ID != Intrinsic::not_intrinsic) {
16909 if (Builder.getIsFPConstrained()) {
16910 Function *F = CGM.getIntrinsic(CI, ResultType);
16911 return Builder.CreateConstrainedFPCall(F, {X, Y});
16912 } else {
16913 Function *F = CGM.getIntrinsic(ID, ResultType);
16914 return Builder.CreateCall(F, {X, Y});
16915 }
16916 }
16917 switch (BuiltinID) {
16918 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
16919 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
16920 default: llvm_unreachable("Unknown BuiltinID");
16922 Function *F = CGM.getIntrinsic(ID);
16923 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16924 return Builder.CreateCall(F, {X, Y, M4Value});
16925 }
16926 case SystemZ::BI__builtin_s390_vfminsb:
16927 case SystemZ::BI__builtin_s390_vfmindb: {
16928 llvm::Type *ResultType = ConvertType(E->getType());
16929 Value *X = EmitScalarExpr(E->getArg(0));
16930 Value *Y = EmitScalarExpr(E->getArg(1));
16931 // Constant-fold the M4 mask argument.
16932 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
16933 // Check whether this instance can be represented via a LLVM standard
16934 // intrinsic. We only support some values of M4.
16935 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16936 Intrinsic::ID CI;
16937 switch (M4.getZExtValue()) {
16938 default: break;
16939 case 4: ID = Intrinsic::minnum;
16940 CI = Intrinsic::experimental_constrained_minnum; break;
16941 }
16942 if (ID != Intrinsic::not_intrinsic) {
16943 if (Builder.getIsFPConstrained()) {
16944 Function *F = CGM.getIntrinsic(CI, ResultType);
16945 return Builder.CreateConstrainedFPCall(F, {X, Y});
16946 } else {
16947 Function *F = CGM.getIntrinsic(ID, ResultType);
16948 return Builder.CreateCall(F, {X, Y});
16949 }
16950 }
16951 switch (BuiltinID) {
16952 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
16953 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
16954 default: llvm_unreachable("Unknown BuiltinID");
16956 Function *F = CGM.getIntrinsic(ID);
16957 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16958 return Builder.CreateCall(F, {X, Y, M4Value});
16959 }
16960
16961 case SystemZ::BI__builtin_s390_vlbrh:
16962 case SystemZ::BI__builtin_s390_vlbrf:
16963 case SystemZ::BI__builtin_s390_vlbrg: {
16964 llvm::Type *ResultType = ConvertType(E->getType());
16965 Value *X = EmitScalarExpr(E->getArg(0));
16966 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
16967 return Builder.CreateCall(F, X);
16968 }
16969
16970 // Vector intrinsics that output the post-instruction CC value.
16971
16972#define INTRINSIC_WITH_CC(NAME) \
16973 case SystemZ::BI__builtin_##NAME: \
16974 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
16975
16976 INTRINSIC_WITH_CC(s390_vpkshs);
16977 INTRINSIC_WITH_CC(s390_vpksfs);
16978 INTRINSIC_WITH_CC(s390_vpksgs);
16979
16980 INTRINSIC_WITH_CC(s390_vpklshs);
16981 INTRINSIC_WITH_CC(s390_vpklsfs);
16982 INTRINSIC_WITH_CC(s390_vpklsgs);
16983
16984 INTRINSIC_WITH_CC(s390_vceqbs);
16985 INTRINSIC_WITH_CC(s390_vceqhs);
16986 INTRINSIC_WITH_CC(s390_vceqfs);
16987 INTRINSIC_WITH_CC(s390_vceqgs);
16988
16989 INTRINSIC_WITH_CC(s390_vchbs);
16990 INTRINSIC_WITH_CC(s390_vchhs);
16991 INTRINSIC_WITH_CC(s390_vchfs);
16992 INTRINSIC_WITH_CC(s390_vchgs);
16993
16994 INTRINSIC_WITH_CC(s390_vchlbs);
16995 INTRINSIC_WITH_CC(s390_vchlhs);
16996 INTRINSIC_WITH_CC(s390_vchlfs);
16997 INTRINSIC_WITH_CC(s390_vchlgs);
16998
16999 INTRINSIC_WITH_CC(s390_vfaebs);
17000 INTRINSIC_WITH_CC(s390_vfaehs);
17001 INTRINSIC_WITH_CC(s390_vfaefs);
17002
17003 INTRINSIC_WITH_CC(s390_vfaezbs);
17004 INTRINSIC_WITH_CC(s390_vfaezhs);
17005 INTRINSIC_WITH_CC(s390_vfaezfs);
17006
17007 INTRINSIC_WITH_CC(s390_vfeebs);
17008 INTRINSIC_WITH_CC(s390_vfeehs);
17009 INTRINSIC_WITH_CC(s390_vfeefs);
17010
17011 INTRINSIC_WITH_CC(s390_vfeezbs);
17012 INTRINSIC_WITH_CC(s390_vfeezhs);
17013 INTRINSIC_WITH_CC(s390_vfeezfs);
17014
17015 INTRINSIC_WITH_CC(s390_vfenebs);
17016 INTRINSIC_WITH_CC(s390_vfenehs);
17017 INTRINSIC_WITH_CC(s390_vfenefs);
17018
17019 INTRINSIC_WITH_CC(s390_vfenezbs);
17020 INTRINSIC_WITH_CC(s390_vfenezhs);
17021 INTRINSIC_WITH_CC(s390_vfenezfs);
17022
17023 INTRINSIC_WITH_CC(s390_vistrbs);
17024 INTRINSIC_WITH_CC(s390_vistrhs);
17025 INTRINSIC_WITH_CC(s390_vistrfs);
17026
17027 INTRINSIC_WITH_CC(s390_vstrcbs);
17028 INTRINSIC_WITH_CC(s390_vstrchs);
17029 INTRINSIC_WITH_CC(s390_vstrcfs);
17030
17031 INTRINSIC_WITH_CC(s390_vstrczbs);
17032 INTRINSIC_WITH_CC(s390_vstrczhs);
17033 INTRINSIC_WITH_CC(s390_vstrczfs);
17034
17035 INTRINSIC_WITH_CC(s390_vfcesbs);
17036 INTRINSIC_WITH_CC(s390_vfcedbs);
17037 INTRINSIC_WITH_CC(s390_vfchsbs);
17038 INTRINSIC_WITH_CC(s390_vfchdbs);
17039 INTRINSIC_WITH_CC(s390_vfchesbs);
17040 INTRINSIC_WITH_CC(s390_vfchedbs);
17041
17042 INTRINSIC_WITH_CC(s390_vftcisb);
17043 INTRINSIC_WITH_CC(s390_vftcidb);
17044
17045 INTRINSIC_WITH_CC(s390_vstrsb);
17046 INTRINSIC_WITH_CC(s390_vstrsh);
17047 INTRINSIC_WITH_CC(s390_vstrsf);
17048
17049 INTRINSIC_WITH_CC(s390_vstrszb);
17050 INTRINSIC_WITH_CC(s390_vstrszh);
17051 INTRINSIC_WITH_CC(s390_vstrszf);
17052
17053#undef INTRINSIC_WITH_CC
17054
17055 default:
17056 return nullptr;
17057 }
17058}
17059
17060namespace {
17061// Helper classes for mapping MMA builtins to particular LLVM intrinsic variants.
17062struct NVPTXMmaLdstInfo {
17063 unsigned NumResults; // Number of elements to load/store
17064 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
17065 unsigned IID_col;
17066 unsigned IID_row;
17067};
17068
17069#define MMA_INTR(geom_op_type, layout) \
17070 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
17071#define MMA_LDST(n, geom_op_type) \
17072 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
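// Illustrative expansion (not part of the original source):
//   MMA_LDST(8, m16n16k16_load_a_f16)
// produces { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//               Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }.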
17073
17074static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
17075 switch (BuiltinID) {
17076 // FP MMA loads
17077 case NVPTX::BI__hmma_m16n16k16_ld_a:
17078 return MMA_LDST(8, m16n16k16_load_a_f16);
17079 case NVPTX::BI__hmma_m16n16k16_ld_b:
17080 return MMA_LDST(8, m16n16k16_load_b_f16);
17081 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
17082 return MMA_LDST(4, m16n16k16_load_c_f16);
17083 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
17084 return MMA_LDST(8, m16n16k16_load_c_f32);
17085 case NVPTX::BI__hmma_m32n8k16_ld_a:
17086 return MMA_LDST(8, m32n8k16_load_a_f16);
17087 case NVPTX::BI__hmma_m32n8k16_ld_b:
17088 return MMA_LDST(8, m32n8k16_load_b_f16);
17089 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
17090 return MMA_LDST(4, m32n8k16_load_c_f16);
17091 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
17092 return MMA_LDST(8, m32n8k16_load_c_f32);
17093 case NVPTX::BI__hmma_m8n32k16_ld_a:
17094 return MMA_LDST(8, m8n32k16_load_a_f16);
17095 case NVPTX::BI__hmma_m8n32k16_ld_b:
17096 return MMA_LDST(8, m8n32k16_load_b_f16);
17097 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
17098 return MMA_LDST(4, m8n32k16_load_c_f16);
17099 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
17100 return MMA_LDST(8, m8n32k16_load_c_f32);
17101
17102 // Integer MMA loads
17103 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
17104 return MMA_LDST(2, m16n16k16_load_a_s8);
17105 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
17106 return MMA_LDST(2, m16n16k16_load_a_u8);
17107 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
17108 return MMA_LDST(2, m16n16k16_load_b_s8);
17109 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
17110 return MMA_LDST(2, m16n16k16_load_b_u8);
17111 case NVPTX::BI__imma_m16n16k16_ld_c:
17112 return MMA_LDST(8, m16n16k16_load_c_s32);
17113 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
17114 return MMA_LDST(4, m32n8k16_load_a_s8);
17115 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
17116 return MMA_LDST(4, m32n8k16_load_a_u8);
17117 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
17118 return MMA_LDST(1, m32n8k16_load_b_s8);
17119 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
17120 return MMA_LDST(1, m32n8k16_load_b_u8);
17121 case NVPTX::BI__imma_m32n8k16_ld_c:
17122 return MMA_LDST(8, m32n8k16_load_c_s32);
17123 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
17124 return MMA_LDST(1, m8n32k16_load_a_s8);
17125 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
17126 return MMA_LDST(1, m8n32k16_load_a_u8);
17127 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
17128 return MMA_LDST(4, m8n32k16_load_b_s8);
17129 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
17130 return MMA_LDST(4, m8n32k16_load_b_u8);
17131 case NVPTX::BI__imma_m8n32k16_ld_c:
17132 return MMA_LDST(8, m8n32k16_load_c_s32);
17133
17134 // Sub-integer MMA loads.
17135 // Only row/col layout is supported by A/B fragments.
17136 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
17137 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
17138 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
17139 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
17140 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
17141 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
17142 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
17143 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
17144 case NVPTX::BI__imma_m8n8k32_ld_c:
17145 return MMA_LDST(2, m8n8k32_load_c_s32);
17146 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
17147 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
17148 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
17149 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
17150 case NVPTX::BI__bmma_m8n8k128_ld_c:
17151 return MMA_LDST(2, m8n8k128_load_c_s32);
17152
17153 // Double MMA loads
17154 case NVPTX::BI__dmma_m8n8k4_ld_a:
17155 return MMA_LDST(1, m8n8k4_load_a_f64);
17156 case NVPTX::BI__dmma_m8n8k4_ld_b:
17157 return MMA_LDST(1, m8n8k4_load_b_f64);
17158 case NVPTX::BI__dmma_m8n8k4_ld_c:
17159 return MMA_LDST(2, m8n8k4_load_c_f64);
17160
17161 // Alternate float MMA loads
17162 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
17163 return MMA_LDST(4, m16n16k16_load_a_bf16);
17164 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
17165 return MMA_LDST(4, m16n16k16_load_b_bf16);
17166 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
17167 return MMA_LDST(2, m8n32k16_load_a_bf16);
17168 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
17169 return MMA_LDST(8, m8n32k16_load_b_bf16);
17170 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
17171 return MMA_LDST(8, m32n8k16_load_a_bf16);
17172 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
17173 return MMA_LDST(2, m32n8k16_load_b_bf16);
17174 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
17175 return MMA_LDST(4, m16n16k8_load_a_tf32);
17176 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
17177 return MMA_LDST(2, m16n16k8_load_b_tf32);
17178 case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
17179 return MMA_LDST(8, m16n16k8_load_c_f32);
17180
17181  // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
17182 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
17183 // use fragment C for both loads and stores.
17184 // FP MMA stores.
17185 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
17186 return MMA_LDST(4, m16n16k16_store_d_f16);
17187 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
17188 return MMA_LDST(8, m16n16k16_store_d_f32);
17189 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
17190 return MMA_LDST(4, m32n8k16_store_d_f16);
17191 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
17192 return MMA_LDST(8, m32n8k16_store_d_f32);
17193 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
17194 return MMA_LDST(4, m8n32k16_store_d_f16);
17195 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
17196 return MMA_LDST(8, m8n32k16_store_d_f32);
17197
17198 // Integer and sub-integer MMA stores.
17199 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
17200 // name, integer loads/stores use LLVM's i32.
17201 case NVPTX::BI__imma_m16n16k16_st_c_i32:
17202 return MMA_LDST(8, m16n16k16_store_d_s32);
17203 case NVPTX::BI__imma_m32n8k16_st_c_i32:
17204 return MMA_LDST(8, m32n8k16_store_d_s32);
17205 case NVPTX::BI__imma_m8n32k16_st_c_i32:
17206 return MMA_LDST(8, m8n32k16_store_d_s32);
17207 case NVPTX::BI__imma_m8n8k32_st_c_i32:
17208 return MMA_LDST(2, m8n8k32_store_d_s32);
17209 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
17210 return MMA_LDST(2, m8n8k128_store_d_s32);
17211
17212 // Double MMA store
17213 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
17214 return MMA_LDST(2, m8n8k4_store_d_f64);
17215
17216 // Alternate float MMA store
17217 case NVPTX::BI__mma_m16n16k8_st_c_f32:
17218 return MMA_LDST(8, m16n16k8_store_d_f32);
17219
17220 default:
17221    llvm_unreachable("Unknown MMA builtin");
17222 }
17223}
17224#undef MMA_LDST
17225#undef MMA_INTR
17226
17227
17228struct NVPTXMmaInfo {
17229 unsigned NumEltsA;
17230 unsigned NumEltsB;
17231 unsigned NumEltsC;
17232 unsigned NumEltsD;
17233
17234 // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
17235 // over 'col' for layout. The index of non-satf variants is expected to match
17236 // the undocumented layout constants used by CUDA's mma.hpp.
17237 std::array<unsigned, 8> Variants;
17238
17239 unsigned getMMAIntrinsic(int Layout, bool Satf) {
17240 unsigned Index = Layout + 4 * Satf;
17241 if (Index >= Variants.size())
17242 return 0;
17243 return Variants[Index];
17244 }
17245};
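// A worked example of the index computation above (a sketch; the builtin and
// argument values are illustrative, using the variant tables built below):
//
//   NVPTXMmaInfo MI = getNVPTXMmaInfo(NVPTX::BI__hmma_m16n16k16_mma_f32f32);
//   // Layout == 1 (row/col), Satf == false  ->  Index = 1 + 4 * 0 == 1,
//   // i.e. Intrinsic::nvvm_wmma_m16n16k16_mma_row_col_f32_f32.
//   // The same layout with Satf == true yields Index 5, the _satfinite variant.
//   unsigned IID = MI.getMMAIntrinsic(/*Layout=*/1, /*Satf=*/false);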
17246
17247 // Returns the intrinsic that matches Layout and Satf for valid combinations of
17248 // Layout and Satf, and 0 otherwise.
17249static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
17250 // clang-format off
17251#define MMA_VARIANTS(geom, type) \
17252 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
17253 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
17254 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
17255 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
17256#define MMA_SATF_VARIANTS(geom, type) \
17257 MMA_VARIANTS(geom, type), \
17258 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
17259 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
17260 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
17261 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
17262// Sub-integer MMA only supports row.col layout.
17263#define MMA_VARIANTS_I4(geom, type) \
17264 0, \
17265 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
17266 0, \
17267 0, \
17268 0, \
17269 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
17270 0, \
17271 0
17272// b1 MMA does not support .satfinite.
17273#define MMA_VARIANTS_B1_XOR(geom, type) \
17274 0, \
17275 Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
17276 0, \
17277 0, \
17278 0, \
17279 0, \
17280 0, \
17281 0
17282#define MMA_VARIANTS_B1_AND(geom, type) \
17283 0, \
17284 Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
17285 0, \
17286 0, \
17287 0, \
17288 0, \
17289 0, \
17290 0
17291 // clang-format on
17292 switch (BuiltinID) {
17293 // FP MMA
17294  // Note that the 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
17295  // the NumEltsN fields of the return value are ordered A,B,C,D.
17296 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
17297 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
17298 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
17299 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
17300 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
17301 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
17302 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
17303 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
17304 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
17305 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
17306 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
17307 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
17308 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
17309 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
17310 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
17311 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
17312 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
17313 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
17314 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
17315 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
17316 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
17317 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
17318 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
17319 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
17320
17321 // Integer MMA
17322 case NVPTX::BI__imma_m16n16k16_mma_s8:
17323 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
17324 case NVPTX::BI__imma_m16n16k16_mma_u8:
17325 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
17326 case NVPTX::BI__imma_m32n8k16_mma_s8:
17327 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
17328 case NVPTX::BI__imma_m32n8k16_mma_u8:
17329 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
17330 case NVPTX::BI__imma_m8n32k16_mma_s8:
17331 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
17332 case NVPTX::BI__imma_m8n32k16_mma_u8:
17333 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
17334
17335 // Sub-integer MMA
17336 case NVPTX::BI__imma_m8n8k32_mma_s4:
17337 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
17338 case NVPTX::BI__imma_m8n8k32_mma_u4:
17339 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
17340 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
17341 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
17342 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
17343 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
17344
17345 // Double MMA
17346 case NVPTX::BI__dmma_m8n8k4_mma_f64:
17347 return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
17348
17349 // Alternate FP MMA
17350 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
17351 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
17352 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
17353 return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
17354 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
17355 return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
17356 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
17357 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
17358 default:
17359    llvm_unreachable("Unexpected builtin ID.");
17360 }
17361#undef MMA_VARIANTS
17362#undef MMA_SATF_VARIANTS
17363#undef MMA_VARIANTS_I4
17364#undef MMA_VARIANTS_B1_AND
17365#undef MMA_VARIANTS_B1_XOR
17366}
17367
17368} // namespace
17369
17370Value *
17371CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
17372 auto MakeLdg = [&](unsigned IntrinsicID) {
17373 Value *Ptr = EmitScalarExpr(E->getArg(0));
17374 clang::CharUnits Align =
17375 CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
17376 return Builder.CreateCall(
17377 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
17378 Ptr->getType()}),
17379 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
17380 };
17381 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
17382 Value *Ptr = EmitScalarExpr(E->getArg(0));
17383 return Builder.CreateCall(
17384 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
17385 Ptr->getType()}),
17386 {Ptr, EmitScalarExpr(E->getArg(1))});
17387 };
17388 switch (BuiltinID) {
17389 case NVPTX::BI__nvvm_atom_add_gen_i:
17390 case NVPTX::BI__nvvm_atom_add_gen_l:
17391 case NVPTX::BI__nvvm_atom_add_gen_ll:
17392 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
17393
17394 case NVPTX::BI__nvvm_atom_sub_gen_i:
17395 case NVPTX::BI__nvvm_atom_sub_gen_l:
17396 case NVPTX::BI__nvvm_atom_sub_gen_ll:
17397 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
17398
17399 case NVPTX::BI__nvvm_atom_and_gen_i:
17400 case NVPTX::BI__nvvm_atom_and_gen_l:
17401 case NVPTX::BI__nvvm_atom_and_gen_ll:
17402 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
17403
17404 case NVPTX::BI__nvvm_atom_or_gen_i:
17405 case NVPTX::BI__nvvm_atom_or_gen_l:
17406 case NVPTX::BI__nvvm_atom_or_gen_ll:
17407 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
17408
17409 case NVPTX::BI__nvvm_atom_xor_gen_i:
17410 case NVPTX::BI__nvvm_atom_xor_gen_l:
17411 case NVPTX::BI__nvvm_atom_xor_gen_ll:
17412 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
17413
17414 case NVPTX::BI__nvvm_atom_xchg_gen_i:
17415 case NVPTX::BI__nvvm_atom_xchg_gen_l:
17416 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
17417 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
17418
17419 case NVPTX::BI__nvvm_atom_max_gen_i:
17420 case NVPTX::BI__nvvm_atom_max_gen_l:
17421 case NVPTX::BI__nvvm_atom_max_gen_ll:
17422 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
17423
17424 case NVPTX::BI__nvvm_atom_max_gen_ui:
17425 case NVPTX::BI__nvvm_atom_max_gen_ul:
17426 case NVPTX::BI__nvvm_atom_max_gen_ull:
17427 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
17428
17429 case NVPTX::BI__nvvm_atom_min_gen_i:
17430 case NVPTX::BI__nvvm_atom_min_gen_l:
17431 case NVPTX::BI__nvvm_atom_min_gen_ll:
17432 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
17433
17434 case NVPTX::BI__nvvm_atom_min_gen_ui:
17435 case NVPTX::BI__nvvm_atom_min_gen_ul:
17436 case NVPTX::BI__nvvm_atom_min_gen_ull:
17437 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
17438
17439 case NVPTX::BI__nvvm_atom_cas_gen_i:
17440 case NVPTX::BI__nvvm_atom_cas_gen_l:
17441 case NVPTX::BI__nvvm_atom_cas_gen_ll:
17442 // __nvvm_atom_cas_gen_* should return the old value rather than the
17443 // success flag.
17444 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
17445
17446 case NVPTX::BI__nvvm_atom_add_gen_f:
17447 case NVPTX::BI__nvvm_atom_add_gen_d: {
17448 Value *Ptr = EmitScalarExpr(E->getArg(0));
17449 Value *Val = EmitScalarExpr(E->getArg(1));
17450 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
17451 AtomicOrdering::SequentiallyConsistent);
17452 }
17453
17454 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
17455 Value *Ptr = EmitScalarExpr(E->getArg(0));
17456 Value *Val = EmitScalarExpr(E->getArg(1));
17457 Function *FnALI32 =
17458 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
17459 return Builder.CreateCall(FnALI32, {Ptr, Val});
17460 }
17461
17462 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
17463 Value *Ptr = EmitScalarExpr(E->getArg(0));
17464 Value *Val = EmitScalarExpr(E->getArg(1));
17465 Function *FnALD32 =
17466 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
17467 return Builder.CreateCall(FnALD32, {Ptr, Val});
17468 }
17469
17470 case NVPTX::BI__nvvm_ldg_c:
17471 case NVPTX::BI__nvvm_ldg_c2:
17472 case NVPTX::BI__nvvm_ldg_c4:
17473 case NVPTX::BI__nvvm_ldg_s:
17474 case NVPTX::BI__nvvm_ldg_s2:
17475 case NVPTX::BI__nvvm_ldg_s4:
17476 case NVPTX::BI__nvvm_ldg_i:
17477 case NVPTX::BI__nvvm_ldg_i2:
17478 case NVPTX::BI__nvvm_ldg_i4:
17479 case NVPTX::BI__nvvm_ldg_l:
17480 case NVPTX::BI__nvvm_ldg_ll:
17481 case NVPTX::BI__nvvm_ldg_ll2:
17482 case NVPTX::BI__nvvm_ldg_uc:
17483 case NVPTX::BI__nvvm_ldg_uc2:
17484 case NVPTX::BI__nvvm_ldg_uc4:
17485 case NVPTX::BI__nvvm_ldg_us:
17486 case NVPTX::BI__nvvm_ldg_us2:
17487 case NVPTX::BI__nvvm_ldg_us4:
17488 case NVPTX::BI__nvvm_ldg_ui:
17489 case NVPTX::BI__nvvm_ldg_ui2:
17490 case NVPTX::BI__nvvm_ldg_ui4:
17491 case NVPTX::BI__nvvm_ldg_ul:
17492 case NVPTX::BI__nvvm_ldg_ull:
17493 case NVPTX::BI__nvvm_ldg_ull2:
17494 // PTX Interoperability section 2.2: "For a vector with an even number of
17495 // elements, its alignment is set to number of elements times the alignment
17496 // of its member: n*alignof(t)."
17497 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
17498 case NVPTX::BI__nvvm_ldg_f:
17499 case NVPTX::BI__nvvm_ldg_f2:
17500 case NVPTX::BI__nvvm_ldg_f4:
17501 case NVPTX::BI__nvvm_ldg_d:
17502 case NVPTX::BI__nvvm_ldg_d2:
17503 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
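// A sketch of the effect for one of the cases above (values are illustrative):
// for __nvvm_ldg_f4 the pointee is a 4-element float vector, so under the PTX
// interoperability rule quoted above its natural alignment is
// 4 * alignof(float) == 16 bytes, and MakeLdg emits a call to
// Intrinsic::nvvm_ldg_global_f with that alignment constant as the second
// operand.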
17504
17505 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
17506 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
17507 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
17508 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
17509 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
17510 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
17511 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
17512 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
17513 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
17514 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
17515 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
17516 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
17517 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
17518 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
17519 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
17520 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
17521 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
17522 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
17523 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
17524 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
17525 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
17526 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
17527 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
17528 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
17529 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
17530 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
17531 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
17532 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
17533 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
17534 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
17535 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
17536 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
17537 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
17538 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
17539 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
17540 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
17541 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
17542 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
17543 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
17544 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
17545 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
17546 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
17547 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
17548 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
17549 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
17550 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
17551 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
17552 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
17553 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
17554 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
17555 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
17556 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
17557 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
17558 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
17559 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
17560 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
17561 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
17562 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
17563 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
17564 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
17565 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
17566 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
17567 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
17568 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
17569 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
17570 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
17571 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
17572 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
17573 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
17574 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
17575 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
17576 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
17577 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
17578 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
17579 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
17580 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
17581 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
17582 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
17583 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
17584 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
17585 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
17586 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
17587 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
17588 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
17589 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
17590 Value *Ptr = EmitScalarExpr(E->getArg(0));
17591 return Builder.CreateCall(
17592 CGM.getIntrinsic(
17593 Intrinsic::nvvm_atomic_cas_gen_i_cta,
17594 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
17595 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
17596 }
17597 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
17598 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
17599 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
17600 Value *Ptr = EmitScalarExpr(E->getArg(0));
17601 return Builder.CreateCall(
17602 CGM.getIntrinsic(
17603 Intrinsic::nvvm_atomic_cas_gen_i_sys,
17604 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
17605 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
17606 }
17607 case NVPTX::BI__nvvm_match_all_sync_i32p:
17608 case NVPTX::BI__nvvm_match_all_sync_i64p: {
17609 Value *Mask = EmitScalarExpr(E->getArg(0));
17610 Value *Val = EmitScalarExpr(E->getArg(1));
17611 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
17612 Value *ResultPair = Builder.CreateCall(
17613 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
17614 ? Intrinsic::nvvm_match_all_sync_i32p
17615 : Intrinsic::nvvm_match_all_sync_i64p),
17616 {Mask, Val});
17617 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
17618 PredOutPtr.getElementType());
17619 Builder.CreateStore(Pred, PredOutPtr);
17620 return Builder.CreateExtractValue(ResultPair, 0);
17621 }
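// A sketch of the lowering above: __nvvm_match_all_sync_i32p(mask, val, &pred)
// calls Intrinsic::nvvm_match_all_sync_i32p, which returns a {value, predicate}
// pair; the predicate (member 1) is zero-extended and stored through the third
// argument, and the matched value (member 0) is returned to the caller.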
17622
17623 // FP MMA loads
17624 case NVPTX::BI__hmma_m16n16k16_ld_a:
17625 case NVPTX::BI__hmma_m16n16k16_ld_b:
17626 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
17627 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
17628 case NVPTX::BI__hmma_m32n8k16_ld_a:
17629 case NVPTX::BI__hmma_m32n8k16_ld_b:
17630 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
17631 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
17632 case NVPTX::BI__hmma_m8n32k16_ld_a:
17633 case NVPTX::BI__hmma_m8n32k16_ld_b:
17634 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
17635 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
17636 // Integer MMA loads.
17637 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
17638 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
17639 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
17640 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
17641 case NVPTX::BI__imma_m16n16k16_ld_c:
17642 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
17643 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
17644 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
17645 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
17646 case NVPTX::BI__imma_m32n8k16_ld_c:
17647 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
17648 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
17649 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
17650 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
17651 case NVPTX::BI__imma_m8n32k16_ld_c:
17652 // Sub-integer MMA loads.
17653 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
17654 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
17655 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
17656 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
17657 case NVPTX::BI__imma_m8n8k32_ld_c:
17658 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
17659 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
17660 case NVPTX::BI__bmma_m8n8k128_ld_c:
17661 // Double MMA loads.
17662 case NVPTX::BI__dmma_m8n8k4_ld_a:
17663 case NVPTX::BI__dmma_m8n8k4_ld_b:
17664 case NVPTX::BI__dmma_m8n8k4_ld_c:
17665 // Alternate float MMA loads.
17666 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
17667 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
17668 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
17669 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
17670 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
17671 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
17672 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
17673 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
17674 case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
17675 Address Dst = EmitPointerWithAlignment(E->getArg(0));
17676 Value *Src = EmitScalarExpr(E->getArg(1));
17677 Value *Ldm = EmitScalarExpr(E->getArg(2));
17678 Optional<llvm::APSInt> isColMajorArg =
17679 E->getArg(3)->getIntegerConstantExpr(getContext());
17680 if (!isColMajorArg)
17681 return nullptr;
17682 bool isColMajor = isColMajorArg->getSExtValue();
17683 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
17684 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
17685 if (IID == 0)
17686 return nullptr;
17687
17688 Value *Result =
17689 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
17690
17691 // Save returned values.
17692    assert(II.NumResults);
17693 if (II.NumResults == 1) {
17694 Builder.CreateAlignedStore(Result, Dst.getPointer(),
17695 CharUnits::fromQuantity(4));
17696 } else {
17697 for (unsigned i = 0; i < II.NumResults; ++i) {
17698 Builder.CreateAlignedStore(
17699 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
17700 Dst.getElementType()),
17701 Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
17702 llvm::ConstantInt::get(IntTy, i)),
17703 CharUnits::fromQuantity(4));
17704 }
17705 }
17706 return Result;
17707 }
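// A sketch of one instance of the case above (isColMajor assumed to be 0):
// __hmma_m16n16k16_ld_a maps to MMA_LDST(8, m16n16k16_load_a_f16), so IID_row
// (Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride) is selected, the call
// returns an 8-element aggregate, and each extracted element is bitcast to the
// destination element type and stored to Dst[i] with 4-byte alignment.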
17708
17709 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
17710 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
17711 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
17712 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
17713 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
17714 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
17715 case NVPTX::BI__imma_m16n16k16_st_c_i32:
17716 case NVPTX::BI__imma_m32n8k16_st_c_i32:
17717 case NVPTX::BI__imma_m8n32k16_st_c_i32:
17718 case NVPTX::BI__imma_m8n8k32_st_c_i32:
17719 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
17720 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
17721 case NVPTX::BI__mma_m16n16k8_st_c_f32: {
17722 Value *Dst = EmitScalarExpr(E->getArg(0));
17723 Address Src = EmitPointerWithAlignment(E->getArg(1));
17724 Value *Ldm = EmitScalarExpr(E->getArg(2));
17725 Optional<llvm::APSInt> isColMajorArg =
17726 E->getArg(3)->getIntegerConstantExpr(getContext());
17727 if (!isColMajorArg)
17728 return nullptr;
17729 bool isColMajor = isColMajorArg->getSExtValue();
17730 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
17731 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
17732 if (IID == 0)
17733 return nullptr;
17734 Function *Intrinsic =
17735 CGM.getIntrinsic(IID, Dst->getType());
17736 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
17737 SmallVector<Value *, 10> Values = {Dst};
17738 for (unsigned i = 0; i < II.NumResults; ++i) {
17739 Value *V = Builder.CreateAlignedLoad(
17740 Src.getElementType(),
17741 Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
17742 llvm::ConstantInt::get(IntTy, i)),
17743 CharUnits::fromQuantity(4));
17744 Values.push_back(Builder.CreateBitCast(V, ParamType));
17745 }
17746 Values.push_back(Ldm);
17747 Value *Result = Builder.CreateCall(Intrinsic, Values);
17748 return Result;
17749 }
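// A sketch of one instance of the case above (isColMajor assumed to be 0):
// __hmma_m16n16k16_st_c_f32 maps to MMA_LDST(8, m16n16k16_store_d_f32), so
// Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride is called with the
// destination pointer, the eight values loaded from Src (each bitcast to the
// intrinsic's element parameter type), and the stride Ldm.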
17750
17751 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
17752 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
17753 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
17754 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
17755 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
17756 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
17757 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
17758 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
17759 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
17760 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
17761 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
17762 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
17763 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
17764 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
17765 case NVPTX::BI__imma_m16n16k16_mma_s8:
17766 case NVPTX::BI__imma_m16n16k16_mma_u8:
17767 case NVPTX::BI__imma_m32n8k16_mma_s8:
17768 case NVPTX::BI__imma_m32n8k16_mma_u8:
17769 case NVPTX::BI__imma_m8n32k16_mma_s8:
17770 case NVPTX::BI__imma_m8n32k16_mma_u8:
17771 case NVPTX::BI__imma_m8n8k32_mma_s4:
17772 case NVPTX::BI__imma_m8n8k32_mma_u4:
17773 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
17774 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
17775 case NVPTX::BI__dmma_m8n8k4_mma_f64:
17776 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
17777 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
17778 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
17779 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
17780 Address Dst = EmitPointerWithAlignment(E->getArg(0));
17781 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
17782 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
17783 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
17784 Optional<llvm::APSInt> LayoutArg =
17785 E->getArg(4)->getIntegerConstantExpr(getContext());
17786 if (!LayoutArg)
17787 return nullptr;
17788 int Layout = LayoutArg->getSExtValue();
17789 if (Layout < 0 || Layout > 3)
17790 return nullptr;
17791 llvm::APSInt SatfArg;
17792 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
17793 BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
17794 SatfArg = 0; // .b1 does not have satf argument.
17795 else if (Optional<llvm::APSInt> OptSatfArg =
17796 E->getArg(5)->getIntegerConstantExpr(getContext()))
17797 SatfArg = *OptSatfArg;
17798 else
17799 return nullptr;
17800 bool Satf = SatfArg.getSExtValue();
17801 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
17802 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
17803 if (IID == 0) // Unsupported combination of Layout/Satf.
17804 return nullptr;
17805
17806 SmallVector<Value *, 24> Values;
17807 Function *Intrinsic = CGM.getIntrinsic(IID);
17808 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
17809 // Load A
17810 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
17811 Value *V = Builder.CreateAlignedLoad(
17812 SrcA.getElementType(),
17813 Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
17814 llvm::ConstantInt::get(IntTy, i)),
17815 CharUnits::fromQuantity(4));
17816 Values.push_back(Builder.CreateBitCast(V, AType));
17817 }
17818 // Load B
17819 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
17820 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
17821 Value *V = Builder.CreateAlignedLoad(
17822 SrcB.getElementType(),
17823 Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
17824 llvm::ConstantInt::get(IntTy, i)),
17825 CharUnits::fromQuantity(4));
17826 Values.push_back(Builder.CreateBitCast(V, BType));
17827 }
17828 // Load C
17829 llvm::Type *CType =
17830 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
17831 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
17832 Value *V = Builder.CreateAlignedLoad(
17833 SrcC.getElementType(),
17834 Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
17835 llvm::ConstantInt::get(IntTy, i)),
17836 CharUnits::fromQuantity(4));
17837 Values.push_back(Builder.CreateBitCast(V, CType));
17838 }
17839 Value *Result = Builder.CreateCall(Intrinsic, Values);
17840 llvm::Type *DType = Dst.getElementType();
17841 for (unsigned i = 0; i < MI.NumEltsD; ++i)
17842 Builder.CreateAlignedStore(
17843 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
17844 Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
17845 llvm::ConstantInt::get(IntTy, i)),
17846 CharUnits::fromQuantity(4));
17847 return Result;
17848 }
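// A sketch of one instance of the case above (layout/satf values are
// illustrative): __hmma_m16n16k16_mma_f32f32(d, a, b, c, /*layout=*/1,
// /*satf=*/0) loads 8 values each from a, b and c (bitcast to the intrinsic's
// parameter types), calls Intrinsic::nvvm_wmma_m16n16k16_mma_row_col_f32_f32,
// and stores the 8 elements of the returned aggregate to d with 4-byte
// alignment.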
17849 default:
17850 return nullptr;
17851 }
17852}
17853
17854namespace {
17855struct BuiltinAlignArgs {
17856 llvm::Value *Src = nullptr;
17857 llvm::Type *SrcType = nullptr;
17858 llvm::Value *Alignment = nullptr;
17859 llvm::Value *Mask = nullptr;
17860 llvm::IntegerType *IntType = nullptr;
17861
17862 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
17863 QualType AstType = E->getArg(0)->getType();
17864 if (AstType->isArrayType())
17865 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
17866 else
17867 Src = CGF.EmitScalarExpr(E->getArg(0));
17868 SrcType = Src->getType();
17869 if (SrcType->isPointerTy()) {
17870 IntType = IntegerType::get(
17871 CGF.getLLVMContext(),
17872 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
17873 } else {
17874      assert(SrcType->isIntegerTy());
17875 IntType = cast<llvm::IntegerType>(SrcType);
17876 }
17877 Alignment = CGF.EmitScalarExpr(E->getArg(1));
17878 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
17879 auto *One = llvm::ConstantInt::get(IntType, 1);
17880 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
17881 }
17882};
17883} // namespace
17884
17885/// Generate (x & (y-1)) == 0.
17886RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
17887 BuiltinAlignArgs Args(E, *this);
17888 llvm::Value *SrcAddress = Args.Src;
17889 if (Args.SrcType->isPointerTy())
17890 SrcAddress =
17891 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
17892 return RValue::get(Builder.CreateICmpEQ(
17893 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
17894 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
17895}
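// A sketch of the generated IR for a pointer argument (alignment value and
// types are illustrative): __builtin_is_aligned(p, 16) becomes roughly
//   %src_addr   = ptrtoint i8* %p to i64
//   %set_bits   = and i64 %src_addr, 15        ; Mask == alignment - 1
//   %is_aligned = icmp eq i64 %set_bits, 0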
17896
17897/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
17898/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
17899 /// llvm.ptrmask intrinsic (preceded by a GEP in the align_up case).
17900/// TODO: actually use ptrmask once most optimization passes know about it.
17901RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
17902 BuiltinAlignArgs Args(E, *this);
17903 llvm::Value *SrcAddr = Args.Src;
17904 if (Args.Src->getType()->isPointerTy())
17905 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
17906 llvm::Value *SrcForMask = SrcAddr;
17907 if (AlignUp) {
17908    // When aligning up we first add the mask so that the value reaches at
17909    // least the next alignment boundary and is then rounded down to the next
17910    // valid multiple. Since the mask is alignment - 1, align_up on an already
17911    // aligned value leaves it unchanged.
17912 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
17913 }
17914 // Invert the mask to only clear the lower bits.
17915 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
17916 llvm::Value *Result =
17917 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
17918 if (Args.Src->getType()->isPointerTy()) {
17919 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
17920 // Result = Builder.CreateIntrinsic(
17921 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
17922    //     {SrcForMask, InvertedMask}, nullptr, "aligned_result");
17923 Result->setName("aligned_intptr");
17924 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
17925 // The result must point to the same underlying allocation. This means we
17926 // can use an inbounds GEP to enable better optimization.
17927 Value *Base = EmitCastToVoidPtr(Args.Src);
17928 if (getLangOpts().isSignedOverflowDefined())
17929 Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
17930 else
17931 Result = EmitCheckedInBoundsGEP(Base, Difference,
17932 /*SignedIndices=*/true,
17933 /*isSubtraction=*/!AlignUp,
17934 E->getExprLoc(), "aligned_result");
17935 Result = Builder.CreatePointerCast(Result, Args.SrcType);
17936 // Emit an alignment assumption to ensure that the new alignment is
17937 // propagated to loads/stores, etc.
17938 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
17939 }
17940  assert(Result->getType() == Args.SrcType);
17941 return RValue::get(Result);
17942}
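// A worked example with illustrative values, aligning to 16 bytes (Mask == 0xF):
//   align_up(0x1003):   over_boundary = 0x1003 + 0xF = 0x1012;
//                       aligned_result = 0x1012 & ~0xF = 0x1010
//   align_down(0x1003): aligned_result = 0x1003 & ~0xF = 0x1000
// For pointer arguments the difference from the original address is applied
// back through a GEP so the result stays within the same allocation, and an
// alignment assumption is emitted for the new pointer.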
17943
17944Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
17945 const CallExpr *E) {
17946 switch (BuiltinID) {
17947 case WebAssembly::BI__builtin_wasm_memory_size: {
17948 llvm::Type *ResultType = ConvertType(E->getType());
17949 Value *I = EmitScalarExpr(E->getArg(0));
17950 Function *Callee =
17951 CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
17952 return Builder.CreateCall(Callee, I);
17953 }
17954 case WebAssembly::BI__builtin_wasm_memory_grow: {
17955 llvm::Type *ResultType = ConvertType(E->getType());
17956 Value *Args[] = {EmitScalarExpr(E->getArg(0)),
17957 EmitScalarExpr(E->getArg(1))};
17958 Function *Callee =
17959 CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
17960 return Builder.CreateCall(Callee, Args);
17961 }
17962 case WebAssembly::BI__builtin_wasm_tls_size: {
17963 llvm::Type *ResultType = ConvertType(E->getType());
17964 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
17965 return Builder.CreateCall(Callee);
17966 }
17967 case WebAssembly::BI__builtin_wasm_tls_align: {
17968 llvm::Type *ResultType = ConvertType(E->getType());
17969 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
17970 return Builder.CreateCall(Callee);
17971 }
17972 case WebAssembly::BI__builtin_wasm_tls_base: {
17973 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
17974 return Builder.CreateCall(Callee);
17975 }
17976 case WebAssembly::BI__builtin_wasm_throw: {
17977 Value *Tag = EmitScalarExpr(E->getArg(0));
17978 Value *Obj = EmitScalarExpr(E->getArg(1));
17979 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
17980 return Builder.CreateCall(Callee, {Tag, Obj});
17981 }
17982 case WebAssembly::BI__builtin_wasm_rethrow: {
17983 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
17984 return Builder.CreateCall(Callee);
17985 }
17986 case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
17987 Value *Addr = EmitScalarExpr(E->getArg(0));
17988 Value *Expected = EmitScalarExpr(E->getArg(1));
17989 Value *Timeout = EmitScalarExpr(E->getArg(2));
17990 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
17991 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
17992 }
17993 case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
17994 Value *Addr = EmitScalarExpr(E->getArg(0));
17995 Value *Expected = EmitScalarExpr(E->getArg(1));
17996 Value *Timeout = EmitScalarExpr(E->getArg(2));
17997 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
17998 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
17999 }
18000 case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
18001 Value *Addr = EmitScalarExpr(E->getArg(0));
18002 Value *Count = EmitScalarExpr(E->getArg(1));
18003 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
18004 return Builder.CreateCall(Callee, {Addr, Count});
18005 }
18006 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
18007 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
18008 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
18009 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
18010 Value *Src = EmitScalarExpr(E->getArg(0));
18011 llvm::Type *ResT = ConvertType(E->getType());
18012 Function *Callee =
18013 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
18014 return Builder.CreateCall(Callee, {Src});
18015 }
18016 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
18017 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
18018 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
18019 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
18020 Value *Src = EmitScalarExpr(E->getArg(0));
18021 llvm::Type *ResT = ConvertType(E->getType());
18022 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
18023 {ResT, Src->getType()});
18024 return Builder.CreateCall(Callee, {Src});
18025 }
18026 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
18027 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
18028 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
18029 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
18030 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
18031 Value *Src = EmitScalarExpr(E->getArg(0));
18032 llvm::Type *ResT = ConvertType(E->getType());
18033 Function *Callee =
18034 CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
18035 return Builder.CreateCall(Callee, {Src});
18036 }
18037 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
18038 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
18039 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
18040 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
18041 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
18042 Value *Src = EmitScalarExpr(E->getArg(0));
18043 llvm::Type *ResT = ConvertType(E->getType());
18044 Function *Callee =
18045 CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
18046 return Builder.CreateCall(Callee, {Src});
18047 }
18048 case WebAssembly::BI__builtin_wasm_min_f32:
18049 case WebAssembly::BI__builtin_wasm_min_f64:
18050 case WebAssembly::BI__builtin_wasm_min_f32x4:
18051 case WebAssembly::BI__builtin_wasm_min_f64x2: {
18052 Value *LHS = EmitScalarExpr(E->getArg(0));
18053 Value *RHS = EmitScalarExpr(E->getArg(1));
18054 Function *Callee =
18055 CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
18056 return Builder.CreateCall(Callee, {LHS, RHS});
18057 }
18058 case WebAssembly::BI__builtin_wasm_max_f32:
18059 case WebAssembly::BI__builtin_wasm_max_f64:
18060 case WebAssembly::BI__builtin_wasm_max_f32x4:
18061 case WebAssembly::BI__builtin_wasm_max_f64x2: {
18062 Value *LHS = EmitScalarExpr(E->getArg(0));
18063 Value *RHS = EmitScalarExpr(E->getArg(1));
18064 Function *Callee =
18065 CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
18066 return Builder.CreateCall(Callee, {LHS, RHS});
18067 }
18068 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
18069 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
18070 Value *LHS = EmitScalarExpr(E->getArg(0));
18071 Value *RHS = EmitScalarExpr(E->getArg(1));
18072 Function *Callee =
18073 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
18074 return Builder.CreateCall(Callee, {LHS, RHS});
18075 }
18076 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
18077 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
18078 Value *LHS = EmitScalarExpr(E->getArg(0));
18079 Value *RHS = EmitScalarExpr(E->getArg(1));
18080 Function *Callee =
18081 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
18082 return Builder.CreateCall(Callee, {LHS, RHS});
18083 }
18084 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
18085 case WebAssembly::BI__builtin_wasm_floor_f32x4:
18086 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
18087 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
18088 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
18089 case WebAssembly::BI__builtin_wasm_floor_f64x2:
18090 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
18091 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
18092 unsigned IntNo;
18093 switch (BuiltinID) {
18094 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
18095 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
18096 IntNo = Intrinsic::ceil;
18097 break;
18098 case WebAssembly::BI__builtin_wasm_floor_f32x4:
18099 case WebAssembly::BI__builtin_wasm_floor_f64x2:
18100 IntNo = Intrinsic::floor;
18101 break;
18102 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
18103 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
18104 IntNo = Intrinsic::trunc;
18105 break;
18106 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
18107 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
18108 IntNo = Intrinsic::nearbyint;
18109 break;
18110 default:
18111      llvm_unreachable("unexpected builtin ID");
18112 }
18113 Value *Value = EmitScalarExpr(E->getArg(0));
18114 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
18115 return Builder.CreateCall(Callee, Value);
18116 }
18117 case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
18118 Value *Src = EmitScalarExpr(E->getArg(0));
18119 Value *Indices = EmitScalarExpr(E->getArg(1));
18120 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
18121 return Builder.CreateCall(Callee, {Src, Indices});
18122 }
18123 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
18124 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
18125 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
18126 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
18127 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
18128 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
18129 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
18130 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
18131 unsigned IntNo;
18132 switch (BuiltinID) {
18133 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
18134 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
18135 IntNo = Intrinsic::sadd_sat;
18136 break;
18137 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
18138 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
18139 IntNo = Intrinsic::uadd_sat;
18140 break;
18141 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
18142 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
18143 IntNo = Intrinsic::wasm_sub_sat_signed;
18144 break;
18145 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
18146 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
18147 IntNo = Intrinsic::wasm_sub_sat_unsigned;
18148 break;
18149 default:
18150      llvm_unreachable("unexpected builtin ID");
18151 }
18152 Value *LHS = EmitScalarExpr(E->getArg(0));
18153 Value *RHS = EmitScalarExpr(E->getArg(1));
18154 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
18155 return Builder.CreateCall(Callee, {LHS, RHS});
18156 }
18157 case WebAssembly::BI__builtin_wasm_abs_i8x16:
18158 case WebAssembly::BI__builtin_wasm_abs_i16x8:
18159 case WebAssembly::BI__builtin_wasm_abs_i32x4:
18160 case WebAssembly::BI__builtin_wasm_abs_i64x2: {
18161 Value *Vec = EmitScalarExpr(E->getArg(0));
18162 Value *Neg = Builder.CreateNeg(Vec, "neg");
18163 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
18164 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
18165 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
18166 }
18167 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
18168 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
18169 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
18170 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
18171 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
18172 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
18173 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
18174 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
18175 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
18176 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
18177 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
18178 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
18179 Value *LHS = EmitScalarExpr(E->getArg(0));
18180 Value *RHS = EmitScalarExpr(E->getArg(1));
18181 Value *ICmp;
18182 switch (BuiltinID) {
18183 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
18184 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
18185 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
18186 ICmp = Builder.CreateICmpSLT(LHS, RHS);
18187 break;
18188 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
18189 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
18190 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
18191 ICmp = Builder.CreateICmpULT(LHS, RHS);
18192 break;
18193 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
18194 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
18195 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
18196 ICmp = Builder.CreateICmpSGT(LHS, RHS);
18197 break;
18198 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
18199 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
18200 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
18201 ICmp = Builder.CreateICmpUGT(LHS, RHS);
18202 break;
18203 default:
18204      llvm_unreachable("unexpected builtin ID");
18205 }
18206 return Builder.CreateSelect(ICmp, LHS, RHS);
18207 }
18208 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
18209 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
18210 Value *LHS = EmitScalarExpr(E->getArg(0));
18211 Value *RHS = EmitScalarExpr(E->getArg(1));
18212 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
18213 ConvertType(E->getType()));
18214 return Builder.CreateCall(Callee, {LHS, RHS});
18215 }
18216 case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
18217 Value *LHS = EmitScalarExpr(E->getArg(0));
18218 Value *RHS = EmitScalarExpr(E->getArg(1));
18219 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
18220 return Builder.CreateCall(Callee, {LHS, RHS});
18221 }
18222 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
18223 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
18224 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
18225 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
18226 Value *Vec = EmitScalarExpr(E->getArg(0));
18227 unsigned IntNo;
18228 switch (BuiltinID) {
18229 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
18230 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
18231 IntNo = Intrinsic::wasm_extadd_pairwise_signed;
18232 break;
18233 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
18234 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
18235 IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
18236 break;
18237 default:
18238      llvm_unreachable("unexpected builtin ID");
18239 }
18240
18241 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
18242 return Builder.CreateCall(Callee, Vec);
18243 }
18244 case WebAssembly::BI__builtin_wasm_bitselect: {
18245 Value *V1 = EmitScalarExpr(E->getArg(0));
18246 Value *V2 = EmitScalarExpr(E->getArg(1));
18247 Value *C = EmitScalarExpr(E->getArg(2));
18248 Function *Callee =
18249 CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
18250 return Builder.CreateCall(Callee, {V1, V2, C});
18251 }
18252 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
18253 Value *LHS = EmitScalarExpr(E->getArg(0));
18254 Value *RHS = EmitScalarExpr(E->getArg(1));
18255 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
18256 return Builder.CreateCall(Callee, {LHS, RHS});
18257 }
18258 case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
18259 Value *Vec = EmitScalarExpr(E->getArg(0));
18260 Function *Callee =
18261 CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
18262 return Builder.CreateCall(Callee, {Vec});
18263 }
18264 case WebAssembly::BI__builtin_wasm_any_true_v128:
18265 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
18266 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
18267 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
18268 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
18269 unsigned IntNo;
18270 switch (BuiltinID) {
18271 case WebAssembly::BI__builtin_wasm_any_true_v128:
18272 IntNo = Intrinsic::wasm_anytrue;
18273 break;
18274 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
18275 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
18276 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
18277 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
18278 IntNo = Intrinsic::wasm_alltrue;
18279 break;
18280 default:
18281      llvm_unreachable("unexpected builtin ID");
18282 }
18283 Value *Vec = EmitScalarExpr(E->getArg(0));
18284 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
18285 return Builder.CreateCall(Callee, {Vec});
18286 }
18287 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
18288 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
18289 case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
18290 case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
18291 Value *Vec = EmitScalarExpr(E->getArg(0));
18292 Function *Callee =
18293 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
18294 return Builder.CreateCall(Callee, {Vec});
18295 }
18296 case WebAssembly::BI__builtin_wasm_abs_f32x4:
18297 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
18298 Value *Vec = EmitScalarExpr(E->getArg(0));
18299 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
18300 return Builder.CreateCall(Callee, {Vec});
18301 }
18302 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
18303 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
18304 Value *Vec = EmitScalarExpr(E->getArg(0));
18305 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
18306 return Builder.CreateCall(Callee, {Vec});
18307 }
18308 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
18309 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
18310 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
18311 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
18312 Value *Low = EmitScalarExpr(E->getArg(0));
18313 Value *High = EmitScalarExpr(E->getArg(1));
18314 unsigned IntNo;
18315 switch (BuiltinID) {
18316 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
18317 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
18318 IntNo = Intrinsic::wasm_narrow_signed;
18319 break;
18320 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
18321 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
18322 IntNo = Intrinsic::wasm_narrow_unsigned;
18323 break;
18324 default:
18325      llvm_unreachable("unexpected builtin ID");
18326 }
18327 Function *Callee =
18328 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
18329 return Builder.CreateCall(Callee, {Low, High});
18330 }
18331 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
18332 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
18333 Value *Vec = EmitScalarExpr(E->getArg(0));
18334 unsigned IntNo;
18335 switch (BuiltinID) {
18336 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
18337 IntNo = Intrinsic::fptosi_sat;
18338 break;
18339 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
18340 IntNo = Intrinsic::fptoui_sat;
18341 break;
18342 default:
18343      llvm_unreachable("unexpected builtin ID");
18344 }
18345 llvm::Type *SrcT = Vec->getType();
18346 llvm::Type *TruncT =
18347 SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
18348 Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
18349 Value *Trunc = Builder.CreateCall(Callee, Vec);
18350 Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
18351 Value *ConcatMask =
18352 llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
18353 Builder.getInt32(2), Builder.getInt32(3)});
18354 return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
18355 }
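// Illustrative sketch, not part of CGBuiltin.cpp: the per-lane effect of the
// saturating truncation emitted above, assuming the documented semantics of
// llvm.fptosi.sat (NaN maps to 0, out-of-range values clamp to the integer
// limits). The helper names below are hypothetical. Two saturated i32 lanes
// come from the f64x2 input; the upper two lanes of the i32x4 result are the
// zeros contributed by the splat-and-shuffle concatenation.
#include <array>
#include <cmath>
#include <cstdint>
#include <limits>

static int32_t fptosi_sat_i32(double X) {
  if (std::isnan(X))
    return 0;
  if (X <= (double)std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();
  if (X >= (double)std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();
  return (int32_t)X;
}

static std::array<int32_t, 4> truncSatZeroS(const std::array<double, 2> &V) {
  // Lanes 0-1 are the saturated conversions; lanes 2-3 are the zero padding.
  return {fptosi_sat_i32(V[0]), fptosi_sat_i32(V[1]), 0, 0};
}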
18356 case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
18357 Value *Ops[18];
18358 size_t OpIdx = 0;
18359 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
18360 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
18361 while (OpIdx < 18) {
18362 Optional<llvm::APSInt> LaneConst =
18363 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
18364 assert(LaneConst && "Constant arg isn't actually constant?");
18365 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
18366 }
18367 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
18368 return Builder.CreateCall(Callee, Ops);
18369 }
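// Illustrative sketch, not part of CGBuiltin.cpp: the lane selection performed
// by the shuffle emitted above, assuming each of the 16 constant lane
// arguments indexes into the 32-byte concatenation of the two input vectors.
// The helper name and the defensive masking of the index are assumptions of
// this sketch.
#include <array>
#include <cstdint>

static std::array<uint8_t, 16> shuffleI8x16(const std::array<uint8_t, 16> &A,
                                            const std::array<uint8_t, 16> &B,
                                            const std::array<uint8_t, 16> &Lanes) {
  std::array<uint8_t, 16> Out{};
  for (unsigned I = 0; I != 16; ++I) {
    unsigned L = Lanes[I] & 31u; // lane indices are expected to be in [0, 31]
    Out[I] = L < 16 ? A[L] : B[L - 16];
  }
  return Out;
}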
18370 case WebAssembly::BI__builtin_wasm_fma_f32x4:
18371 case WebAssembly::BI__builtin_wasm_fms_f32x4:
18372 case WebAssembly::BI__builtin_wasm_fma_f64x2:
18373 case WebAssembly::BI__builtin_wasm_fms_f64x2: {
18374 Value *A = EmitScalarExpr(E->getArg(0));
18375 Value *B = EmitScalarExpr(E->getArg(1));
18376 Value *C = EmitScalarExpr(E->getArg(2));
18377 unsigned IntNo;
18378 switch (BuiltinID) {
18379 case WebAssembly::BI__builtin_wasm_fma_f32x4:
18380 case WebAssembly::BI__builtin_wasm_fma_f64x2:
18381 IntNo = Intrinsic::wasm_fma;
18382 break;
18383 case WebAssembly::BI__builtin_wasm_fms_f32x4:
18384 case WebAssembly::BI__builtin_wasm_fms_f64x2:
18385 IntNo = Intrinsic::wasm_fms;
18386 break;
18387 default:
18388 llvm_unreachable("unexpected builtin ID");
18389 }
18390 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
18391 return Builder.CreateCall(Callee, {A, B, C});
18392 }
18393 case WebAssembly::BI__builtin_wasm_laneselect_i8x16:
18394 case WebAssembly::BI__builtin_wasm_laneselect_i16x8:
18395 case WebAssembly::BI__builtin_wasm_laneselect_i32x4:
18396 case WebAssembly::BI__builtin_wasm_laneselect_i64x2: {
18397 Value *A = EmitScalarExpr(E->getArg(0));
18398 Value *B = EmitScalarExpr(E->getArg(1));
18399 Value *C = EmitScalarExpr(E->getArg(2));
18400 Function *Callee =
18401 CGM.getIntrinsic(Intrinsic::wasm_laneselect, A->getType());
18402 return Builder.CreateCall(Callee, {A, B, C});
18403 }
18404 case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
18405 Value *Src = EmitScalarExpr(E->getArg(0));
18406 Value *Indices = EmitScalarExpr(E->getArg(1));
18407 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
18408 return Builder.CreateCall(Callee, {Src, Indices});
18409 }
18410 case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
18411 case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
18412 case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
18413 case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
18414 Value *LHS = EmitScalarExpr(E->getArg(0));
18415 Value *RHS = EmitScalarExpr(E->getArg(1));
18416 unsigned IntNo;
18417 switch (BuiltinID) {
18418 case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
18419 case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
18420 IntNo = Intrinsic::wasm_relaxed_min;
18421 break;
18422 case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
18423 case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
18424 IntNo = Intrinsic::wasm_relaxed_max;
18425 break;
18426 default:
18427 llvm_unreachable("unexpected builtin ID");
18428 }
18429 Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
18430 return Builder.CreateCall(Callee, {LHS, RHS});
18431 }
18432 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
18433 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
18434 case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
18435 case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2: {
18436 Value *Vec = EmitScalarExpr(E->getArg(0));
18437 unsigned IntNo;
18438 switch (BuiltinID) {
18439 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
18440 IntNo = Intrinsic::wasm_relaxed_trunc_signed;
18441 break;
18442 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
18443 IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
18444 break;
18445 case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
18446 IntNo = Intrinsic::wasm_relaxed_trunc_zero_signed;
18447 break;
18448 case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2:
18449 IntNo = Intrinsic::wasm_relaxed_trunc_zero_unsigned;
18450 break;
18451 default:
18452 llvm_unreachable("unexpected builtin ID");
18453 }
18454 Function *Callee = CGM.getIntrinsic(IntNo);
18455 return Builder.CreateCall(Callee, {Vec});
18456 }
18457 default:
18458 return nullptr;
18459 }
18460}
18461
18462static std::pair<Intrinsic::ID, unsigned>
18463getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
18464 struct Info {
18465 unsigned BuiltinID;
18466 Intrinsic::ID IntrinsicID;
18467 unsigned VecLen;
18468 };
18469 Info Infos[] = {
18470#define CUSTOM_BUILTIN_MAPPING(x,s) \
18471 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
18472 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
18473 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
18474 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
18475 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
18476 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
18477 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
18478 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
18479 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
18480 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
18481 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
18482 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
18483 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
18484 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
18485 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
18486 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
18487 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
18488 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
18489 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
18490 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
18491 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
18492 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
18493 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
18494 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
18495 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
18496 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
18497 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
18498 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
18499 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
18500 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
18501 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
18502#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
18503#undef CUSTOM_BUILTIN_MAPPING
18504 };
18505
18506 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
18507 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
18508 (void)SortOnce;
18509
18510 const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
18511 Info{BuiltinID, 0, 0}, CmpInfo);
18512 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
18513 return {Intrinsic::not_intrinsic, 0};
18514
18515 return {F->IntrinsicID, F->VecLen};
18516}
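// Illustrative sketch, not part of CGBuiltin.cpp: the sort-once-then-binary-
// search lookup pattern used above, reduced to a hypothetical Key/Payload
// table. The static array is sorted exactly once, on the first call, and
// std::lower_bound then either finds the entry or reports a miss.
#include <algorithm>
#include <iterator>

struct TableEntry {
  unsigned Key;
  unsigned Payload;
};

static unsigned lookupPayload(unsigned Key) {
  static TableEntry Table[] = {{7, 70}, {3, 30}, {9, 90}}; // source order is unsorted
  auto Cmp = [](TableEntry A, TableEntry B) { return A.Key < B.Key; };
  static const bool SortOnce =
      (std::sort(std::begin(Table), std::end(Table), Cmp), true);
  (void)SortOnce; // suppress the unused-variable warning; the sort ran once

  const TableEntry *F = std::lower_bound(std::begin(Table), std::end(Table),
                                         TableEntry{Key, 0}, Cmp);
  if (F == std::end(Table) || F->Key != Key)
    return 0; // analogous to returning Intrinsic::not_intrinsic above
  return F->Payload;
}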
18517
18518Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
18519 const CallExpr *E) {
18520 Intrinsic::ID ID;
18521 unsigned VecLen;
18522 std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
18523
18524 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
18525 // The base pointer is passed by address, so it needs to be loaded.
18526 Address A = EmitPointerWithAlignment(E->getArg(0));
18527 Address BP = Address(
18528 Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
18529 llvm::Value *Base = Builder.CreateLoad(BP);
18530 // The treatment of both loads and stores is the same: the arguments for
18531 // the builtin are the same as the arguments for the intrinsic.
18532 // Load:
18533 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
18534 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
18535 // Store:
18536 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
18537 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
18538 SmallVector<llvm::Value*,5> Ops = { Base };
18539 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
18540 Ops.push_back(EmitScalarExpr(E->getArg(i)));
18541
18542 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
18543 // The load intrinsics generate two results (Value, NewBase), stores
18544 // generate one (NewBase). The new base address needs to be stored.
18545 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
18546 : Result;
18547 llvm::Value *LV = Builder.CreateBitCast(
18548 EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
18549 Address Dest = EmitPointerWithAlignment(E->getArg(0));
18550 llvm::Value *RetVal =
18551 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
18552 if (IsLoad)
18553 RetVal = Builder.CreateExtractValue(Result, 0);
18554 return RetVal;
18555 };
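// Illustrative sketch, not part of CGBuiltin.cpp: the load side of the
// contract MakeCircOp implements, with hypothetical scalar stand-ins. The
// builtin receives the base pointer by address; the intrinsic returns
// {Value, NewBase}; the new base is written back through that address and the
// loaded value becomes the builtin's result. circLoad32 below is a stand-in,
// not a real Hexagon intrinsic.
#include <cstdint>
#include <cstring>
#include <utility>

static std::pair<int32_t, uint8_t *> circLoad32(uint8_t *Base, int32_t Inc,
                                                uint32_t Mod, uint8_t *Start) {
  // Stand-in body: a real circular load would wrap Base inside the buffer
  // described by Mod and Start instead of advancing it linearly.
  (void)Mod;
  (void)Start;
  int32_t Value;
  std::memcpy(&Value, Base, sizeof(Value));
  return {Value, Base + Inc};
}

static int32_t emitCircLoad32(uint8_t **BaseAddr, int32_t Inc, uint32_t Mod,
                              uint8_t *Start) {
  auto Result = circLoad32(*BaseAddr, Inc, Mod, Start); // {Value, NewBase}
  *BaseAddr = Result.second;                            // new base is stored back
  return Result.first;                                  // loaded value is returned
}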
18556
18557 // Handle the conversion of bit-reverse load builtins to LLVM IR.
18558 // The intrinsic call after this function only reads from memory and the
18559 // write to memory is handled by the store instruction.
18560 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
18561 // The intrinsic generates one result, which is the new value for the base
18562 // pointer. It needs to be returned. The result of the load instruction is
18563 // passed to the intrinsic by address, so the value needs to be stored.
18564 llvm::Value *BaseAddress =
18565 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
18566
18567 // Expressions like &(*pt++) are incremented on each evaluation, so
18568 // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
18569 // only once per call.
18570 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
18571 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
18572 DestAddr.getAlignment());
18573 llvm::Value *DestAddress = DestAddr.getPointer();
18574
18575 // Operands are Base, Dest, Modifier.
18576 // The intrinsic format in LLVM IR is defined as
18577 // { ValueType, i8* } (i8*, i32).
18578 llvm::Value *Result = Builder.CreateCall(
18579 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
18580
18581 // The value needs to be stored as the variable is passed by reference.
18582 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
18583
18584 // The stored value needs to be truncated to fit the destination type.
18585 // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
18586 // to be handled with stores of the respective destination type.
18587 DestVal = Builder.CreateTrunc(DestVal, DestTy);
18588
18589 llvm::Value *DestForStore =
18590 Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
18591 Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
18592 // The updated value of the base pointer is returned.
18593 return Builder.CreateExtractValue(Result, 1);
18594 };
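// Illustrative sketch, not part of CGBuiltin.cpp: the contract MakeBrevLd
// implements for a 16-bit destination, with hypothetical scalar stand-ins for
// the { ValueType, i8* } intrinsic result. The loaded value is truncated and
// stored through the destination pointer, and the updated base pointer is the
// builtin's return value. brevLoad32 below is a stand-in, not a real Hexagon
// intrinsic.
#include <cstdint>
#include <cstring>
#include <utility>

static std::pair<int32_t, uint8_t *> brevLoad32(uint8_t *Base, int32_t Mod) {
  // Stand-in body: a real implementation would use bit-reversed addressing
  // derived from Mod to compute the next base.
  int32_t Value;
  std::memcpy(&Value, Base, sizeof(Value));
  return {Value, Base + (Mod & 0xff)};
}

static uint8_t *emitBrevLdH(uint8_t *Base, int16_t *Dest, int32_t Mod) {
  auto Result = brevLoad32(Base, Mod);
  *Dest = (int16_t)Result.first; // store is truncated to the destination type
  return Result.second;          // updated base pointer is returned
}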
18595
18596 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
18597 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
18598 : Intrinsic::hexagon_V6_vandvrt;
18599 return Builder.CreateCall(CGM.getIntrinsic(ID),
18600 {Vec, Builder.getInt32(-1)});
18601 };
18602 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
18603 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
18604 : Intrinsic::hexagon_V6_vandqrt;
18605 return Builder.CreateCall(CGM.getIntrinsic(ID),
18606 {Pred, Builder.getInt32(-1)});
18607 };
18608
18609 switch (BuiltinID) {
18610 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
18611 // and the corresponding C/C++ builtins use loads/stores to update
18612 // the predicate.
18613 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
18614 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
18615 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
18616 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
18617 // Get the type from the 0-th argument.
18618 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
18619 Address PredAddr = Builder.CreateBitCast(
18620 EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
18621 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
18622 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
18623 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
18624
18625 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
18626 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
18627 PredAddr.getAlignment());
18628 return Builder.CreateExtractValue(Result, 0);
18629 }
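// Illustrative sketch, not part of CGBuiltin.cpp: the in/out predicate flow in
// the case above, with hypothetical scalar stand-ins (the real builtins work
// on HVX vectors and vector predicates). The predicate lives in memory: it is
// loaded on entry, the intrinsic returns {Result, PredOut}, the new predicate
// is stored back, and the value part becomes the builtin's result.
#include <cstdint>
#include <utility>

// Hypothetical scalar stand-in for an add-with-carry returning {sum, carry}.
static std::pair<uint32_t, bool> addCarry(uint32_t A, uint32_t B, bool CarryIn) {
  uint64_t Wide = (uint64_t)A + (uint64_t)B + (CarryIn ? 1u : 0u);
  return {(uint32_t)Wide, Wide > 0xffffffffu};
}

static uint32_t emitAddCarry(uint32_t A, uint32_t B, bool *CarryInOut) {
  auto Result = addCarry(A, B, *CarryInOut); // predicate is loaded from memory
  *CarryInOut = Result.second;               // updated predicate is stored back
  return Result.first;                       // value part is the builtin's result
}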
18630
18631 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
18632 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
18633 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
18634 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
18635 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
18636 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
18637 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
18638 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
18639 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
18640 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
18641 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
18642 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
18643 return MakeCircOp(ID, /*IsLoad=*/true);
18644 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
18645 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
18646 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
18647 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
18648 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
18649 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
18650 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
18651 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
18652 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
18653 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
18654 return MakeCircOp(ID, /*IsLoad=*/false);
18655 case Hexagon::BI__builtin_brev_ldub:
18656 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
18657 case Hexagon::BI__builtin_brev_ldb:
18658 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
18659 case Hexagon::BI__builtin_brev_lduh:
18660 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
18661 case Hexagon::BI__builtin_brev_ldh:
18662 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
18663 case Hexagon::BI__builtin_brev_ldw:
18664 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
18665 case Hexagon::BI__builtin_brev_ldd:
18666 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
18667
18668 default: {
18669 if (ID == Intrinsic::not_intrinsic)
18670 return nullptr;
18671
18672 auto IsVectorPredTy = [](llvm::Type *T) {
18673 return T->isVectorTy() &&
18674 cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
18675 };
18676
18677 llvm::Function *IntrFn = CGM.getIntrinsic(ID);
18678 llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
18679 SmallVector<llvm::Value*,4> Ops;
18680 for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
18681 llvm::Type *T = IntrTy->getParamType(i);
18682 const Expr *A = E->getArg(i);
18683 if (IsVectorPredTy(T)) {
18684 // There will be an implicit cast to a boolean vector. Strip it.
18685 if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
18686 if (Cast->getCastKind() == CK_BitCast)
18687 A = Cast->getSubExpr();
18688 }
18689 Ops.push_back(V2Q(EmitScalarExpr(A)));
18690 } else {
18691 Ops.push_back(EmitScalarExpr(A));
18692 }
18693 }
18694
18695 llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
18696 if (IsVectorPredTy(IntrTy->getReturnType()))
18697 Call = Q2V(Call);
18698
18699 return Call;
18700 } // default
18701 } // switch
18702
18703 return nullptr;
18704}
18705
18706Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
18707 const CallExpr *E,
18708 ReturnValueSlot ReturnValue) {
18709 SmallVector<Value *, 4> Ops;
18710 llvm::Type *ResultType = ConvertType(E->getType());
18711
18712 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
18713 Ops.push_back(EmitScalarExpr(E->getArg(i)));
18714
18715 Intrinsic::ID ID = Intrinsic::not_intrinsic;
18716 unsigned NF = 1;
18717 constexpr unsigned TAIL_UNDISTURBED = 0;
18718
18719 // Required for overloaded intrinsics.
18720 llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
18721 switch (BuiltinID) {
18722 default: llvm_unreachable("unexpected builtin ID");
18723 case RISCV::BI__builtin_riscv_orc_b_32:
18724 case RISCV::BI__builtin_riscv_orc_b_64:
18725 case RISCV::BI__builtin_riscv_clmul:
18726 case RISCV::BI__builtin_riscv_clmulh:
18727 case RISCV::BI__builtin_riscv_clmulr:
18728 case RISCV::BI__builtin_riscv_bcompress_32:
18729 case RISCV::BI__builtin_riscv_bcompress_64:
18730 case RISCV::BI__builtin_riscv_bdecompress_32:
18731 case RISCV::BI__builtin_riscv_bdecompress_64:
18732 case RISCV::BI__builtin_riscv_grev_32:
18733 case RISCV::BI__builtin_riscv_grev_64:
18734 case RISCV::BI__builtin_riscv_gorc_32:
18735 case RISCV::BI__builtin_riscv_gorc_64:
18736 case RISCV::BI__builtin_riscv_shfl_32:
18737 case RISCV::BI__builtin_riscv_shfl_64:
18738 case RISCV::BI__builtin_riscv_unshfl_32:
18739 case RISCV::BI__builtin_riscv_unshfl_64:
18740 case RISCV::BI__builtin_riscv_xperm_n:
18741 case RISCV::BI__builtin_riscv_xperm_b:
18742 case RISCV::BI__builtin_riscv_xperm_h:
18743 case RISCV::BI__builtin_riscv_xperm_w:
18744 case RISCV::BI__builtin_riscv_crc32_b:
18745 case RISCV::BI__builtin_riscv_crc32_h:
18746 case RISCV::BI__builtin_riscv_crc32_w:
18747 case RISCV::BI__builtin_riscv_crc32_d:
18748 case RISCV::BI__builtin_riscv_crc32c_b:
18749 case RISCV::BI__builtin_riscv_crc32c_h:
18750 case RISCV::BI__builtin_riscv_crc32c_w:
18751 case RISCV::BI__builtin_riscv_crc32c_d: {
18752 switch (BuiltinID) {
18753 default: llvm_unreachable("unexpected builtin ID");
18754 // Zbb
18755 case RISCV::BI__builtin_riscv_orc_b_32:
18756 case RISCV::BI__builtin_riscv_orc_b_64:
18757 ID = Intrinsic::riscv_orc_b;
18758 break;
18759
18760 // Zbc
18761 case RISCV::BI__builtin_riscv_clmul:
18762 ID = Intrinsic::riscv_clmul;
18763 break;
18764 case RISCV::BI__builtin_riscv_clmulh:
18765 ID = Intrinsic::riscv_clmulh;
18766 break;
18767 case RISCV::BI__builtin_riscv_clmulr:
18768 ID = Intrinsic::riscv_clmulr;
18769 break;
18770
18771 // Zbe
18772 case RISCV::BI__builtin_riscv_bcompress_32:
18773 case RISCV::BI__builtin_riscv_bcompress_64:
18774 ID = Intrinsic::riscv_bcompress;
18775 break;
18776 case RISCV::BI__builtin_riscv_bdecompress_32:
18777 case RISCV::BI__builtin_riscv_bdecompress_64:
18778 ID = Intrinsic::riscv_bdecompress;
18779 break;
18780
18781 // Zbp
18782 case RISCV::BI__builtin_riscv_grev_32:
18783 case RISCV::BI__builtin_riscv_grev_64:
18784 ID = Intrinsic::riscv_grev;
18785 break;
18786 case RISCV::BI__builtin_riscv_gorc_32:
18787 case RISCV::BI__builtin_riscv_gorc_64:
18788 ID = Intrinsic::riscv_gorc;
18789 break;
18790 case RISCV::BI__builtin_riscv_shfl_32:
18791 case RISCV::BI__builtin_riscv_shfl_64:
18792 ID = Intrinsic::riscv_shfl;
18793 break;
18794 case RISCV::BI__builtin_riscv_unshfl_32:
18795 case RISCV::BI__builtin_riscv_unshfl_64:
18796 ID = Intrinsic::riscv_unshfl;
18797 break;
18798 case RISCV::BI__builtin_riscv_xperm_n:
18799 ID = Intrinsic::riscv_xperm_n;
18800 break;
18801 case RISCV::BI__builtin_riscv_xperm_b:
18802 ID = Intrinsic::riscv_xperm_b;
18803 break;
18804 case RISCV::BI__builtin_riscv_xperm_h:
18805 ID = Intrinsic::riscv_xperm_h;
18806 break;
18807 case RISCV::BI__builtin_riscv_xperm_w:
18808 ID = Intrinsic::riscv_xperm_w;
18809 break;
18810
18811 // Zbr
18812 case RISCV::BI__builtin_riscv_crc32_b:
18813 ID = Intrinsic::riscv_crc32_b;
18814 break;
18815 case RISCV::BI__builtin_riscv_crc32_h:
18816 ID = Intrinsic::riscv_crc32_h;
18817 break;
18818 case RISCV::BI__builtin_riscv_crc32_w:
18819 ID = Intrinsic::riscv_crc32_w;
18820 break;
18821 case RISCV::BI__builtin_riscv_crc32_d:
18822 ID = Intrinsic::riscv_crc32_d;
18823 break;
18824 case RISCV::BI__builtin_riscv_crc32c_b:
18825 ID = Intrinsic::riscv_crc32c_b;
18826 break;
18827 case RISCV::BI__builtin_riscv_crc32c_h:
18828 ID = Intrinsic::riscv_crc32c_h;
18829 break;
18830 case RISCV::BI__builtin_riscv_crc32c_w:
18831 ID = Intrinsic::riscv_crc32c_w;
18832 break;
18833 case RISCV::BI__builtin_riscv_crc32c_d:
18834 ID = Intrinsic::riscv_crc32c_d;
18835 break;
18836 }
18837
18838 IntrinsicTypes = {ResultType};
18839 break;
18840 }
18841 // Vector builtins are handled from here.
18842#include "clang/Basic/riscv_vector_builtin_cg.inc"
18843 }
18844
18845 assert(ID != Intrinsic::not_intrinsic);
18846
18847 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
18848 return Builder.CreateCall(F, Ops, "");
18849}