Bug Summary

File: clang/lib/CodeGen/CGBuiltin.cpp
Warning: line 14826, column 9
1st function call argument is an uninitialized value
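
For reference, this kind of diagnostic (produced by the core.CallAndMessage checker enabled via -analyzer-checker=core below) fires when a value that may never have been written is passed as a call argument. A minimal sketch of the pattern with hypothetical names — not the flagged code itself, which is at line 14826:

    void use(int v);          // hypothetical callee
    void f(int mode) {
      int v;                  // no initializer
      if (mode == 1)
        v = 42;
      use(v);                 // if mode != 1, the 1st argument is an uninitialized value
    }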

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGBuiltin.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-17-195756-12974-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGObjCRuntime.h"
15#include "CGOpenCLRuntime.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "CodeGenModule.h"
19#include "ConstantEmitter.h"
20#include "PatternInit.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/OSLog.h"
26#include "clang/Basic/TargetBuiltins.h"
27#include "clang/Basic/TargetInfo.h"
28#include "clang/CodeGen/CGFunctionInfo.h"
29#include "llvm/ADT/SmallPtrSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/Analysis/ValueTracking.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/IntrinsicsAArch64.h"
36#include "llvm/IR/IntrinsicsAMDGPU.h"
37#include "llvm/IR/IntrinsicsARM.h"
38#include "llvm/IR/IntrinsicsBPF.h"
39#include "llvm/IR/IntrinsicsHexagon.h"
40#include "llvm/IR/IntrinsicsNVPTX.h"
41#include "llvm/IR/IntrinsicsPowerPC.h"
42#include "llvm/IR/IntrinsicsR600.h"
43#include "llvm/IR/IntrinsicsS390.h"
44#include "llvm/IR/IntrinsicsWebAssembly.h"
45#include "llvm/IR/IntrinsicsX86.h"
46#include "llvm/IR/MDBuilder.h"
47#include "llvm/IR/MatrixBuilder.h"
48#include "llvm/Support/ConvertUTF.h"
49#include "llvm/Support/ScopedPrinter.h"
50#include "llvm/Support/X86TargetParser.h"
51#include <sstream>
52
53using namespace clang;
54using namespace CodeGen;
55using namespace llvm;
56
57static
58int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
59 return std::min(High, std::max(Low, Value));
60}
61
62static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
63 Align AlignmentInBytes) {
64 ConstantInt *Byte;
65 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
66 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
67 // Nothing to initialize.
68 return;
69 case LangOptions::TrivialAutoVarInitKind::Zero:
70 Byte = CGF.Builder.getInt8(0x00);
71 break;
72 case LangOptions::TrivialAutoVarInitKind::Pattern: {
73 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
74 Byte = llvm::dyn_cast<llvm::ConstantInt>(
75 initializationPatternFor(CGF.CGM, Int8));
76 break;
77 }
78 }
79 if (CGF.CGM.stopAutoInit())
80 return;
81 CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
82}
83
84/// getBuiltinLibFunction - Given a builtin id for a function like
85/// "__builtin_fabsf", return a Function* for "fabsf".
86llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
87 unsigned BuiltinID) {
88 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
89
90 // Get the name, skip over the __builtin_ prefix (if necessary).
91 StringRef Name;
92 GlobalDecl D(FD);
93
94 // If the builtin has been declared explicitly with an assembler label,
95 // use the mangled name. This differs from the plain label on platforms
96 // that prefix labels.
97 if (FD->hasAttr<AsmLabelAttr>())
98 Name = getMangledName(D);
99 else
100 Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
101
102 llvm::FunctionType *Ty =
103 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
104
105 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
106}
107
108/// Emit the conversions required to turn the given value into an
109/// integer of the given size.
110static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
111 QualType T, llvm::IntegerType *IntType) {
112 V = CGF.EmitToMemory(V, T);
113
114 if (V->getType()->isPointerTy())
115 return CGF.Builder.CreatePtrToInt(V, IntType);
116
117 assert(V->getType() == IntType);
118 return V;
119}
120
121static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
122 QualType T, llvm::Type *ResultType) {
123 V = CGF.EmitFromMemory(V, T);
124
125 if (ResultType->isPointerTy())
126 return CGF.Builder.CreateIntToPtr(V, ResultType);
127
128 assert(V->getType() == ResultType);
129 return V;
130}
131
132/// Utility to insert an atomic instruction based on Intrinsic::ID
133/// and the expression node.
134static Value *MakeBinaryAtomicValue(
135 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
136 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
137 QualType T = E->getType();
138 assert(E->getArg(0)->getType()->isPointerType());
139 assert(CGF.getContext().hasSameUnqualifiedType(T,
140 E->getArg(0)->getType()->getPointeeType()));
141 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
142
143 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
144 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
145
146 llvm::IntegerType *IntType =
147 llvm::IntegerType::get(CGF.getLLVMContext(),
148 CGF.getContext().getTypeSize(T));
149 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
150
151 llvm::Value *Args[2];
152 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
153 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
154 llvm::Type *ValueType = Args[1]->getType();
155 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
156
157 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
158 Kind, Args[0], Args[1], Ordering);
159 return EmitFromInt(CGF, Result, T, ValueType);
160}
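
For orientation, MakeBinaryAtomicValue is the helper behind the __sync_fetch_and_* builtins; a source-level sketch of one call that reaches it (names and values illustrative):

    int Counter = 0;
    int Old = __sync_fetch_and_add(&Counter, 5);  // roughly: atomicrmw add i32* %p, i32 5 seq_cst
    // Old holds the pre-add value; EmitBinaryAtomicPost (below) serves the
    // __sync_add_and_fetch form, which returns the post-operation value instead.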
161
162static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
163 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
164 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
165
166 // Convert the type of the pointer to a pointer to the stored type.
167 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
168 Value *BC = CGF.Builder.CreateBitCast(
169 Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
170 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
171 LV.setNontemporal(true);
172 CGF.EmitStoreOfScalar(Val, LV, false);
173 return nullptr;
174}
175
176static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
177 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
178
179 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
180 LV.setNontemporal(true);
181 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
182}
183
184static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
185 llvm::AtomicRMWInst::BinOp Kind,
186 const CallExpr *E) {
187 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
188}
189
190/// Utility to insert an atomic instruction based Intrinsic::ID and
191/// the expression node, where the return value is the result of the
192/// operation.
193static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
194 llvm::AtomicRMWInst::BinOp Kind,
195 const CallExpr *E,
196 Instruction::BinaryOps Op,
197 bool Invert = false) {
198 QualType T = E->getType();
199 assert(E->getArg(0)->getType()->isPointerType());
200 assert(CGF.getContext().hasSameUnqualifiedType(T,
201 E->getArg(0)->getType()->getPointeeType()));
202 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
203
204 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
205 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
206
207 llvm::IntegerType *IntType =
208 llvm::IntegerType::get(CGF.getLLVMContext(),
209 CGF.getContext().getTypeSize(T));
210 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
211
212 llvm::Value *Args[2];
213 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
214 llvm::Type *ValueType = Args[1]->getType();
215 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
216 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
217
218 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
219 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
220 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
221 if (Invert)
222 Result =
223 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
224 llvm::ConstantInt::getAllOnesValue(IntType));
225 Result = EmitFromInt(CGF, Result, T, ValueType);
226 return RValue::get(Result);
227}
228
229/// Utility to insert an atomic cmpxchg instruction.
230///
231/// @param CGF The current codegen function.
232/// @param E Builtin call expression to convert to cmpxchg.
233/// arg0 - address to operate on
234/// arg1 - value to compare with
235/// arg2 - new value
236/// @param ReturnBool Specifies whether to return success flag of
237/// cmpxchg result or the old value.
238///
239/// @returns result of cmpxchg, according to ReturnBool
240///
241/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
242/// invoke the function EmitAtomicCmpXchgForMSIntrin.
243static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
244 bool ReturnBool) {
245 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
246 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
247 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
248
249 llvm::IntegerType *IntType = llvm::IntegerType::get(
250 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
251 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
252
253 Value *Args[3];
254 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
255 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
256 llvm::Type *ValueType = Args[1]->getType();
257 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
258 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
259
260 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
261 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
262 llvm::AtomicOrdering::SequentiallyConsistent);
263 if (ReturnBool)
264 // Extract boolean success flag and zext it to int.
265 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
266 CGF.ConvertType(E->getType()));
267 else
268 // Extract old value and emit it using the same type as compare value.
269 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
270 ValueType);
271}
272
273/// This function should be invoked to emit atomic cmpxchg for Microsoft's
274/// _InterlockedCompareExchange* intrinsics which have the following signature:
275/// T _InterlockedCompareExchange(T volatile *Destination,
276/// T Exchange,
277/// T Comparand);
278///
279/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
280/// cmpxchg *Destination, Comparand, Exchange.
281/// So we need to swap Comparand and Exchange when invoking
282/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
283/// function MakeAtomicCmpXchgValue since it expects the arguments to be
284/// already swapped.
285
286static
287Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
288 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
289 assert(E->getArg(0)->getType()->isPointerType());
290 assert(CGF.getContext().hasSameUnqualifiedType(
291 E->getType(), E->getArg(0)->getType()->getPointeeType()));
292 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
293 E->getArg(1)->getType()));
294 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
295 E->getArg(2)->getType()));
296
297 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
298 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
299 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
300
301 // For Release ordering, the failure ordering should be Monotonic.
302 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
303 AtomicOrdering::Monotonic :
304 SuccessOrdering;
305
306 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
307 Destination, Comparand, Exchange,
308 SuccessOrdering, FailureOrdering);
309 Result->setVolatile(true);
310 return CGF.Builder.CreateExtractValue(Result, 0);
311}
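
The operand swap described above is easiest to see against the intrinsic's reference semantics; a sketch, not the MSVC implementation:

    // T _InterlockedCompareExchange(T volatile *Destination, T Exchange, T Comparand)
    long InterlockedCompareExchange_ref(long volatile *Destination,
                                        long Exchange, long Comparand) {
      long Old = *Destination;
      if (Old == Comparand)          // LLVM cmpxchg takes (ptr, Comparand, Exchange),
        *Destination = Exchange;     // hence the swapped EmitScalarExpr order above
      return Old;                    // the intrinsic returns the original value
    }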
312
313static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
314 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
315 assert(E->getArg(0)->getType()->isPointerType());
316
317 auto *IntTy = CGF.ConvertType(E->getType());
318 auto *Result = CGF.Builder.CreateAtomicRMW(
319 AtomicRMWInst::Add,
320 CGF.EmitScalarExpr(E->getArg(0)),
321 ConstantInt::get(IntTy, 1),
322 Ordering);
323 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
324}
325
326static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
327 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
328 assert(E->getArg(0)->getType()->isPointerType());
329
330 auto *IntTy = CGF.ConvertType(E->getType());
331 auto *Result = CGF.Builder.CreateAtomicRMW(
332 AtomicRMWInst::Sub,
333 CGF.EmitScalarExpr(E->getArg(0)),
334 ConstantInt::get(IntTy, 1),
335 Ordering);
336 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
337}
338
339// Build a plain volatile load.
340static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
341 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
342 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
343 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
344 llvm::Type *ITy =
345 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
346 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
347 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
348 Load->setVolatile(true);
349 return Load;
350}
351
352// Build a plain volatile store.
353static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
354 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
355 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
356 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
357 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
358 llvm::Type *ITy =
359 llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
360 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
361 llvm::StoreInst *Store =
362 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
363 Store->setVolatile(true);
364 return Store;
365}
366
367// Emit a simple mangled intrinsic that has 1 argument and a return type
368// matching the argument type. Depending on mode, this may be a constrained
369// floating-point intrinsic.
370static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
371 const CallExpr *E, unsigned IntrinsicID,
372 unsigned ConstrainedIntrinsicID) {
373 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
374
375 if (CGF.Builder.getIsFPConstrained()) {
376 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
377 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
378 } else {
379 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
380 return CGF.Builder.CreateCall(F, Src0);
381 }
382}
383
384// Emit an intrinsic that has 2 operands of the same type as its result.
385// Depending on mode, this may be a constrained floating-point intrinsic.
386static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
387 const CallExpr *E, unsigned IntrinsicID,
388 unsigned ConstrainedIntrinsicID) {
389 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
390 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
391
392 if (CGF.Builder.getIsFPConstrained()) {
393 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
394 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
395 } else {
396 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
397 return CGF.Builder.CreateCall(F, { Src0, Src1 });
398 }
399}
400
401// Emit an intrinsic that has 3 operands of the same type as its result.
402// Depending on mode, this may be a constrained floating-point intrinsic.
403static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
404 const CallExpr *E, unsigned IntrinsicID,
405 unsigned ConstrainedIntrinsicID) {
406 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
407 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
408 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
409
410 if (CGF.Builder.getIsFPConstrained()) {
411 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
412 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
413 } else {
414 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
415 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
416 }
417}
418
419// Emit an intrinsic where all operands are of the same type as the result.
420// Depending on mode, this may be a constrained floating-point intrinsic.
421static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
422 unsigned IntrinsicID,
423 unsigned ConstrainedIntrinsicID,
424 llvm::Type *Ty,
425 ArrayRef<Value *> Args) {
426 Function *F;
427 if (CGF.Builder.getIsFPConstrained())
428 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
429 else
430 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
431
432 if (CGF.Builder.getIsFPConstrained())
433 return CGF.Builder.CreateConstrainedFPCall(F, Args);
434 else
435 return CGF.Builder.CreateCall(F, Args);
436}
437
438// Emit a simple mangled intrinsic that has 1 argument and a return type
439// matching the argument type.
440static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
441 const CallExpr *E,
442 unsigned IntrinsicID) {
443 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
444
445 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
446 return CGF.Builder.CreateCall(F, Src0);
447}
448
449// Emit an intrinsic that has 2 operands of the same type as its result.
450static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
451 const CallExpr *E,
452 unsigned IntrinsicID) {
453 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
454 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
455
456 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
457 return CGF.Builder.CreateCall(F, { Src0, Src1 });
458}
459
460// Emit an intrinsic that has 3 operands of the same type as its result.
461static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
462 const CallExpr *E,
463 unsigned IntrinsicID) {
464 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
465 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
466 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
467
468 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
469 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
470}
471
472// Emit an intrinsic that has 1 float or double operand, and 1 integer.
473static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
474 const CallExpr *E,
475 unsigned IntrinsicID) {
476 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
477 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
478
479 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
480 return CGF.Builder.CreateCall(F, {Src0, Src1});
481}
482
483// Emit an intrinsic that has overloaded integer result and fp operand.
484static Value *
485emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
486 unsigned IntrinsicID,
487 unsigned ConstrainedIntrinsicID) {
488 llvm::Type *ResultType = CGF.ConvertType(E->getType());
489 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
490
491 if (CGF.Builder.getIsFPConstrained()) {
492 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
493 {ResultType, Src0->getType()});
494 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
495 } else {
496 Function *F =
497 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
498 return CGF.Builder.CreateCall(F, Src0);
499 }
500}
501
502/// EmitFAbs - Emit a call to @llvm.fabs().
503static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
504 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
505 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
506 Call->setDoesNotAccessMemory();
507 return Call;
508}
509
510/// Emit the computation of the sign bit for a floating point value. Returns
511/// the i1 sign bit value.
512static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
513 LLVMContext &C = CGF.CGM.getLLVMContext();
514
515 llvm::Type *Ty = V->getType();
516 int Width = Ty->getPrimitiveSizeInBits();
517 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
518 V = CGF.Builder.CreateBitCast(V, IntTy);
519 if (Ty->isPPC_FP128Ty()) {
520 // We want the sign bit of the higher-order double. The bitcast we just
521 // did works as if the double-double was stored to memory and then
522 // read as an i128. The "store" will put the higher-order double in the
523 // lower address in both little- and big-Endian modes, but the "load"
524 // will treat those bits as a different part of the i128: the low bits in
525 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
526 // we need to shift the high bits down to the low before truncating.
527 Width >>= 1;
528 if (CGF.getTarget().isBigEndian()) {
529 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
530 V = CGF.Builder.CreateLShr(V, ShiftCst);
531 }
532 // We are truncating value in order to extract the higher-order
533 // double, which we will be using to extract the sign from.
534 IntTy = llvm::IntegerType::get(C, Width);
535 V = CGF.Builder.CreateTrunc(V, IntTy);
536 }
537 Value *Zero = llvm::Constant::getNullValue(IntTy);
538 return CGF.Builder.CreateICmpSLT(V, Zero);
539}
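
A scalar reference for the common (non-ppc_fp128) path above, assuming the usual IEEE-754 double layout; a sketch rather than the emitted IR:

    #include <cstdint>
    #include <cstring>
    static bool signbit_ref(double X) {
      uint64_t Bits;
      std::memcpy(&Bits, &X, sizeof Bits);  // the bitcast to an integer performed above
      return (int64_t)Bits < 0;             // icmp slt against zero: the sign bit
    }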
540
541static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
542 const CallExpr *E, llvm::Constant *calleeValue) {
543 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
544 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
545}
546
547/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
548/// depending on IntrinsicID.
549///
550/// \arg CGF The current codegen function.
551/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
552/// \arg X The first argument to the llvm.*.with.overflow.*.
553/// \arg Y The second argument to the llvm.*.with.overflow.*.
554/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
555/// \returns The result (i.e. sum/product) returned by the intrinsic.
556static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
557 const llvm::Intrinsic::ID IntrinsicID,
558 llvm::Value *X, llvm::Value *Y,
559 llvm::Value *&Carry) {
560 // Make sure we have integers of the same width.
561 assert(X->getType() == Y->getType() &&
562 "Arguments must be the same type. (Did you forget to make sure both "
563 "arguments have the same integer width?)");
564
565 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
566 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
567 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
568 return CGF.Builder.CreateExtractValue(Tmp, 0);
569}
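
For context, one of the source-level builtins that funnels into this helper, with illustrative values:

    unsigned A = 0xFFFFFFFFu, B = 2, Sum;
    bool Overflow = __builtin_uadd_overflow(A, B, &Sum);
    // Lowered via llvm.uadd.with.overflow.i32, which returns {result, carry};
    // here Overflow is true and Sum wraps around to 1.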
570
571static Value *emitRangedBuiltin(CodeGenFunction &CGF,
572 unsigned IntrinsicID,
573 int low, int high) {
574 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
575 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
576 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
577 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
578 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
579 return Call;
580}
581
582namespace {
583 struct WidthAndSignedness {
584 unsigned Width;
585 bool Signed;
586 };
587}
588
589static WidthAndSignedness
590getIntegerWidthAndSignedness(const clang::ASTContext &context,
591 const clang::QualType Type) {
592 assert(Type->isIntegerType() && "Given type is not an integer.");
593 unsigned Width = Type->isBooleanType() ? 1
594 : Type->isExtIntType() ? context.getIntWidth(Type)
595 : context.getTypeInfo(Type).Width;
596 bool Signed = Type->isSignedIntegerType();
597 return {Width, Signed};
598}
599
600// Given one or more integer types, this function produces an integer type that
601// encompasses them: any value in one of the given types could be expressed in
602// the encompassing type.
603static struct WidthAndSignedness
604EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
605 assert(Types.size() > 0 && "Empty list of types.");
606
607 // If any of the given types is signed, we must return a signed type.
608 bool Signed = false;
609 for (const auto &Type : Types) {
610 Signed |= Type.Signed;
611 }
612
613 // The encompassing type must have a width greater than or equal to the width
614 // of the specified types. Additionally, if the encompassing type is signed,
615 // its width must be strictly greater than the width of any unsigned types
616 // given.
617 unsigned Width = 0;
618 for (const auto &Type : Types) {
619 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
620 if (Width < MinWidth) {
621 Width = MinWidth;
622 }
623 }
624
625 return {Width, Signed};
626}
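
A worked example of the rule above, assuming inputs of a 32-bit unsigned and a 32-bit signed type:

    // Signed        = true                (one input is signed)
    // MinWidth(u32) = 32 + 1 = 33         (a signed result needs one extra bit to hold all of u32)
    // MinWidth(s32) = 32 + 0 = 32
    // Result        = {Width = 33, Signed = true}, i.e. a signed 33-bit integer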
627
628Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
629 llvm::Type *DestType = Int8PtrTy;
630 if (ArgValue->getType() != DestType)
631 ArgValue =
632 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
633
634 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
635 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
636}
637
638/// Checks if using the result of __builtin_object_size(p, @p From) in place of
639/// __builtin_object_size(p, @p To) is correct
640static bool areBOSTypesCompatible(int From, int To) {
641 // Note: Our __builtin_object_size implementation currently treats Type=0 and
642 // Type=2 identically. Encoding this implementation detail here may make
643 // improving __builtin_object_size difficult in the future, so it's omitted.
644 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
645}
646
647static llvm::Value *
648getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
649 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
650}
651
652llvm::Value *
653CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
654 llvm::IntegerType *ResType,
655 llvm::Value *EmittedE,
656 bool IsDynamic) {
657 uint64_t ObjectSize;
658 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
659 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
660 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
661}
662
663/// Returns a Value corresponding to the size of the given expression.
664/// This Value may be either of the following:
665/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
666/// it)
667/// - A call to the @llvm.objectsize intrinsic
668///
669/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
670/// and we wouldn't otherwise try to reference a pass_object_size parameter,
671/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
672llvm::Value *
673CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
674 llvm::IntegerType *ResType,
675 llvm::Value *EmittedE, bool IsDynamic) {
676 // We need to reference an argument if the pointer is a parameter with the
677 // pass_object_size attribute.
678 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
679 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
680 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
681 if (Param != nullptr && PS != nullptr &&
682 areBOSTypesCompatible(PS->getType(), Type)) {
683 auto Iter = SizeArguments.find(Param);
684 assert(Iter != SizeArguments.end());
685
686 const ImplicitParamDecl *D = Iter->second;
687 auto DIter = LocalDeclMap.find(D);
688 assert(DIter != LocalDeclMap.end());
689
690 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
691 getContext().getSizeType(), E->getBeginLoc());
692 }
693 }
694
695 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
696 // evaluate E for side-effects. In either case, we shouldn't lower to
697 // @llvm.objectsize.
698 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
699 return getDefaultBuiltinObjectSizeResult(Type, ResType);
700
701 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
702 assert(Ptr->getType()->isPointerTy() &&
703 "Non-pointer passed to __builtin_object_size?");
704
705 Function *F =
706 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
707
708 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
709 Value *Min = Builder.getInt1((Type & 2) != 0);
710 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
711 Value *NullIsUnknown = Builder.getTrue();
712 Value *Dynamic = Builder.getInt1(IsDynamic);
713 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
714}
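
A small source-level sketch of the builtin this lowers, with a hypothetical pointer p of unknown provenance to show the defaults returned above:

    #include <stddef.h>
    extern char *p;                                  // hypothetical, unknown object
    void sketch(void) {
      char Buf[16];
      size_t A = __builtin_object_size(Buf + 4, 0);  // 12: bytes remaining in Buf
      size_t B = __builtin_object_size(p, 0);        // unknown, (Type & 2) == 0 -> (size_t)-1
      size_t C = __builtin_object_size(p, 2);        // unknown, (Type & 2) != 0 -> 0
      (void)A; (void)B; (void)C;
    }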
715
716namespace {
717/// A struct to generically describe a bit test intrinsic.
718struct BitTest {
719 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
720 enum InterlockingKind : uint8_t {
721 Unlocked,
722 Sequential,
723 Acquire,
724 Release,
725 NoFence
726 };
727
728 ActionKind Action;
729 InterlockingKind Interlocking;
730 bool Is64Bit;
731
732 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
733};
734} // namespace
735
736BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
737 switch (BuiltinID) {
738 // Main portable variants.
739 case Builtin::BI_bittest:
740 return {TestOnly, Unlocked, false};
741 case Builtin::BI_bittestandcomplement:
742 return {Complement, Unlocked, false};
743 case Builtin::BI_bittestandreset:
744 return {Reset, Unlocked, false};
745 case Builtin::BI_bittestandset:
746 return {Set, Unlocked, false};
747 case Builtin::BI_interlockedbittestandreset:
748 return {Reset, Sequential, false};
749 case Builtin::BI_interlockedbittestandset:
750 return {Set, Sequential, false};
751
752 // X86-specific 64-bit variants.
753 case Builtin::BI_bittest64:
754 return {TestOnly, Unlocked, true};
755 case Builtin::BI_bittestandcomplement64:
756 return {Complement, Unlocked, true};
757 case Builtin::BI_bittestandreset64:
758 return {Reset, Unlocked, true};
759 case Builtin::BI_bittestandset64:
760 return {Set, Unlocked, true};
761 case Builtin::BI_interlockedbittestandreset64:
762 return {Reset, Sequential, true};
763 case Builtin::BI_interlockedbittestandset64:
764 return {Set, Sequential, true};
765
766 // ARM/AArch64-specific ordering variants.
767 case Builtin::BI_interlockedbittestandset_acq:
768 return {Set, Acquire, false};
769 case Builtin::BI_interlockedbittestandset_rel:
770 return {Set, Release, false};
771 case Builtin::BI_interlockedbittestandset_nf:
772 return {Set, NoFence, false};
773 case Builtin::BI_interlockedbittestandreset_acq:
774 return {Reset, Acquire, false};
775 case Builtin::BI_interlockedbittestandreset_rel:
776 return {Reset, Release, false};
777 case Builtin::BI_interlockedbittestandreset_nf:
778 return {Reset, NoFence, false};
779 }
780 llvm_unreachable("expected only bittest intrinsics");
781}
782
783static char bitActionToX86BTCode(BitTest::ActionKind A) {
784 switch (A) {
785 case BitTest::TestOnly: return '\0';
786 case BitTest::Complement: return 'c';
787 case BitTest::Reset: return 'r';
788 case BitTest::Set: return 's';
789 }
790 llvm_unreachable("invalid action");
791}
792
793static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
794 BitTest BT,
795 const CallExpr *E, Value *BitBase,
796 Value *BitPos) {
797 char Action = bitActionToX86BTCode(BT.Action);
798 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
799
800 // Build the assembly.
801 SmallString<64> Asm;
802 raw_svector_ostream AsmOS(Asm);
803 if (BT.Interlocking != BitTest::Unlocked)
804 AsmOS << "lock ";
805 AsmOS << "bt";
806 if (Action)
807 AsmOS << Action;
808 AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
809
810 // Build the constraints. FIXME: We should support immediates when possible.
811 std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
812 llvm::IntegerType *IntType = llvm::IntegerType::get(
813 CGF.getLLVMContext(),
814 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
815 llvm::Type *IntPtrType = IntType->getPointerTo();
816 llvm::FunctionType *FTy =
817 llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
818
819 llvm::InlineAsm *IA =
820 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
821 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
822}
823
824static llvm::AtomicOrdering
825getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
826 switch (I) {
827 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
828 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
829 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
830 case BitTest::Release: return llvm::AtomicOrdering::Release;
831 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
832 }
833 llvm_unreachable("invalid interlocking");
834}
835
836/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
837/// bits and a bit position and read and optionally modify the bit at that
838/// position. The position index can be arbitrarily large, i.e. it can be larger
839/// than 31 or 63, so we need an indexed load in the general case.
840static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
841 unsigned BuiltinID,
842 const CallExpr *E) {
843 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
844 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
845
846 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
847
848 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
849 // indexing operation internally. Use them if possible.
850 if (CGF.getTarget().getTriple().isX86())
851 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
852
853 // Otherwise, use generic code to load one byte and test the bit. Use all but
854 // the bottom three bits as the array index, and the bottom three bits to form
855 // a mask.
856 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
857 Value *ByteIndex = CGF.Builder.CreateAShr(
858 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
859 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
860 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
861 ByteIndex, "bittest.byteaddr"),
862 CharUnits::One());
863 Value *PosLow =
864 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
865 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
866
867 // The updating instructions will need a mask.
868 Value *Mask = nullptr;
869 if (BT.Action != BitTest::TestOnly) {
870 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
871 "bittest.mask");
872 }
873
874 // Check the action and ordering of the interlocked intrinsics.
875 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
876
877 Value *OldByte = nullptr;
878 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
879 // Emit a combined atomicrmw load/store operation for the interlocked
880 // intrinsics.
881 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
882 if (BT.Action == BitTest::Reset) {
883 Mask = CGF.Builder.CreateNot(Mask);
884 RMWOp = llvm::AtomicRMWInst::And;
885 }
886 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
887 Ordering);
888 } else {
889 // Emit a plain load for the non-interlocked intrinsics.
890 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
891 Value *NewByte = nullptr;
892 switch (BT.Action) {
893 case BitTest::TestOnly:
894 // Don't store anything.
895 break;
896 case BitTest::Complement:
897 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
898 break;
899 case BitTest::Reset:
900 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
901 break;
902 case BitTest::Set:
903 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
904 break;
905 }
906 if (NewByte)
907 CGF.Builder.CreateStore(NewByte, ByteAddr);
908 }
909
910 // However we loaded the old byte, either by plain load or atomicrmw, shift
911 // the bit into the low position and mask it to 0 or 1.
912 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
913 return CGF.Builder.CreateAnd(
914 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
915}
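
A portable reference for the generic (non-x86) path above, as a sketch:

    static unsigned char bittest_ref(const unsigned char *Base, long Pos) {
      unsigned char Byte = Base[Pos >> 3];   // indexed load; Pos may exceed 31/63
      return (Byte >> (Pos & 0x7)) & 1;      // shift the bit down and mask to 0 or 1
    }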
916
917namespace {
918enum class MSVCSetJmpKind {
919 _setjmpex,
920 _setjmp3,
921 _setjmp
922};
923}
924
925/// MSVC handles setjmp a bit differently on different platforms. On every
926/// architecture except 32-bit x86, the frame address is passed. On x86, extra
927/// parameters can be passed as variadic arguments, but we always pass none.
928static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
929 const CallExpr *E) {
930 llvm::Value *Arg1 = nullptr;
931 llvm::Type *Arg1Ty = nullptr;
932 StringRef Name;
933 bool IsVarArg = false;
934 if (SJKind == MSVCSetJmpKind::_setjmp3) {
935 Name = "_setjmp3";
936 Arg1Ty = CGF.Int32Ty;
937 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
938 IsVarArg = true;
939 } else {
940 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
941 Arg1Ty = CGF.Int8PtrTy;
942 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
943 Arg1 = CGF.Builder.CreateCall(
944 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
945 } else
946 Arg1 = CGF.Builder.CreateCall(
947 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
948 llvm::ConstantInt::get(CGF.Int32Ty, 0));
949 }
950
951 // Mark the call site and declaration with ReturnsTwice.
952 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
953 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
954 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
955 llvm::Attribute::ReturnsTwice);
956 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
957 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
958 ReturnsTwiceAttr, /*Local=*/true);
959
960 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
961 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
962 llvm::Value *Args[] = {Buf, Arg1};
963 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
964 CB->setAttributes(ReturnsTwiceAttr);
965 return RValue::get(CB);
966}
967
968// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
969// we handle them here.
970enum class CodeGenFunction::MSVCIntrin {
971 _BitScanForward,
972 _BitScanReverse,
973 _InterlockedAnd,
974 _InterlockedDecrement,
975 _InterlockedExchange,
976 _InterlockedExchangeAdd,
977 _InterlockedExchangeSub,
978 _InterlockedIncrement,
979 _InterlockedOr,
980 _InterlockedXor,
981 _InterlockedExchangeAdd_acq,
982 _InterlockedExchangeAdd_rel,
983 _InterlockedExchangeAdd_nf,
984 _InterlockedExchange_acq,
985 _InterlockedExchange_rel,
986 _InterlockedExchange_nf,
987 _InterlockedCompareExchange_acq,
988 _InterlockedCompareExchange_rel,
989 _InterlockedCompareExchange_nf,
990 _InterlockedOr_acq,
991 _InterlockedOr_rel,
992 _InterlockedOr_nf,
993 _InterlockedXor_acq,
994 _InterlockedXor_rel,
995 _InterlockedXor_nf,
996 _InterlockedAnd_acq,
997 _InterlockedAnd_rel,
998 _InterlockedAnd_nf,
999 _InterlockedIncrement_acq,
1000 _InterlockedIncrement_rel,
1001 _InterlockedIncrement_nf,
1002 _InterlockedDecrement_acq,
1003 _InterlockedDecrement_rel,
1004 _InterlockedDecrement_nf,
1005 __fastfail,
1006};
1007
1008Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1009 const CallExpr *E) {
1010 switch (BuiltinID) {
1011 case MSVCIntrin::_BitScanForward:
1012 case MSVCIntrin::_BitScanReverse: {
1013 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1014
1015 llvm::Type *ArgType = ArgValue->getType();
1016 llvm::Type *IndexType =
1017 EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
1018 llvm::Type *ResultType = ConvertType(E->getType());
1019
1020 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1021 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1022 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1023
1024 BasicBlock *Begin = Builder.GetInsertBlock();
1025 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1026 Builder.SetInsertPoint(End);
1027 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1028
1029 Builder.SetInsertPoint(Begin);
1030 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1031 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1032 Builder.CreateCondBr(IsZero, End, NotZero);
1033 Result->addIncoming(ResZero, Begin);
1034
1035 Builder.SetInsertPoint(NotZero);
1036 Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
1037
1038 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1039 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1040 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1041 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1042 Builder.CreateStore(ZeroCount, IndexAddress, false);
1043 } else {
1044 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1045 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1046
1047 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1048 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1049 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1050 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1051 Builder.CreateStore(Index, IndexAddress, false);
1052 }
1053 Builder.CreateBr(End);
1054 Result->addIncoming(ResOne, NotZero);
1055
1056 Builder.SetInsertPoint(End);
1057 return Result;
1058 }
1059 case MSVCIntrin::_InterlockedAnd:
1060 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1061 case MSVCIntrin::_InterlockedExchange:
1062 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1063 case MSVCIntrin::_InterlockedExchangeAdd:
1064 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1065 case MSVCIntrin::_InterlockedExchangeSub:
1066 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1067 case MSVCIntrin::_InterlockedOr:
1068 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1069 case MSVCIntrin::_InterlockedXor:
1070 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1071 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1072 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1073 AtomicOrdering::Acquire);
1074 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1075 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1076 AtomicOrdering::Release);
1077 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1078 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1079 AtomicOrdering::Monotonic);
1080 case MSVCIntrin::_InterlockedExchange_acq:
1081 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1082 AtomicOrdering::Acquire);
1083 case MSVCIntrin::_InterlockedExchange_rel:
1084 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1085 AtomicOrdering::Release);
1086 case MSVCIntrin::_InterlockedExchange_nf:
1087 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1088 AtomicOrdering::Monotonic);
1089 case MSVCIntrin::_InterlockedCompareExchange_acq:
1090 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1091 case MSVCIntrin::_InterlockedCompareExchange_rel:
1092 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1093 case MSVCIntrin::_InterlockedCompareExchange_nf:
1094 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1095 case MSVCIntrin::_InterlockedOr_acq:
1096 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1097 AtomicOrdering::Acquire);
1098 case MSVCIntrin::_InterlockedOr_rel:
1099 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1100 AtomicOrdering::Release);
1101 case MSVCIntrin::_InterlockedOr_nf:
1102 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1103 AtomicOrdering::Monotonic);
1104 case MSVCIntrin::_InterlockedXor_acq:
1105 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1106 AtomicOrdering::Acquire);
1107 case MSVCIntrin::_InterlockedXor_rel:
1108 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1109 AtomicOrdering::Release);
1110 case MSVCIntrin::_InterlockedXor_nf:
1111 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1112 AtomicOrdering::Monotonic);
1113 case MSVCIntrin::_InterlockedAnd_acq:
1114 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1115 AtomicOrdering::Acquire);
1116 case MSVCIntrin::_InterlockedAnd_rel:
1117 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1118 AtomicOrdering::Release);
1119 case MSVCIntrin::_InterlockedAnd_nf:
1120 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1121 AtomicOrdering::Monotonic);
1122 case MSVCIntrin::_InterlockedIncrement_acq:
1123 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1124 case MSVCIntrin::_InterlockedIncrement_rel:
1125 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1126 case MSVCIntrin::_InterlockedIncrement_nf:
1127 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1128 case MSVCIntrin::_InterlockedDecrement_acq:
1129 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1130 case MSVCIntrin::_InterlockedDecrement_rel:
1131 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1132 case MSVCIntrin::_InterlockedDecrement_nf:
1133 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1134
1135 case MSVCIntrin::_InterlockedDecrement:
1136 return EmitAtomicDecrementValue(*this, E);
1137 case MSVCIntrin::_InterlockedIncrement:
1138 return EmitAtomicIncrementValue(*this, E);
1139
1140 case MSVCIntrin::__fastfail: {
1141 // Request immediate process termination from the kernel. The instruction
1142 // sequences to do this are documented on MSDN:
1143 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1144 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1145 StringRef Asm, Constraints;
1146 switch (ISA) {
1147 default:
1148 ErrorUnsupported(E, "__fastfail call for this architecture");
1149 break;
1150 case llvm::Triple::x86:
1151 case llvm::Triple::x86_64:
1152 Asm = "int $$0x29";
1153 Constraints = "{cx}";
1154 break;
1155 case llvm::Triple::thumb:
1156 Asm = "udf #251";
1157 Constraints = "{r0}";
1158 break;
1159 case llvm::Triple::aarch64:
1160 Asm = "brk #0xF003";
1161 Constraints = "{w0}";
1162 }
1163 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1164 llvm::InlineAsm *IA =
1165 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1166 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1167 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1168 llvm::Attribute::NoReturn);
1169 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1170 CI->setAttributes(NoReturnAttr);
1171 return CI;
1172 }
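// Editor's sketch (illustrative, not part of CGBuiltin.cpp): for
// __fastfail(code) the inline asm selected above amounts to, per target:
//   x86 / x86-64 :  code in ecx, then "int 0x29"
//   thumb        :  code in r0,  then "udf #251"
//   aarch64      :  code in w0,  then "brk #0xF003"
// and the resulting call is marked noreturn.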
1173 }
1174 llvm_unreachable("Incorrect MSVC intrinsic!")::llvm::llvm_unreachable_internal("Incorrect MSVC intrinsic!"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 1174)
;
1175}
1176
1177namespace {
1178// ARC cleanup for __builtin_os_log_format
1179struct CallObjCArcUse final : EHScopeStack::Cleanup {
1180 CallObjCArcUse(llvm::Value *object) : object(object) {}
1181 llvm::Value *object;
1182
1183 void Emit(CodeGenFunction &CGF, Flags flags) override {
1184 CGF.EmitARCIntrinsicUse(object);
1185 }
1186};
1187}
1188
1189Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1190 BuiltinCheckKind Kind) {
1191 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
1192 "Unsupported builtin check kind");
1193
1194 Value *ArgValue = EmitScalarExpr(E);
1195 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1196 return ArgValue;
1197
1198 SanitizerScope SanScope(this);
1199 Value *Cond = Builder.CreateICmpNE(
1200 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1201 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1202 SanitizerHandler::InvalidBuiltin,
1203 {EmitCheckSourceLocation(E->getExprLoc()),
1204 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1205 None);
1206 return ArgValue;
1207}
1208
1209/// Get the argument type for arguments to os_log_helper.
1210static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1211 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1212 return C.getCanonicalType(UnsignedTy);
1213}
1214
1215llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1216 const analyze_os_log::OSLogBufferLayout &Layout,
1217 CharUnits BufferAlignment) {
1218 ASTContext &Ctx = getContext();
1219
1220 llvm::SmallString<64> Name;
1221 {
1222 raw_svector_ostream OS(Name);
1223 OS << "__os_log_helper";
1224 OS << "_" << BufferAlignment.getQuantity();
1225 OS << "_" << int(Layout.getSummaryByte());
1226 OS << "_" << int(Layout.getNumArgsByte());
1227 for (const auto &Item : Layout.Items)
1228 OS << "_" << int(Item.getSizeByte()) << "_"
1229 << int(Item.getDescriptorByte());
1230 }
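// Editor's note (illustrative, hypothetical values): the mangling above yields
// names such as "__os_log_helper_16_0_2_4_0_8_32" for a 16-byte-aligned buffer
// with summary byte 0, two arguments, and items with (size, descriptor) pairs
// (4, 0) and (8, 32), so identical layouts share one linkonce_odr helper.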
1231
1232 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1233 return F;
1234
1235 llvm::SmallVector<QualType, 4> ArgTys;
1236 FunctionArgList Args;
1237 Args.push_back(ImplicitParamDecl::Create(
1238 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1239 ImplicitParamDecl::Other));
1240 ArgTys.emplace_back(Ctx.VoidPtrTy);
1241
1242 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1243 char Size = Layout.Items[I].getSizeByte();
1244 if (!Size)
1245 continue;
1246
1247 QualType ArgTy = getOSLogArgType(Ctx, Size);
1248 Args.push_back(ImplicitParamDecl::Create(
1249 Ctx, nullptr, SourceLocation(),
1250 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1251 ImplicitParamDecl::Other));
1252 ArgTys.emplace_back(ArgTy);
1253 }
1254
1255 QualType ReturnTy = Ctx.VoidTy;
1256 QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1257
1258 // The helper function has linkonce_odr linkage to enable the linker to merge
1259 // identical functions. To ensure the merging always happens, 'noinline' is
1260 // attached to the function when compiling with -Oz.
1261 const CGFunctionInfo &FI =
1262 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1263 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1264 llvm::Function *Fn = llvm::Function::Create(
1265 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1266 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1267 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
1268 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1269 Fn->setDoesNotThrow();
1270
1271 // Attach 'noinline' at -Oz.
1272 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1273 Fn->addFnAttr(llvm::Attribute::NoInline);
1274
1275 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1276 IdentifierInfo *II = &Ctx.Idents.get(Name);
1277 FunctionDecl *FD = FunctionDecl::Create(
1278 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
1279 FunctionTy, nullptr, SC_PrivateExtern, false, false);
1280 // Avoid generating debug location info for the function.
1281 FD->setImplicit();
1282
1283 StartFunction(FD, ReturnTy, Fn, FI, Args);
1284
1285 // Create a scope with an artificial location for the body of this function.
1286 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1287
1288 CharUnits Offset;
1289 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1290 BufferAlignment);
1291 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1292 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1293 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1294 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1295
1296 unsigned I = 1;
1297 for (const auto &Item : Layout.Items) {
1298 Builder.CreateStore(
1299 Builder.getInt8(Item.getDescriptorByte()),
1300 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1301 Builder.CreateStore(
1302 Builder.getInt8(Item.getSizeByte()),
1303 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1304
1305 CharUnits Size = Item.size();
1306 if (!Size.getQuantity())
1307 continue;
1308
1309 Address Arg = GetAddrOfLocalVar(Args[I]);
1310 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1311 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1312 "argDataCast");
1313 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1314 Offset += Size;
1315 ++I;
1316 }
1317
1318 FinishFunction();
1319
1320 return Fn;
1321}
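// Editor's sketch (illustrative, not part of CGBuiltin.cpp): the helper built
// above serializes its arguments into the buffer with this layout (one byte
// per field unless noted):
//   [summary][numArgs] then, per item: [descriptor][size][data: size bytes]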
1322
1323RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1324 assert(E.getNumArgs() >= 2 &&
1325 "__builtin_os_log_format takes at least 2 arguments");
1326 ASTContext &Ctx = getContext();
1327 analyze_os_log::OSLogBufferLayout Layout;
1328 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1329 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1330 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1331
1332 // Ignore argument 1, the format string. It is not currently used.
1333 CallArgList Args;
1334 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1335
1336 for (const auto &Item : Layout.Items) {
1337 int Size = Item.getSizeByte();
1338 if (!Size)
1339 continue;
1340
1341 llvm::Value *ArgVal;
1342
1343 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1344 uint64_t Val = 0;
1345 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1346 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1347 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1348 } else if (const Expr *TheExpr = Item.getExpr()) {
1349 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1350
1351 // If a temporary object that requires destruction after the full
1352 // expression is passed, push a lifetime-extended cleanup to extend its
1353 // lifetime to the end of the enclosing block scope.
1354 auto LifetimeExtendObject = [&](const Expr *E) {
1355 E = E->IgnoreParenCasts();
1356 // Extend lifetimes of objects returned by function calls and message
1357 // sends.
1358
1359 // FIXME: We should do this in other cases in which temporaries are
1360 // created including arguments of non-ARC types (e.g., C++
1361 // temporaries).
1362 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1363 return true;
1364 return false;
1365 };
1366
1367 if (TheExpr->getType()->isObjCRetainableType() &&
1368 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1369 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1370 "Only scalar can be an ObjC retainable type");
1371 if (!isa<Constant>(ArgVal)) {
1372 CleanupKind Cleanup = getARCCleanupKind();
1373 QualType Ty = TheExpr->getType();
1374 Address Alloca = Address::invalid();
1375 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1376 ArgVal = EmitARCRetain(Ty, ArgVal);
1377 Builder.CreateStore(ArgVal, Addr);
1378 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1379 CodeGenFunction::destroyARCStrongPrecise,
1380 Cleanup & EHCleanup);
1381
1382 // Push a clang.arc.use call to ensure ARC optimizer knows that the
1383 // argument has to be alive.
1384 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1385 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1386 }
1387 }
1388 } else {
1389 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1390 }
1391
1392 unsigned ArgValSize =
1393 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1394 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1395 ArgValSize);
1396 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1397 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1398 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1399 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1400 Args.add(RValue::get(ArgVal), ArgTy);
1401 }
1402
1403 const CGFunctionInfo &FI =
1404 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1405 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1406 Layout, BufAddr.getAlignment());
1407 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1408 return RValue::get(BufAddr.getPointer());
1409}
1410
1411/// Determine if a binop is a checked mixed-sign multiply we can specialize.
1412static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1413 WidthAndSignedness Op1Info,
1414 WidthAndSignedness Op2Info,
1415 WidthAndSignedness ResultInfo) {
1416 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1417 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1418 Op1Info.Signed != Op2Info.Signed;
1419}
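// Editor's sketch (illustrative, hypothetical variables): a call matching this
// predicate, assuming 32-bit int and unsigned:
//   int a; unsigned b; int res;
//   __builtin_mul_overflow(a, b, &res);   // mixed signedness, operand width >= result width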
1420
1421/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1422/// the generic checked-binop irgen.
1423static RValue
1424EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1425 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1426 WidthAndSignedness Op2Info,
1427 const clang::Expr *ResultArg, QualType ResultQTy,
1428 WidthAndSignedness ResultInfo) {
1429 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1430 Op2Info, ResultInfo) &&
1431 "Not a mixed-sign multiplication we can specialize");
1432
1433 // Emit the signed and unsigned operands.
1434 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1435 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1436 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1437 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1438 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1439 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1440
1441 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1442 if (SignedOpWidth < UnsignedOpWidth)
1443 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1444 if (UnsignedOpWidth < SignedOpWidth)
1445 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1446
1447 llvm::Type *OpTy = Signed->getType();
1448 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1449 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1450 llvm::Type *ResTy = ResultPtr.getElementType();
1451 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1452
1453 // Take the absolute value of the signed operand.
1454 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1455 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1456 llvm::Value *AbsSigned =
1457 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1458
1459 // Perform a checked unsigned multiplication.
1460 llvm::Value *UnsignedOverflow;
1461 llvm::Value *UnsignedResult =
1462 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1463 Unsigned, UnsignedOverflow);
1464
1465 llvm::Value *Overflow, *Result;
1466 if (ResultInfo.Signed) {
1467 // Signed overflow occurs if the result is greater than INT_MAX or less
1468 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1469 auto IntMax =
1470 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1471 llvm::Value *MaxResult =
1472 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1473 CGF.Builder.CreateZExt(IsNegative, OpTy));
1474 llvm::Value *SignedOverflow =
1475 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1476 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1477
1478 // Prepare the signed result (possibly by negating it).
1479 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1480 llvm::Value *SignedResult =
1481 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1482 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1483 } else {
1484 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1485 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1486 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1487 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1488 if (ResultInfo.Width < OpWidth) {
1489 auto IntMax =
1490 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
1491 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
1492 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
1493 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
1494 }
1495
1496 // Negate the product if it would be negative in infinite precision.
1497 Result = CGF.Builder.CreateSelect(
1498 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
1499
1500 Result = CGF.Builder.CreateTrunc(Result, ResTy);
1501 }
1502 assert(Overflow && Result && "Missing overflow or result");
1503
1504 bool isVolatile =
1505 ResultArg->getType()->getPointeeType().isVolatileQualified();
1506 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1507 isVolatile);
1508 return RValue::get(Overflow);
1509}
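// Editor's sketch (illustrative, not part of CGBuiltin.cpp): for a signed
// 32-bit result the IR built above behaves roughly like:
//
//   bool mul_overflow_mixed(int S, unsigned U, int *Res) {
//     bool Neg = S < 0;
//     unsigned AbsS = Neg ? 0u - (unsigned)S : (unsigned)S;   // |S|
//     unsigned UR;
//     bool Ovf = __builtin_umul_overflow(AbsS, U, &UR);       // umul.with.overflow
//     Ovf |= UR > (unsigned)INT_MAX + Neg;                    // signed range check
//     *Res = (int)(Neg ? 0u - UR : UR);                       // negate if needed
//     return Ovf;
//   }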
1510
1511static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
1512 Value *&RecordPtr, CharUnits Align,
1513 llvm::FunctionCallee Func, int Lvl) {
1514 ASTContext &Context = CGF.getContext();
1515 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
1516 std::string Pad = std::string(Lvl * 4, ' ');
1517
1518 Value *GString =
1519 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
1520 Value *Res = CGF.Builder.CreateCall(Func, {GString});
1521
1522 static llvm::DenseMap<QualType, const char *> Types;
1523 if (Types.empty()) {
1524 Types[Context.CharTy] = "%c";
1525 Types[Context.BoolTy] = "%d";
1526 Types[Context.SignedCharTy] = "%hhd";
1527 Types[Context.UnsignedCharTy] = "%hhu";
1528 Types[Context.IntTy] = "%d";
1529 Types[Context.UnsignedIntTy] = "%u";
1530 Types[Context.LongTy] = "%ld";
1531 Types[Context.UnsignedLongTy] = "%lu";
1532 Types[Context.LongLongTy] = "%lld";
1533 Types[Context.UnsignedLongLongTy] = "%llu";
1534 Types[Context.ShortTy] = "%hd";
1535 Types[Context.UnsignedShortTy] = "%hu";
1536 Types[Context.VoidPtrTy] = "%p";
1537 Types[Context.FloatTy] = "%f";
1538 Types[Context.DoubleTy] = "%f";
1539 Types[Context.LongDoubleTy] = "%Lf";
1540 Types[Context.getPointerType(Context.CharTy)] = "%s";
1541 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
1542 }
1543
1544 for (const auto *FD : RD->fields()) {
1545 Value *FieldPtr = RecordPtr;
1546 if (RD->isUnion())
1547 FieldPtr = CGF.Builder.CreatePointerCast(
1548 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
1549 else
1550 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
1551 FD->getFieldIndex());
1552
1553 GString = CGF.Builder.CreateGlobalStringPtr(
1554 llvm::Twine(Pad)
1555 .concat(FD->getType().getAsString())
1556 .concat(llvm::Twine(' '))
1557 .concat(FD->getNameAsString())
1558 .concat(" : ")
1559 .str());
1560 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1561 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1562
1563 QualType CanonicalType =
1564 FD->getType().getUnqualifiedType().getCanonicalType();
1565
1566 // We check whether we are in a recursive type
1567 if (CanonicalType->isRecordType()) {
1568 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
1569 Res = CGF.Builder.CreateAdd(TmpRes, Res);
1570 continue;
1571 }
1572
1573 // We try to determine the best format to print the current field
1574 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
1575 ? Types[Context.VoidPtrTy]
1576 : Types[CanonicalType];
1577
1578 Address FieldAddress = Address(FieldPtr, Align);
1579 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
1580
1581 // FIXME Need to handle bitfield here
1582 GString = CGF.Builder.CreateGlobalStringPtr(
1583 Format.concat(llvm::Twine('\n')).str());
1584 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
1585 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1586 }
1587
1588 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
1589 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1590 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1591 return Res;
1592}
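// Editor's sketch (illustrative, hypothetical struct): given
//   struct S { int i; char c; };  struct S s = {1, 'x'};
//   __builtin_dump_struct(&s, &printf);
// the calls emitted by dumpRecord print roughly:
//   struct S {
//   int i : 1
//   char c : x
//   }
// with nested record fields indented by four spaces per level.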
1593
1594static bool
1595TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
1596 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
1597 if (const auto *Arr = Ctx.getAsArrayType(Ty))
1598 Ty = Ctx.getBaseElementType(Arr);
1599
1600 const auto *Record = Ty->getAsCXXRecordDecl();
1601 if (!Record)
1602 return false;
1603
1604 // We've already checked this type, or are in the process of checking it.
1605 if (!Seen.insert(Record).second)
1606 return false;
1607
1608 assert(Record->hasDefinition() &&
1609 "Incomplete types should already be diagnosed");
1610
1611 if (Record->isDynamicClass())
1612 return true;
1613
1614 for (FieldDecl *F : Record->fields()) {
1615 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
1616 return true;
1617 }
1618 return false;
1619}
1620
1621/// Determine if the specified type requires laundering by checking if it is a
1622/// dynamic class type or contains a subobject which is a dynamic class type.
1623static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
1624 if (!CGM.getCodeGenOpts().StrictVTablePointers)
1625 return false;
1626 llvm::SmallPtrSet<const Decl *, 16> Seen;
1627 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
1628}
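// Editor's sketch (illustrative, hypothetical types): under
// -fstrict-vtable-pointers this predicate is true for
//   struct Base { virtual ~Base(); };      // dynamic class
//   struct Holder { Base b[4]; };          // dynamic-class subobject via array
// and false for aggregates containing only scalars.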
1629
1630RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
1631 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
1632 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
1633
1634 // The builtin's shift arg may have a different type than the source arg and
1635 // result, but the LLVM intrinsic uses the same type for all values.
1636 llvm::Type *Ty = Src->getType();
1637 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
1638
1639 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
1640 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1641 Function *F = CGM.getIntrinsic(IID, Ty);
1642 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
1643}
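// Editor's sketch (illustrative): a rotate is the funnel shift with both data
// operands equal, e.g. for 32-bit values:
//   rotl32(x, n) == fshl(x, x, n) == (x << (n % 32)) | (x >> ((32 - n) % 32))
//   rotr32(x, n) == fshr(x, x, n)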
1644
1645RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
1646 const CallExpr *E,
1647 ReturnValueSlot ReturnValue) {
1648 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
1649 // See if we can constant fold this builtin. If so, don't emit it at all.
1650 Expr::EvalResult Result;
1651 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
1652 !Result.hasSideEffects()) {
1653 if (Result.Val.isInt())
1654 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
1655 Result.Val.getInt()));
1656 if (Result.Val.isFloat())
1657 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
1658 Result.Val.getFloat()));
1659 }
1660
1661 // There are LLVM math intrinsics/instructions corresponding to math library
1662 // functions, except that the LLVM op will never set errno while the math
1663 // library might. Also, math builtins have the same semantics as their math
1664 // library twins. Thus, we can transform math library and builtin calls to
1665 // their LLVM counterparts if the call is marked 'const' (known to never set errno).
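// Editor's note (illustrative): e.g. when the frontend marks sqrt 'const'
// (such as under -fno-math-errno), a call like
//   double d = sqrt(x);
// is emitted below as llvm.sqrt.f64 (or its constrained variant) rather than
// as a libm call.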
1666 if (FD->hasAttr<ConstAttr>()) {
1667 switch (BuiltinID) {
1668 case Builtin::BIceil:
1669 case Builtin::BIceilf:
1670 case Builtin::BIceill:
1671 case Builtin::BI__builtin_ceil:
1672 case Builtin::BI__builtin_ceilf:
1673 case Builtin::BI__builtin_ceilf16:
1674 case Builtin::BI__builtin_ceill:
1675 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1676 Intrinsic::ceil,
1677 Intrinsic::experimental_constrained_ceil));
1678
1679 case Builtin::BIcopysign:
1680 case Builtin::BIcopysignf:
1681 case Builtin::BIcopysignl:
1682 case Builtin::BI__builtin_copysign:
1683 case Builtin::BI__builtin_copysignf:
1684 case Builtin::BI__builtin_copysignf16:
1685 case Builtin::BI__builtin_copysignl:
1686 case Builtin::BI__builtin_copysignf128:
1687 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
1688
1689 case Builtin::BIcos:
1690 case Builtin::BIcosf:
1691 case Builtin::BIcosl:
1692 case Builtin::BI__builtin_cos:
1693 case Builtin::BI__builtin_cosf:
1694 case Builtin::BI__builtin_cosf16:
1695 case Builtin::BI__builtin_cosl:
1696 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1697 Intrinsic::cos,
1698 Intrinsic::experimental_constrained_cos));
1699
1700 case Builtin::BIexp:
1701 case Builtin::BIexpf:
1702 case Builtin::BIexpl:
1703 case Builtin::BI__builtin_exp:
1704 case Builtin::BI__builtin_expf:
1705 case Builtin::BI__builtin_expf16:
1706 case Builtin::BI__builtin_expl:
1707 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1708 Intrinsic::exp,
1709 Intrinsic::experimental_constrained_exp));
1710
1711 case Builtin::BIexp2:
1712 case Builtin::BIexp2f:
1713 case Builtin::BIexp2l:
1714 case Builtin::BI__builtin_exp2:
1715 case Builtin::BI__builtin_exp2f:
1716 case Builtin::BI__builtin_exp2f16:
1717 case Builtin::BI__builtin_exp2l:
1718 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1719 Intrinsic::exp2,
1720 Intrinsic::experimental_constrained_exp2));
1721
1722 case Builtin::BIfabs:
1723 case Builtin::BIfabsf:
1724 case Builtin::BIfabsl:
1725 case Builtin::BI__builtin_fabs:
1726 case Builtin::BI__builtin_fabsf:
1727 case Builtin::BI__builtin_fabsf16:
1728 case Builtin::BI__builtin_fabsl:
1729 case Builtin::BI__builtin_fabsf128:
1730 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
1731
1732 case Builtin::BIfloor:
1733 case Builtin::BIfloorf:
1734 case Builtin::BIfloorl:
1735 case Builtin::BI__builtin_floor:
1736 case Builtin::BI__builtin_floorf:
1737 case Builtin::BI__builtin_floorf16:
1738 case Builtin::BI__builtin_floorl:
1739 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1740 Intrinsic::floor,
1741 Intrinsic::experimental_constrained_floor));
1742
1743 case Builtin::BIfma:
1744 case Builtin::BIfmaf:
1745 case Builtin::BIfmal:
1746 case Builtin::BI__builtin_fma:
1747 case Builtin::BI__builtin_fmaf:
1748 case Builtin::BI__builtin_fmaf16:
1749 case Builtin::BI__builtin_fmal:
1750 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
1751 Intrinsic::fma,
1752 Intrinsic::experimental_constrained_fma));
1753
1754 case Builtin::BIfmax:
1755 case Builtin::BIfmaxf:
1756 case Builtin::BIfmaxl:
1757 case Builtin::BI__builtin_fmax:
1758 case Builtin::BI__builtin_fmaxf:
1759 case Builtin::BI__builtin_fmaxf16:
1760 case Builtin::BI__builtin_fmaxl:
1761 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1762 Intrinsic::maxnum,
1763 Intrinsic::experimental_constrained_maxnum));
1764
1765 case Builtin::BIfmin:
1766 case Builtin::BIfminf:
1767 case Builtin::BIfminl:
1768 case Builtin::BI__builtin_fmin:
1769 case Builtin::BI__builtin_fminf:
1770 case Builtin::BI__builtin_fminf16:
1771 case Builtin::BI__builtin_fminl:
1772 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1773 Intrinsic::minnum,
1774 Intrinsic::experimental_constrained_minnum));
1775
1776 // fmod() is a special-case. It maps to the frem instruction rather than an
1777 // LLVM intrinsic.
1778 case Builtin::BIfmod:
1779 case Builtin::BIfmodf:
1780 case Builtin::BIfmodl:
1781 case Builtin::BI__builtin_fmod:
1782 case Builtin::BI__builtin_fmodf:
1783 case Builtin::BI__builtin_fmodf16:
1784 case Builtin::BI__builtin_fmodl: {
1785 Value *Arg1 = EmitScalarExpr(E->getArg(0));
1786 Value *Arg2 = EmitScalarExpr(E->getArg(1));
1787 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
1788 }
1789
1790 case Builtin::BIlog:
1791 case Builtin::BIlogf:
1792 case Builtin::BIlogl:
1793 case Builtin::BI__builtin_log:
1794 case Builtin::BI__builtin_logf:
1795 case Builtin::BI__builtin_logf16:
1796 case Builtin::BI__builtin_logl:
1797 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1798 Intrinsic::log,
1799 Intrinsic::experimental_constrained_log));
1800
1801 case Builtin::BIlog10:
1802 case Builtin::BIlog10f:
1803 case Builtin::BIlog10l:
1804 case Builtin::BI__builtin_log10:
1805 case Builtin::BI__builtin_log10f:
1806 case Builtin::BI__builtin_log10f16:
1807 case Builtin::BI__builtin_log10l:
1808 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1809 Intrinsic::log10,
1810 Intrinsic::experimental_constrained_log10));
1811
1812 case Builtin::BIlog2:
1813 case Builtin::BIlog2f:
1814 case Builtin::BIlog2l:
1815 case Builtin::BI__builtin_log2:
1816 case Builtin::BI__builtin_log2f:
1817 case Builtin::BI__builtin_log2f16:
1818 case Builtin::BI__builtin_log2l:
1819 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1820 Intrinsic::log2,
1821 Intrinsic::experimental_constrained_log2));
1822
1823 case Builtin::BInearbyint:
1824 case Builtin::BInearbyintf:
1825 case Builtin::BInearbyintl:
1826 case Builtin::BI__builtin_nearbyint:
1827 case Builtin::BI__builtin_nearbyintf:
1828 case Builtin::BI__builtin_nearbyintl:
1829 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1830 Intrinsic::nearbyint,
1831 Intrinsic::experimental_constrained_nearbyint));
1832
1833 case Builtin::BIpow:
1834 case Builtin::BIpowf:
1835 case Builtin::BIpowl:
1836 case Builtin::BI__builtin_pow:
1837 case Builtin::BI__builtin_powf:
1838 case Builtin::BI__builtin_powf16:
1839 case Builtin::BI__builtin_powl:
1840 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1841 Intrinsic::pow,
1842 Intrinsic::experimental_constrained_pow));
1843
1844 case Builtin::BIrint:
1845 case Builtin::BIrintf:
1846 case Builtin::BIrintl:
1847 case Builtin::BI__builtin_rint:
1848 case Builtin::BI__builtin_rintf:
1849 case Builtin::BI__builtin_rintf16:
1850 case Builtin::BI__builtin_rintl:
1851 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1852 Intrinsic::rint,
1853 Intrinsic::experimental_constrained_rint));
1854
1855 case Builtin::BIround:
1856 case Builtin::BIroundf:
1857 case Builtin::BIroundl:
1858 case Builtin::BI__builtin_round:
1859 case Builtin::BI__builtin_roundf:
1860 case Builtin::BI__builtin_roundf16:
1861 case Builtin::BI__builtin_roundl:
1862 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1863 Intrinsic::round,
1864 Intrinsic::experimental_constrained_round));
1865
1866 case Builtin::BIsin:
1867 case Builtin::BIsinf:
1868 case Builtin::BIsinl:
1869 case Builtin::BI__builtin_sin:
1870 case Builtin::BI__builtin_sinf:
1871 case Builtin::BI__builtin_sinf16:
1872 case Builtin::BI__builtin_sinl:
1873 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1874 Intrinsic::sin,
1875 Intrinsic::experimental_constrained_sin));
1876
1877 case Builtin::BIsqrt:
1878 case Builtin::BIsqrtf:
1879 case Builtin::BIsqrtl:
1880 case Builtin::BI__builtin_sqrt:
1881 case Builtin::BI__builtin_sqrtf:
1882 case Builtin::BI__builtin_sqrtf16:
1883 case Builtin::BI__builtin_sqrtl:
1884 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1885 Intrinsic::sqrt,
1886 Intrinsic::experimental_constrained_sqrt));
1887
1888 case Builtin::BItrunc:
1889 case Builtin::BItruncf:
1890 case Builtin::BItruncl:
1891 case Builtin::BI__builtin_trunc:
1892 case Builtin::BI__builtin_truncf:
1893 case Builtin::BI__builtin_truncf16:
1894 case Builtin::BI__builtin_truncl:
1895 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1896 Intrinsic::trunc,
1897 Intrinsic::experimental_constrained_trunc));
1898
1899 case Builtin::BIlround:
1900 case Builtin::BIlroundf:
1901 case Builtin::BIlroundl:
1902 case Builtin::BI__builtin_lround:
1903 case Builtin::BI__builtin_lroundf:
1904 case Builtin::BI__builtin_lroundl:
1905 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1906 *this, E, Intrinsic::lround,
1907 Intrinsic::experimental_constrained_lround));
1908
1909 case Builtin::BIllround:
1910 case Builtin::BIllroundf:
1911 case Builtin::BIllroundl:
1912 case Builtin::BI__builtin_llround:
1913 case Builtin::BI__builtin_llroundf:
1914 case Builtin::BI__builtin_llroundl:
1915 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1916 *this, E, Intrinsic::llround,
1917 Intrinsic::experimental_constrained_llround));
1918
1919 case Builtin::BIlrint:
1920 case Builtin::BIlrintf:
1921 case Builtin::BIlrintl:
1922 case Builtin::BI__builtin_lrint:
1923 case Builtin::BI__builtin_lrintf:
1924 case Builtin::BI__builtin_lrintl:
1925 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1926 *this, E, Intrinsic::lrint,
1927 Intrinsic::experimental_constrained_lrint));
1928
1929 case Builtin::BIllrint:
1930 case Builtin::BIllrintf:
1931 case Builtin::BIllrintl:
1932 case Builtin::BI__builtin_llrint:
1933 case Builtin::BI__builtin_llrintf:
1934 case Builtin::BI__builtin_llrintl:
1935 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1936 *this, E, Intrinsic::llrint,
1937 Intrinsic::experimental_constrained_llrint));
1938
1939 default:
1940 break;
1941 }
1942 }
1943
1944 switch (BuiltinID) {
1945 default: break;
1946 case Builtin::BI__builtin___CFStringMakeConstantString:
1947 case Builtin::BI__builtin___NSStringMakeConstantString:
1948 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
1949 case Builtin::BI__builtin_stdarg_start:
1950 case Builtin::BI__builtin_va_start:
1951 case Builtin::BI__va_start:
1952 case Builtin::BI__builtin_va_end:
1953 return RValue::get(
1954 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
1955 ? EmitScalarExpr(E->getArg(0))
1956 : EmitVAListRef(E->getArg(0)).getPointer(),
1957 BuiltinID != Builtin::BI__builtin_va_end));
1958 case Builtin::BI__builtin_va_copy: {
1959 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
1960 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
1961
1962 llvm::Type *Type = Int8PtrTy;
1963
1964 DstPtr = Builder.CreateBitCast(DstPtr, Type);
1965 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
1966 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
1967 {DstPtr, SrcPtr}));
1968 }
1969 case Builtin::BI__builtin_abs:
1970 case Builtin::BI__builtin_labs:
1971 case Builtin::BI__builtin_llabs: {
1972 // X < 0 ? -X : X
1973 // The negation has 'nsw' because abs of INT_MIN is undefined.
1974 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1975 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
1976 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
1977 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
1978 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
1979 return RValue::get(Result);
1980 }
1981 case Builtin::BI__builtin_complex: {
1982 Value *Real = EmitScalarExpr(E->getArg(0));
1983 Value *Imag = EmitScalarExpr(E->getArg(1));
1984 return RValue::getComplex({Real, Imag});
1985 }
1986 case Builtin::BI__builtin_conj:
1987 case Builtin::BI__builtin_conjf:
1988 case Builtin::BI__builtin_conjl:
1989 case Builtin::BIconj:
1990 case Builtin::BIconjf:
1991 case Builtin::BIconjl: {
1992 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
1993 Value *Real = ComplexVal.first;
1994 Value *Imag = ComplexVal.second;
1995 Imag = Builder.CreateFNeg(Imag, "neg");
1996 return RValue::getComplex(std::make_pair(Real, Imag));
1997 }
1998 case Builtin::BI__builtin_creal:
1999 case Builtin::BI__builtin_crealf:
2000 case Builtin::BI__builtin_creall:
2001 case Builtin::BIcreal:
2002 case Builtin::BIcrealf:
2003 case Builtin::BIcreall: {
2004 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2005 return RValue::get(ComplexVal.first);
2006 }
2007
2008 case Builtin::BI__builtin_dump_struct: {
2009 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2010 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2011 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2012
2013 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2014 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2015
2016 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2017 QualType Arg0Type = Arg0->getType()->getPointeeType();
2018
2019 Value *RecordPtr = EmitScalarExpr(Arg0);
2020 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2021 {LLVMFuncType, Func}, 0);
2022 return RValue::get(Res);
2023 }
2024
2025 case Builtin::BI__builtin_preserve_access_index: {
2026 // Only enable the preserved access index region when debuginfo
2027 // is available, as debuginfo is needed to preserve the user-level
2028 // access pattern.
2029 if (!getDebugInfo()) {
2030 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2031 return RValue::get(EmitScalarExpr(E->getArg(0)));
2032 }
2033
2034 // Nested builtin_preserve_access_index() not supported
2035 if (IsInPreservedAIRegion) {
2036 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2037 return RValue::get(EmitScalarExpr(E->getArg(0)));
2038 }
2039
2040 IsInPreservedAIRegion = true;
2041 Value *Res = EmitScalarExpr(E->getArg(0));
2042 IsInPreservedAIRegion = false;
2043 return RValue::get(Res);
2044 }
2045
2046 case Builtin::BI__builtin_cimag:
2047 case Builtin::BI__builtin_cimagf:
2048 case Builtin::BI__builtin_cimagl:
2049 case Builtin::BIcimag:
2050 case Builtin::BIcimagf:
2051 case Builtin::BIcimagl: {
2052 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2053 return RValue::get(ComplexVal.second);
2054 }
2055
2056 case Builtin::BI__builtin_clrsb:
2057 case Builtin::BI__builtin_clrsbl:
2058 case Builtin::BI__builtin_clrsbll: {
2059 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2060 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2061
2062 llvm::Type *ArgType = ArgValue->getType();
2063 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2064
2065 llvm::Type *ResultType = ConvertType(E->getType());
2066 Value *Zero = llvm::Constant::getNullValue(ArgType);
2067 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2068 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2069 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2070 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2071 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2072 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2073 "cast");
2074 return RValue::get(Result);
2075 }
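// Editor's sketch (illustrative): for 32-bit int the sequence above gives
//   __builtin_clrsb(0)  == 31   // ctlz(0) - 1, zero is defined (getFalse)
//   __builtin_clrsb(-1) == 31   // ctlz(~(-1)) - 1 == ctlz(0) - 1
//   __builtin_clrsb(1)  == 30   // ctlz(1) - 1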
2076 case Builtin::BI__builtin_ctzs:
2077 case Builtin::BI__builtin_ctz:
2078 case Builtin::BI__builtin_ctzl:
2079 case Builtin::BI__builtin_ctzll: {
2080 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2081
2082 llvm::Type *ArgType = ArgValue->getType();
2083 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2084
2085 llvm::Type *ResultType = ConvertType(E->getType());
2086 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2087 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2088 if (Result->getType() != ResultType)
2089 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2090 "cast");
2091 return RValue::get(Result);
2092 }
2093 case Builtin::BI__builtin_clzs:
2094 case Builtin::BI__builtin_clz:
2095 case Builtin::BI__builtin_clzl:
2096 case Builtin::BI__builtin_clzll: {
2097 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2098
2099 llvm::Type *ArgType = ArgValue->getType();
2100 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2101
2102 llvm::Type *ResultType = ConvertType(E->getType());
2103 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2104 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2105 if (Result->getType() != ResultType)
2106 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2107 "cast");
2108 return RValue::get(Result);
2109 }
2110 case Builtin::BI__builtin_ffs:
2111 case Builtin::BI__builtin_ffsl:
2112 case Builtin::BI__builtin_ffsll: {
2113 // ffs(x) -> x ? cttz(x) + 1 : 0
2114 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2115
2116 llvm::Type *ArgType = ArgValue->getType();
2117 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2118
2119 llvm::Type *ResultType = ConvertType(E->getType());
2120 Value *Tmp =
2121 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2122 llvm::ConstantInt::get(ArgType, 1));
2123 Value *Zero = llvm::Constant::getNullValue(ArgType);
2124 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2125 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2126 if (Result->getType() != ResultType)
2127 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2128 "cast");
2129 return RValue::get(Result);
2130 }
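// Editor's sketch (illustrative): ffs examples for 32-bit int:
//   __builtin_ffs(0)    == 0   // the IsZero select path
//   __builtin_ffs(1)    == 1   // cttz(1) == 0, plus 1
//   __builtin_ffs(0x28) == 4   // cttz(0x28) == 3, plus 1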
2131 case Builtin::BI__builtin_parity:
2132 case Builtin::BI__builtin_parityl:
2133 case Builtin::BI__builtin_parityll: {
2134 // parity(x) -> ctpop(x) & 1
2135 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2136
2137 llvm::Type *ArgType = ArgValue->getType();
2138 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2139
2140 llvm::Type *ResultType = ConvertType(E->getType());
2141 Value *Tmp = Builder.CreateCall(F, ArgValue);
2142 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2143 if (Result->getType() != ResultType)
2144 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2145 "cast");
2146 return RValue::get(Result);
2147 }
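// Editor's sketch (illustrative): parity examples:
//   __builtin_parity(0xB) == 1   // ctpop(0b1011) == 3, odd
//   __builtin_parity(0x9) == 0   // ctpop(0b1001) == 2, even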
2148 case Builtin::BI__lzcnt16:
2149 case Builtin::BI__lzcnt:
2150 case Builtin::BI__lzcnt64: {
2151 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2152
2153 llvm::Type *ArgType = ArgValue->getType();
2154 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2155
2156 llvm::Type *ResultType = ConvertType(E->getType());
2157 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2158 if (Result->getType() != ResultType)
2159 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2160 "cast");
2161 return RValue::get(Result);
2162 }
2163 case Builtin::BI__popcnt16:
2164 case Builtin::BI__popcnt:
2165 case Builtin::BI__popcnt64:
2166 case Builtin::BI__builtin_popcount:
2167 case Builtin::BI__builtin_popcountl:
2168 case Builtin::BI__builtin_popcountll: {
2169 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2170
2171 llvm::Type *ArgType = ArgValue->getType();
2172 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2173
2174 llvm::Type *ResultType = ConvertType(E->getType());
2175 Value *Result = Builder.CreateCall(F, ArgValue);
2176 if (Result->getType() != ResultType)
2177 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2178 "cast");
2179 return RValue::get(Result);
2180 }
2181 case Builtin::BI__builtin_unpredictable: {
2182 // Always return the argument of __builtin_unpredictable. LLVM does not
2183 // handle this builtin. Metadata for this builtin should be added directly
2184 // to instructions such as branches or switches that use it.
2185 return RValue::get(EmitScalarExpr(E->getArg(0)));
2186 }
2187 case Builtin::BI__builtin_expect: {
2188 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2189 llvm::Type *ArgType = ArgValue->getType();
2190
2191 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2192 // Don't generate llvm.expect on -O0 as the backend won't use it for
2193 // anything.
2194 // Note, we still IRGen ExpectedValue because it could have side-effects.
2195 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2196 return RValue::get(ArgValue);
2197
2198 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2199 Value *Result =
2200 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2201 return RValue::get(Result);
2202 }
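// Editor's sketch (illustrative, hypothetical handler): a typical caller
// feeding the llvm.expect intrinsic emitted above (only at -O1 and higher):
//   if (__builtin_expect(ptr == NULL, 0)) { handle_rare_error(); }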
2203 case Builtin::BI__builtin_expect_with_probability: {
2204 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2205 llvm::Type *ArgType = ArgValue->getType();
2206
2207 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2208 llvm::APFloat Probability(0.0);
2209 const Expr *ProbArg = E->getArg(2);
2210 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2211 assert(EvalSucceed && "probability should be able to evaluate as float");
2212 (void)EvalSucceed;
2213 bool LoseInfo = false;
2214 Probability.convert(llvm::APFloat::IEEEdouble(),
2215 llvm::RoundingMode::Dynamic, &LoseInfo);
2216 llvm::Type *Ty = ConvertType(ProbArg->getType());
2217 Constant *Confidence = ConstantFP::get(Ty, Probability);
2218 // Don't generate llvm.expect.with.probability on -O0 as the backend
2219 // won't use it for anything.
2220 // Note, we still IRGen ExpectedValue because it could have side-effects.
2221 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2222 return RValue::get(ArgValue);
2223
2224 Function *FnExpect =
2225 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2226 Value *Result = Builder.CreateCall(
2227 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2228 return RValue::get(Result);
2229 }
2230 case Builtin::BI__builtin_assume_aligned: {
2231 const Expr *Ptr = E->getArg(0);
2232 Value *PtrValue = EmitScalarExpr(Ptr);
2233 Value *OffsetValue =
2234 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2235
2236 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2237 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2238 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2239 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2240 llvm::Value::MaximumAlignment);
2241
2242 emitAlignmentAssumption(PtrValue, Ptr,
2243 /*The expr loc is sufficient.*/ SourceLocation(),
2244 AlignmentCI, OffsetValue);
2245 return RValue::get(PtrValue);
2246 }
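// Editor's sketch (illustrative, hypothetical buffer): uses that reach the
// alignment assumption emitted above:
//   void *p = __builtin_assume_aligned(buf, 64);        // two-argument form
//   void *q = __builtin_assume_aligned(buf, 64, 16);    // with misalignment offset
// Constant alignments above llvm::Value::MaximumAlignment are clamped to it.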
2247 case Builtin::BI__assume:
2248 case Builtin::BI__builtin_assume: {
2249 if (E->getArg(0)->HasSideEffects(getContext()))
2250 return RValue::get(nullptr);
2251
2252 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2253 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2254 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2255 }
2256 case Builtin::BI__builtin_bswap16:
2257 case Builtin::BI__builtin_bswap32:
2258 case Builtin::BI__builtin_bswap64: {
2259 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2260 }
2261 case Builtin::BI__builtin_bitreverse8:
2262 case Builtin::BI__builtin_bitreverse16:
2263 case Builtin::BI__builtin_bitreverse32:
2264 case Builtin::BI__builtin_bitreverse64: {
2265 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2266 }
2267 case Builtin::BI__builtin_rotateleft8:
2268 case Builtin::BI__builtin_rotateleft16:
2269 case Builtin::BI__builtin_rotateleft32:
2270 case Builtin::BI__builtin_rotateleft64:
2271 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2272 case Builtin::BI_rotl16:
2273 case Builtin::BI_rotl:
2274 case Builtin::BI_lrotl:
2275 case Builtin::BI_rotl64:
2276 return emitRotate(E, false);
2277
2278 case Builtin::BI__builtin_rotateright8:
2279 case Builtin::BI__builtin_rotateright16:
2280 case Builtin::BI__builtin_rotateright32:
2281 case Builtin::BI__builtin_rotateright64:
2282 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2283 case Builtin::BI_rotr16:
2284 case Builtin::BI_rotr:
2285 case Builtin::BI_lrotr:
2286 case Builtin::BI_rotr64:
2287 return emitRotate(E, true);
2288
2289 case Builtin::BI__builtin_constant_p: {
2290 llvm::Type *ResultType = ConvertType(E->getType());
2291
2292 const Expr *Arg = E->getArg(0);
2293 QualType ArgType = Arg->getType();
2294 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2295 // and likely a mistake.
2296 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2297 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2298 // Per the GCC documentation, only numeric constants are recognized after
2299 // inlining.
2300 return RValue::get(ConstantInt::get(ResultType, 0));
2301
2302 if (Arg->HasSideEffects(getContext()))
2303 // The argument is unevaluated, so be conservative if it might have
2304 // side-effects.
2305 return RValue::get(ConstantInt::get(ResultType, 0));
2306
2307 Value *ArgValue = EmitScalarExpr(Arg);
2308 if (ArgType->isObjCObjectPointerType()) {
2309 // Convert Objective-C objects to id because we cannot distinguish between
2310 // LLVM types for Obj-C classes as they are opaque.
2311 ArgType = CGM.getContext().getObjCIdType();
2312 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2313 }
2314 Function *F =
2315 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2316 Value *Result = Builder.CreateCall(F, ArgValue);
2317 if (Result->getType() != ResultType)
2318 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2319 return RValue::get(Result);
2320 }
2321 case Builtin::BI__builtin_dynamic_object_size:
2322 case Builtin::BI__builtin_object_size: {
2323 unsigned Type =
2324 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2325 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2326
2327 // We pass this builtin onto the optimizer so that it can figure out the
2328 // object size in more complex cases.
2329 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2330 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2331 /*EmittedE=*/nullptr, IsDynamic));
2332 }
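// Editor's sketch (illustrative, hypothetical buffer and pointer): typical
// uses, with the second argument selecting the GCC object-size "type" (0-3):
//   char buf[16];
//   size_t n = __builtin_object_size(buf + 4, 0);    // 12 when statically known
//   size_t m = __builtin_dynamic_object_size(q, 0);  // q: heap pointer; may need a runtime computation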
2333 case Builtin::BI__builtin_prefetch: {
2334 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
2335 // FIXME: Technically these constants should be of type 'int', yes?
2336 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2337 llvm::ConstantInt::get(Int32Ty, 0);
2338 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2339 llvm::ConstantInt::get(Int32Ty, 3);
2340 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2341 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2342 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2343 }
2344 case Builtin::BI__builtin_readcyclecounter: {
2345 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2346 return RValue::get(Builder.CreateCall(F));
2347 }
2348 case Builtin::BI__builtin___clear_cache: {
2349 Value *Begin = EmitScalarExpr(E->getArg(0));
2350 Value *End = EmitScalarExpr(E->getArg(1));
2351 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
2352 return RValue::get(Builder.CreateCall(F, {Begin, End}));
2353 }
2354 case Builtin::BI__builtin_trap:
2355 return RValue::get(EmitTrapCall(Intrinsic::trap));
2356 case Builtin::BI__debugbreak:
2357 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2358 case Builtin::BI__builtin_unreachable: {
2359 EmitUnreachable(E->getExprLoc());
2360
2361 // We do need to preserve an insertion point.
2362 EmitBlock(createBasicBlock("unreachable.cont"));
2363
2364 return RValue::get(nullptr);
2365 }
2366
2367 case Builtin::BI__builtin_powi:
2368 case Builtin::BI__builtin_powif:
2369 case Builtin::BI__builtin_powil:
2370 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2371 *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
2372
2373 case Builtin::BI__builtin_isgreater:
2374 case Builtin::BI__builtin_isgreaterequal:
2375 case Builtin::BI__builtin_isless:
2376 case Builtin::BI__builtin_islessequal:
2377 case Builtin::BI__builtin_islessgreater:
2378 case Builtin::BI__builtin_isunordered: {
2379 // Ordered comparisons: we know the arguments to these are matching scalar
2380 // floating point values.
2381 Value *LHS = EmitScalarExpr(E->getArg(0));
2382 Value *RHS = EmitScalarExpr(E->getArg(1));
2383
2384 switch (BuiltinID) {
2385 default: llvm_unreachable("Unknown ordered comparison")::llvm::llvm_unreachable_internal("Unknown ordered comparison"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 2385)
;
2386 case Builtin::BI__builtin_isgreater:
2387 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
2388 break;
2389 case Builtin::BI__builtin_isgreaterequal:
2390 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
2391 break;
2392 case Builtin::BI__builtin_isless:
2393 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
2394 break;
2395 case Builtin::BI__builtin_islessequal:
2396 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
2397 break;
2398 case Builtin::BI__builtin_islessgreater:
2399 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
2400 break;
2401 case Builtin::BI__builtin_isunordered:
2402 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
2403 break;
2404 }
2405 // ZExt bool to int type.
2406 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
2407 }
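
Each of these builtins lowers to a single ordered comparison, so a NaN operand makes the result false for all of them except __builtin_isunordered; an assumed sketch:

bool safe_less(double a, double b) {
  return __builtin_isless(a, b);      // fcmp olt: false if a or b is NaN
}
bool either_nan(double a, double b) {
  return __builtin_isunordered(a, b); // fcmp uno: true iff a or b is NaN
}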
2408 case Builtin::BI__builtin_isnan: {
2409 Value *V = EmitScalarExpr(E->getArg(0));
2410 V = Builder.CreateFCmpUNO(V, V, "cmp");
2411 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2412 }
2413
2414 case Builtin::BI__builtin_matrix_transpose: {
2415 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2416 Value *MatValue = EmitScalarExpr(E->getArg(0));
2417 MatrixBuilder<CGBuilderTy> MB(Builder);
2418 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
2419 MatrixTy->getNumColumns());
2420 return RValue::get(Result);
2421 }
2422
2423 case Builtin::BI__builtin_matrix_column_major_load: {
2424 MatrixBuilder<CGBuilderTy> MB(Builder);
2425 // Emit everything that isn't dependent on the first parameter type
2426 Value *Stride = EmitScalarExpr(E->getArg(3));
2427 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
2428 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
2429 assert(PtrTy && "arg0 must be of pointer type");
2430 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2431
2432 Address Src = EmitPointerWithAlignment(E->getArg(0));
2433 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
2434 E->getArg(0)->getExprLoc(), FD, 0);
2435 Value *Result = MB.CreateColumnMajorLoad(
2436 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
2437 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
2438 "matrix");
2439 return RValue::get(Result);
2440 }
2441
2442 case Builtin::BI__builtin_matrix_column_major_store: {
2443 MatrixBuilder<CGBuilderTy> MB(Builder);
2444 Value *Matrix = EmitScalarExpr(E->getArg(0));
2445 Address Dst = EmitPointerWithAlignment(E->getArg(1));
2446 Value *Stride = EmitScalarExpr(E->getArg(2));
2447
2448 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2449 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
2450 assert(PtrTy && "arg1 must be of pointer type");
2451 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2452
2453 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
2454 E->getArg(1)->getExprLoc(), FD, 0);
2455 Value *Result = MB.CreateColumnMajorStore(
2456 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
2457 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
2458 return RValue::get(Result);
2459 }
2460
2461 case Builtin::BIfinite:
2462 case Builtin::BI__finite:
2463 case Builtin::BIfinitef:
2464 case Builtin::BI__finitef:
2465 case Builtin::BIfinitel:
2466 case Builtin::BI__finitel:
2467 case Builtin::BI__builtin_isinf:
2468 case Builtin::BI__builtin_isfinite: {
2469 // isinf(x) --> fabs(x) == infinity
2470 // isfinite(x) --> fabs(x) != infinity
2471 // x != NaN via the ordered compare in either case.
2472 Value *V = EmitScalarExpr(E->getArg(0));
2473 Value *Fabs = EmitFAbs(*this, V);
2474 Constant *Infinity = ConstantFP::getInfinity(V->getType());
2475 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
2476 ? CmpInst::FCMP_OEQ
2477 : CmpInst::FCMP_ONE;
2478 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
2479 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
2480 }
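
An equivalent source-level form of the rewrite described in the comment above (assumed sketch); the ordered compare is what makes a NaN input yield false in both cases.

#include <cmath>

// isinf: ordered compare of fabs(x) against infinity; NaN compares false.
bool my_isinf(double x) { return std::fabs(x) == INFINITY; }
// isfinite: the emitted fcmp one is an *ordered* !=, so the extra x == x
// term is needed here to reject NaN the same way.
bool my_isfinite(double x) { return x == x && std::fabs(x) != INFINITY; }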
2481
2482 case Builtin::BI__builtin_isinf_sign: {
2483 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
2484 Value *Arg = EmitScalarExpr(E->getArg(0));
2485 Value *AbsArg = EmitFAbs(*this, Arg);
2486 Value *IsInf = Builder.CreateFCmpOEQ(
2487 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
2488 Value *IsNeg = EmitSignBit(*this, Arg);
2489
2490 llvm::Type *IntTy = ConvertType(E->getType());
2491 Value *Zero = Constant::getNullValue(IntTy);
2492 Value *One = ConstantInt::get(IntTy, 1);
2493 Value *NegativeOne = ConstantInt::get(IntTy, -1);
2494 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
2495 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
2496 return RValue::get(Result);
2497 }
2498
2499 case Builtin::BI__builtin_isnormal: {
2500 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
2501 Value *V = EmitScalarExpr(E->getArg(0));
2502 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
2503
2504 Value *Abs = EmitFAbs(*this, V);
2505 Value *IsLessThanInf =
2506 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
2507 APFloat Smallest = APFloat::getSmallestNormalized(
2508 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
2509 Value *IsNormal =
2510 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
2511 "isnormal");
2512 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
2513 V = Builder.CreateAnd(V, IsNormal, "and");
2514 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2515 }
2516
2517 case Builtin::BI__builtin_flt_rounds: {
2518 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
2519
2520 llvm::Type *ResultType = ConvertType(E->getType());
2521 Value *Result = Builder.CreateCall(F);
2522 if (Result->getType() != ResultType)
2523 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2524 "cast");
2525 return RValue::get(Result);
2526 }
2527
2528 case Builtin::BI__builtin_fpclassify: {
2529 Value *V = EmitScalarExpr(E->getArg(5));
2530 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
2531
2532 // Create Result
2533 BasicBlock *Begin = Builder.GetInsertBlock();
2534 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
2535 Builder.SetInsertPoint(End);
2536 PHINode *Result =
2537 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
2538 "fpclassify_result");
2539
2540 // if (V==0) return FP_ZERO
2541 Builder.SetInsertPoint(Begin);
2542 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
2543 "iszero");
2544 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
2545 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
2546 Builder.CreateCondBr(IsZero, End, NotZero);
2547 Result->addIncoming(ZeroLiteral, Begin);
2548
2549 // if (V != V) return FP_NAN
2550 Builder.SetInsertPoint(NotZero);
2551 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
2552 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
2553 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
2554 Builder.CreateCondBr(IsNan, End, NotNan);
2555 Result->addIncoming(NanLiteral, NotZero);
2556
2557 // if (fabs(V) == infinity) return FP_INFINITY
2558 Builder.SetInsertPoint(NotNan);
2559 Value *VAbs = EmitFAbs(*this, V);
2560 Value *IsInf =
2561 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
2562 "isinf");
2563 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
2564 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
2565 Builder.CreateCondBr(IsInf, End, NotInf);
2566 Result->addIncoming(InfLiteral, NotNan);
2567
2568 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
2569 Builder.SetInsertPoint(NotInf);
2570 APFloat Smallest = APFloat::getSmallestNormalized(
2571 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
2572 Value *IsNormal =
2573 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
2574 "isnormal");
2575 Value *NormalResult =
2576 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
2577 EmitScalarExpr(E->getArg(3)));
2578 Builder.CreateBr(End);
2579 Result->addIncoming(NormalResult, NotInf);
2580
2581 // return Result
2582 Builder.SetInsertPoint(End);
2583 return RValue::get(Result);
2584 }
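
A reference sketch (assumed) of the decision chain the basic blocks above implement, matching the argument order __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO, x) expects:

#include <cfloat>
#include <cmath>

int classify(double x) {
  if (x == 0.0)                 return FP_ZERO;     // else fpclassify_not_zero
  if (x != x)                   return FP_NAN;      // else fpclassify_not_nan
  if (std::fabs(x) == INFINITY) return FP_INFINITE; // else fpclassify_not_inf
  if (std::fabs(x) >= DBL_MIN)  return FP_NORMAL;   // smallest normalized double
  return FP_SUBNORMAL;
}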
2585
2586 case Builtin::BIalloca:
2587 case Builtin::BI_alloca:
2588 case Builtin::BI__builtin_alloca: {
2589 Value *Size = EmitScalarExpr(E->getArg(0));
2590 const TargetInfo &TI = getContext().getTargetInfo();
2591 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
2592 const Align SuitableAlignmentInBytes =
2593 CGM.getContext()
2594 .toCharUnitsFromBits(TI.getSuitableAlign())
2595 .getAsAlign();
2596 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2597 AI->setAlignment(SuitableAlignmentInBytes);
2598 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
2599 return RValue::get(AI);
2600 }
2601
2602 case Builtin::BI__builtin_alloca_with_align: {
2603 Value *Size = EmitScalarExpr(E->getArg(0));
2604 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
2605 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
2606 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
2607 const Align AlignmentInBytes =
2608 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
2609 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2610 AI->setAlignment(AlignmentInBytes);
2611 initializeAlloca(*this, AI, Size, AlignmentInBytes);
2612 return RValue::get(AI);
2613 }
2614
2615 case Builtin::BIbzero:
2616 case Builtin::BI__builtin_bzero: {
2617 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2618 Value *SizeVal = EmitScalarExpr(E->getArg(1));
2619 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2620 E->getArg(0)->getExprLoc(), FD, 0);
2621 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
2622 return RValue::get(nullptr);
2623 }
2624 case Builtin::BImemcpy:
2625 case Builtin::BI__builtin_memcpy:
2626 case Builtin::BImempcpy:
2627 case Builtin::BI__builtin_mempcpy: {
2628 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2629 Address Src = EmitPointerWithAlignment(E->getArg(1));
2630 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2631 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2632 E->getArg(0)->getExprLoc(), FD, 0);
2633 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2634 E->getArg(1)->getExprLoc(), FD, 1);
2635 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
2636 if (BuiltinID == Builtin::BImempcpy ||
2637 BuiltinID == Builtin::BI__builtin_mempcpy)
2638 return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
2639 else
2640 return RValue::get(Dest.getPointer());
2641 }
2642
2643 case Builtin::BI__builtin_memcpy_inline: {
2644 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2645 Address Src = EmitPointerWithAlignment(E->getArg(1));
2646 uint64_t Size =
2647 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
2648 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2649 E->getArg(0)->getExprLoc(), FD, 0);
2650 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2651 E->getArg(1)->getExprLoc(), FD, 1);
2652 Builder.CreateMemCpyInline(Dest, Src, Size);
2653 return RValue::get(nullptr);
2654 }
2655
2656 case Builtin::BI__builtin_char_memchr:
2657 BuiltinID = Builtin::BI__builtin_memchr;
2658 break;
2659
2660 case Builtin::BI__builtin___memcpy_chk: {
2661 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
2662 Expr::EvalResult SizeResult, DstSizeResult;
2663 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2664 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2665 break;
2666 llvm::APSInt Size = SizeResult.Val.getInt();
2667 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2668 if (Size.ugt(DstSize))
2669 break;
2670 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2671 Address Src = EmitPointerWithAlignment(E->getArg(1));
2672 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2673 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
2674 return RValue::get(Dest.getPointer());
2675 }
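
An assumed caller-side sketch of the fold: when both size arguments are compile-time constants and the copy fits, the checked variant above is emitted as a plain memcpy.

void fill(char (&dst)[16], const char *src) {
  // Both constants are known here (8 <= 16), so no runtime check is kept.
  __builtin___memcpy_chk(dst, src, 8, __builtin_object_size(dst, 0));
}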
2676
2677 case Builtin::BI__builtin_objc_memmove_collectable: {
2678 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
2679 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
2680 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2681 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
2682 DestAddr, SrcAddr, SizeVal);
2683 return RValue::get(DestAddr.getPointer());
2684 }
2685
2686 case Builtin::BI__builtin___memmove_chk: {
2687 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
2688 Expr::EvalResult SizeResult, DstSizeResult;
2689 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2690 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2691 break;
2692 llvm::APSInt Size = SizeResult.Val.getInt();
2693 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2694 if (Size.ugt(DstSize))
2695 break;
2696 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2697 Address Src = EmitPointerWithAlignment(E->getArg(1));
2698 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2699 Builder.CreateMemMove(Dest, Src, SizeVal, false);
2700 return RValue::get(Dest.getPointer());
2701 }
2702
2703 case Builtin::BImemmove:
2704 case Builtin::BI__builtin_memmove: {
2705 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2706 Address Src = EmitPointerWithAlignment(E->getArg(1));
2707 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2708 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2709 E->getArg(0)->getExprLoc(), FD, 0);
2710 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2711 E->getArg(1)->getExprLoc(), FD, 1);
2712 Builder.CreateMemMove(Dest, Src, SizeVal, false);
2713 return RValue::get(Dest.getPointer());
2714 }
2715 case Builtin::BImemset:
2716 case Builtin::BI__builtin_memset: {
2717 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2718 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2719 Builder.getInt8Ty());
2720 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2721 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2722 E->getArg(0)->getExprLoc(), FD, 0);
2723 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
2724 return RValue::get(Dest.getPointer());
2725 }
2726 case Builtin::BI__builtin___memset_chk: {
2727 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
2728 Expr::EvalResult SizeResult, DstSizeResult;
2729 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2730 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2731 break;
2732 llvm::APSInt Size = SizeResult.Val.getInt();
2733 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2734 if (Size.ugt(DstSize))
2735 break;
2736 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2737 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2738 Builder.getInt8Ty());
2739 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2740 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
2741 return RValue::get(Dest.getPointer());
2742 }
2743 case Builtin::BI__builtin_wmemcmp: {
2744 // The MSVC runtime library does not provide a definition of wmemcmp, so we
2745 // need an inline implementation.
2746 if (!getTarget().getTriple().isOSMSVCRT())
2747 break;
2748
2749 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
2750
2751 Value *Dst = EmitScalarExpr(E->getArg(0));
2752 Value *Src = EmitScalarExpr(E->getArg(1));
2753 Value *Size = EmitScalarExpr(E->getArg(2));
2754
2755 BasicBlock *Entry = Builder.GetInsertBlock();
2756 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
2757 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
2758 BasicBlock *Next = createBasicBlock("wmemcmp.next");
2759 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
2760 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
2761 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
2762
2763 EmitBlock(CmpGT);
2764 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
2765 DstPhi->addIncoming(Dst, Entry);
2766 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
2767 SrcPhi->addIncoming(Src, Entry);
2768 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
2769 SizePhi->addIncoming(Size, Entry);
2770 CharUnits WCharAlign =
2771 getContext().getTypeAlignInChars(getContext().WCharTy);
2772 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
2773 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
2774 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
2775 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
2776
2777 EmitBlock(CmpLT);
2778 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
2779 Builder.CreateCondBr(DstLtSrc, Exit, Next);
2780
2781 EmitBlock(Next);
2782 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
2783 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
2784 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
2785 Value *NextSizeEq0 =
2786 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
2787 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
2788 DstPhi->addIncoming(NextDst, Next);
2789 SrcPhi->addIncoming(NextSrc, Next);
2790 SizePhi->addIncoming(NextSize, Next);
2791
2792 EmitBlock(Exit);
2793 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
2794 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
2795 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
2796 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
2797 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
2798 return RValue::get(Ret);
2799 }
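
The wmemcmp.gt / wmemcmp.lt / wmemcmp.next / wmemcmp.exit blocks above implement the usual element-wise loop; an assumed reference version for comparison:

#include <cstddef>

// wchar_t is an unsigned 16-bit type on the MSVC targets this path serves,
// so the plain relational compares below match the unsigned icmps emitted.
int my_wmemcmp(const wchar_t *a, const wchar_t *b, size_t n) {
  for (; n != 0; --n, ++a, ++b) {
    if (*a > *b) return 1;   // wmemcmp.gt
    if (*a < *b) return -1;  // wmemcmp.lt
  }
  return 0;                  // size reached zero
}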
2800 case Builtin::BI__builtin_dwarf_cfa: {
2801 // The offset in bytes from the first argument to the CFA.
2802 //
2803 // Why on earth is this in the frontend? Is there any reason at
2804 // all that the backend can't reasonably determine this while
2805 // lowering llvm.eh.dwarf.cfa()?
2806 //
2807 // TODO: If there's a satisfactory reason, add a target hook for
2808 // this instead of hard-coding 0, which is correct for most targets.
2809 int32_t Offset = 0;
2810
2811 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
2812 return RValue::get(Builder.CreateCall(F,
2813 llvm::ConstantInt::get(Int32Ty, Offset)));
2814 }
2815 case Builtin::BI__builtin_return_address: {
2816 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2817 getContext().UnsignedIntTy);
2818 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
2819 return RValue::get(Builder.CreateCall(F, Depth));
2820 }
2821 case Builtin::BI_ReturnAddress: {
2822 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
2823 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
2824 }
2825 case Builtin::BI__builtin_frame_address: {
2826 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2827 getContext().UnsignedIntTy);
2828 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
2829 return RValue::get(Builder.CreateCall(F, Depth));
2830 }
2831 case Builtin::BI__builtin_extract_return_addr: {
2832 Value *Address = EmitScalarExpr(E->getArg(0));
2833 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
2834 return RValue::get(Result);
2835 }
2836 case Builtin::BI__builtin_frob_return_addr: {
2837 Value *Address = EmitScalarExpr(E->getArg(0));
2838 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
2839 return RValue::get(Result);
2840 }
2841 case Builtin::BI__builtin_dwarf_sp_column: {
2842 llvm::IntegerType *Ty
2843 = cast<llvm::IntegerType>(ConvertType(E->getType()));
2844 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
2845 if (Column == -1) {
2846 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
2847 return RValue::get(llvm::UndefValue::get(Ty));
2848 }
2849 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
2850 }
2851 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
2852 Value *Address = EmitScalarExpr(E->getArg(0));
2853 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
2854 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
2855 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
2856 }
2857 case Builtin::BI__builtin_eh_return: {
2858 Value *Int = EmitScalarExpr(E->getArg(0));
2859 Value *Ptr = EmitScalarExpr(E->getArg(1));
2860
2861 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
2862 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
2863 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
2864 Function *F =
2865 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
2866 : Intrinsic::eh_return_i64);
2867 Builder.CreateCall(F, {Int, Ptr});
2868 Builder.CreateUnreachable();
2869
2870 // We do need to preserve an insertion point.
2871 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
2872
2873 return RValue::get(nullptr);
2874 }
2875 case Builtin::BI__builtin_unwind_init: {
2876 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
2877 return RValue::get(Builder.CreateCall(F));
2878 }
2879 case Builtin::BI__builtin_extend_pointer: {
2880 // Extends a pointer to the size of an _Unwind_Word, which is
2881 // uint64_t on all platforms. Generally this gets poked into a
2882 // register and eventually used as an address, so if the
2883 // addressing registers are wider than pointers and the platform
2884 // doesn't implicitly ignore high-order bits when doing
2885 // addressing, we need to make sure we zext / sext based on
2886 // the platform's expectations.
2887 //
2888 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
2889
2890 // Cast the pointer to intptr_t.
2891 Value *Ptr = EmitScalarExpr(E->getArg(0));
2892 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
2893
2894 // If that's 64 bits, we're done.
2895 if (IntPtrTy->getBitWidth() == 64)
2896 return RValue::get(Result);
2897
2898 // Otherwise, ask the codegen data what to do.
2899 if (getTargetHooks().extendPointerWithSExt())
2900 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
2901 else
2902 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
2903 }
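
A sketch of the same decision in plain C++ (assumed; target_uses_sext stands in for the extendPointerWithSExt target hook):

#include <cstdint>

uint64_t extend_pointer(const void *p, bool target_uses_sext) {
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  if (sizeof(uintptr_t) == 8)
    return v;                               // already 64 bits wide
  if (target_uses_sext)
    return (uint64_t)(int64_t)(intptr_t)v;  // sign-extend
  return (uint64_t)v;                       // zero-extend
}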
2904 case Builtin::BI__builtin_setjmp: {
2905 // Buffer is a void**.
2906 Address Buf = EmitPointerWithAlignment(E->getArg(0));
2907
2908 // Store the frame pointer to the setjmp buffer.
2909 Value *FrameAddr = Builder.CreateCall(
2910 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
2911 ConstantInt::get(Int32Ty, 0));
2912 Builder.CreateStore(FrameAddr, Buf);
2913
2914 // Store the stack pointer to the setjmp buffer.
2915 Value *StackAddr =
2916 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
2917 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
2918 Builder.CreateStore(StackAddr, StackSaveSlot);
2919
2920 // Call LLVM's EH setjmp, which is lightweight.
2921 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
2922 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2923 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
2924 }
2925 case Builtin::BI__builtin_longjmp: {
2926 Value *Buf = EmitScalarExpr(E->getArg(0));
2927 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2928
2929 // Call LLVM's EH longjmp, which is lightweight.
2930 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
2931
2932 // longjmp doesn't return; mark this as unreachable.
2933 Builder.CreateUnreachable();
2934
2935 // We do need to preserve an insertion point.
2936 EmitBlock(createBasicBlock("longjmp.cont"));
2937
2938 return RValue::get(nullptr);
2939 }
2940 case Builtin::BI__builtin_launder: {
2941 const Expr *Arg = E->getArg(0);
2942 QualType ArgTy = Arg->getType()->getPointeeType();
2943 Value *Ptr = EmitScalarExpr(Arg);
2944 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
2945 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
2946
2947 return RValue::get(Ptr);
2948 }
2949 case Builtin::BI__sync_fetch_and_add:
2950 case Builtin::BI__sync_fetch_and_sub:
2951 case Builtin::BI__sync_fetch_and_or:
2952 case Builtin::BI__sync_fetch_and_and:
2953 case Builtin::BI__sync_fetch_and_xor:
2954 case Builtin::BI__sync_fetch_and_nand:
2955 case Builtin::BI__sync_add_and_fetch:
2956 case Builtin::BI__sync_sub_and_fetch:
2957 case Builtin::BI__sync_and_and_fetch:
2958 case Builtin::BI__sync_or_and_fetch:
2959 case Builtin::BI__sync_xor_and_fetch:
2960 case Builtin::BI__sync_nand_and_fetch:
2961 case Builtin::BI__sync_val_compare_and_swap:
2962 case Builtin::BI__sync_bool_compare_and_swap:
2963 case Builtin::BI__sync_lock_test_and_set:
2964 case Builtin::BI__sync_lock_release:
2965 case Builtin::BI__sync_swap:
2966 llvm_unreachable("Shouldn't make it through sema")::llvm::llvm_unreachable_internal("Shouldn't make it through sema"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 2966)
;
2967 case Builtin::BI__sync_fetch_and_add_1:
2968 case Builtin::BI__sync_fetch_and_add_2:
2969 case Builtin::BI__sync_fetch_and_add_4:
2970 case Builtin::BI__sync_fetch_and_add_8:
2971 case Builtin::BI__sync_fetch_and_add_16:
2972 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
2973 case Builtin::BI__sync_fetch_and_sub_1:
2974 case Builtin::BI__sync_fetch_and_sub_2:
2975 case Builtin::BI__sync_fetch_and_sub_4:
2976 case Builtin::BI__sync_fetch_and_sub_8:
2977 case Builtin::BI__sync_fetch_and_sub_16:
2978 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
2979 case Builtin::BI__sync_fetch_and_or_1:
2980 case Builtin::BI__sync_fetch_and_or_2:
2981 case Builtin::BI__sync_fetch_and_or_4:
2982 case Builtin::BI__sync_fetch_and_or_8:
2983 case Builtin::BI__sync_fetch_and_or_16:
2984 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
2985 case Builtin::BI__sync_fetch_and_and_1:
2986 case Builtin::BI__sync_fetch_and_and_2:
2987 case Builtin::BI__sync_fetch_and_and_4:
2988 case Builtin::BI__sync_fetch_and_and_8:
2989 case Builtin::BI__sync_fetch_and_and_16:
2990 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
2991 case Builtin::BI__sync_fetch_and_xor_1:
2992 case Builtin::BI__sync_fetch_and_xor_2:
2993 case Builtin::BI__sync_fetch_and_xor_4:
2994 case Builtin::BI__sync_fetch_and_xor_8:
2995 case Builtin::BI__sync_fetch_and_xor_16:
2996 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
2997 case Builtin::BI__sync_fetch_and_nand_1:
2998 case Builtin::BI__sync_fetch_and_nand_2:
2999 case Builtin::BI__sync_fetch_and_nand_4:
3000 case Builtin::BI__sync_fetch_and_nand_8:
3001 case Builtin::BI__sync_fetch_and_nand_16:
3002 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
3003
3004 // Clang extensions: not overloaded yet.
3005 case Builtin::BI__sync_fetch_and_min:
3006 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3007 case Builtin::BI__sync_fetch_and_max:
3008 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3009 case Builtin::BI__sync_fetch_and_umin:
3010 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3011 case Builtin::BI__sync_fetch_and_umax:
3012 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3013
3014 case Builtin::BI__sync_add_and_fetch_1:
3015 case Builtin::BI__sync_add_and_fetch_2:
3016 case Builtin::BI__sync_add_and_fetch_4:
3017 case Builtin::BI__sync_add_and_fetch_8:
3018 case Builtin::BI__sync_add_and_fetch_16:
3019 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3020 llvm::Instruction::Add);
3021 case Builtin::BI__sync_sub_and_fetch_1:
3022 case Builtin::BI__sync_sub_and_fetch_2:
3023 case Builtin::BI__sync_sub_and_fetch_4:
3024 case Builtin::BI__sync_sub_and_fetch_8:
3025 case Builtin::BI__sync_sub_and_fetch_16:
3026 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3027 llvm::Instruction::Sub);
3028 case Builtin::BI__sync_and_and_fetch_1:
3029 case Builtin::BI__sync_and_and_fetch_2:
3030 case Builtin::BI__sync_and_and_fetch_4:
3031 case Builtin::BI__sync_and_and_fetch_8:
3032 case Builtin::BI__sync_and_and_fetch_16:
3033 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3034 llvm::Instruction::And);
3035 case Builtin::BI__sync_or_and_fetch_1:
3036 case Builtin::BI__sync_or_and_fetch_2:
3037 case Builtin::BI__sync_or_and_fetch_4:
3038 case Builtin::BI__sync_or_and_fetch_8:
3039 case Builtin::BI__sync_or_and_fetch_16:
3040 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3041 llvm::Instruction::Or);
3042 case Builtin::BI__sync_xor_and_fetch_1:
3043 case Builtin::BI__sync_xor_and_fetch_2:
3044 case Builtin::BI__sync_xor_and_fetch_4:
3045 case Builtin::BI__sync_xor_and_fetch_8:
3046 case Builtin::BI__sync_xor_and_fetch_16:
3047 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3048 llvm::Instruction::Xor);
3049 case Builtin::BI__sync_nand_and_fetch_1:
3050 case Builtin::BI__sync_nand_and_fetch_2:
3051 case Builtin::BI__sync_nand_and_fetch_4:
3052 case Builtin::BI__sync_nand_and_fetch_8:
3053 case Builtin::BI__sync_nand_and_fetch_16:
3054 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3055 llvm::Instruction::And, true);
3056
3057 case Builtin::BI__sync_val_compare_and_swap_1:
3058 case Builtin::BI__sync_val_compare_and_swap_2:
3059 case Builtin::BI__sync_val_compare_and_swap_4:
3060 case Builtin::BI__sync_val_compare_and_swap_8:
3061 case Builtin::BI__sync_val_compare_and_swap_16:
3062 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3063
3064 case Builtin::BI__sync_bool_compare_and_swap_1:
3065 case Builtin::BI__sync_bool_compare_and_swap_2:
3066 case Builtin::BI__sync_bool_compare_and_swap_4:
3067 case Builtin::BI__sync_bool_compare_and_swap_8:
3068 case Builtin::BI__sync_bool_compare_and_swap_16:
3069 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3070
3071 case Builtin::BI__sync_swap_1:
3072 case Builtin::BI__sync_swap_2:
3073 case Builtin::BI__sync_swap_4:
3074 case Builtin::BI__sync_swap_8:
3075 case Builtin::BI__sync_swap_16:
3076 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3077
3078 case Builtin::BI__sync_lock_test_and_set_1:
3079 case Builtin::BI__sync_lock_test_and_set_2:
3080 case Builtin::BI__sync_lock_test_and_set_4:
3081 case Builtin::BI__sync_lock_test_and_set_8:
3082 case Builtin::BI__sync_lock_test_and_set_16:
3083 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3084
3085 case Builtin::BI__sync_lock_release_1:
3086 case Builtin::BI__sync_lock_release_2:
3087 case Builtin::BI__sync_lock_release_4:
3088 case Builtin::BI__sync_lock_release_8:
3089 case Builtin::BI__sync_lock_release_16: {
3090 Value *Ptr = EmitScalarExpr(E->getArg(0));
3091 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3092 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3093 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3094 StoreSize.getQuantity() * 8);
3095 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3096 llvm::StoreInst *Store =
3097 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3098 StoreSize);
3099 Store->setAtomic(llvm::AtomicOrdering::Release);
3100 return RValue::get(nullptr);
3101 }
3102
3103 case Builtin::BI__sync_synchronize: {
3104 // We assume this is supposed to correspond to a C++0x-style
3105 // sequentially-consistent fence (i.e. this is only usable for
3106 // synchronization, not device I/O or anything like that). This intrinsic
3107 // is really badly designed in the sense that in theory, there isn't
3108 // any way to safely use it... but in practice, it mostly works
3109 // to use it with non-atomic loads and stores to get acquire/release
3110 // semantics.
3111 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3112 return RValue::get(nullptr);
3113 }
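
Rough equivalence, as an assumed sketch: the legacy builtin handled above behaves like a full sequentially consistent fence.

#include <atomic>

void legacy_fence() { __sync_synchronize(); }
void modern_fence() { std::atomic_thread_fence(std::memory_order_seq_cst); }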
3114
3115 case Builtin::BI__builtin_nontemporal_load:
3116 return RValue::get(EmitNontemporalLoad(*this, E));
3117 case Builtin::BI__builtin_nontemporal_store:
3118 return RValue::get(EmitNontemporalStore(*this, E));
3119 case Builtin::BI__c11_atomic_is_lock_free:
3120 case Builtin::BI__atomic_is_lock_free: {
3121 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3122 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3123 // _Atomic(T) is always properly-aligned.
3124 const char *LibCallName = "__atomic_is_lock_free";
3125 CallArgList Args;
3126 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3127 getContext().getSizeType());
3128 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3129 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3130 getContext().VoidPtrTy);
3131 else
3132 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3133 getContext().VoidPtrTy);
3134 const CGFunctionInfo &FuncInfo =
3135 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3136 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3137 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3138 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3139 ReturnValueSlot(), Args);
3140 }
3141
3142 case Builtin::BI__atomic_test_and_set: {
3143 // Look at the argument type to determine whether this is a volatile
3144 // operation. The parameter type is always volatile.
3145 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3146 bool Volatile =
3147 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3148
3149 Value *Ptr = EmitScalarExpr(E->getArg(0));
3150 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3151 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3152 Value *NewVal = Builder.getInt8(1);
3153 Value *Order = EmitScalarExpr(E->getArg(1));
3154 if (isa<llvm::ConstantInt>(Order)) {
3155 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3156 AtomicRMWInst *Result = nullptr;
3157 switch (ord) {
3158 case 0: // memory_order_relaxed
3159 default: // invalid order
3160 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3161 llvm::AtomicOrdering::Monotonic);
3162 break;
3163 case 1: // memory_order_consume
3164 case 2: // memory_order_acquire
3165 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3166 llvm::AtomicOrdering::Acquire);
3167 break;
3168 case 3: // memory_order_release
3169 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3170 llvm::AtomicOrdering::Release);
3171 break;
3172 case 4: // memory_order_acq_rel
3173
3174 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3175 llvm::AtomicOrdering::AcquireRelease);
3176 break;
3177 case 5: // memory_order_seq_cst
3178 Result = Builder.CreateAtomicRMW(
3179 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3180 llvm::AtomicOrdering::SequentiallyConsistent);
3181 break;
3182 }
3183 Result->setVolatile(Volatile);
3184 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3185 }
3186
3187 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3188
3189 llvm::BasicBlock *BBs[5] = {
3190 createBasicBlock("monotonic", CurFn),
3191 createBasicBlock("acquire", CurFn),
3192 createBasicBlock("release", CurFn),
3193 createBasicBlock("acqrel", CurFn),
3194 createBasicBlock("seqcst", CurFn)
3195 };
3196 llvm::AtomicOrdering Orders[5] = {
3197 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3198 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3199 llvm::AtomicOrdering::SequentiallyConsistent};
3200
3201 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3202 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3203
3204 Builder.SetInsertPoint(ContBB);
3205 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3206
3207 for (unsigned i = 0; i < 5; ++i) {
3208 Builder.SetInsertPoint(BBs[i]);
3209 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3210 Ptr, NewVal, Orders[i]);
3211 RMW->setVolatile(Volatile);
3212 Result->addIncoming(RMW, BBs[i]);
3213 Builder.CreateBr(ContBB);
3214 }
3215
3216 SI->addCase(Builder.getInt32(0), BBs[0]);
3217 SI->addCase(Builder.getInt32(1), BBs[1]);
3218 SI->addCase(Builder.getInt32(2), BBs[1]);
3219 SI->addCase(Builder.getInt32(3), BBs[2]);
3220 SI->addCase(Builder.getInt32(4), BBs[3]);
3221 SI->addCase(Builder.getInt32(5), BBs[4]);
3222
3223 Builder.SetInsertPoint(ContBB);
3224 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3225 }
3226
3227 case Builtin::BI__atomic_clear: {
3228 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3229 bool Volatile =
3230 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3231
3232 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3233 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3234 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3235 Value *NewVal = Builder.getInt8(0);
3236 Value *Order = EmitScalarExpr(E->getArg(1));
3237 if (isa<llvm::ConstantInt>(Order)) {
3238 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3239 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3240 switch (ord) {
3241 case 0: // memory_order_relaxed
3242 default: // invalid order
3243 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3244 break;
3245 case 3: // memory_order_release
3246 Store->setOrdering(llvm::AtomicOrdering::Release);
3247 break;
3248 case 5: // memory_order_seq_cst
3249 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3250 break;
3251 }
3252 return RValue::get(nullptr);
3253 }
3254
3255 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3256
3257 llvm::BasicBlock *BBs[3] = {
3258 createBasicBlock("monotonic", CurFn),
3259 createBasicBlock("release", CurFn),
3260 createBasicBlock("seqcst", CurFn)
3261 };
3262 llvm::AtomicOrdering Orders[3] = {
3263 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3264 llvm::AtomicOrdering::SequentiallyConsistent};
3265
3266 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3267 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3268
3269 for (unsigned i = 0; i < 3; ++i) {
3270 Builder.SetInsertPoint(BBs[i]);
3271 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3272 Store->setOrdering(Orders[i]);
3273 Builder.CreateBr(ContBB);
3274 }
3275
3276 SI->addCase(Builder.getInt32(0), BBs[0]);
3277 SI->addCase(Builder.getInt32(3), BBs[1]);
3278 SI->addCase(Builder.getInt32(5), BBs[2]);
3279
3280 Builder.SetInsertPoint(ContBB);
3281 return RValue::get(nullptr);
3282 }
3283
3284 case Builtin::BI__atomic_thread_fence:
3285 case Builtin::BI__atomic_signal_fence:
3286 case Builtin::BI__c11_atomic_thread_fence:
3287 case Builtin::BI__c11_atomic_signal_fence: {
3288 llvm::SyncScope::ID SSID;
3289 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3290 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3291 SSID = llvm::SyncScope::SingleThread;
3292 else
3293 SSID = llvm::SyncScope::System;
3294 Value *Order = EmitScalarExpr(E->getArg(0));
3295 if (isa<llvm::ConstantInt>(Order)) {
3296 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3297 switch (ord) {
3298 case 0: // memory_order_relaxed
3299 default: // invalid order
3300 break;
3301 case 1: // memory_order_consume
3302 case 2: // memory_order_acquire
3303 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3304 break;
3305 case 3: // memory_order_release
3306 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3307 break;
3308 case 4: // memory_order_acq_rel
3309 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3310 break;
3311 case 5: // memory_order_seq_cst
3312 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3313 break;
3314 }
3315 return RValue::get(nullptr);
3316 }
3317
3318 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3319 AcquireBB = createBasicBlock("acquire", CurFn);
3320 ReleaseBB = createBasicBlock("release", CurFn);
3321 AcqRelBB = createBasicBlock("acqrel", CurFn);
3322 SeqCstBB = createBasicBlock("seqcst", CurFn);
3323 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3324
3325 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3326 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3327
3328 Builder.SetInsertPoint(AcquireBB);
3329 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3330 Builder.CreateBr(ContBB);
3331 SI->addCase(Builder.getInt32(1), AcquireBB);
3332 SI->addCase(Builder.getInt32(2), AcquireBB);
3333
3334 Builder.SetInsertPoint(ReleaseBB);
3335 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3336 Builder.CreateBr(ContBB);
3337 SI->addCase(Builder.getInt32(3), ReleaseBB);
3338
3339 Builder.SetInsertPoint(AcqRelBB);
3340 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3341 Builder.CreateBr(ContBB);
3342 SI->addCase(Builder.getInt32(4), AcqRelBB);
3343
3344 Builder.SetInsertPoint(SeqCstBB);
3345 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3346 Builder.CreateBr(ContBB);
3347 SI->addCase(Builder.getInt32(5), SeqCstBB);
3348
3349 Builder.SetInsertPoint(ContBB);
3350 return RValue::get(nullptr);
3351 }
3352
3353 case Builtin::BI__builtin_signbit:
3354 case Builtin::BI__builtin_signbitf:
3355 case Builtin::BI__builtin_signbitl: {
3356 return RValue::get(
3357 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3358 ConvertType(E->getType())));
3359 }
3360 case Builtin::BI__warn_memset_zero_len:
3361 return RValue::getIgnored();
3362 case Builtin::BI__annotation: {
3363 // Re-encode each wide string to UTF8 and make an MDString.
3364 SmallVector<Metadata *, 1> Strings;
3365 for (const Expr *Arg : E->arguments()) {
3366 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3367 assert(Str->getCharByteWidth() == 2);
3368 StringRef WideBytes = Str->getBytes();
3369 std::string StrUtf8;
3370 if (!convertUTF16ToUTF8String(
3371 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3372 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3373 continue;
3374 }
3375 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3376 }
3377
3378 // Build an MDTuple of MDStrings and emit the intrinsic call.
3379 llvm::Function *F =
3380 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3381 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3382 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3383 return RValue::getIgnored();
3384 }
3385 case Builtin::BI__builtin_annotation: {
3386 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3387 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3388 AnnVal->getType());
3389
3390 // Get the annotation string, go through casts. Sema requires this to be a
3391 // non-wide string literal, potentially casted, so the cast<> is safe.
3392 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3393 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3394 return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
3395 }
3396 case Builtin::BI__builtin_addcb:
3397 case Builtin::BI__builtin_addcs:
3398 case Builtin::BI__builtin_addc:
3399 case Builtin::BI__builtin_addcl:
3400 case Builtin::BI__builtin_addcll:
3401 case Builtin::BI__builtin_subcb:
3402 case Builtin::BI__builtin_subcs:
3403 case Builtin::BI__builtin_subc:
3404 case Builtin::BI__builtin_subcl:
3405 case Builtin::BI__builtin_subcll: {
3406
3407 // We translate all of these builtins from expressions of the form:
3408 // int x = ..., y = ..., carryin = ..., carryout, result;
3409 // result = __builtin_addc(x, y, carryin, &carryout);
3410 //
3411 // to LLVM IR of the form:
3412 //
3413 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
3414 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
3415 // %carry1 = extractvalue {i32, i1} %tmp1, 1
3416 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
3417 // i32 %carryin)
3418 // %result = extractvalue {i32, i1} %tmp2, 0
3419 // %carry2 = extractvalue {i32, i1} %tmp2, 1
3420 // %tmp3 = or i1 %carry1, %carry2
3421 // %tmp4 = zext i1 %tmp3 to i32
3422 // store i32 %tmp4, i32* %carryout
3423
3424 // Scalarize our inputs.
3425 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3426 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3427 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
3428 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
3429
3430 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
3431 llvm::Intrinsic::ID IntrinsicId;
3432 switch (BuiltinID) {
3433 default: llvm_unreachable("Unknown multiprecision builtin id.")::llvm::llvm_unreachable_internal("Unknown multiprecision builtin id."
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 3433)
;
3434 case Builtin::BI__builtin_addcb:
3435 case Builtin::BI__builtin_addcs:
3436 case Builtin::BI__builtin_addc:
3437 case Builtin::BI__builtin_addcl:
3438 case Builtin::BI__builtin_addcll:
3439 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3440 break;
3441 case Builtin::BI__builtin_subcb:
3442 case Builtin::BI__builtin_subcs:
3443 case Builtin::BI__builtin_subc:
3444 case Builtin::BI__builtin_subcl:
3445 case Builtin::BI__builtin_subcll:
3446 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3447 break;
3448 }
3449
3450 // Construct our resulting LLVM IR expression.
3451 llvm::Value *Carry1;
3452 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
3453 X, Y, Carry1);
3454 llvm::Value *Carry2;
3455 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
3456 Sum1, Carryin, Carry2);
3457 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
3458 X->getType());
3459 Builder.CreateStore(CarryOut, CarryOutPtr);
3460 return RValue::get(Sum2);
3461 }
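
An assumed C-level reference of the lowering documented in the comment block above: two add-with-overflow steps whose carries are OR'ed into *carryout.

unsigned addc(unsigned x, unsigned y, unsigned carryin, unsigned *carryout) {
  unsigned sum1, sum2;
  bool c1 = __builtin_uadd_overflow(x, y, &sum1);          // first uadd.with.overflow
  bool c2 = __builtin_uadd_overflow(sum1, carryin, &sum2); // second step
  *carryout = c1 | c2;                                     // or i1 %carry1, %carry2
  return sum2;
}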
3462
3463 case Builtin::BI__builtin_add_overflow:
3464 case Builtin::BI__builtin_sub_overflow:
3465 case Builtin::BI__builtin_mul_overflow: {
3466 const clang::Expr *LeftArg = E->getArg(0);
3467 const clang::Expr *RightArg = E->getArg(1);
3468 const clang::Expr *ResultArg = E->getArg(2);
3469
3470 clang::QualType ResultQTy =
3471 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
3472
3473 WidthAndSignedness LeftInfo =
3474 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
3475 WidthAndSignedness RightInfo =
3476 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
3477 WidthAndSignedness ResultInfo =
3478 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
3479
3480 // Handle mixed-sign multiplication as a special case, because adding
3481 // runtime or backend support for our generic irgen would be too expensive.
3482 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
3483 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
3484 RightInfo, ResultArg, ResultQTy,
3485 ResultInfo);
3486
3487 WidthAndSignedness EncompassingInfo =
3488 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
3489
3490 llvm::Type *EncompassingLLVMTy =
3491 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
3492
3493 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
3494
3495 llvm::Intrinsic::ID IntrinsicId;
3496 switch (BuiltinID) {
3497 default:
3498 llvm_unreachable("Unknown overflow builtin id.")::llvm::llvm_unreachable_internal("Unknown overflow builtin id."
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 3498)
;
3499 case Builtin::BI__builtin_add_overflow:
3500 IntrinsicId = EncompassingInfo.Signed
3501 ? llvm::Intrinsic::sadd_with_overflow
3502 : llvm::Intrinsic::uadd_with_overflow;
3503 break;
3504 case Builtin::BI__builtin_sub_overflow:
3505 IntrinsicId = EncompassingInfo.Signed
3506 ? llvm::Intrinsic::ssub_with_overflow
3507 : llvm::Intrinsic::usub_with_overflow;
3508 break;
3509 case Builtin::BI__builtin_mul_overflow:
3510 IntrinsicId = EncompassingInfo.Signed
3511 ? llvm::Intrinsic::smul_with_overflow
3512 : llvm::Intrinsic::umul_with_overflow;
3513 break;
3514 }
3515
3516 llvm::Value *Left = EmitScalarExpr(LeftArg);
3517 llvm::Value *Right = EmitScalarExpr(RightArg);
3518 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
3519
3520 // Extend each operand to the encompassing type.
3521 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
3522 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
3523
3524 // Perform the operation on the extended values.
3525 llvm::Value *Overflow, *Result;
3526 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
3527
3528 if (EncompassingInfo.Width > ResultInfo.Width) {
3529 // The encompassing type is wider than the result type, so we need to
3530 // truncate it.
3531 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
3532
3533 // To see if the truncation caused an overflow, we will extend
3534 // the result and then compare it to the original result.
3535 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
3536 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
3537 llvm::Value *TruncationOverflow =
3538 Builder.CreateICmpNE(Result, ResultTruncExt);
3539
3540 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
3541 Result = ResultTrunc;
3542 }
3543
3544 // Finally, store the result using the pointer.
3545 bool isVolatile =
3546 ResultArg->getType()->getPointeeType().isVolatileQualified();
3547 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
3548
3549 return RValue::get(Overflow);
3550 }
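
An assumed usage sketch for this generic path: operand and result types may differ in width and signedness, and overflow from the final truncation is folded into the returned flag.

#include <cstdint>

bool add_to_i16(int32_t a, uint32_t b, int16_t *out) {
  return __builtin_add_overflow(a, b, out); // true if the exact sum does not fit in *out
}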
3551
3552 case Builtin::BI__builtin_uadd_overflow:
3553 case Builtin::BI__builtin_uaddl_overflow:
3554 case Builtin::BI__builtin_uaddll_overflow:
3555 case Builtin::BI__builtin_usub_overflow:
3556 case Builtin::BI__builtin_usubl_overflow:
3557 case Builtin::BI__builtin_usubll_overflow:
3558 case Builtin::BI__builtin_umul_overflow:
3559 case Builtin::BI__builtin_umull_overflow:
3560 case Builtin::BI__builtin_umulll_overflow:
3561 case Builtin::BI__builtin_sadd_overflow:
3562 case Builtin::BI__builtin_saddl_overflow:
3563 case Builtin::BI__builtin_saddll_overflow:
3564 case Builtin::BI__builtin_ssub_overflow:
3565 case Builtin::BI__builtin_ssubl_overflow:
3566 case Builtin::BI__builtin_ssubll_overflow:
3567 case Builtin::BI__builtin_smul_overflow:
3568 case Builtin::BI__builtin_smull_overflow:
3569 case Builtin::BI__builtin_smulll_overflow: {
3570
3571 // We translate all of these builtins directly to the relevant llvm IR node.
3572
3573 // Scalarize our inputs.
3574 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3575 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3576 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
3577
3578 // Decide which of the overflow intrinsics we are lowering to:
3579 llvm::Intrinsic::ID IntrinsicId;
3580 switch (BuiltinID) {
3581 default: llvm_unreachable("Unknown overflow builtin id.")::llvm::llvm_unreachable_internal("Unknown overflow builtin id."
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 3581)
;
3582 case Builtin::BI__builtin_uadd_overflow:
3583 case Builtin::BI__builtin_uaddl_overflow:
3584 case Builtin::BI__builtin_uaddll_overflow:
3585 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3586 break;
3587 case Builtin::BI__builtin_usub_overflow:
3588 case Builtin::BI__builtin_usubl_overflow:
3589 case Builtin::BI__builtin_usubll_overflow:
3590 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3591 break;
3592 case Builtin::BI__builtin_umul_overflow:
3593 case Builtin::BI__builtin_umull_overflow:
3594 case Builtin::BI__builtin_umulll_overflow:
3595 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
3596 break;
3597 case Builtin::BI__builtin_sadd_overflow:
3598 case Builtin::BI__builtin_saddl_overflow:
3599 case Builtin::BI__builtin_saddll_overflow:
3600 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
3601 break;
3602 case Builtin::BI__builtin_ssub_overflow:
3603 case Builtin::BI__builtin_ssubl_overflow:
3604 case Builtin::BI__builtin_ssubll_overflow:
3605 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
3606 break;
3607 case Builtin::BI__builtin_smul_overflow:
3608 case Builtin::BI__builtin_smull_overflow:
3609 case Builtin::BI__builtin_smulll_overflow:
3610 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
3611 break;
3612 }
3613
3614
3615 llvm::Value *Carry;
3616 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
3617 Builder.CreateStore(Sum, SumOutPtr);
3618
3619 return RValue::get(Carry);
3620 }
3621 case Builtin::BI__builtin_addressof:
3622 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
3623 case Builtin::BI__builtin_operator_new:
3624 return EmitBuiltinNewDeleteCall(
3625 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
3626 case Builtin::BI__builtin_operator_delete:
3627 return EmitBuiltinNewDeleteCall(
3628 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
3629
3630 case Builtin::BI__builtin_is_aligned:
3631 return EmitBuiltinIsAligned(E);
3632 case Builtin::BI__builtin_align_up:
3633 return EmitBuiltinAlignTo(E, true);
3634 case Builtin::BI__builtin_align_down:
3635 return EmitBuiltinAlignTo(E, false);
3636
3637 case Builtin::BI__noop:
3638 // __noop always evaluates to an integer literal zero.
3639 return RValue::get(ConstantInt::get(IntTy, 0));
3640 case Builtin::BI__builtin_call_with_static_chain: {
3641 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
3642 const Expr *Chain = E->getArg(1);
3643 return EmitCall(Call->getCallee()->getType(),
3644 EmitCallee(Call->getCallee()), Call, ReturnValue,
3645 EmitScalarExpr(Chain));
3646 }
3647 case Builtin::BI_InterlockedExchange8:
3648 case Builtin::BI_InterlockedExchange16:
3649 case Builtin::BI_InterlockedExchange:
3650 case Builtin::BI_InterlockedExchangePointer:
3651 return RValue::get(
3652 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
3653 case Builtin::BI_InterlockedCompareExchangePointer:
3654 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
3655 llvm::Type *RTy;
3656 llvm::IntegerType *IntType =
3657 IntegerType::get(getLLVMContext(),
3658 getContext().getTypeSize(E->getType()));
3659 llvm::Type *IntPtrType = IntType->getPointerTo();
3660
3661 llvm::Value *Destination =
3662 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
3663
3664 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
3665 RTy = Exchange->getType();
3666 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
3667
3668 llvm::Value *Comparand =
3669 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
3670
3671 auto Ordering =
3672 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
3673 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
3674
3675 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
3676 Ordering, Ordering);
3677 Result->setVolatile(true);
3678
3679 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
3680 0),
3681 RTy));
3682 }
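// Illustrative sketch (assumption, not original source): the cmpxchg above
// yields a {value, success} pair, and element 0 is the value previously held
// at Destination. That matches the MSVC contract where
//   void *Old = _InterlockedCompareExchangePointer(&Ptr, New, Expected);
// returns the pointer that was stored in Ptr before the operation.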
3683 case Builtin::BI_InterlockedCompareExchange8:
3684 case Builtin::BI_InterlockedCompareExchange16:
3685 case Builtin::BI_InterlockedCompareExchange:
3686 case Builtin::BI_InterlockedCompareExchange64:
3687 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
3688 case Builtin::BI_InterlockedIncrement16:
3689 case Builtin::BI_InterlockedIncrement:
3690 return RValue::get(
3691 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
3692 case Builtin::BI_InterlockedDecrement16:
3693 case Builtin::BI_InterlockedDecrement:
3694 return RValue::get(
3695 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
3696 case Builtin::BI_InterlockedAnd8:
3697 case Builtin::BI_InterlockedAnd16:
3698 case Builtin::BI_InterlockedAnd:
3699 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
3700 case Builtin::BI_InterlockedExchangeAdd8:
3701 case Builtin::BI_InterlockedExchangeAdd16:
3702 case Builtin::BI_InterlockedExchangeAdd:
3703 return RValue::get(
3704 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
3705 case Builtin::BI_InterlockedExchangeSub8:
3706 case Builtin::BI_InterlockedExchangeSub16:
3707 case Builtin::BI_InterlockedExchangeSub:
3708 return RValue::get(
3709 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
3710 case Builtin::BI_InterlockedOr8:
3711 case Builtin::BI_InterlockedOr16:
3712 case Builtin::BI_InterlockedOr:
3713 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
3714 case Builtin::BI_InterlockedXor8:
3715 case Builtin::BI_InterlockedXor16:
3716 case Builtin::BI_InterlockedXor:
3717 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
3718
3719 case Builtin::BI_bittest64:
3720 case Builtin::BI_bittest:
3721 case Builtin::BI_bittestandcomplement64:
3722 case Builtin::BI_bittestandcomplement:
3723 case Builtin::BI_bittestandreset64:
3724 case Builtin::BI_bittestandreset:
3725 case Builtin::BI_bittestandset64:
3726 case Builtin::BI_bittestandset:
3727 case Builtin::BI_interlockedbittestandreset:
3728 case Builtin::BI_interlockedbittestandreset64:
3729 case Builtin::BI_interlockedbittestandset64:
3730 case Builtin::BI_interlockedbittestandset:
3731 case Builtin::BI_interlockedbittestandset_acq:
3732 case Builtin::BI_interlockedbittestandset_rel:
3733 case Builtin::BI_interlockedbittestandset_nf:
3734 case Builtin::BI_interlockedbittestandreset_acq:
3735 case Builtin::BI_interlockedbittestandreset_rel:
3736 case Builtin::BI_interlockedbittestandreset_nf:
3737 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
3738
3739 // These builtins exist to emit regular volatile loads and stores not
3740 // affected by the -fms-volatile setting.
3741 case Builtin::BI__iso_volatile_load8:
3742 case Builtin::BI__iso_volatile_load16:
3743 case Builtin::BI__iso_volatile_load32:
3744 case Builtin::BI__iso_volatile_load64:
3745 return RValue::get(EmitISOVolatileLoad(*this, E));
3746 case Builtin::BI__iso_volatile_store8:
3747 case Builtin::BI__iso_volatile_store16:
3748 case Builtin::BI__iso_volatile_store32:
3749 case Builtin::BI__iso_volatile_store64:
3750 return RValue::get(EmitISOVolatileStore(*this, E));
3751
3752 case Builtin::BI__exception_code:
3753 case Builtin::BI_exception_code:
3754 return RValue::get(EmitSEHExceptionCode());
3755 case Builtin::BI__exception_info:
3756 case Builtin::BI_exception_info:
3757 return RValue::get(EmitSEHExceptionInfo());
3758 case Builtin::BI__abnormal_termination:
3759 case Builtin::BI_abnormal_termination:
3760 return RValue::get(EmitSEHAbnormalTermination());
3761 case Builtin::BI_setjmpex:
3762 if (getTarget().getTriple().isOSMSVCRT())
3763 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3764 break;
3765 case Builtin::BI_setjmp:
3766 if (getTarget().getTriple().isOSMSVCRT()) {
3767 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
3768 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
3769 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
3770 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3771 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
3772 }
3773 break;
3774
3775 case Builtin::BI__GetExceptionInfo: {
3776 if (llvm::GlobalVariable *GV =
3777 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
3778 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
3779 break;
3780 }
3781
3782 case Builtin::BI__fastfail:
3783 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
3784
3785 case Builtin::BI__builtin_coro_size: {
3786 auto & Context = getContext();
3787 auto SizeTy = Context.getSizeType();
3788 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3789 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
3790 return RValue::get(Builder.CreateCall(F));
3791 }
3792
3793 case Builtin::BI__builtin_coro_id:
3794 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
3795 case Builtin::BI__builtin_coro_promise:
3796 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
3797 case Builtin::BI__builtin_coro_resume:
3798 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
3799 case Builtin::BI__builtin_coro_frame:
3800 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
3801 case Builtin::BI__builtin_coro_noop:
3802 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
3803 case Builtin::BI__builtin_coro_free:
3804 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
3805 case Builtin::BI__builtin_coro_destroy:
3806 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
3807 case Builtin::BI__builtin_coro_done:
3808 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
3809 case Builtin::BI__builtin_coro_alloc:
3810 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
3811 case Builtin::BI__builtin_coro_begin:
3812 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
3813 case Builtin::BI__builtin_coro_end:
3814 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
3815 case Builtin::BI__builtin_coro_suspend:
3816 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
3817 case Builtin::BI__builtin_coro_param:
3818 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
3819
3820 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
3821 case Builtin::BIread_pipe:
3822 case Builtin::BIwrite_pipe: {
3823 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3824 *Arg1 = EmitScalarExpr(E->getArg(1));
3825 CGOpenCLRuntime OpenCLRT(CGM);
3826 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3827 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3828
3829 // Type of the generic packet parameter.
3830 unsigned GenericAS =
3831 getContext().getTargetAddressSpace(LangAS::opencl_generic);
3832 llvm::Type *I8PTy = llvm::PointerType::get(
3833 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
3834
3835 // Testing which overloaded version we should generate the call for.
3836 if (2U == E->getNumArgs()) {
3837 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
3838 : "__write_pipe_2";
3839 // Creating a generic function type to be able to call with any builtin or
3840 // user defined type.
3841 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
3842 llvm::FunctionType *FTy = llvm::FunctionType::get(
3843 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3844 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
3845 return RValue::get(
3846 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3847 {Arg0, BCast, PacketSize, PacketAlign}));
3848 } else {
3849 assert(4 == E->getNumArgs() &&
3850 "Illegal number of parameters to pipe function");
3851 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
3852 : "__write_pipe_4";
3853
3854 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
3855 Int32Ty, Int32Ty};
3856 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
3857 *Arg3 = EmitScalarExpr(E->getArg(3));
3858 llvm::FunctionType *FTy = llvm::FunctionType::get(
3859 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3860 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
3861 // We know the third argument is an integer type, but we may need to cast
3862 // it to i32.
3863 if (Arg2->getType() != Int32Ty)
3864 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
3865 return RValue::get(Builder.CreateCall(
3866 CGM.CreateRuntimeFunction(FTy, Name),
3867 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
3868 }
3869 }
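// Illustrative sketch (assumption, not original source): in OpenCL C the two
// overloads handled above look like
//   int ok2 = read_pipe(p, &v);               // lowered to __read_pipe_2
//   int ok4 = read_pipe(p, rid, index, &v);   // lowered to __read_pipe_4
// with the packet size and alignment appended as extra i32 arguments.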
3870 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
3871 // functions
3872 case Builtin::BIreserve_read_pipe:
3873 case Builtin::BIreserve_write_pipe:
3874 case Builtin::BIwork_group_reserve_read_pipe:
3875 case Builtin::BIwork_group_reserve_write_pipe:
3876 case Builtin::BIsub_group_reserve_read_pipe:
3877 case Builtin::BIsub_group_reserve_write_pipe: {
3878 // Composing the mangled name for the function.
3879 const char *Name;
3880 if (BuiltinID == Builtin::BIreserve_read_pipe)
3881 Name = "__reserve_read_pipe";
3882 else if (BuiltinID == Builtin::BIreserve_write_pipe)
3883 Name = "__reserve_write_pipe";
3884 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
3885 Name = "__work_group_reserve_read_pipe";
3886 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
3887 Name = "__work_group_reserve_write_pipe";
3888 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
3889 Name = "__sub_group_reserve_read_pipe";
3890 else
3891 Name = "__sub_group_reserve_write_pipe";
3892
3893 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3894 *Arg1 = EmitScalarExpr(E->getArg(1));
3895 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
3896 CGOpenCLRuntime OpenCLRT(CGM);
3897 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3898 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3899
3900 // Building the generic function prototype.
3901 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
3902 llvm::FunctionType *FTy = llvm::FunctionType::get(
3903 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3904 // We know the second argument is an integer type, but we may need to cast
3905 // it to i32.
3906 if (Arg1->getType() != Int32Ty)
3907 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
3908 return RValue::get(
3909 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3910 {Arg0, Arg1, PacketSize, PacketAlign}));
3911 }
3912 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
3913 // functions
3914 case Builtin::BIcommit_read_pipe:
3915 case Builtin::BIcommit_write_pipe:
3916 case Builtin::BIwork_group_commit_read_pipe:
3917 case Builtin::BIwork_group_commit_write_pipe:
3918 case Builtin::BIsub_group_commit_read_pipe:
3919 case Builtin::BIsub_group_commit_write_pipe: {
3920 const char *Name;
3921 if (BuiltinID == Builtin::BIcommit_read_pipe)
3922 Name = "__commit_read_pipe";
3923 else if (BuiltinID == Builtin::BIcommit_write_pipe)
3924 Name = "__commit_write_pipe";
3925 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
3926 Name = "__work_group_commit_read_pipe";
3927 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
3928 Name = "__work_group_commit_write_pipe";
3929 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
3930 Name = "__sub_group_commit_read_pipe";
3931 else
3932 Name = "__sub_group_commit_write_pipe";
3933
3934 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3935 *Arg1 = EmitScalarExpr(E->getArg(1));
3936 CGOpenCLRuntime OpenCLRT(CGM);
3937 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3938 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3939
3940 // Building the generic function prototype.
3941 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
3942 llvm::FunctionType *FTy =
3943 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
3944 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3945
3946 return RValue::get(
3947 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3948 {Arg0, Arg1, PacketSize, PacketAlign}));
3949 }
3950 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
3951 case Builtin::BIget_pipe_num_packets:
3952 case Builtin::BIget_pipe_max_packets: {
3953 const char *BaseName;
3954 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
3955 if (BuiltinID == Builtin::BIget_pipe_num_packets)
3956 BaseName = "__get_pipe_num_packets";
3957 else
3958 BaseName = "__get_pipe_max_packets";
3959 std::string Name = std::string(BaseName) +
3960 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
3961
3962 // Building the generic function prototype.
3963 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3964 CGOpenCLRuntime OpenCLRT(CGM);
3965 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3966 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3967 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
3968 llvm::FunctionType *FTy = llvm::FunctionType::get(
3969 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3970
3971 return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3972 {Arg0, PacketSize, PacketAlign}));
3973 }
3974
3975 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
3976 case Builtin::BIto_global:
3977 case Builtin::BIto_local:
3978 case Builtin::BIto_private: {
3979 auto Arg0 = EmitScalarExpr(E->getArg(0));
3980 auto NewArgT = llvm::PointerType::get(Int8Ty,
3981 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
3982 auto NewRetT = llvm::PointerType::get(Int8Ty,
3983 CGM.getContext().getTargetAddressSpace(
3984 E->getType()->getPointeeType().getAddressSpace()));
3985 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
3986 llvm::Value *NewArg;
3987 if (Arg0->getType()->getPointerAddressSpace() !=
3988 NewArgT->getPointerAddressSpace())
3989 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
3990 else
3991 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
3992 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
3993 auto NewCall =
3994 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
3995 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
3996 ConvertType(E->getType())));
3997 }
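// Illustrative sketch (assumption, not original source): a call such as
//   global int *g = to_global(p);
// is lowered to the runtime function __to_global taking a generic i8*
// argument, and the result is cast back to the expected pointer type.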
3998
3999 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
4000 // It contains four different overload formats specified in Table 6.13.17.1.
4001 case Builtin::BIenqueue_kernel: {
4002 StringRef Name; // Generated function call name
4003 unsigned NumArgs = E->getNumArgs();
4004
4005 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4006 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4007 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4008
4009 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4010 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4011 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4012 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4013 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4014
4015 if (NumArgs == 4) {
4016 // The most basic form of the call with parameters:
4017 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4018 Name = "__enqueue_kernel_basic";
4019 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4020 GenericVoidPtrTy};
4021 llvm::FunctionType *FTy = llvm::FunctionType::get(
4022 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4023
4024 auto Info =
4025 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4026 llvm::Value *Kernel =
4027 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4028 llvm::Value *Block =
4029 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4030
4031 AttrBuilder B;
4032 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4033 llvm::AttributeList ByValAttrSet =
4034 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4035
4036 auto RTCall =
4037 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4038 {Queue, Flags, Range, Kernel, Block});
4039 RTCall->setAttributes(ByValAttrSet);
4040 return RValue::get(RTCall);
4041 }
4042 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");

4043
4044 // Create a temporary array to hold the sizes of local pointer arguments
4045 // for the block. \p First is the position of the first size argument.
4046 auto CreateArrayForSizeVar = [=](unsigned First)
4047 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4048 llvm::APInt ArraySize(32, NumArgs - First);
4049 QualType SizeArrayTy = getContext().getConstantArrayType(
4050 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4051 /*IndexTypeQuals=*/0);
4052 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4053 llvm::Value *TmpPtr = Tmp.getPointer();
4054 llvm::Value *TmpSize = EmitLifetimeStart(
4055 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4056 llvm::Value *ElemPtr;
4057 // Each of the following arguments specifies the size of the corresponding
4058 // argument passed to the enqueued block.
4059 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4060 for (unsigned I = First; I < NumArgs; ++I) {
4061 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4062 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
4063 if (I == First)
4064 ElemPtr = GEP;
4065 auto *V =
4066 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4067 Builder.CreateAlignedStore(
4068 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4069 }
4070 return std::tie(ElemPtr, TmpSize, TmpPtr);
4071 };
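// Illustrative sketch (assumption, not original source): for a call like
//   enqueue_kernel(q, flags, ndr, block, sz0, sz1);
// the temporary "block_sizes" array built by this lambda holds {sz0, sz1},
// each widened or truncated to size_t, and ElemPtr points at its first
// element.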
4072
4073 // Could have events and/or varargs.
4074 if (E->getArg(3)->getType()->isBlockPointerType()) {
4075 // No events passed, but has variadic arguments.
4076 Name = "__enqueue_kernel_varargs";
4077 auto Info =
4078 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4079 llvm::Value *Kernel =
4080 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4081 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4082 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4083 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4084
4085 // Create a vector of the arguments, as well as a constant value to
4086 // express to the runtime the number of variadic arguments.
4087 llvm::Value *const Args[] = {Queue, Flags,
4088 Range, Kernel,
4089 Block, ConstantInt::get(IntTy, NumArgs - 4),
4090 ElemPtr};
4091 llvm::Type *const ArgTys[] = {
4092 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4093 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4094
4095 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4096 auto Call = RValue::get(
4097 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4098 if (TmpSize)
4099 EmitLifetimeEnd(TmpSize, TmpPtr);
4100 return Call;
4101 }
4102 // Any calls now have event arguments passed.
4103 if (NumArgs >= 7) {
4104 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4105 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4106 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4107
4108 llvm::Value *NumEvents =
4109 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4110
4111 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
4112 // to be a null pointer constant (including `0` literal), we can take it
4113 // into account and emit null pointer directly.
4114 llvm::Value *EventWaitList = nullptr;
4115 if (E->getArg(4)->isNullPointerConstant(
4116 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4117 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4118 } else {
4119 EventWaitList = E->getArg(4)->getType()->isArrayType()
4120 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4121 : EmitScalarExpr(E->getArg(4));
4122 // Convert to generic address space.
4123 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4124 }
4125 llvm::Value *EventRet = nullptr;
4126 if (E->getArg(5)->isNullPointerConstant(
4127 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4128 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4129 } else {
4130 EventRet =
4131 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4132 }
4133
4134 auto Info =
4135 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4136 llvm::Value *Kernel =
4137 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4138 llvm::Value *Block =
4139 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4140
4141 std::vector<llvm::Type *> ArgTys = {
4142 QueueTy, Int32Ty, RangeTy, Int32Ty,
4143 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4144
4145 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4146 NumEvents, EventWaitList, EventRet,
4147 Kernel, Block};
4148
4149 if (NumArgs == 7) {
4150 // Has events but no variadics.
4151 Name = "__enqueue_kernel_basic_events";
4152 llvm::FunctionType *FTy = llvm::FunctionType::get(
4153 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4154 return RValue::get(
4155 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4156 llvm::ArrayRef<llvm::Value *>(Args)));
4157 }
4158 // Has event info and variadics
4159 // Pass the number of variadics to the runtime function too.
4160 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4161 ArgTys.push_back(Int32Ty);
4162 Name = "__enqueue_kernel_events_varargs";
4163
4164 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4165 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4166 Args.push_back(ElemPtr);
4167 ArgTys.push_back(ElemPtr->getType());
4168
4169 llvm::FunctionType *FTy = llvm::FunctionType::get(
4170 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4171 auto Call =
4172 RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4173 llvm::ArrayRef<llvm::Value *>(Args)));
4174 if (TmpSize)
4175 EmitLifetimeEnd(TmpSize, TmpPtr);
4176 return Call;
4177 }
4178 LLVM_FALLTHROUGH;
4179 }
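// Illustrative summary (assumption, not original source) of the four
// overload forms handled above and the runtime entry points they reach:
//   enqueue_kernel(q, flags, ndr, block)                     -> __enqueue_kernel_basic
//   enqueue_kernel(q, flags, ndr, block, sz...)              -> __enqueue_kernel_varargs
//   enqueue_kernel(q, flags, ndr, n, wait, ret, block)       -> __enqueue_kernel_basic_events
//   enqueue_kernel(q, flags, ndr, n, wait, ret, block, sz...) -> __enqueue_kernel_events_varargs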
4180 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
4181 // parameter.
4182 case Builtin::BIget_kernel_work_group_size: {
4183 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4184 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4185 auto Info =
4186 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4187 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4188 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4189 return RValue::get(Builder.CreateCall(
4190 CGM.CreateRuntimeFunction(
4191 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4192 false),
4193 "__get_kernel_work_group_size_impl"),
4194 {Kernel, Arg}));
4195 }
4196 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4197 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4198 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4199 auto Info =
4200 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4201 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4202 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4203 return RValue::get(Builder.CreateCall(
4204 CGM.CreateRuntimeFunction(
4205 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4206 false),
4207 "__get_kernel_preferred_work_group_size_multiple_impl"),
4208 {Kernel, Arg}));
4209 }
4210 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4211 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4212 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4213 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4214 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4215 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4216 auto Info =
4217 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4218 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4219 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4220 const char *Name =
4221 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4222 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4223 : "__get_kernel_sub_group_count_for_ndrange_impl";
4224 return RValue::get(Builder.CreateCall(
4225 CGM.CreateRuntimeFunction(
4226 llvm::FunctionType::get(
4227 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4228 false),
4229 Name),
4230 {NDRange, Kernel, Block}));
4231 }
4232
4233 case Builtin::BI__builtin_store_half:
4234 case Builtin::BI__builtin_store_halff: {
4235 Value *Val = EmitScalarExpr(E->getArg(0));
4236 Address Address = EmitPointerWithAlignment(E->getArg(1));
4237 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4238 return RValue::get(Builder.CreateStore(HalfVal, Address));
4239 }
4240 case Builtin::BI__builtin_load_half: {
4241 Address Address = EmitPointerWithAlignment(E->getArg(0));
4242 Value *HalfVal = Builder.CreateLoad(Address);
4243 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4244 }
4245 case Builtin::BI__builtin_load_halff: {
4246 Address Address = EmitPointerWithAlignment(E->getArg(0));
4247 Value *HalfVal = Builder.CreateLoad(Address);
4248 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4249 }
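// Illustrative sketch (not part of the original source): these builtins pair
// an FP truncation/extension with a plain load or store, roughly
//   __builtin_store_halff(1.5f, p);      // fptrunc float -> half, then store
//   double d = __builtin_load_half(p);   // load half, then fpext to double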
4250 case Builtin::BIprintf:
4251 if (getTarget().getTriple().isNVPTX())
4252 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4253 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
4254 getLangOpts().HIP)
4255 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
4256 break;
4257 case Builtin::BI__builtin_canonicalize:
4258 case Builtin::BI__builtin_canonicalizef:
4259 case Builtin::BI__builtin_canonicalizef16:
4260 case Builtin::BI__builtin_canonicalizel:
4261 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4262
4263 case Builtin::BI__builtin_thread_pointer: {
4264 if (!getContext().getTargetInfo().isTLSSupported())
4265 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4266 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4267 break;
4268 }
4269 case Builtin::BI__builtin_os_log_format:
4270 return emitBuiltinOSLogFormat(*E);
4271
4272 case Builtin::BI__xray_customevent: {
4273 if (!ShouldXRayInstrumentFunction())
4274 return RValue::getIgnored();
4275
4276 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4277 XRayInstrKind::Custom))
4278 return RValue::getIgnored();
4279
4280 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4281 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4282 return RValue::getIgnored();
4283
4284 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4285 auto FTy = F->getFunctionType();
4286 auto Arg0 = E->getArg(0);
4287 auto Arg0Val = EmitScalarExpr(Arg0);
4288 auto Arg0Ty = Arg0->getType();
4289 auto PTy0 = FTy->getParamType(0);
4290 if (PTy0 != Arg0Val->getType()) {
4291 if (Arg0Ty->isArrayType())
4292 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4293 else
4294 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4295 }
4296 auto Arg1 = EmitScalarExpr(E->getArg(1));
4297 auto PTy1 = FTy->getParamType(1);
4298 if (PTy1 != Arg1->getType())
4299 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4300 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4301 }
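// Illustrative sketch (assumption, not original source): user code typically
// invokes this builtin as
//   __xray_customevent(buf, len);
// which, when instrumentation is enabled, becomes a call to the
// llvm.xray.customevent intrinsic with the pointer and size arguments
// coerced to the intrinsic's parameter types as done above.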
4302
4303 case Builtin::BI__xray_typedevent: {
4304 // TODO: There should be a way to always emit events even if the current
4305 // function is not instrumented. Losing events in a stream can cripple
4306 // a trace.
4307 if (!ShouldXRayInstrumentFunction())
4308 return RValue::getIgnored();
4309
4310 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4311 XRayInstrKind::Typed))
4312 return RValue::getIgnored();
4313
4314 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4315 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
4316 return RValue::getIgnored();
4317
4318 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
4319 auto FTy = F->getFunctionType();
4320 auto Arg0 = EmitScalarExpr(E->getArg(0));
4321 auto PTy0 = FTy->getParamType(0);
4322 if (PTy0 != Arg0->getType())
4323 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
4324 auto Arg1 = E->getArg(1);
4325 auto Arg1Val = EmitScalarExpr(Arg1);
4326 auto Arg1Ty = Arg1->getType();
4327 auto PTy1 = FTy->getParamType(1);
4328 if (PTy1 != Arg1Val->getType()) {
4329 if (Arg1Ty->isArrayType())
4330 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
4331 else
4332 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
4333 }
4334 auto Arg2 = EmitScalarExpr(E->getArg(2));
4335 auto PTy2 = FTy->getParamType(2);
4336 if (PTy2 != Arg2->getType())
4337 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
4338 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
4339 }
4340
4341 case Builtin::BI__builtin_ms_va_start:
4342 case Builtin::BI__builtin_ms_va_end:
4343 return RValue::get(
4344 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
4345 BuiltinID == Builtin::BI__builtin_ms_va_start));
4346
4347 case Builtin::BI__builtin_ms_va_copy: {
4348 // Lower this manually. We can't reliably determine whether or not any
4349 // given va_copy() is for a Win64 va_list from the calling convention
4350 // alone, because it's legal to do this from a System V ABI function.
4351 // With opaque pointer types, we won't have enough information in LLVM
4352 // IR to determine this from the argument types, either. Best to do it
4353 // now, while we have enough information.
4354 Address DestAddr = EmitMSVAListRef(E->getArg(0));
4355 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
4356
4357 llvm::Type *BPP = Int8PtrPtrTy;
4358
4359 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
4360 DestAddr.getAlignment());
4361 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
4362 SrcAddr.getAlignment());
4363
4364 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
4365 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
4366 }
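// Illustrative sketch (not part of the original source): since a Win64
// va_list is just a char*, the lowering above amounts to
//   *(char **)dest = *(char **)src;
// i.e. a single load from the source va_list and a store to the destination.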
4367 }
4368
4369 // If this is an alias for a lib function (e.g. __builtin_sin), emit
4370 // the call using the normal call path, but using the unmangled
4371 // version of the function name.
4372 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
4373 return emitLibraryCall(*this, FD, E,
4374 CGM.getBuiltinLibFunction(FD, BuiltinID));
4375
4376 // If this is a predefined lib function (e.g. malloc), emit the call
4377 // using exactly the normal call path.
4378 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
4379 return emitLibraryCall(*this, FD, E,
4380 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
4381
4382 // Check that a call to a target specific builtin has the correct target
4383 // features.
4384 // This is down here to avoid non-target specific builtins, however, if
4385 // generic builtins start to require generic target features then we
4386 // can move this up to the beginning of the function.
4387 checkTargetFeatures(E, FD);
4388
4389 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
4390 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
4391
4392 // See if we have a target specific intrinsic.
4393 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
4394 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
4395 StringRef Prefix =
4396 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
4397 if (!Prefix.empty()) {
4398 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
4399 // NOTE: we don't need to perform a compatibility flag check here since the
4400 // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
4401 // MS builtins via ALL_MS_LANGUAGES, so they are filtered out earlier.
4402 if (IntrinsicID == Intrinsic::not_intrinsic)
4403 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
4404 }
4405
4406 if (IntrinsicID != Intrinsic::not_intrinsic) {
4407 SmallVector<Value*, 16> Args;
4408
4409 // Find out if any arguments are required to be integer constant
4410 // expressions.
4411 unsigned ICEArguments = 0;
4412 ASTContext::GetBuiltinTypeError Error;
4413 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
4414 assert(Error == ASTContext::GE_None && "Should not codegen an error");
4415
4416 Function *F = CGM.getIntrinsic(IntrinsicID);
4417 llvm::FunctionType *FTy = F->getFunctionType();
4418
4419 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
4420 Value *ArgValue;
4421 // If this is a normal argument, just emit it as a scalar.
4422 if ((ICEArguments & (1 << i)) == 0) {
4423 ArgValue = EmitScalarExpr(E->getArg(i));
4424 } else {
4425 // If this is required to be a constant, constant fold it so that we
4426 // know that the generated intrinsic gets a ConstantInt.
4427 ArgValue = llvm::ConstantInt::get(
4428 getLLVMContext(),
4429 *E->getArg(i)->getIntegerConstantExpr(getContext()));
4430 }
4431
4432 // If the intrinsic arg type is different from the builtin arg type
4433 // we need to do a bit cast.
4434 llvm::Type *PTy = FTy->getParamType(i);
4435 if (PTy != ArgValue->getType()) {
4436 // XXX - vector of pointers?
4437 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
4438 if (PtrTy->getAddressSpace() !=
4439 ArgValue->getType()->getPointerAddressSpace()) {
4440 ArgValue = Builder.CreateAddrSpaceCast(
4441 ArgValue,
4442 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
4443 }
4444 }
4445
4446 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
4447 "Must be able to losslessly bit cast to param");
4448 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
4449 }
4450
4451 Args.push_back(ArgValue);
4452 }
4453
4454 Value *V = Builder.CreateCall(F, Args);
4455 QualType BuiltinRetType = E->getType();
4456
4457 llvm::Type *RetTy = VoidTy;
4458 if (!BuiltinRetType->isVoidType())
4459 RetTy = ConvertType(BuiltinRetType);
4460
4461 if (RetTy != V->getType()) {
4462 // XXX - vector of pointers?
4463 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
4464 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
4465 V = Builder.CreateAddrSpaceCast(
4466 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
4467 }
4468 }
4469
4470 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
4471 "Must be able to losslessly bit cast result type");
4472 V = Builder.CreateBitCast(V, RetTy);
4473 }
4474
4475 return RValue::get(V);
4476 }
4477
4478 // Some target-specific builtins can have aggregate return values, e.g.
4479 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
4480 // ReturnValue to be non-null, so that the target-specific emission code can
4481 // always just emit into it.
4482 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
4483 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
4484 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
4485 ReturnValue = ReturnValueSlot(DestPtr, false);
4486 }
4487
4488 // Now see if we can emit a target-specific builtin.
4489 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
4490 switch (EvalKind) {
4491 case TEK_Scalar:
4492 return RValue::get(V);
4493 case TEK_Aggregate:
4494 return RValue::getAggregate(ReturnValue.getValue(),
4495 ReturnValue.isVolatile());
4496 case TEK_Complex:
4497 llvm_unreachable("No current target builtin returns complex")::llvm::llvm_unreachable_internal("No current target builtin returns complex"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4497)
;
4498 }
4499 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr")::llvm::llvm_unreachable_internal("Bad evaluation kind in EmitBuiltinExpr"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4499)
;
4500 }
4501
4502 ErrorUnsupported(E, "builtin function");
4503
4504 // Unknown builtin, for now just dump it out and return undef.
4505 return GetUndefRValue(E->getType());
4506}
4507
4508static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
4509 unsigned BuiltinID, const CallExpr *E,
4510 ReturnValueSlot ReturnValue,
4511 llvm::Triple::ArchType Arch) {
4512 switch (Arch) {
4513 case llvm::Triple::arm:
4514 case llvm::Triple::armeb:
4515 case llvm::Triple::thumb:
4516 case llvm::Triple::thumbeb:
4517 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
4518 case llvm::Triple::aarch64:
4519 case llvm::Triple::aarch64_32:
4520 case llvm::Triple::aarch64_be:
4521 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
4522 case llvm::Triple::bpfeb:
4523 case llvm::Triple::bpfel:
4524 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
4525 case llvm::Triple::x86:
4526 case llvm::Triple::x86_64:
4527 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
4528 case llvm::Triple::ppc:
4529 case llvm::Triple::ppc64:
4530 case llvm::Triple::ppc64le:
4531 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
4532 case llvm::Triple::r600:
4533 case llvm::Triple::amdgcn:
4534 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
4535 case llvm::Triple::systemz:
4536 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
4537 case llvm::Triple::nvptx:
4538 case llvm::Triple::nvptx64:
4539 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
4540 case llvm::Triple::wasm32:
4541 case llvm::Triple::wasm64:
4542 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
4543 case llvm::Triple::hexagon:
4544 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
4545 default:
4546 return nullptr;
4547 }
4548}
4549
4550Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
4551 const CallExpr *E,
4552 ReturnValueSlot ReturnValue) {
4553 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
4554 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
4555 return EmitTargetArchBuiltinExpr(
4556 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
4557 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
4558 }
4559
4560 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
4561 getTarget().getTriple().getArch());
4562}
4563
4564static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
4565 NeonTypeFlags TypeFlags,
4566 bool HasLegalHalfType = true,
4567 bool V1Ty = false,
4568 bool AllowBFloatArgsAndRet = true) {
4569 int IsQuad = TypeFlags.isQuad();
4570 switch (TypeFlags.getEltType()) {
4571 case NeonTypeFlags::Int8:
4572 case NeonTypeFlags::Poly8:
4573 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
4574 case NeonTypeFlags::Int16:
4575 case NeonTypeFlags::Poly16:
4576 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4577 case NeonTypeFlags::BFloat16:
4578 if (AllowBFloatArgsAndRet)
4579 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
4580 else
4581 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4582 case NeonTypeFlags::Float16:
4583 if (HasLegalHalfType)
4584 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
4585 else
4586 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4587 case NeonTypeFlags::Int32:
4588 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
4589 case NeonTypeFlags::Int64:
4590 case NeonTypeFlags::Poly64:
4591 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
4592 case NeonTypeFlags::Poly128:
4593 // FIXME: i128 and f128 don't get full support in Clang and LLVM.
4594 // A lot of the i128 and f128 API is missing,
4595 // so we use v16i8 to represent poly128 and get it pattern matched.
4596 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
4597 case NeonTypeFlags::Float32:
4598 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
4599 case NeonTypeFlags::Float64:
4600 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
4601 }
4602 llvm_unreachable("Unknown vector element type!")::llvm::llvm_unreachable_internal("Unknown vector element type!"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4602)
;
4603}
4604
4605static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
4606 NeonTypeFlags IntTypeFlags) {
4607 int IsQuad = IntTypeFlags.isQuad();
4608 switch (IntTypeFlags.getEltType()) {
4609 case NeonTypeFlags::Int16:
4610 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
4611 case NeonTypeFlags::Int32:
4612 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
4613 case NeonTypeFlags::Int64:
4614 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
4615 default:
4616 llvm_unreachable("Type can't be converted to floating-point!")::llvm::llvm_unreachable_internal("Type can't be converted to floating-point!"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 4616)
;
4617 }
4618}
4619
4620Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
4621 const ElementCount &Count) {
4622 Value *SV = llvm::ConstantVector::getSplat(Count, C);
4623 return Builder.CreateShuffleVector(V, V, SV, "lane");
4624}
4625
4626Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
4627 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
4628 return EmitNeonSplat(V, C, EC);
4629}
4630
4631Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
4632 const char *name,
4633 unsigned shift, bool rightshift) {
4634 unsigned j = 0;
4635 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
4636 ai != ae; ++ai, ++j) {
4637 if (F->isConstrainedFPIntrinsic())
4638 if (ai->getType()->isMetadataTy())
4639 continue;
4640 if (shift > 0 && shift == j)
4641 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
4642 else
4643 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
4644 }
4645
4646 if (F->isConstrainedFPIntrinsic())
4647 return Builder.CreateConstrainedFPCall(F, Ops, name);
4648 else
4649 return Builder.CreateCall(F, Ops, name);
4650}
4651
4652Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
4653 bool neg) {
4654 int SV = cast<ConstantInt>(V)->getSExtValue();
4655 return ConstantInt::get(Ty, neg ? -SV : SV);
4656}
4657
4658// Right-shift a vector by a constant.
4659Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
4660 llvm::Type *Ty, bool usgn,
4661 const char *name) {
4662 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
4663
4664 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
4665 int EltSize = VTy->getScalarSizeInBits();
4666
4667 Vec = Builder.CreateBitCast(Vec, Ty);
4668
4669 // lshr/ashr are undefined when the shift amount is equal to the vector
4670 // element size.
4671 if (ShiftAmt == EltSize) {
4672 if (usgn) {
4673 // Right-shifting an unsigned value by its size yields 0.
4674 return llvm::ConstantAggregateZero::get(VTy);
4675 } else {
4676 // Right-shifting a signed value by its size is equivalent
4677 // to a shift of size-1.
4678 --ShiftAmt;
4679 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
4680 }
4681 }
4682
4683 Shift = EmitNeonShiftVector(Shift, Ty, false);
4684 if (usgn)
4685 return Builder.CreateLShr(Vec, Shift, name);
4686 else
4687 return Builder.CreateAShr(Vec, Shift, name);
4688}
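// Illustrative sketch (not part of the original source): for a signed
// element-size shift such as vshrq_n_s32(v, 32), the code above rewrites the
// amount to 31 (an arithmetic shift that preserves the sign), while the
// unsigned counterpart vshrq_n_u32(v, 32) folds directly to an all-zero
// vector.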
4689
4690enum {
4691 AddRetType = (1 << 0),
4692 Add1ArgType = (1 << 1),
4693 Add2ArgTypes = (1 << 2),
4694
4695 VectorizeRetType = (1 << 3),
4696 VectorizeArgTypes = (1 << 4),
4697
4698 InventFloatType = (1 << 5),
4699 UnsignedAlts = (1 << 6),
4700
4701 Use64BitVectors = (1 << 7),
4702 Use128BitVectors = (1 << 8),
4703
4704 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
4705 VectorRet = AddRetType | VectorizeRetType,
4706 VectorRetGetArgs01 =
4707 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
4708 FpCmpzModifiers =
4709 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
4710};
4711
4712namespace {
4713struct ARMVectorIntrinsicInfo {
4714 const char *NameHint;
4715 unsigned BuiltinID;
4716 unsigned LLVMIntrinsic;
4717 unsigned AltLLVMIntrinsic;
4718 uint64_t TypeModifier;
4719
4720 bool operator<(unsigned RHSBuiltinID) const {
4721 return BuiltinID < RHSBuiltinID;
4722 }
4723 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
4724 return BuiltinID < TE.BuiltinID;
4725 }
4726};
4727} // end anonymous namespace
4728
4729#define NEONMAP0(NameBase) \
4730 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
4731
4732#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
4733 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4734 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
4735
4736#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
4737 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4738 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
4739 TypeModifier }
4740
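// Illustrative expansion (assumption, not original source): an entry such as
//   NEONMAP1(vabs_v, arm_neon_vabs, 0)
// expands to
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// i.e. a NameHint, the builtin ID, the LLVM intrinsic, no alternate
// intrinsic, and no type-modifier flags.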
4741static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
4742 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
4743 NEONMAP0(splat_lane_v),
4744 NEONMAP0(splat_laneq_v),
4745 NEONMAP0(splatq_lane_v),
4746 NEONMAP0(splatq_laneq_v),
4747 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4748 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4749 NEONMAP1(vabs_v, arm_neon_vabs, 0),
4750 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
4751 NEONMAP0(vaddhn_v),
4752 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
4753 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
4754 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
4755 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
4756 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
4757 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
4758 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
4759 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
4760 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
4761 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
4762 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
4763 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4764 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4765 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4766 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4767 NEONMAP1(vcage_v, arm_neon_vacge, 0),
4768 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
4769 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
4770 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
4771 NEONMAP1(vcale_v, arm_neon_vacge, 0),
4772 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
4773 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
4774 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
4775 NEONMAP0(vceqz_v),
4776 NEONMAP0(vceqzq_v),
4777 NEONMAP0(vcgez_v),
4778 NEONMAP0(vcgezq_v),
4779 NEONMAP0(vcgtz_v),
4780 NEONMAP0(vcgtzq_v),
4781 NEONMAP0(vclez_v),
4782 NEONMAP0(vclezq_v),
4783 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
4784 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
4785 NEONMAP0(vcltz_v),
4786 NEONMAP0(vcltzq_v),
4787 NEONMAP1(vclz_v, ctlz, Add1ArgType),
4788 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
4789 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
4790 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
4791 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
4792 NEONMAP0(vcvt_f16_v),
4793 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
4794 NEONMAP0(vcvt_f32_v),
4795 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4796 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4797 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4798 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4799 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4800 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4801 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4802 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4803 NEONMAP0(vcvt_s16_v),
4804 NEONMAP0(vcvt_s32_v),
4805 NEONMAP0(vcvt_s64_v),
4806 NEONMAP0(vcvt_u16_v),
4807 NEONMAP0(vcvt_u32_v),
4808 NEONMAP0(vcvt_u64_v),
4809 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
4810 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
4811 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
4812 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
4813 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
4814 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
4815 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
4816 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
4817 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
4818 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
4819 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
4820 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
4821 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
4822 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
4823 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
4824 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
4825 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
4826 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
4827 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
4828 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
4829 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
4830 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
4831 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
4832 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
4833 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
4834 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
4835 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
4836 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
4837 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
4838 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
4839 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
4840 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
4841 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
4842 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
4843 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
4844 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
4845 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
4846 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
4847 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
4848 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
4849 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
4850 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
4851 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
4852 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
4853 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
4854 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
4855 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
4856 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
4857 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
4858 NEONMAP0(vcvtq_f16_v),
4859 NEONMAP0(vcvtq_f32_v),
4860 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4861 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4862 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4863 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4864 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4865 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4866 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4867 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4868 NEONMAP0(vcvtq_s16_v),
4869 NEONMAP0(vcvtq_s32_v),
4870 NEONMAP0(vcvtq_s64_v),
4871 NEONMAP0(vcvtq_u16_v),
4872 NEONMAP0(vcvtq_u32_v),
4873 NEONMAP0(vcvtq_u64_v),
4874 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
4875 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
4876 NEONMAP0(vext_v),
4877 NEONMAP0(vextq_v),
4878 NEONMAP0(vfma_v),
4879 NEONMAP0(vfmaq_v),
4880 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4881 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4882 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4883 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4884 NEONMAP0(vld1_dup_v),
4885 NEONMAP1(vld1_v, arm_neon_vld1, 0),
4886 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
4887 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
4888 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
4889 NEONMAP0(vld1q_dup_v),
4890 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
4891 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
4892 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
4893 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
4894 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
4895 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
4896 NEONMAP1(vld2_v, arm_neon_vld2, 0),
4897 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
4898 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
4899 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
4900 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
4901 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
4902 NEONMAP1(vld3_v, arm_neon_vld3, 0),
4903 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
4904 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
4905 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
4906 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
4907 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
4908 NEONMAP1(vld4_v, arm_neon_vld4, 0),
4909 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
4910 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
4911 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
4912 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4913 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
4914 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
4915 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4916 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4917 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
4918 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
4919 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4920 NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
4921 NEONMAP0(vmovl_v),
4922 NEONMAP0(vmovn_v),
4923 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
4924 NEONMAP0(vmull_v),
4925 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
4926 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4927 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4928 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
4929 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4930 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4931 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
4932 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
4933 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
4934 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
4935 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
4936 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4937 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4938 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
4939 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
4940 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
4941 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
4942 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
4943 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
4944 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
4945 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
4946 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
4947 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
4948 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
4949 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4950 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4951 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4952 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4953 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4954 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4955 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
4956 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
4957 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4958 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4959 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
4960 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4961 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4962 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
4963 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
4964 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4965 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4966 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
4967 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
4968 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
4969 NEONMAP0(vrndi_v),
4970 NEONMAP0(vrndiq_v),
4971 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
4972 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
4973 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
4974 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
4975 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
4976 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
4977 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
4978 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
4979 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
4980 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4981 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4982 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4983 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4984 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
4985 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
4986 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
4987 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
4988 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
4989 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
4990 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
4991 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
4992 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
4993 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
4994 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
4995 NEONMAP0(vshl_n_v),
4996 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
4997 NEONMAP0(vshll_n_v),
4998 NEONMAP0(vshlq_n_v),
4999 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5000 NEONMAP0(vshr_n_v),
5001 NEONMAP0(vshrn_n_v),
5002 NEONMAP0(vshrq_n_v),
5003 NEONMAP1(vst1_v, arm_neon_vst1, 0),
5004 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5005 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5006 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5007 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5008 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5009 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5010 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5011 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5012 NEONMAP1(vst2_v, arm_neon_vst2, 0),
5013 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5014 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5015 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5016 NEONMAP1(vst3_v, arm_neon_vst3, 0),
5017 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5018 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5019 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5020 NEONMAP1(vst4_v, arm_neon_vst4, 0),
5021 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5022 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5023 NEONMAP0(vsubhn_v),
5024 NEONMAP0(vtrn_v),
5025 NEONMAP0(vtrnq_v),
5026 NEONMAP0(vtst_v),
5027 NEONMAP0(vtstq_v),
5028 NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5029 NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5030 NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5031 NEONMAP0(vuzp_v),
5032 NEONMAP0(vuzpq_v),
5033 NEONMAP0(vzip_v),
5034 NEONMAP0(vzipq_v)
5035};
5036
5037static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5038 NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5039 NEONMAP0(splat_lane_v),
5040 NEONMAP0(splat_laneq_v),
5041 NEONMAP0(splatq_lane_v),
5042 NEONMAP0(splatq_laneq_v),
5043 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5044 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5045 NEONMAP0(vaddhn_v),
5046 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5047 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5048 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5049 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5050 NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5051 NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5052 NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5053 NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5054 NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5055 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5056 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5057 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5058 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5059 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5060 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5061 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5062 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5063 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5064 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5065 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5066 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5067 NEONMAP0(vceqz_v),
5068 NEONMAP0(vceqzq_v),
5069 NEONMAP0(vcgez_v),
5070 NEONMAP0(vcgezq_v),
5071 NEONMAP0(vcgtz_v),
5072 NEONMAP0(vcgtzq_v),
5073 NEONMAP0(vclez_v),
5074 NEONMAP0(vclezq_v),
5075 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5076 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5077 NEONMAP0(vcltz_v),
5078 NEONMAP0(vcltzq_v),
5079 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5080 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5081 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5082 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5083 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5084 NEONMAP0(vcvt_f16_v),
5085 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5086 NEONMAP0(vcvt_f32_v),
5087 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5088 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5089 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5090 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5091 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5092 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5093 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5094 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5095 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5096 NEONMAP0(vcvtq_f16_v),
5097 NEONMAP0(vcvtq_f32_v),
5098 NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5099 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5100 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5101 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5102 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5103 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5104 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5105 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5106 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5107 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5108 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
5109 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5110 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5111 NEONMAP0(vext_v),
5112 NEONMAP0(vextq_v),
5113 NEONMAP0(vfma_v),
5114 NEONMAP0(vfmaq_v),
5115 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
5116 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
5117 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
5118 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
5119 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
5120 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
5121 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
5122 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
5123 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5124 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5125 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5126 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5127 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
5128 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
5129 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
5130 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
5131 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
5132 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
5133 NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
5134 NEONMAP0(vmovl_v),
5135 NEONMAP0(vmovn_v),
5136 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
5137 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
5138 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
5139 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5140 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5141 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
5142 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
5143 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
5144 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5145 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5146 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
5147 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
5148 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
5149 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5150 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
5151 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
5152 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5153 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
5154 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
5155 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
5156 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
5157 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
5158 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
5159 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5160 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5161 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
5162 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5163 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5164 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
5165 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5166 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5167 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5168 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5169 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5170 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5171 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
5172 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
5173 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5174 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5175 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
5176 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5177 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5178 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
5179 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
5180 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5181 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5182 NEONMAP0(vrndi_v),
5183 NEONMAP0(vrndiq_v),
5184 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5185 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5186 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5187 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5188 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5189 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5190 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
5191 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
5192 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
5193 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
5194 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
5195 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
5196 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
5197 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
5198 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
5199 NEONMAP0(vshl_n_v),
5200 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5201 NEONMAP0(vshll_n_v),
5202 NEONMAP0(vshlq_n_v),
5203 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5204 NEONMAP0(vshr_n_v),
5205 NEONMAP0(vshrn_n_v),
5206 NEONMAP0(vshrq_n_v),
5207 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
5208 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
5209 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
5210 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
5211 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
5212 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
5213 NEONMAP0(vsubhn_v),
5214 NEONMAP0(vtst_v),
5215 NEONMAP0(vtstq_v),
5216 NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
5217 NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
5218 NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
5219};
5220
5221static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
5222 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
5223 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
5224 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
5225 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5226 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5227 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5228 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5229 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5230 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5231 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5232 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5233 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
5234 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5235 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
5236 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5237 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5238 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5239 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5240 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5241 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5242 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5243 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5244 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5245 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5246 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5247 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5248 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5249 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5250 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5251 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5252 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5253 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5254 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5255 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5256 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
5257 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5258 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5259 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5260 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5261 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5262 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5263 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5264 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5265 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5266 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5267 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5268 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5269 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5270 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5271 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5272 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5273 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5274 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5275 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
5276 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5277 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5278 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5279 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5280 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5281 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5282 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5283 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5284 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5285 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5286 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5287 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5288 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5289 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5290 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5291 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5292 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5293 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5294 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5295 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5296 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
5297 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
5298 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
5299 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5300 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5301 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5302 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5303 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5304 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5305 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5306 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5307 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5308 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5309 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5310 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
5311 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5312 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
5313 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5314 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5315 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
5316 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
5317 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5318 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5319 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
5320 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
5321 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
5322 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
5323 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
5324 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
5325 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
5326 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
5327 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5328 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5329 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5330 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5331 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
5332 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5333 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5334 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5335 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
5336 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5337 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
5338 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
5339 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
5340 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5341 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5342 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
5343 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
5344 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5345 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5346 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
5347 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
5348 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
5349 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
5350 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5351 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5352 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5353 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5354 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
5355 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5356 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5357 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5358 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5359 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5360 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5361 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
5362 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
5363 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5364 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5365 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5366 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5367 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
5368 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
5369 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
5370 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
5371 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5372 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5373 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
5374 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
5375 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
5376 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5377 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5378 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5379 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5380 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
5381 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5382 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5383 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5384 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5385 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
5386 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
5387 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5388 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5389 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
5390 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
5391 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
5392 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
5393 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
5394 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
5395 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
5396 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
5397 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
5398 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
5399 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
5400 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
5401 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
5402 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
5403 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
5404 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
5405 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
5406 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
5407 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
5408 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
5409 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5410 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
5411 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5412 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
5413 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
5414 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
5415 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5416 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
5417 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5418 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
5419 // FP16 scalar intrinsics go here.
5420 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
5421 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5422 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5423 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5424 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5425 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5426 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5427 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5428 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5429 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5430 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5431 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5432 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5433 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5434 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5435 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5436 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5437 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5438 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5439 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5440 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5441 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5442 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5443 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5444 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5445 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5446 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5447 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5448 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5449 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
5450 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
5451 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
5452 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
5453 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
5454};
5455
5456#undef NEONMAP0
5457#undef NEONMAP1
5458#undef NEONMAP2
5459
5460#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5461 { \
5462 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
5463 TypeModifier \
5464 }
5465
5466#define SVEMAP2(NameBase, TypeModifier) \
5467 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
5468static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
5469#define GET_SVE_LLVM_INTRINSIC_MAP
5470#include "clang/Basic/arm_sve_builtin_cg.inc"
5471#undef GET_SVE_LLVM_INTRINSIC_MAP
5472};
5473
5474#undef SVEMAP1
5475#undef SVEMAP2
5476
5477static bool NEONSIMDIntrinsicsProvenSorted = false;
5478
5479static bool AArch64SIMDIntrinsicsProvenSorted = false;
5480static bool AArch64SISDIntrinsicsProvenSorted = false;
5481static bool AArch64SVEIntrinsicsProvenSorted = false;
5482
5483static const ARMVectorIntrinsicInfo *
5484findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
5485 unsigned BuiltinID, bool &MapProvenSorted) {
5486
5487#ifndef NDEBUG
5488 if (!MapProvenSorted) {
5489 assert(llvm::is_sorted(IntrinsicMap));
5490 MapProvenSorted = true;
5491 }
5492#endif
5493
5494 const ARMVectorIntrinsicInfo *Builtin =
5495 llvm::lower_bound(IntrinsicMap, BuiltinID);
5496
5497 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
5498 return Builtin;
5499
5500 return nullptr;
5501}
5502
5503Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
5504 unsigned Modifier,
5505 llvm::Type *ArgType,
5506 const CallExpr *E) {
5507 int VectorSize = 0;
5508 if (Modifier & Use64BitVectors)
5509 VectorSize = 64;
5510 else if (Modifier & Use128BitVectors)
5511 VectorSize = 128;
5512
5513 // Return type.
5514 SmallVector<llvm::Type *, 3> Tys;
5515 if (Modifier & AddRetType) {
5516 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
5517 if (Modifier & VectorizeRetType)
5518 Ty = llvm::FixedVectorType::get(
5519 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
5520
5521 Tys.push_back(Ty);
5522 }
5523
5524 // Arguments.
5525 if (Modifier & VectorizeArgTypes) {
5526 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
5527 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
5528 }
5529
5530 if (Modifier & (Add1ArgType | Add2ArgTypes))
5531 Tys.push_back(ArgType);
5532
5533 if (Modifier & Add2ArgTypes)
5534 Tys.push_back(ArgType);
5535
5536 if (Modifier & InventFloatType)
5537 Tys.push_back(FloatTy);
5538
5539 return CGM.getIntrinsic(IntrinsicID, Tys);
5540}
5541
5542static Value *EmitCommonNeonSISDBuiltinExpr(
5543 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
5544 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
5545 unsigned BuiltinID = SISDInfo.BuiltinID;
5546 unsigned int Int = SISDInfo.LLVMIntrinsic;
5547 unsigned Modifier = SISDInfo.TypeModifier;
5548 const char *s = SISDInfo.NameHint;
5549
5550 switch (BuiltinID) {
5551 case NEON::BI__builtin_neon_vcled_s64:
5552 case NEON::BI__builtin_neon_vcled_u64:
5553 case NEON::BI__builtin_neon_vcles_f32:
5554 case NEON::BI__builtin_neon_vcled_f64:
5555 case NEON::BI__builtin_neon_vcltd_s64:
5556 case NEON::BI__builtin_neon_vcltd_u64:
5557 case NEON::BI__builtin_neon_vclts_f32:
5558 case NEON::BI__builtin_neon_vcltd_f64:
5559 case NEON::BI__builtin_neon_vcales_f32:
5560 case NEON::BI__builtin_neon_vcaled_f64:
5561 case NEON::BI__builtin_neon_vcalts_f32:
5562 case NEON::BI__builtin_neon_vcaltd_f64:
5563 // Only one direction of comparisons actually exist, cmle is actually a cmge
5564 // with swapped operands. The table gives us the right intrinsic but we
5565 // still need to do the swap.
5566 std::swap(Ops[0], Ops[1]);
5567 break;
5568 }
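// As a sketch of the swap above: a call like vcled_s64(a, b) is emitted as if
// it were vcged_s64(b, a), since only the greater-or-equal direction has a
// real intrinsic entry in the table.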
5569
5570 assert(Int && "Generic code assumes a valid intrinsic");
5572 // Determine the type(s) of this overloaded AArch64 intrinsic.
5573 const Expr *Arg = E->getArg(0);
5574 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
5575 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
5576
5577 int j = 0;
5578 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
5579 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5580 ai != ae; ++ai, ++j) {
5581 llvm::Type *ArgTy = ai->getType();
5582 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
5583 ArgTy->getPrimitiveSizeInBits())
5584 continue;
5585
5586 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
5587 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
5588 // it before inserting.
5589 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
5590 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
5591 Ops[j] =
5592 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
5593 }
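// Rough shape of the IR this loop produces when an _n_ scalar operand has to
// become a vector lane (assuming a 16-bit element type for illustration):
//   %t = trunc i32 %n to i16
//   %v = insertelement <4 x i16> undef, i16 %t, i64 0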
5594
5595 Value *Result = CGF.EmitNeonCall(F, Ops, s);
5596 llvm::Type *ResultType = CGF.ConvertType(E->getType());
5597 if (ResultType->getPrimitiveSizeInBits() <
5598 Result->getType()->getPrimitiveSizeInBits())
5599 return CGF.Builder.CreateExtractElement(Result, C0);
5600
5601 return CGF.Builder.CreateBitCast(Result, ResultType, s);
5602}
5603
5604Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
5605 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
5606 const char *NameHint, unsigned Modifier, const CallExpr *E,
5607 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
5608 llvm::Triple::ArchType Arch) {
5609 // Get the last argument, which specifies the vector type.
5610 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
5611 Optional<llvm::APSInt> NeonTypeConst =
5612 Arg->getIntegerConstantExpr(getContext());
5613 if (!NeonTypeConst)
5614 return nullptr;
5615
5616 // Determine the type of this overloaded NEON intrinsic.
5617 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
5618 bool Usgn = Type.isUnsigned();
5619 bool Quad = Type.isQuad();
5620 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
5621 const bool AllowBFloatArgsAndRet =
5622 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
5623
5624 llvm::FixedVectorType *VTy =
5625 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
5626 llvm::Type *Ty = VTy;
5627 if (!Ty)
5628 return nullptr;
5629
5630 auto getAlignmentValue32 = [&](Address addr) -> Value* {
5631 return Builder.getInt32(addr.getAlignment().getQuantity());
5632 };
5633
5634 unsigned Int = LLVMIntrinsic;
5635 if ((Modifier & UnsignedAlts) && !Usgn)
5636 Int = AltLLVMIntrinsic;
5637
5638 switch (BuiltinID) {
5639 default: break;
5640 case NEON::BI__builtin_neon_splat_lane_v:
5641 case NEON::BI__builtin_neon_splat_laneq_v:
5642 case NEON::BI__builtin_neon_splatq_lane_v:
5643 case NEON::BI__builtin_neon_splatq_laneq_v: {
5644 auto NumElements = VTy->getElementCount();
5645 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
5646 NumElements = NumElements * 2;
5647 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
5648 NumElements = NumElements / 2;
5649
5650 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
5651 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
5652 }
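// The "q" forms adjust the element count because the lane is broadcast into a
// vector of a different width than the source; e.g. splatq_lane reads a 64-bit
// input vector but produces a 128-bit result, hence NumElements * 2.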
5653 case NEON::BI__builtin_neon_vpadd_v:
5654 case NEON::BI__builtin_neon_vpaddq_v:
5655 // We don't allow fp/int overloading of intrinsics.
5656 if (VTy->getElementType()->isFloatingPointTy() &&
5657 Int == Intrinsic::aarch64_neon_addp)
5658 Int = Intrinsic::aarch64_neon_faddp;
5659 break;
5660 case NEON::BI__builtin_neon_vabs_v:
5661 case NEON::BI__builtin_neon_vabsq_v:
5662 if (VTy->getElementType()->isFloatingPointTy())
5663 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
5664 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
5665 case NEON::BI__builtin_neon_vaddhn_v: {
5666 llvm::FixedVectorType *SrcTy =
5667 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
5668
5669 // %sum = add <4 x i32> %lhs, %rhs
5670 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5671 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5672 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
5673
5674 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
5675 Constant *ShiftAmt =
5676 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5677 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
5678
5679 // %res = trunc <4 x i32> %high to <4 x i16>
5680 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
5681 }
5682 case NEON::BI__builtin_neon_vcale_v:
5683 case NEON::BI__builtin_neon_vcaleq_v:
5684 case NEON::BI__builtin_neon_vcalt_v:
5685 case NEON::BI__builtin_neon_vcaltq_v:
5686 std::swap(Ops[0], Ops[1]);
5687 LLVM_FALLTHROUGH;
5688 case NEON::BI__builtin_neon_vcage_v:
5689 case NEON::BI__builtin_neon_vcageq_v:
5690 case NEON::BI__builtin_neon_vcagt_v:
5691 case NEON::BI__builtin_neon_vcagtq_v: {
5692 llvm::Type *Ty;
5693 switch (VTy->getScalarSizeInBits()) {
5694 default: llvm_unreachable("unexpected type");
5695 case 32:
5696 Ty = FloatTy;
5697 break;
5698 case 64:
5699 Ty = DoubleTy;
5700 break;
5701 case 16:
5702 Ty = HalfTy;
5703 break;
5704 }
5705 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
5706 llvm::Type *Tys[] = { VTy, VecFlt };
5707 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5708 return EmitNeonCall(F, Ops, NameHint);
5709 }
5710 case NEON::BI__builtin_neon_vceqz_v:
5711 case NEON::BI__builtin_neon_vceqzq_v:
5712 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
5713 ICmpInst::ICMP_EQ, "vceqz");
5714 case NEON::BI__builtin_neon_vcgez_v:
5715 case NEON::BI__builtin_neon_vcgezq_v:
5716 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
5717 ICmpInst::ICMP_SGE, "vcgez");
5718 case NEON::BI__builtin_neon_vclez_v:
5719 case NEON::BI__builtin_neon_vclezq_v:
5720 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
5721 ICmpInst::ICMP_SLE, "vclez");
5722 case NEON::BI__builtin_neon_vcgtz_v:
5723 case NEON::BI__builtin_neon_vcgtzq_v:
5724 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
5725 ICmpInst::ICMP_SGT, "vcgtz");
5726 case NEON::BI__builtin_neon_vcltz_v:
5727 case NEON::BI__builtin_neon_vcltzq_v:
5728 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
5729 ICmpInst::ICMP_SLT, "vcltz");
5730 case NEON::BI__builtin_neon_vclz_v:
5731 case NEON::BI__builtin_neon_vclzq_v:
5732 // We generate a target-independent intrinsic, which needs a second argument
5733 // for whether or not clz of zero is undefined; on ARM it isn't.
5734 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
5735 break;
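// Sketch of the resulting call: on ARM this becomes something like
// llvm.ctlz.v4i32(%x, i1 false), the trailing i1 recording that ctlz(0) is
// well-defined for this target.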
5736 case NEON::BI__builtin_neon_vcvt_f32_v:
5737 case NEON::BI__builtin_neon_vcvtq_f32_v:
5738 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5739 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
5740 HasLegalHalfType);
5741 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5742 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5743 case NEON::BI__builtin_neon_vcvt_f16_v:
5744 case NEON::BI__builtin_neon_vcvtq_f16_v:
5745 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5746 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
5747 HasLegalHalfType);
5748 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5749 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5750 case NEON::BI__builtin_neon_vcvt_n_f16_v:
5751 case NEON::BI__builtin_neon_vcvt_n_f32_v:
5752 case NEON::BI__builtin_neon_vcvt_n_f64_v:
5753 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
5754 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
5755 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
5756 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
5757 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5758 Function *F = CGM.getIntrinsic(Int, Tys);
5759 return EmitNeonCall(F, Ops, "vcvt_n");
5760 }
5761 case NEON::BI__builtin_neon_vcvt_n_s16_v:
5762 case NEON::BI__builtin_neon_vcvt_n_s32_v:
5763 case NEON::BI__builtin_neon_vcvt_n_u16_v:
5764 case NEON::BI__builtin_neon_vcvt_n_u32_v:
5765 case NEON::BI__builtin_neon_vcvt_n_s64_v:
5766 case NEON::BI__builtin_neon_vcvt_n_u64_v:
5767 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
5768 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
5769 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
5770 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
5771 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
5772 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
5773 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5774 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5775 return EmitNeonCall(F, Ops, "vcvt_n");
5776 }
5777 case NEON::BI__builtin_neon_vcvt_s32_v:
5778 case NEON::BI__builtin_neon_vcvt_u32_v:
5779 case NEON::BI__builtin_neon_vcvt_s64_v:
5780 case NEON::BI__builtin_neon_vcvt_u64_v:
5781 case NEON::BI__builtin_neon_vcvt_s16_v:
5782 case NEON::BI__builtin_neon_vcvt_u16_v:
5783 case NEON::BI__builtin_neon_vcvtq_s32_v:
5784 case NEON::BI__builtin_neon_vcvtq_u32_v:
5785 case NEON::BI__builtin_neon_vcvtq_s64_v:
5786 case NEON::BI__builtin_neon_vcvtq_u64_v:
5787 case NEON::BI__builtin_neon_vcvtq_s16_v:
5788 case NEON::BI__builtin_neon_vcvtq_u16_v: {
5789 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
5790 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
5791 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
5792 }
5793 case NEON::BI__builtin_neon_vcvta_s16_v:
5794 case NEON::BI__builtin_neon_vcvta_s32_v:
5795 case NEON::BI__builtin_neon_vcvta_s64_v:
5796 case NEON::BI__builtin_neon_vcvta_u16_v:
5797 case NEON::BI__builtin_neon_vcvta_u32_v:
5798 case NEON::BI__builtin_neon_vcvta_u64_v:
5799 case NEON::BI__builtin_neon_vcvtaq_s16_v:
5800 case NEON::BI__builtin_neon_vcvtaq_s32_v:
5801 case NEON::BI__builtin_neon_vcvtaq_s64_v:
5802 case NEON::BI__builtin_neon_vcvtaq_u16_v:
5803 case NEON::BI__builtin_neon_vcvtaq_u32_v:
5804 case NEON::BI__builtin_neon_vcvtaq_u64_v:
5805 case NEON::BI__builtin_neon_vcvtn_s16_v:
5806 case NEON::BI__builtin_neon_vcvtn_s32_v:
5807 case NEON::BI__builtin_neon_vcvtn_s64_v:
5808 case NEON::BI__builtin_neon_vcvtn_u16_v:
5809 case NEON::BI__builtin_neon_vcvtn_u32_v:
5810 case NEON::BI__builtin_neon_vcvtn_u64_v:
5811 case NEON::BI__builtin_neon_vcvtnq_s16_v:
5812 case NEON::BI__builtin_neon_vcvtnq_s32_v:
5813 case NEON::BI__builtin_neon_vcvtnq_s64_v:
5814 case NEON::BI__builtin_neon_vcvtnq_u16_v:
5815 case NEON::BI__builtin_neon_vcvtnq_u32_v:
5816 case NEON::BI__builtin_neon_vcvtnq_u64_v:
5817 case NEON::BI__builtin_neon_vcvtp_s16_v:
5818 case NEON::BI__builtin_neon_vcvtp_s32_v:
5819 case NEON::BI__builtin_neon_vcvtp_s64_v:
5820 case NEON::BI__builtin_neon_vcvtp_u16_v:
5821 case NEON::BI__builtin_neon_vcvtp_u32_v:
5822 case NEON::BI__builtin_neon_vcvtp_u64_v:
5823 case NEON::BI__builtin_neon_vcvtpq_s16_v:
5824 case NEON::BI__builtin_neon_vcvtpq_s32_v:
5825 case NEON::BI__builtin_neon_vcvtpq_s64_v:
5826 case NEON::BI__builtin_neon_vcvtpq_u16_v:
5827 case NEON::BI__builtin_neon_vcvtpq_u32_v:
5828 case NEON::BI__builtin_neon_vcvtpq_u64_v:
5829 case NEON::BI__builtin_neon_vcvtm_s16_v:
5830 case NEON::BI__builtin_neon_vcvtm_s32_v:
5831 case NEON::BI__builtin_neon_vcvtm_s64_v:
5832 case NEON::BI__builtin_neon_vcvtm_u16_v:
5833 case NEON::BI__builtin_neon_vcvtm_u32_v:
5834 case NEON::BI__builtin_neon_vcvtm_u64_v:
5835 case NEON::BI__builtin_neon_vcvtmq_s16_v:
5836 case NEON::BI__builtin_neon_vcvtmq_s32_v:
5837 case NEON::BI__builtin_neon_vcvtmq_s64_v:
5838 case NEON::BI__builtin_neon_vcvtmq_u16_v:
5839 case NEON::BI__builtin_neon_vcvtmq_u32_v:
5840 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
5841 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5842 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5843 }
5844 case NEON::BI__builtin_neon_vcvtx_f32_v: {
5845 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
5846 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5847
5848 }
5849 case NEON::BI__builtin_neon_vext_v:
5850 case NEON::BI__builtin_neon_vextq_v: {
5851 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
5852 SmallVector<int, 16> Indices;
5853 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5854 Indices.push_back(i+CV);
5855
5856 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5857 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5858 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
5859 }
5860 case NEON::BI__builtin_neon_vfma_v:
5861 case NEON::BI__builtin_neon_vfmaq_v: {
5862 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5863 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5864 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5865
5866 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
5867 return emitCallMaybeConstrainedFPBuiltin(
5868 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
5869 {Ops[1], Ops[2], Ops[0]});
5870 }
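// In other words, a call such as vfmaq_f32(acc, a, b) is forwarded to the LLVM
// intrinsic as llvm.fma(a, b, acc), matching the operand reordering in the
// call above.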
5871 case NEON::BI__builtin_neon_vld1_v:
5872 case NEON::BI__builtin_neon_vld1q_v: {
5873 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5874 Ops.push_back(getAlignmentValue32(PtrOp0));
5875 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
5876 }
5877 case NEON::BI__builtin_neon_vld1_x2_v:
5878 case NEON::BI__builtin_neon_vld1q_x2_v:
5879 case NEON::BI__builtin_neon_vld1_x3_v:
5880 case NEON::BI__builtin_neon_vld1q_x3_v:
5881 case NEON::BI__builtin_neon_vld1_x4_v:
5882 case NEON::BI__builtin_neon_vld1q_x4_v: {
5883 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
5884 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5885 llvm::Type *Tys[2] = { VTy, PTy };
5886 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5887 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5888 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5889 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5890 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5891 }
5892 case NEON::BI__builtin_neon_vld2_v:
5893 case NEON::BI__builtin_neon_vld2q_v:
5894 case NEON::BI__builtin_neon_vld3_v:
5895 case NEON::BI__builtin_neon_vld3q_v:
5896 case NEON::BI__builtin_neon_vld4_v:
5897 case NEON::BI__builtin_neon_vld4q_v:
5898 case NEON::BI__builtin_neon_vld2_dup_v:
5899 case NEON::BI__builtin_neon_vld2q_dup_v:
5900 case NEON::BI__builtin_neon_vld3_dup_v:
5901 case NEON::BI__builtin_neon_vld3q_dup_v:
5902 case NEON::BI__builtin_neon_vld4_dup_v:
5903 case NEON::BI__builtin_neon_vld4q_dup_v: {
5904 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5905 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5906 Value *Align = getAlignmentValue32(PtrOp1);
5907 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
5908 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5909 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5910 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5911 }
5912 case NEON::BI__builtin_neon_vld1_dup_v:
5913 case NEON::BI__builtin_neon_vld1q_dup_v: {
5914 Value *V = UndefValue::get(Ty);
5915 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5916 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
5917 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
5918 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
5919 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
5920 return EmitNeonSplat(Ops[0], CI);
5921 }
5922 case NEON::BI__builtin_neon_vld2_lane_v:
5923 case NEON::BI__builtin_neon_vld2q_lane_v:
5924 case NEON::BI__builtin_neon_vld3_lane_v:
5925 case NEON::BI__builtin_neon_vld3q_lane_v:
5926 case NEON::BI__builtin_neon_vld4_lane_v:
5927 case NEON::BI__builtin_neon_vld4q_lane_v: {
5928 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5929 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5930 for (unsigned I = 2; I < Ops.size() - 1; ++I)
5931 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
5932 Ops.push_back(getAlignmentValue32(PtrOp1));
5933 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
5934 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5935 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5936 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5937 }
5938 case NEON::BI__builtin_neon_vmovl_v: {
5939 llvm::FixedVectorType *DTy =
5940 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
5941 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
5942 if (Usgn)
5943 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
5944 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
5945 }
5946 case NEON::BI__builtin_neon_vmovn_v: {
5947 llvm::FixedVectorType *QTy =
5948 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
5949 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
5950 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
5951 }
5952 case NEON::BI__builtin_neon_vmull_v:
5953 // FIXME: the integer vmull operations could be emitted in terms of pure
5954 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
5955 // hoisting the exts outside loops. Until global ISel comes along that can
5956 // see through such movement this leads to bad CodeGen. So we need an
5957 // intrinsic for now.
5958 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
5959 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
5960 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
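// The pure-IR form alluded to in the FIXME above would look roughly like
// widening both operands and multiplying:
//   %a.ext = sext <8 x i8> %a to <8 x i16>
//   %b.ext = sext <8 x i8> %b to <8 x i16>
//   %res   = mul <8 x i16> %a.ext, %b.ext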
5961 case NEON::BI__builtin_neon_vpadal_v:
5962 case NEON::BI__builtin_neon_vpadalq_v: {
5963 // The source operand type has twice as many elements of half the size.
5964 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5965 llvm::Type *EltTy =
5966 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5967 auto *NarrowTy =
5968 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
5969 llvm::Type *Tys[2] = { Ty, NarrowTy };
5970 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5971 }
5972 case NEON::BI__builtin_neon_vpaddl_v:
5973 case NEON::BI__builtin_neon_vpaddlq_v: {
5974 // The source operand type has twice as many elements of half the size.
5975 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5976 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5977 auto *NarrowTy =
5978 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
5979 llvm::Type *Tys[2] = { Ty, NarrowTy };
5980 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
5981 }
5982 case NEON::BI__builtin_neon_vqdmlal_v:
5983 case NEON::BI__builtin_neon_vqdmlsl_v: {
5984 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
5985 Ops[1] =
5986 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
5987 Ops.resize(2);
5988 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
5989 }
5990 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
5991 case NEON::BI__builtin_neon_vqdmulh_lane_v:
5992 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
5993 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
5994 auto *RTy = cast<llvm::FixedVectorType>(Ty);
5995 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
5996 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
5997 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
5998 RTy->getNumElements() * 2);
5999 llvm::Type *Tys[2] = {
6000 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6001 /*isQuad*/ false))};
6002 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6003 }
6004 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6005 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6006 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6007 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6008 llvm::Type *Tys[2] = {
6009 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6010 /*isQuad*/ true))};
6011 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6012 }
6013 case NEON::BI__builtin_neon_vqshl_n_v:
6014 case NEON::BI__builtin_neon_vqshlq_n_v:
6015 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6016 1, false);
6017 case NEON::BI__builtin_neon_vqshlu_n_v:
6018 case NEON::BI__builtin_neon_vqshluq_n_v:
6019 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6020 1, false);
6021 case NEON::BI__builtin_neon_vrecpe_v:
6022 case NEON::BI__builtin_neon_vrecpeq_v:
6023 case NEON::BI__builtin_neon_vrsqrte_v:
6024 case NEON::BI__builtin_neon_vrsqrteq_v:
6025 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6026 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6027 case NEON::BI__builtin_neon_vrndi_v:
6028 case NEON::BI__builtin_neon_vrndiq_v:
6029 Int = Builder.getIsFPConstrained()
6030 ? Intrinsic::experimental_constrained_nearbyint
6031 : Intrinsic::nearbyint;
6032 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6033 case NEON::BI__builtin_neon_vrshr_n_v:
6034 case NEON::BI__builtin_neon_vrshrq_n_v:
6035 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6036 1, true);
6037 case NEON::BI__builtin_neon_vshl_n_v:
6038 case NEON::BI__builtin_neon_vshlq_n_v:
6039 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6040 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6041 "vshl_n");
6042 case NEON::BI__builtin_neon_vshll_n_v: {
6043 llvm::FixedVectorType *SrcTy =
6044 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6045 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6046 if (Usgn)
6047 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6048 else
6049 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6050 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6051 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6052 }
6053 case NEON::BI__builtin_neon_vshrn_n_v: {
6054 llvm::FixedVectorType *SrcTy =
6055 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6056 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6057 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6058 if (Usgn)
6059 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6060 else
6061 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6062 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6063 }
6064 case NEON::BI__builtin_neon_vshr_n_v:
6065 case NEON::BI__builtin_neon_vshrq_n_v:
6066 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6067 case NEON::BI__builtin_neon_vst1_v:
6068 case NEON::BI__builtin_neon_vst1q_v:
6069 case NEON::BI__builtin_neon_vst2_v:
6070 case NEON::BI__builtin_neon_vst2q_v:
6071 case NEON::BI__builtin_neon_vst3_v:
6072 case NEON::BI__builtin_neon_vst3q_v:
6073 case NEON::BI__builtin_neon_vst4_v:
6074 case NEON::BI__builtin_neon_vst4q_v:
6075 case NEON::BI__builtin_neon_vst2_lane_v:
6076 case NEON::BI__builtin_neon_vst2q_lane_v:
6077 case NEON::BI__builtin_neon_vst3_lane_v:
6078 case NEON::BI__builtin_neon_vst3q_lane_v:
6079 case NEON::BI__builtin_neon_vst4_lane_v:
6080 case NEON::BI__builtin_neon_vst4q_lane_v: {
6081 llvm::Type *Tys[] = {Int8PtrTy, Ty};
6082 Ops.push_back(getAlignmentValue32(PtrOp0));
6083 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
6084 }
6085 case NEON::BI__builtin_neon_vst1_x2_v:
6086 case NEON::BI__builtin_neon_vst1q_x2_v:
6087 case NEON::BI__builtin_neon_vst1_x3_v:
6088 case NEON::BI__builtin_neon_vst1q_x3_v:
6089 case NEON::BI__builtin_neon_vst1_x4_v:
6090 case NEON::BI__builtin_neon_vst1q_x4_v: {
6091 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6092 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6093 // in AArch64 it comes last. We may want to stick to one or the other.
6094 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
6095 Arch == llvm::Triple::aarch64_32) {
6096 llvm::Type *Tys[2] = { VTy, PTy };
6097 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
6098 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6099 }
6100 llvm::Type *Tys[2] = { PTy, VTy };
6101 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6102 }
6103 case NEON::BI__builtin_neon_vsubhn_v: {
6104 llvm::FixedVectorType *SrcTy =
6105 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6106
6107 // %diff = sub <4 x i32> %lhs, %rhs
6108 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6109 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6110 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
6111
6112 // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
6113 Constant *ShiftAmt =
6114 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6115 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
6116
6117 // %res = trunc <4 x i32> %high to <4 x i16>
6118 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
6119 }
6120 case NEON::BI__builtin_neon_vtrn_v:
6121 case NEON::BI__builtin_neon_vtrnq_v: {
6122 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6123 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6124 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6125 Value *SV = nullptr;
6126
6127 for (unsigned vi = 0; vi != 2; ++vi) {
6128 SmallVector<int, 16> Indices;
6129 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6130 Indices.push_back(i+vi);
6131 Indices.push_back(i+e+vi);
6132 }
6133 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6134 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
6135 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6136 }
6137 return SV;
6138 }
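// Illustrative note (not part of the checked-in source): for a <4 x i16> vtrn
// the loop above emits two shuffles of Ops[1] and Ops[2],
//   vi = 0: indices {0, 4, 2, 6}
//   vi = 1: indices {1, 5, 3, 7}
// and stores each half of the transposed pair through consecutive elements of
// the result pointer in Ops[0].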
6139 case NEON::BI__builtin_neon_vtst_v:
6140 case NEON::BI__builtin_neon_vtstq_v: {
6141 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6142 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6143 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
6144 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
6145 ConstantAggregateZero::get(Ty));
6146 return Builder.CreateSExt(Ops[0], Ty, "vtst");
6147 }
6148 case NEON::BI__builtin_neon_vuzp_v:
6149 case NEON::BI__builtin_neon_vuzpq_v: {
6150 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6151 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6152 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6153 Value *SV = nullptr;
6154
6155 for (unsigned vi = 0; vi != 2; ++vi) {
6156 SmallVector<int, 16> Indices;
6157 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6158 Indices.push_back(2*i+vi);
6159
6160 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6161 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
6162 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6163 }
6164 return SV;
6165 }
6166 case NEON::BI__builtin_neon_vzip_v:
6167 case NEON::BI__builtin_neon_vzipq_v: {
6168 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6169 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6170 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6171 Value *SV = nullptr;
6172
6173 for (unsigned vi = 0; vi != 2; ++vi) {
6174 SmallVector<int, 16> Indices;
6175 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6176 Indices.push_back((i + vi*e) >> 1);
6177 Indices.push_back(((i + vi*e) >> 1)+e);
6178 }
6179 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6180 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6181 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6182 }
6183 return SV;
6184 }
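// Illustrative note (not part of the checked-in source): for a <4 x i16> vzip
// the index computation above yields
//   vi = 0: {0, 4, 1, 5}
//   vi = 1: {2, 6, 3, 7}
// i.e. the low and high halves of the element-wise interleaving of Ops[1] and
// Ops[2].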
6185 case NEON::BI__builtin_neon_vdot_v:
6186 case NEON::BI__builtin_neon_vdotq_v: {
6187 auto *InputTy =
6188 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6189 llvm::Type *Tys[2] = { Ty, InputTy };
6190 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6191 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6192 }
6193 case NEON::BI__builtin_neon_vfmlal_low_v:
6194 case NEON::BI__builtin_neon_vfmlalq_low_v: {
6195 auto *InputTy =
6196 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6197 llvm::Type *Tys[2] = { Ty, InputTy };
6198 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6199 }
6200 case NEON::BI__builtin_neon_vfmlsl_low_v:
6201 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6202 auto *InputTy =
6203 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6204 llvm::Type *Tys[2] = { Ty, InputTy };
6205 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6206 }
6207 case NEON::BI__builtin_neon_vfmlal_high_v:
6208 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6209 auto *InputTy =
6210 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6211 llvm::Type *Tys[2] = { Ty, InputTy };
6212 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6213 }
6214 case NEON::BI__builtin_neon_vfmlsl_high_v:
6215 case NEON::BI__builtin_neon_vfmlslq_high_v: {
6216 auto *InputTy =
6217 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6218 llvm::Type *Tys[2] = { Ty, InputTy };
6219 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
6220 }
6221 case NEON::BI__builtin_neon_vmmlaq_v: {
6222 auto *InputTy =
6223 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6224 llvm::Type *Tys[2] = { Ty, InputTy };
6225 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6226 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
6227 }
6228 case NEON::BI__builtin_neon_vusmmlaq_v: {
6229 auto *InputTy =
6230 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6231 llvm::Type *Tys[2] = { Ty, InputTy };
6232 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
6233 }
6234 case NEON::BI__builtin_neon_vusdot_v:
6235 case NEON::BI__builtin_neon_vusdotq_v: {
6236 auto *InputTy =
6237 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6238 llvm::Type *Tys[2] = { Ty, InputTy };
6239 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
6240 }
6241 case NEON::BI__builtin_neon_vbfdot_v:
6242 case NEON::BI__builtin_neon_vbfdotq_v: {
6243 llvm::Type *InputTy =
6244 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
6245 llvm::Type *Tys[2] = { Ty, InputTy };
6246 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
6247 }
6248 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
6249 llvm::Type *Tys[1] = { Ty };
6250 Function *F = CGM.getIntrinsic(Int, Tys);
6251 return EmitNeonCall(F, Ops, "vcvtfp2bf");
6252 }
6253
6254 }
6255
6256 assert(Int && "Expected valid intrinsic number");
6257
6258 // Determine the type(s) of this overloaded AArch64 intrinsic.
6259 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
6260
6261 Value *Result = EmitNeonCall(F, Ops, NameHint);
6262 llvm::Type *ResultType = ConvertType(E->getType());
6263 // AArch64 intrinsic one-element vector type cast to
6264 // scalar type expected by the builtin
6265 return Builder.CreateBitCast(Result, ResultType, NameHint);
6266}
6267
6268Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
6269 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
6270 const CmpInst::Predicate Ip, const Twine &Name) {
6271 llvm::Type *OTy = Op->getType();
6272
6273 // FIXME: this is utterly horrific. We should not be looking at previous
6274 // codegen context to find out what needs doing. Unfortunately TableGen
6275 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
6276 // (etc).
6277 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
6278 OTy = BI->getOperand(0)->getType();
6279
6280 Op = Builder.CreateBitCast(Op, OTy);
6281 if (OTy->getScalarType()->isFloatingPointTy()) {
6282 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
6283 } else {
6284 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
6285 }
6286 return Builder.CreateSExt(Op, Ty, Name);
6287}
6288
6289static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
6290 Value *ExtOp, Value *IndexOp,
6291 llvm::Type *ResTy, unsigned IntID,
6292 const char *Name) {
6293 SmallVector<Value *, 2> TblOps;
6294 if (ExtOp)
6295 TblOps.push_back(ExtOp);
6296
6297 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
6298 SmallVector<int, 16> Indices;
6299 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
6300 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6301 Indices.push_back(2*i);
6302 Indices.push_back(2*i+1);
6303 }
6304
6305 int PairPos = 0, End = Ops.size() - 1;
6306 while (PairPos < End) {
6307 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6308 Ops[PairPos+1], Indices,
6309 Name));
6310 PairPos += 2;
6311 }
6312
6313 // If there's an odd number of 64-bit lookup table vectors, fill the high
6314 // 64 bits of the last 128-bit lookup table with zero.
6315 if (PairPos == End) {
6316 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6317 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6318 ZeroTbl, Indices, Name));
6319 }
6320
6321 Function *TblF;
6322 TblOps.push_back(IndexOp);
6323 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6324
6325 return CGF.EmitNeonCall(TblF, TblOps, Name);
6326}
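// Illustrative sketch (assumed example, not part of the checked-in source):
// a vtbl3 lookup reaches this helper with three 64-bit table registers
// t0..t2. The loop concatenates t0 and t1 into one 128-bit table with the
// shuffle built above, and the trailing odd-table case concatenates t2 with
// an all-zero vector, so the AArch64 tbl/tbx intrinsic receives two 128-bit
// tables followed by the index operand.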
6327
6328Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6329 unsigned Value;
6330 switch (BuiltinID) {
6331 default:
6332 return nullptr;
6333 case ARM::BI__builtin_arm_nop:
6334 Value = 0;
6335 break;
6336 case ARM::BI__builtin_arm_yield:
6337 case ARM::BI__yield:
6338 Value = 1;
6339 break;
6340 case ARM::BI__builtin_arm_wfe:
6341 case ARM::BI__wfe:
6342 Value = 2;
6343 break;
6344 case ARM::BI__builtin_arm_wfi:
6345 case ARM::BI__wfi:
6346 Value = 3;
6347 break;
6348 case ARM::BI__builtin_arm_sev:
6349 case ARM::BI__sev:
6350 Value = 4;
6351 break;
6352 case ARM::BI__builtin_arm_sevl:
6353 case ARM::BI__sevl:
6354 Value = 5;
6355 break;
6356 }
6357
6358 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6359 llvm::ConstantInt::get(Int32Ty, Value));
6360}
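// Illustrative example (not part of the checked-in source): __wfi() maps to
// Value = 3 above and is emitted as
//   call void @llvm.arm.hint(i32 3)
// which the backend lowers to a WFI instruction.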
6361
6362enum SpecialRegisterAccessKind {
6363 NormalRead,
6364 VolatileRead,
6365 Write,
6366};
6367
6368 // Generates the IR for the read/write special register builtin.
6369 // ValueType is the type of the value that is to be written or read, and
6370 // RegisterType is the type of the register being written to or read from.
6371static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6372 const CallExpr *E,
6373 llvm::Type *RegisterType,
6374 llvm::Type *ValueType,
6375 SpecialRegisterAccessKind AccessKind,
6376 StringRef SysReg = "") {
6377 // Read and write register intrinsics only support 32- and 64-bit operations.
6378 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6379 && "Unsupported size for register.");
6380
6381 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6382 CodeGen::CodeGenModule &CGM = CGF.CGM;
6383 LLVMContext &Context = CGM.getLLVMContext();
6384
6385 if (SysReg.empty()) {
6386 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6387 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6388 }
6389
6390 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6391 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6392 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
6393
6394 llvm::Type *Types[] = { RegisterType };
6395
6396 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
6397 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
6398 && "Can't fit 64-bit value in 32-bit register");
6399
6400 if (AccessKind != Write) {
6401 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
6402 llvm::Function *F = CGM.getIntrinsic(
6403 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
6404 : llvm::Intrinsic::read_register,
6405 Types);
6406 llvm::Value *Call = Builder.CreateCall(F, Metadata);
6407
6408 if (MixedTypes)
6409 // Read into 64 bit register and then truncate result to 32 bit.
6410 return Builder.CreateTrunc(Call, ValueType);
6411
6412 if (ValueType->isPointerTy())
6413 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
6414 return Builder.CreateIntToPtr(Call, ValueType);
6415
6416 return Call;
6417 }
6418
6419 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
6420 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
6421 if (MixedTypes) {
6422 // Extend 32 bit write value to 64 bit to pass to write.
6423 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
6424 return Builder.CreateCall(F, { Metadata, ArgValue });
6425 }
6426
6427 if (ValueType->isPointerTy()) {
6428 // Have a VoidPtrTy ArgValue but need an i32/i64 to pass to the write intrinsic.
6429 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
6430 return Builder.CreateCall(F, { Metadata, ArgValue });
6431 }
6432
6433 return Builder.CreateCall(F, { Metadata, ArgValue });
6434}
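// Illustrative sketch (assumed register name, not part of the checked-in
// source): a read such as __builtin_arm_rsr("cpsr") flows through this helper
// roughly as
//   !0 = !{!"cpsr"}
//   %val = call i32 @llvm.read_volatile_register.i32(metadata !0)
// while the corresponding wsr form calls @llvm.write_register.i32, with the
// value operand zero-extended or converted via ptrtoint as needed.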
6435
6436/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
6437/// argument that specifies the vector type.
6438static bool HasExtraNeonArgument(unsigned BuiltinID) {
6439 switch (BuiltinID) {
6440 default: break;
6441 case NEON::BI__builtin_neon_vget_lane_i8:
6442 case NEON::BI__builtin_neon_vget_lane_i16:
6443 case NEON::BI__builtin_neon_vget_lane_bf16:
6444 case NEON::BI__builtin_neon_vget_lane_i32:
6445 case NEON::BI__builtin_neon_vget_lane_i64:
6446 case NEON::BI__builtin_neon_vget_lane_f32:
6447 case NEON::BI__builtin_neon_vgetq_lane_i8:
6448 case NEON::BI__builtin_neon_vgetq_lane_i16:
6449 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6450 case NEON::BI__builtin_neon_vgetq_lane_i32:
6451 case NEON::BI__builtin_neon_vgetq_lane_i64:
6452 case NEON::BI__builtin_neon_vgetq_lane_f32:
6453 case NEON::BI__builtin_neon_vduph_lane_bf16:
6454 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6455 case NEON::BI__builtin_neon_vset_lane_i8:
6456 case NEON::BI__builtin_neon_vset_lane_i16:
6457 case NEON::BI__builtin_neon_vset_lane_bf16:
6458 case NEON::BI__builtin_neon_vset_lane_i32:
6459 case NEON::BI__builtin_neon_vset_lane_i64:
6460 case NEON::BI__builtin_neon_vset_lane_f32:
6461 case NEON::BI__builtin_neon_vsetq_lane_i8:
6462 case NEON::BI__builtin_neon_vsetq_lane_i16:
6463 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6464 case NEON::BI__builtin_neon_vsetq_lane_i32:
6465 case NEON::BI__builtin_neon_vsetq_lane_i64:
6466 case NEON::BI__builtin_neon_vsetq_lane_f32:
6467 case NEON::BI__builtin_neon_vsha1h_u32:
6468 case NEON::BI__builtin_neon_vsha1cq_u32:
6469 case NEON::BI__builtin_neon_vsha1pq_u32:
6470 case NEON::BI__builtin_neon_vsha1mq_u32:
6471 case NEON::BI__builtin_neon_vcvth_bf16_f32:
6472 case clang::ARM::BI_MoveToCoprocessor:
6473 case clang::ARM::BI_MoveToCoprocessor2:
6474 return false;
6475 }
6476 return true;
6477}
6478
6479Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6480 const CallExpr *E,
6481 ReturnValueSlot ReturnValue,
6482 llvm::Triple::ArchType Arch) {
6483 if (auto Hint = GetValueForARMHint(BuiltinID))
6484 return Hint;
6485
6486 if (BuiltinID == ARM::BI__emit) {
6487 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6488 llvm::FunctionType *FTy =
6489 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6490
6491 Expr::EvalResult Result;
6492 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6493 llvm_unreachable("Sema will ensure that the parameter is constant")::llvm::llvm_unreachable_internal("Sema will ensure that the parameter is constant"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 6493)
;
6494
6495 llvm::APSInt Value = Result.Val.getInt();
6496 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
6497
6498 llvm::InlineAsm *Emit =
6499 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6500 /*hasSideEffects=*/true)
6501 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6502 /*hasSideEffects=*/true);
6503
6504 return Builder.CreateCall(Emit);
6505 }
6506
6507 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6508 Value *Option = EmitScalarExpr(E->getArg(0));
6509 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6510 }
6511
6512 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6513 Value *Address = EmitScalarExpr(E->getArg(0));
6514 Value *RW = EmitScalarExpr(E->getArg(1));
6515 Value *IsData = EmitScalarExpr(E->getArg(2));
6516
6517 // Locality is not supported on the ARM target.
6518 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6519
6520 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6521 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6522 }
6523
6524 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6525 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6526 return Builder.CreateCall(
6527 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6528 }
6529
6530 if (BuiltinID == ARM::BI__builtin_arm_cls) {
6531 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6532 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
6533 }
6534 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
6535 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6536 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
6537 "cls");
6538 }
6539
6540 if (BuiltinID == ARM::BI__clear_cache) {
6541 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
6542 const FunctionDecl *FD = E->getDirectCallee();
6543 Value *Ops[2];
6544 for (unsigned i = 0; i < 2; i++)
6545 Ops[i] = EmitScalarExpr(E->getArg(i));
6546 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
6547 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
6548 StringRef Name = FD->getName();
6549 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
6550 }
6551
6552 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
6553 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
6554 Function *F;
6555
6556 switch (BuiltinID) {
6557 default: llvm_unreachable("unexpected builtin");
6558 case ARM::BI__builtin_arm_mcrr:
6559 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
6560 break;
6561 case ARM::BI__builtin_arm_mcrr2:
6562 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
6563 break;
6564 }
6565
6566 // The MCRR{2} instruction has 5 operands, but the
6567 // builtin has only 4, because Rt and Rt2
6568 // are represented as a single unsigned 64-bit
6569 // integer in the builtin's definition; internally
6570 // that value is split into two 32-bit
6571 // integers before the intrinsic is called.
6572
6573 Value *Coproc = EmitScalarExpr(E->getArg(0));
6574 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6575 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
6576 Value *CRm = EmitScalarExpr(E->getArg(3));
6577
6578 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6579 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
6580 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
6581 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
6582
6583 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
6584 }
6585
6586 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
6587 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
6588 Function *F;
6589
6590 switch (BuiltinID) {
6591 default: llvm_unreachable("unexpected builtin");
6592 case ARM::BI__builtin_arm_mrrc:
6593 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
6594 break;
6595 case ARM::BI__builtin_arm_mrrc2:
6596 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
6597 break;
6598 }
6599
6600 Value *Coproc = EmitScalarExpr(E->getArg(0));
6601 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6602 Value *CRm = EmitScalarExpr(E->getArg(2));
6603 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
6604
6605 // Returns an unsigned 64 bit integer, represented
6606 // as two 32 bit integers.
6607
6608 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
6609 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
6610 Rt = Builder.CreateZExt(Rt, Int64Ty);
6611 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
6612
6613 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
6614 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
6615 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
6616
6617 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
6618 }
6619
6620 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
6621 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
6622 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
6623 getContext().getTypeSize(E->getType()) == 64) ||
6624 BuiltinID == ARM::BI__ldrexd) {
6625 Function *F;
6626
6627 switch (BuiltinID) {
6628 default: llvm_unreachable("unexpected builtin");
6629 case ARM::BI__builtin_arm_ldaex:
6630 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
6631 break;
6632 case ARM::BI__builtin_arm_ldrexd:
6633 case ARM::BI__builtin_arm_ldrex:
6634 case ARM::BI__ldrexd:
6635 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
6636 break;
6637 }
6638
6639 Value *LdPtr = EmitScalarExpr(E->getArg(0));
6640 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
6641 "ldrexd");
6642
6643 Value *Val0 = Builder.CreateExtractValue(Val, 1);
6644 Value *Val1 = Builder.CreateExtractValue(Val, 0);
6645 Val0 = Builder.CreateZExt(Val0, Int64Ty);
6646 Val1 = Builder.CreateZExt(Val1, Int64Ty);
6647
6648 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
6649 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
6650 Val = Builder.CreateOr(Val, Val1);
6651 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
6652 }
6653
6654 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
6655 BuiltinID == ARM::BI__builtin_arm_ldaex) {
6656 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
6657
6658 QualType Ty = E->getType();
6659 llvm::Type *RealResTy = ConvertType(Ty);
6660 llvm::Type *PtrTy = llvm::IntegerType::get(
6661 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
6662 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
6663
6664 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
6665 ? Intrinsic::arm_ldaex
6666 : Intrinsic::arm_ldrex,
6667 PtrTy);
6668 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
6669
6670 if (RealResTy->isPointerTy())
6671 return Builder.CreateIntToPtr(Val, RealResTy);
6672 else {
6673 llvm::Type *IntResTy = llvm::IntegerType::get(
6674 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
6675 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
6676 return Builder.CreateBitCast(Val, RealResTy);
6677 }
6678 }
6679
6680 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
6681 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
6682 BuiltinID == ARM::BI__builtin_arm_strex) &&
6683 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
6684 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6685 ? Intrinsic::arm_stlexd
6686 : Intrinsic::arm_strexd);
6687 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
6688
6689 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
6690 Value *Val = EmitScalarExpr(E->getArg(0));
6691 Builder.CreateStore(Val, Tmp);
6692
6693 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
6694 Val = Builder.CreateLoad(LdPtr);
6695
6696 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
6697 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
6698 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
6699 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
6700 }
6701
6702 if (BuiltinID == ARM::BI__builtin_arm_strex ||
6703 BuiltinID == ARM::BI__builtin_arm_stlex) {
6704 Value *StoreVal = EmitScalarExpr(E->getArg(0));
6705 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
6706
6707 QualType Ty = E->getArg(0)->getType();
6708 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
6709 getContext().getTypeSize(Ty));
6710 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
6711
6712 if (StoreVal->getType()->isPointerTy())
6713 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
6714 else {
6715 llvm::Type *IntTy = llvm::IntegerType::get(
6716 getLLVMContext(),
6717 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
6718 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
6719 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
6720 }
6721
6722 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6723 ? Intrinsic::arm_stlex
6724 : Intrinsic::arm_strex,
6725 StoreAddr->getType());
6726 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
6727 }
6728
6729 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
6730 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
6731 return Builder.CreateCall(F);
6732 }
6733
6734 // CRC32
6735 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
6736 switch (BuiltinID) {
6737 case ARM::BI__builtin_arm_crc32b:
6738 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
6739 case ARM::BI__builtin_arm_crc32cb:
6740 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
6741 case ARM::BI__builtin_arm_crc32h:
6742 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
6743 case ARM::BI__builtin_arm_crc32ch:
6744 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
6745 case ARM::BI__builtin_arm_crc32w:
6746 case ARM::BI__builtin_arm_crc32d:
6747 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
6748 case ARM::BI__builtin_arm_crc32cw:
6749 case ARM::BI__builtin_arm_crc32cd:
6750 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
6751 }
6752
6753 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
6754 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6755 Value *Arg1 = EmitScalarExpr(E->getArg(1));
6756
6757 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
6758 // intrinsics, hence we need different codegen for these cases.
6759 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
6760 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
6761 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6762 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
6763 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
6764 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
6765
6766 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6767 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
6768 return Builder.CreateCall(F, {Res, Arg1b});
6769 } else {
6770 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
6771
6772 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6773 return Builder.CreateCall(F, {Arg0, Arg1});
6774 }
6775 }
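// Illustrative note (not part of the checked-in source): for crc32d the
// 64-bit data operand is split and chained through two 32-bit CRC steps,
// roughly
//   %lo  = trunc i64 %data to i32
//   %hi  = trunc i64 (lshr i64 %data, 32) to i32
//   %t   = call i32 @llvm.arm.crc32w(i32 %acc, i32 %lo)
//   %res = call i32 @llvm.arm.crc32w(i32 %t, i32 %hi)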
6776
6777 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6778 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6779 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6780 BuiltinID == ARM::BI__builtin_arm_wsr ||
6781 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6782 BuiltinID == ARM::BI__builtin_arm_wsrp) {
6783
6784 SpecialRegisterAccessKind AccessKind = Write;
6785 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6786 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6787 BuiltinID == ARM::BI__builtin_arm_rsrp)
6788 AccessKind = VolatileRead;
6789
6790 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
6791 BuiltinID == ARM::BI__builtin_arm_wsrp;
6792
6793 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6794 BuiltinID == ARM::BI__builtin_arm_wsr64;
6795
6796 llvm::Type *ValueType;
6797 llvm::Type *RegisterType;
6798 if (IsPointerBuiltin) {
6799 ValueType = VoidPtrTy;
6800 RegisterType = Int32Ty;
6801 } else if (Is64Bit) {
6802 ValueType = RegisterType = Int64Ty;
6803 } else {
6804 ValueType = RegisterType = Int32Ty;
6805 }
6806
6807 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
6808 AccessKind);
6809 }
6810
6811 // Deal with MVE builtins
6812 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6813 return Result;
6814 // Handle CDE builtins
6815 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6816 return Result;
6817
6818 // Find out if any arguments are required to be integer constant
6819 // expressions.
6820 unsigned ICEArguments = 0;
6821 ASTContext::GetBuiltinTypeError Error;
6822 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6823 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6824
6825 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6826 return Builder.getInt32(addr.getAlignment().getQuantity());
6827 };
6828
6829 Address PtrOp0 = Address::invalid();
6830 Address PtrOp1 = Address::invalid();
6831 SmallVector<Value*, 4> Ops;
6832 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
6833 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
6834 for (unsigned i = 0, e = NumArgs; i != e; i++) {
6835 if (i == 0) {
6836 switch (BuiltinID) {
6837 case NEON::BI__builtin_neon_vld1_v:
6838 case NEON::BI__builtin_neon_vld1q_v:
6839 case NEON::BI__builtin_neon_vld1q_lane_v:
6840 case NEON::BI__builtin_neon_vld1_lane_v:
6841 case NEON::BI__builtin_neon_vld1_dup_v:
6842 case NEON::BI__builtin_neon_vld1q_dup_v:
6843 case NEON::BI__builtin_neon_vst1_v:
6844 case NEON::BI__builtin_neon_vst1q_v:
6845 case NEON::BI__builtin_neon_vst1q_lane_v:
6846 case NEON::BI__builtin_neon_vst1_lane_v:
6847 case NEON::BI__builtin_neon_vst2_v:
6848 case NEON::BI__builtin_neon_vst2q_v:
6849 case NEON::BI__builtin_neon_vst2_lane_v:
6850 case NEON::BI__builtin_neon_vst2q_lane_v:
6851 case NEON::BI__builtin_neon_vst3_v:
6852 case NEON::BI__builtin_neon_vst3q_v:
6853 case NEON::BI__builtin_neon_vst3_lane_v:
6854 case NEON::BI__builtin_neon_vst3q_lane_v:
6855 case NEON::BI__builtin_neon_vst4_v:
6856 case NEON::BI__builtin_neon_vst4q_v:
6857 case NEON::BI__builtin_neon_vst4_lane_v:
6858 case NEON::BI__builtin_neon_vst4q_lane_v:
6859 // Get the alignment for the argument in addition to the value;
6860 // we'll use it later.
6861 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
6862 Ops.push_back(PtrOp0.getPointer());
6863 continue;
6864 }
6865 }
6866 if (i == 1) {
6867 switch (BuiltinID) {
6868 case NEON::BI__builtin_neon_vld2_v:
6869 case NEON::BI__builtin_neon_vld2q_v:
6870 case NEON::BI__builtin_neon_vld3_v:
6871 case NEON::BI__builtin_neon_vld3q_v:
6872 case NEON::BI__builtin_neon_vld4_v:
6873 case NEON::BI__builtin_neon_vld4q_v:
6874 case NEON::BI__builtin_neon_vld2_lane_v:
6875 case NEON::BI__builtin_neon_vld2q_lane_v:
6876 case NEON::BI__builtin_neon_vld3_lane_v:
6877 case NEON::BI__builtin_neon_vld3q_lane_v:
6878 case NEON::BI__builtin_neon_vld4_lane_v:
6879 case NEON::BI__builtin_neon_vld4q_lane_v:
6880 case NEON::BI__builtin_neon_vld2_dup_v:
6881 case NEON::BI__builtin_neon_vld2q_dup_v:
6882 case NEON::BI__builtin_neon_vld3_dup_v:
6883 case NEON::BI__builtin_neon_vld3q_dup_v:
6884 case NEON::BI__builtin_neon_vld4_dup_v:
6885 case NEON::BI__builtin_neon_vld4q_dup_v:
6886 // Get the alignment for the argument in addition to the value;
6887 // we'll use it later.
6888 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
6889 Ops.push_back(PtrOp1.getPointer());
6890 continue;
6891 }
6892 }
6893
6894 if ((ICEArguments & (1 << i)) == 0) {
6895 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6896 } else {
6897 // If this is required to be a constant, constant fold it so that we know
6898 // that the generated intrinsic gets a ConstantInt.
6899 Ops.push_back(llvm::ConstantInt::get(
6900 getLLVMContext(),
6901 *E->getArg(i)->getIntegerConstantExpr(getContext())));
6902 }
6903 }
6904
6905 switch (BuiltinID) {
6906 default: break;
6907
6908 case NEON::BI__builtin_neon_vget_lane_i8:
6909 case NEON::BI__builtin_neon_vget_lane_i16:
6910 case NEON::BI__builtin_neon_vget_lane_i32:
6911 case NEON::BI__builtin_neon_vget_lane_i64:
6912 case NEON::BI__builtin_neon_vget_lane_bf16:
6913 case NEON::BI__builtin_neon_vget_lane_f32:
6914 case NEON::BI__builtin_neon_vgetq_lane_i8:
6915 case NEON::BI__builtin_neon_vgetq_lane_i16:
6916 case NEON::BI__builtin_neon_vgetq_lane_i32:
6917 case NEON::BI__builtin_neon_vgetq_lane_i64:
6918 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6919 case NEON::BI__builtin_neon_vgetq_lane_f32:
6920 case NEON::BI__builtin_neon_vduph_lane_bf16:
6921 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6922 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
6923
6924 case NEON::BI__builtin_neon_vrndns_f32: {
6925 Value *Arg = EmitScalarExpr(E->getArg(0));
6926 llvm::Type *Tys[] = {Arg->getType()};
6927 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
6928 return Builder.CreateCall(F, {Arg}, "vrndn"); }
6929
6930 case NEON::BI__builtin_neon_vset_lane_i8:
6931 case NEON::BI__builtin_neon_vset_lane_i16:
6932 case NEON::BI__builtin_neon_vset_lane_i32:
6933 case NEON::BI__builtin_neon_vset_lane_i64:
6934 case NEON::BI__builtin_neon_vset_lane_bf16:
6935 case NEON::BI__builtin_neon_vset_lane_f32:
6936 case NEON::BI__builtin_neon_vsetq_lane_i8:
6937 case NEON::BI__builtin_neon_vsetq_lane_i16:
6938 case NEON::BI__builtin_neon_vsetq_lane_i32:
6939 case NEON::BI__builtin_neon_vsetq_lane_i64:
6940 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6941 case NEON::BI__builtin_neon_vsetq_lane_f32:
6942 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
6943
6944 case NEON::BI__builtin_neon_vsha1h_u32:
6945 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
6946 "vsha1h");
6947 case NEON::BI__builtin_neon_vsha1cq_u32:
6948 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
6949 "vsha1h");
6950 case NEON::BI__builtin_neon_vsha1pq_u32:
6951 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
6952 "vsha1h");
6953 case NEON::BI__builtin_neon_vsha1mq_u32:
6954 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
6955 "vsha1h");
6956
6957 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
6958 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
6959 "vcvtbfp2bf");
6960 }
6961
6962 // The ARM _MoveToCoprocessor builtins put the input register value as
6963 // the first argument, but the LLVM intrinsic expects it as the third one.
6964 case ARM::BI_MoveToCoprocessor:
6965 case ARM::BI_MoveToCoprocessor2: {
6966 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
6967 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
6968 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
6969 Ops[3], Ops[4], Ops[5]});
6970 }
6971 case ARM::BI_BitScanForward:
6972 case ARM::BI_BitScanForward64:
6973 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
6974 case ARM::BI_BitScanReverse:
6975 case ARM::BI_BitScanReverse64:
6976 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
6977
6978 case ARM::BI_InterlockedAnd64:
6979 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
6980 case ARM::BI_InterlockedExchange64:
6981 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
6982 case ARM::BI_InterlockedExchangeAdd64:
6983 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
6984 case ARM::BI_InterlockedExchangeSub64:
6985 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
6986 case ARM::BI_InterlockedOr64:
6987 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
6988 case ARM::BI_InterlockedXor64:
6989 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
6990 case ARM::BI_InterlockedDecrement64:
6991 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
6992 case ARM::BI_InterlockedIncrement64:
6993 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
6994 case ARM::BI_InterlockedExchangeAdd8_acq:
6995 case ARM::BI_InterlockedExchangeAdd16_acq:
6996 case ARM::BI_InterlockedExchangeAdd_acq:
6997 case ARM::BI_InterlockedExchangeAdd64_acq:
6998 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
6999 case ARM::BI_InterlockedExchangeAdd8_rel:
7000 case ARM::BI_InterlockedExchangeAdd16_rel:
7001 case ARM::BI_InterlockedExchangeAdd_rel:
7002 case ARM::BI_InterlockedExchangeAdd64_rel:
7003 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
7004 case ARM::BI_InterlockedExchangeAdd8_nf:
7005 case ARM::BI_InterlockedExchangeAdd16_nf:
7006 case ARM::BI_InterlockedExchangeAdd_nf:
7007 case ARM::BI_InterlockedExchangeAdd64_nf:
7008 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
7009 case ARM::BI_InterlockedExchange8_acq:
7010 case ARM::BI_InterlockedExchange16_acq:
7011 case ARM::BI_InterlockedExchange_acq:
7012 case ARM::BI_InterlockedExchange64_acq:
7013 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
7014 case ARM::BI_InterlockedExchange8_rel:
7015 case ARM::BI_InterlockedExchange16_rel:
7016 case ARM::BI_InterlockedExchange_rel:
7017 case ARM::BI_InterlockedExchange64_rel:
7018 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
7019 case ARM::BI_InterlockedExchange8_nf:
7020 case ARM::BI_InterlockedExchange16_nf:
7021 case ARM::BI_InterlockedExchange_nf:
7022 case ARM::BI_InterlockedExchange64_nf:
7023 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
7024 case ARM::BI_InterlockedCompareExchange8_acq:
7025 case ARM::BI_InterlockedCompareExchange16_acq:
7026 case ARM::BI_InterlockedCompareExchange_acq:
7027 case ARM::BI_InterlockedCompareExchange64_acq:
7028 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
7029 case ARM::BI_InterlockedCompareExchange8_rel:
7030 case ARM::BI_InterlockedCompareExchange16_rel:
7031 case ARM::BI_InterlockedCompareExchange_rel:
7032 case ARM::BI_InterlockedCompareExchange64_rel:
7033 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
7034 case ARM::BI_InterlockedCompareExchange8_nf:
7035 case ARM::BI_InterlockedCompareExchange16_nf:
7036 case ARM::BI_InterlockedCompareExchange_nf:
7037 case ARM::BI_InterlockedCompareExchange64_nf:
7038 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
7039 case ARM::BI_InterlockedOr8_acq:
7040 case ARM::BI_InterlockedOr16_acq:
7041 case ARM::BI_InterlockedOr_acq:
7042 case ARM::BI_InterlockedOr64_acq:
7043 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
7044 case ARM::BI_InterlockedOr8_rel:
7045 case ARM::BI_InterlockedOr16_rel:
7046 case ARM::BI_InterlockedOr_rel:
7047 case ARM::BI_InterlockedOr64_rel:
7048 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
7049 case ARM::BI_InterlockedOr8_nf:
7050 case ARM::BI_InterlockedOr16_nf:
7051 case ARM::BI_InterlockedOr_nf:
7052 case ARM::BI_InterlockedOr64_nf:
7053 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
7054 case ARM::BI_InterlockedXor8_acq:
7055 case ARM::BI_InterlockedXor16_acq:
7056 case ARM::BI_InterlockedXor_acq:
7057 case ARM::BI_InterlockedXor64_acq:
7058 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
7059 case ARM::BI_InterlockedXor8_rel:
7060 case ARM::BI_InterlockedXor16_rel:
7061 case ARM::BI_InterlockedXor_rel:
7062 case ARM::BI_InterlockedXor64_rel:
7063 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
7064 case ARM::BI_InterlockedXor8_nf:
7065 case ARM::BI_InterlockedXor16_nf:
7066 case ARM::BI_InterlockedXor_nf:
7067 case ARM::BI_InterlockedXor64_nf:
7068 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
7069 case ARM::BI_InterlockedAnd8_acq:
7070 case ARM::BI_InterlockedAnd16_acq:
7071 case ARM::BI_InterlockedAnd_acq:
7072 case ARM::BI_InterlockedAnd64_acq:
7073 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
7074 case ARM::BI_InterlockedAnd8_rel:
7075 case ARM::BI_InterlockedAnd16_rel:
7076 case ARM::BI_InterlockedAnd_rel:
7077 case ARM::BI_InterlockedAnd64_rel:
7078 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
7079 case ARM::BI_InterlockedAnd8_nf:
7080 case ARM::BI_InterlockedAnd16_nf:
7081 case ARM::BI_InterlockedAnd_nf:
7082 case ARM::BI_InterlockedAnd64_nf:
7083 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
7084 case ARM::BI_InterlockedIncrement16_acq:
7085 case ARM::BI_InterlockedIncrement_acq:
7086 case ARM::BI_InterlockedIncrement64_acq:
7087 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
7088 case ARM::BI_InterlockedIncrement16_rel:
7089 case ARM::BI_InterlockedIncrement_rel:
7090 case ARM::BI_InterlockedIncrement64_rel:
7091 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
7092 case ARM::BI_InterlockedIncrement16_nf:
7093 case ARM::BI_InterlockedIncrement_nf:
7094 case ARM::BI_InterlockedIncrement64_nf:
7095 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
7096 case ARM::BI_InterlockedDecrement16_acq:
7097 case ARM::BI_InterlockedDecrement_acq:
7098 case ARM::BI_InterlockedDecrement64_acq:
7099 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
7100 case ARM::BI_InterlockedDecrement16_rel:
7101 case ARM::BI_InterlockedDecrement_rel:
7102 case ARM::BI_InterlockedDecrement64_rel:
7103 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
7104 case ARM::BI_InterlockedDecrement16_nf:
7105 case ARM::BI_InterlockedDecrement_nf:
7106 case ARM::BI_InterlockedDecrement64_nf:
7107 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
7108 }
7109
7110 // Get the last argument, which specifies the vector type.
7111 assert(HasExtraArg);
7112 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7113 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7114 if (!Result)
7115 return nullptr;
7116
7117 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7118 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7119 // Determine the overloaded type of this builtin.
7120 llvm::Type *Ty;
7121 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7122 Ty = FloatTy;
7123 else
7124 Ty = DoubleTy;
7125
7126 // Determine whether this is an unsigned conversion or not.
7127 bool usgn = Result->getZExtValue() == 1;
7128 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7129
7130 // Call the appropriate intrinsic.
7131 Function *F = CGM.getIntrinsic(Int, Ty);
7132 return Builder.CreateCall(F, Ops, "vcvtr");
7133 }
7134
7135 // Determine the type of this overloaded NEON intrinsic.
7136 NeonTypeFlags Type = Result->getZExtValue();
7137 bool usgn = Type.isUnsigned();
7138 bool rightShift = false;
7139
7140 llvm::FixedVectorType *VTy =
7141 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7142 getTarget().hasBFloat16Type());
7143 llvm::Type *Ty = VTy;
7144 if (!Ty)
7145 return nullptr;
7146
7147 // Many NEON builtins have identical semantics and uses in ARM and
7148 // AArch64. Emit these in a single function.
7149 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7150 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7151 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7152 if (Builtin)
7153 return EmitCommonNeonBuiltinExpr(
7154 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7155 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7156
7157 unsigned Int;
7158 switch (BuiltinID) {
7159 default: return nullptr;
7160 case NEON::BI__builtin_neon_vld1q_lane_v:
7161 // Handle 64-bit integer elements as a special case. Use shuffles of
7162 // one-element vectors to avoid poor code for i64 in the backend.
7163 if (VTy->getElementType()->isIntegerTy(64)) {
7164 // Extract the other lane.
7165 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7166 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7167 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7168 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7169 // Load the value as a one-element vector.
7170 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7171 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7172 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7173 Value *Align = getAlignmentValue32(PtrOp0);
7174 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7175 // Combine them.
7176 int Indices[] = {1 - Lane, Lane};
7177 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7178 }
7179 LLVM_FALLTHROUGH;
7180 case NEON::BI__builtin_neon_vld1_lane_v: {
7181 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7182 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7183 Value *Ld = Builder.CreateLoad(PtrOp0);
7184 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
7185 }
7186 case NEON::BI__builtin_neon_vqrshrn_n_v:
7187 Int =
7188 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
7189 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
7190 1, true);
7191 case NEON::BI__builtin_neon_vqrshrun_n_v:
7192 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
7193 Ops, "vqrshrun_n", 1, true);
7194 case NEON::BI__builtin_neon_vqshrn_n_v:
7195 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
7196 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
7197 1, true);
7198 case NEON::BI__builtin_neon_vqshrun_n_v:
7199 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
7200 Ops, "vqshrun_n", 1, true);
7201 case NEON::BI__builtin_neon_vrecpe_v:
7202 case NEON::BI__builtin_neon_vrecpeq_v:
7203 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
7204 Ops, "vrecpe");
7205 case NEON::BI__builtin_neon_vrshrn_n_v:
7206 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
7207 Ops, "vrshrn_n", 1, true);
7208 case NEON::BI__builtin_neon_vrsra_n_v:
7209 case NEON::BI__builtin_neon_vrsraq_n_v:
7210 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7211 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7212 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
7213 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
7214 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
7215 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
7216 case NEON::BI__builtin_neon_vsri_n_v:
7217 case NEON::BI__builtin_neon_vsriq_n_v:
7218 rightShift = true;
7219 LLVM_FALLTHROUGH;
7220 case NEON::BI__builtin_neon_vsli_n_v:
7221 case NEON::BI__builtin_neon_vsliq_n_v:
7222 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
7223 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
7224 Ops, "vsli_n");
7225 case NEON::BI__builtin_neon_vsra_n_v:
7226 case NEON::BI__builtin_neon_vsraq_n_v:
7227 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7228 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
7229 return Builder.CreateAdd(Ops[0], Ops[1]);
7230 case NEON::BI__builtin_neon_vst1q_lane_v:
7231 // Handle 64-bit integer elements as a special case. Use a shuffle to get
7232 // a one-element vector and avoid poor code for i64 in the backend.
7233 if (VTy->getElementType()->isIntegerTy(64)) {
7234 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7235 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
7236 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7237 Ops[2] = getAlignmentValue32(PtrOp0);
7238 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
7239 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
7240 Tys), Ops);
7241 }
7242 LLVM_FALLTHROUGH;
7243 case NEON::BI__builtin_neon_vst1_lane_v: {
7244 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7245 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
7246 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7247 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
7248 return St;
7249 }
7250 case NEON::BI__builtin_neon_vtbl1_v:
7251 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7252 Ops, "vtbl1");
7253 case NEON::BI__builtin_neon_vtbl2_v:
7254 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7255 Ops, "vtbl2");
7256 case NEON::BI__builtin_neon_vtbl3_v:
7257 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7258 Ops, "vtbl3");
7259 case NEON::BI__builtin_neon_vtbl4_v:
7260 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7261 Ops, "vtbl4");
7262 case NEON::BI__builtin_neon_vtbx1_v:
7263 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7264 Ops, "vtbx1");
7265 case NEON::BI__builtin_neon_vtbx2_v:
7266 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7267 Ops, "vtbx2");
7268 case NEON::BI__builtin_neon_vtbx3_v:
7269 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7270 Ops, "vtbx3");
7271 case NEON::BI__builtin_neon_vtbx4_v:
7272 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7273 Ops, "vtbx4");
7274 }
7275}
7276
7277template<typename Integer>
7278static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
7279 return E->getIntegerConstantExpr(Context)->getExtValue();
7280}
7281
7282static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
7283 llvm::Type *T, bool Unsigned) {
7284 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
7285 // which finds it convenient to specify signed/unsigned as a boolean flag.
7286 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
7287}
7288
7289static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
7290 uint32_t Shift, bool Unsigned) {
7291 // MVE helper function for integer shift right. This must handle signed vs
7292 // unsigned, and also deal specially with the case where the shift count is
7293 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
7294 // undefined behavior, but in MVE it's legal, so we must convert it to code
7295 // that is not undefined in IR.
7296 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
7297 ->getElementType()
7298 ->getPrimitiveSizeInBits();
7299 if (Shift == LaneBits) {
7300 // An unsigned shift of the full lane size always generates zero, so we can
7301 // simply emit a zero vector. A signed shift of the full lane size does the
7302 // same thing as shifting by one bit fewer.
7303 if (Unsigned)
7304 return llvm::Constant::getNullValue(V->getType());
7305 else
7306 --Shift;
7307 }
7308 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
7309}
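// Illustrative example of the logic above: for a <4 x i16> operand, a signed
// shift right by 16 is emitted as an ashr by 15, while an unsigned shift
// right by 16 folds to zeroinitializer; an in-range count such as 3 is
// emitted directly as lshr/ashr by 3, so the generated IR never contains a
// full-lane-width shift that would be undefined at the LLVM level.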
7310
7311static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
7312 // MVE-specific helper function for a vector splat, which infers the element
7313 // count of the output vector by knowing that MVE vectors are all 128 bits
7314 // wide.
7315 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
7316 return Builder.CreateVectorSplat(Elements, V);
7317}
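// Illustrative example: splatting an i16 scalar produces 128 / 16 = 8
// elements (an MVE v8i16 register) and an i32 scalar produces 4 elements,
// so callers never pass the element count explicitly.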
7318
7319static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
7320 CodeGenFunction *CGF,
7321 llvm::Value *V,
7322 llvm::Type *DestType) {
7323 // Convert one MVE vector type into another by reinterpreting its in-register
7324 // format.
7325 //
7326 // Little-endian, this is identical to a bitcast (which reinterprets the
7327 // memory format). But big-endian, they're not necessarily the same, because
7328 // the register and memory formats map to each other differently depending on
7329 // the lane size.
7330 //
7331 // We generate a bitcast whenever we can (if we're little-endian, or if the
7332 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
7333 // that performs the different kind of reinterpretation.
7334 if (CGF->getTarget().isBigEndian() &&
7335 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
7336 return Builder.CreateCall(
7337 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
7338 {DestType, V->getType()}),
7339 V);
7340 } else {
7341 return Builder.CreateBitCast(V, DestType);
7342 }
7343}
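// Illustrative example: reinterpreting v8i16 as v4i32 on a little-endian
// target (or any case where the lane sizes already match) is emitted as a
// plain bitcast; on a big-endian target with differing lane sizes the same
// conversion instead goes through the arm.mve.vreinterpretq intrinsic so the
// backend can model the register-format reinterpretation.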
7344
7345static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
7346 // Make a shufflevector that extracts every other element of a vector (evens
7347 // or odds, as desired).
7348 SmallVector<int, 16> Indices;
7349 unsigned InputElements =
7350 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
7351 for (unsigned i = 0; i < InputElements; i += 2)
7352 Indices.push_back(i + Odd);
7353 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7354 Indices);
7355}
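// Illustrative example: for an 8-element input, VectorUnzip builds the
// shuffle mask <0,2,4,6> when Odd is false and <1,3,5,7> when Odd is true,
// keeping either the even or the odd lanes.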
7356
7357static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
7358 llvm::Value *V1) {
7359 // Make a shufflevector that interleaves two vectors element by element.
7359  assert(V0->getType() == V1->getType() &&
7360         "Can't zip different vector types");
7361 SmallVector<int, 16> Indices;
7362 unsigned InputElements =
7363 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
7364 for (unsigned i = 0; i < InputElements; i++) {
7365 Indices.push_back(i);
7366 Indices.push_back(i + InputElements);
7367 }
7368 return Builder.CreateShuffleVector(V0, V1, Indices);
7369}
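// Illustrative example: zipping two 4-element vectors V0 and V1 uses the
// shuffle mask <0,4,1,5,2,6,3,7>, producing the interleaving
// {V0[0], V1[0], V0[1], V1[1], V0[2], V1[2], V0[3], V1[3]}.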
7370
7371template<unsigned HighBit, unsigned OtherBits>
7372static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
7373 // MVE-specific helper function to make a vector splat of a constant such as
7374 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
7375 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
7376 unsigned LaneBits = T->getPrimitiveSizeInBits();
7377 uint32_t Value = HighBit << (LaneBits - 1);
7378 if (OtherBits)
7379 Value |= (1UL << (LaneBits - 1)) - 1;
7380 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
7381 return ARMMVEVectorSplat(Builder, Lane);
7382}
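// Illustrative example: with 32-bit lanes, ARMMVEConstantSplat<1, 0> splats
// 0x80000000 (INT_MIN), ARMMVEConstantSplat<0, 1> splats 0x7fffffff
// (INT_MAX), and ARMMVEConstantSplat<1, 1> splats 0xffffffff (UINT_MAX).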
7383
7384static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
7385 llvm::Value *V,
7386 unsigned ReverseWidth) {
7387 // MVE-specific helper function which reverses the elements of a
7388 // vector within every (ReverseWidth)-bit collection of lanes.
7389 SmallVector<int, 16> Indices;
7390 unsigned LaneSize = V->getType()->getScalarSizeInBits();
7391 unsigned Elements = 128 / LaneSize;
7392 unsigned Mask = ReverseWidth / LaneSize - 1;
7393 for (unsigned i = 0; i < Elements; i++)
7394 Indices.push_back(i ^ Mask);
7395 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7396 Indices);
7397}
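// Illustrative example: for 8-bit lanes with ReverseWidth == 32, Mask is 3,
// so the 16-lane shuffle mask starts <3,2,1,0, 7,6,5,4, ...>, reversing the
// byte order inside every 32-bit group while leaving the groups in place.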
7398
7399Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
7400 const CallExpr *E,
7401 ReturnValueSlot ReturnValue,
7402 llvm::Triple::ArchType Arch) {
7403 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
7404 Intrinsic::ID IRIntr;
7405 unsigned NumVectors;
7406
7407 // Code autogenerated by Tablegen will handle all the simple builtins.
7408 switch (BuiltinID) {
7409 #include "clang/Basic/arm_mve_builtin_cg.inc"
7410
7411 // If we didn't match an MVE builtin id at all, go back to the
7412 // main EmitARMBuiltinExpr.
7413 default:
7414 return nullptr;
7415 }
7416
7417 // Anything that breaks from that switch is an MVE builtin that
7418 // needs handwritten code to generate.
7419
7420 switch (CustomCodeGenType) {
7421
7422 case CustomCodeGen::VLD24: {
7423 llvm::SmallVector<Value *, 4> Ops;
7424 llvm::SmallVector<llvm::Type *, 4> Tys;
7425
7426 auto MvecCType = E->getType();
7427 auto MvecLType = ConvertType(MvecCType);
7428    assert(MvecLType->isStructTy() &&
7429           "Return type for vld[24]q should be a struct");
7430    assert(MvecLType->getStructNumElements() == 1 &&
7431           "Return-type struct for vld[24]q should have one element");
7432 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7433    assert(MvecLTypeInner->isArrayTy() &&
7434           "Return-type struct for vld[24]q should contain an array");
7435    assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7436           "Array member of return-type struct vld[24]q has wrong length");
7437 auto VecLType = MvecLTypeInner->getArrayElementType();
7438
7439 Tys.push_back(VecLType);
7440
7441 auto Addr = E->getArg(0);
7442 Ops.push_back(EmitScalarExpr(Addr));
7443 Tys.push_back(ConvertType(Addr->getType()));
7444
7445 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7446 Value *LoadResult = Builder.CreateCall(F, Ops);
7447 Value *MvecOut = UndefValue::get(MvecLType);
7448 for (unsigned i = 0; i < NumVectors; ++i) {
7449 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
7450 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
7451 }
7452
7453 if (ReturnValue.isNull())
7454 return MvecOut;
7455 else
7456 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
7457 }
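    // Illustrative sketch of the VLD24 path above: for a two-vector load such
    // as vld2q, the IR intrinsic returns an aggregate of NumVectors (here 2)
    // vectors; each one is pulled out with extractvalue and re-inserted at
    // index {0, i} of the single-member struct type the builtin returns,
    // which is then either returned directly or stored into the caller's slot.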
7458
7459 case CustomCodeGen::VST24: {
7460 llvm::SmallVector<Value *, 4> Ops;
7461 llvm::SmallVector<llvm::Type *, 4> Tys;
7462
7463 auto Addr = E->getArg(0);
7464 Ops.push_back(EmitScalarExpr(Addr));
7465 Tys.push_back(ConvertType(Addr->getType()));
7466
7467 auto MvecCType = E->getArg(1)->getType();
7468 auto MvecLType = ConvertType(MvecCType);
7469    assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
7470    assert(MvecLType->getStructNumElements() == 1 &&
7471           "Data-type struct for vst2q should have one element");
7472 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7473    assert(MvecLTypeInner->isArrayTy() &&
7474           "Data-type struct for vst2q should contain an array");
7475    assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7476           "Array member of return-type struct vld[24]q has wrong length");
7477 auto VecLType = MvecLTypeInner->getArrayElementType();
7478
7479 Tys.push_back(VecLType);
7480
7481 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
7482 EmitAggExpr(E->getArg(1), MvecSlot);
7483 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
7484 for (unsigned i = 0; i < NumVectors; i++)
7485 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
7486
7487 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7488 Value *ToReturn = nullptr;
7489 for (unsigned i = 0; i < NumVectors; i++) {
7490 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
7491 ToReturn = Builder.CreateCall(F, Ops);
7492 Ops.pop_back();
7493 }
7494 return ToReturn;
7495 }
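    // Illustrative sketch of the VST24 path above: the data struct is spilled
    // to a temporary, each of the NumVectors member vectors is extracted with
    // extractvalue at {0, i}, and the store intrinsic is then called
    // NumVectors times with a trailing stage index 0..NumVectors-1 appended
    // to the operand list.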
7496 }
7497 llvm_unreachable("unknown custom codegen type.")::llvm::llvm_unreachable_internal("unknown custom codegen type."
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7497)
;
7498}
7499
7500Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
7501 const CallExpr *E,
7502 ReturnValueSlot ReturnValue,
7503 llvm::Triple::ArchType Arch) {
7504 switch (BuiltinID) {
7505 default:
7506 return nullptr;
7507#include "clang/Basic/arm_cde_builtin_cg.inc"
7508 }
7509}
7510
7511static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
7512 const CallExpr *E,
7513 SmallVectorImpl<Value *> &Ops,
7514 llvm::Triple::ArchType Arch) {
7515 unsigned int Int = 0;
7516 const char *s = nullptr;
7517
7518 switch (BuiltinID) {
7519 default:
7520 return nullptr;
7521 case NEON::BI__builtin_neon_vtbl1_v:
7522 case NEON::BI__builtin_neon_vqtbl1_v:
7523 case NEON::BI__builtin_neon_vqtbl1q_v:
7524 case NEON::BI__builtin_neon_vtbl2_v:
7525 case NEON::BI__builtin_neon_vqtbl2_v:
7526 case NEON::BI__builtin_neon_vqtbl2q_v:
7527 case NEON::BI__builtin_neon_vtbl3_v:
7528 case NEON::BI__builtin_neon_vqtbl3_v:
7529 case NEON::BI__builtin_neon_vqtbl3q_v:
7530 case NEON::BI__builtin_neon_vtbl4_v:
7531 case NEON::BI__builtin_neon_vqtbl4_v:
7532 case NEON::BI__builtin_neon_vqtbl4q_v:
7533 break;
7534 case NEON::BI__builtin_neon_vtbx1_v:
7535 case NEON::BI__builtin_neon_vqtbx1_v:
7536 case NEON::BI__builtin_neon_vqtbx1q_v:
7537 case NEON::BI__builtin_neon_vtbx2_v:
7538 case NEON::BI__builtin_neon_vqtbx2_v:
7539 case NEON::BI__builtin_neon_vqtbx2q_v:
7540 case NEON::BI__builtin_neon_vtbx3_v:
7541 case NEON::BI__builtin_neon_vqtbx3_v:
7542 case NEON::BI__builtin_neon_vqtbx3q_v:
7543 case NEON::BI__builtin_neon_vtbx4_v:
7544 case NEON::BI__builtin_neon_vqtbx4_v:
7545 case NEON::BI__builtin_neon_vqtbx4q_v:
7546 break;
7547 }
7548
7549  assert(E->getNumArgs() >= 3);
7550
7551 // Get the last argument, which specifies the vector type.
7552 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7553 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
7554 if (!Result)
7555 return nullptr;
7556
7557 // Determine the type of this overloaded NEON intrinsic.
7558 NeonTypeFlags Type = Result->getZExtValue();
7559 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
7560 if (!Ty)
7561 return nullptr;
7562
7563 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7564
7565  // AArch64 scalar builtins are not overloaded; they do not have an extra
7566  // argument that specifies the vector type, so we need to handle each case.
7567 switch (BuiltinID) {
7568 case NEON::BI__builtin_neon_vtbl1_v: {
7569 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
7570 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
7571 "vtbl1");
7572 }
7573 case NEON::BI__builtin_neon_vtbl2_v: {
7574 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
7575 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
7576 "vtbl1");
7577 }
7578 case NEON::BI__builtin_neon_vtbl3_v: {
7579 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
7580 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
7581 "vtbl2");
7582 }
7583 case NEON::BI__builtin_neon_vtbl4_v: {
7584 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
7585 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
7586 "vtbl2");
7587 }
7588 case NEON::BI__builtin_neon_vtbx1_v: {
7589 Value *TblRes =
7590 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
7591 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
7592
7593 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
7594 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
7595 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7596
7597 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7598 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7599 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7600 }
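    // Illustrative note on the vtbx1 expansion above: lanes whose index is
    // >= 8 (out of range for the 8-byte table) must keep the corresponding
    // element of the passthru operand Ops[0], so the result is assembled by
    // sign-extending the `icmp uge 8` mask and combining the passthru and the
    // tbl1 result with and/not/or selects.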
7601 case NEON::BI__builtin_neon_vtbx2_v: {
7602 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
7603 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
7604 "vtbx1");
7605 }
7606 case NEON::BI__builtin_neon_vtbx3_v: {
7607 Value *TblRes =
7608 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
7609 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
7610
7611 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
7612 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
7613 TwentyFourV);
7614 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7615
7616 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7617 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7618 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7619 }
7620 case NEON::BI__builtin_neon_vtbx4_v: {
7621 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
7622 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
7623 "vtbx2");
7624 }
7625 case NEON::BI__builtin_neon_vqtbl1_v:
7626 case NEON::BI__builtin_neon_vqtbl1q_v:
7627 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
7628 case NEON::BI__builtin_neon_vqtbl2_v:
7629 case NEON::BI__builtin_neon_vqtbl2q_v: {
7630 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
7631 case NEON::BI__builtin_neon_vqtbl3_v:
7632 case NEON::BI__builtin_neon_vqtbl3q_v:
7633 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
7634 case NEON::BI__builtin_neon_vqtbl4_v:
7635 case NEON::BI__builtin_neon_vqtbl4q_v:
7636 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
7637 case NEON::BI__builtin_neon_vqtbx1_v:
7638 case NEON::BI__builtin_neon_vqtbx1q_v:
7639 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
7640 case NEON::BI__builtin_neon_vqtbx2_v:
7641 case NEON::BI__builtin_neon_vqtbx2q_v:
7642 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
7643 case NEON::BI__builtin_neon_vqtbx3_v:
7644 case NEON::BI__builtin_neon_vqtbx3q_v:
7645 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
7646 case NEON::BI__builtin_neon_vqtbx4_v:
7647 case NEON::BI__builtin_neon_vqtbx4q_v:
7648 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
7649 }
7650 }
7651
7652 if (!Int)
7653 return nullptr;
7654
7655 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
7656 return CGF.EmitNeonCall(F, Ops, s);
7657}
7658
7659Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
7660 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
7661 Op = Builder.CreateBitCast(Op, Int16Ty);
7662 Value *V = UndefValue::get(VTy);
7663 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7664 Op = Builder.CreateInsertElement(V, Op, CI);
7665 return Op;
7666}
7667
7668/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
7669/// access builtin. Only required if it can't be inferred from the base pointer
7670/// operand.
7671llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
7672 switch (TypeFlags.getMemEltType()) {
7673 case SVETypeFlags::MemEltTyDefault:
7674 return getEltType(TypeFlags);
7675 case SVETypeFlags::MemEltTyInt8:
7676 return Builder.getInt8Ty();
7677 case SVETypeFlags::MemEltTyInt16:
7678 return Builder.getInt16Ty();
7679 case SVETypeFlags::MemEltTyInt32:
7680 return Builder.getInt32Ty();
7681 case SVETypeFlags::MemEltTyInt64:
7682 return Builder.getInt64Ty();
7683 }
7684 llvm_unreachable("Unknown MemEltType")::llvm::llvm_unreachable_internal("Unknown MemEltType", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7684)
;
7685}
7686
7687llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
7688 switch (TypeFlags.getEltType()) {
7689 default:
7690 llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7690)
;
7691
7692 case SVETypeFlags::EltTyInt8:
7693 return Builder.getInt8Ty();
7694 case SVETypeFlags::EltTyInt16:
7695 return Builder.getInt16Ty();
7696 case SVETypeFlags::EltTyInt32:
7697 return Builder.getInt32Ty();
7698 case SVETypeFlags::EltTyInt64:
7699 return Builder.getInt64Ty();
7700
7701 case SVETypeFlags::EltTyFloat16:
7702 return Builder.getHalfTy();
7703 case SVETypeFlags::EltTyFloat32:
7704 return Builder.getFloatTy();
7705 case SVETypeFlags::EltTyFloat64:
7706 return Builder.getDoubleTy();
7707
7708 case SVETypeFlags::EltTyBFloat16:
7709 return Builder.getBFloatTy();
7710
7711 case SVETypeFlags::EltTyBool8:
7712 case SVETypeFlags::EltTyBool16:
7713 case SVETypeFlags::EltTyBool32:
7714 case SVETypeFlags::EltTyBool64:
7715 return Builder.getInt1Ty();
7716 }
7717}
7718
7719// Return the llvm predicate vector type corresponding to the specified element
7720// TypeFlags.
7721llvm::ScalableVectorType *
7722CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
7723 switch (TypeFlags.getEltType()) {
7724  default: llvm_unreachable("Unhandled SVETypeFlag!");
7725
7726 case SVETypeFlags::EltTyInt8:
7727 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7728 case SVETypeFlags::EltTyInt16:
7729 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7730 case SVETypeFlags::EltTyInt32:
7731 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7732 case SVETypeFlags::EltTyInt64:
7733 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7734
7735 case SVETypeFlags::EltTyBFloat16:
7736 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7737 case SVETypeFlags::EltTyFloat16:
7738 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7739 case SVETypeFlags::EltTyFloat32:
7740 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7741 case SVETypeFlags::EltTyFloat64:
7742 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7743
7744 case SVETypeFlags::EltTyBool8:
7745 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7746 case SVETypeFlags::EltTyBool16:
7747 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7748 case SVETypeFlags::EltTyBool32:
7749 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7750 case SVETypeFlags::EltTyBool64:
7751 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7752 }
7753}
7754
7755// Return the llvm vector type corresponding to the specified element TypeFlags.
7756llvm::ScalableVectorType *
7757CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
7758 switch (TypeFlags.getEltType()) {
7759 default:
7760 llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7760)
;
7761
7762 case SVETypeFlags::EltTyInt8:
7763 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
7764 case SVETypeFlags::EltTyInt16:
7765 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
7766 case SVETypeFlags::EltTyInt32:
7767 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
7768 case SVETypeFlags::EltTyInt64:
7769 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
7770
7771 case SVETypeFlags::EltTyFloat16:
7772 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
7773 case SVETypeFlags::EltTyBFloat16:
7774 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
7775 case SVETypeFlags::EltTyFloat32:
7776 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
7777 case SVETypeFlags::EltTyFloat64:
7778 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
7779
7780 case SVETypeFlags::EltTyBool8:
7781 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7782 case SVETypeFlags::EltTyBool16:
7783 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7784 case SVETypeFlags::EltTyBool32:
7785 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7786 case SVETypeFlags::EltTyBool64:
7787 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7788 }
7789}
7790
7791llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
7792 Function *Ptrue =
7793 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
7794 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
7795}
7796
7797constexpr unsigned SVEBitsPerBlock = 128;
7798
7799static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
7800 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
7801 return llvm::ScalableVectorType::get(EltTy, NumElts);
7802}
7803
7804// Reinterpret the input predicate so that it can be used to correctly isolate
7805// the elements of the specified datatype.
7806Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
7807 llvm::ScalableVectorType *VTy) {
7808 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
7809 if (Pred->getType() == RTy)
7810 return Pred;
7811
7812 unsigned IntID;
7813 llvm::Type *IntrinsicTy;
7814 switch (VTy->getMinNumElements()) {
7815 default:
7816 llvm_unreachable("unsupported element count!")::llvm::llvm_unreachable_internal("unsupported element count!"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7816)
;
7817 case 2:
7818 case 4:
7819 case 8:
7820 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
7821 IntrinsicTy = RTy;
7822 break;
7823 case 16:
7824 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
7825 IntrinsicTy = Pred->getType();
7826 break;
7827 }
7828
7829 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
7830 Value *C = Builder.CreateCall(F, Pred);
7831  assert(C->getType() == RTy && "Unexpected return type!");
7832 return C;
7833}
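// Illustrative example: an svbool_t predicate arrives as <vscale x 16 x i1>;
// when the data type is <vscale x 2 x i64> it is narrowed to
// <vscale x 2 x i1> via aarch64.sve.convert.from.svbool, and a narrower
// predicate going back to 16 lanes is widened via
// aarch64.sve.convert.to.svbool.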
7834
7835Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
7836 SmallVectorImpl<Value *> &Ops,
7837 unsigned IntID) {
7838 auto *ResultTy = getSVEType(TypeFlags);
7839 auto *OverloadedTy =
7840 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
7841
7842 // At the ACLE level there's only one predicate type, svbool_t, which is
7843 // mapped to <n x 16 x i1>. However, this might be incompatible with the
7844 // actual type being loaded. For example, when loading doubles (i64) the
7845 // predicate should be <n x 2 x i1> instead. At the IR level the type of
7846 // the predicate and the data being loaded must match. Cast accordingly.
7847 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
7848
7849 Function *F = nullptr;
7850 if (Ops[1]->getType()->isVectorTy())
7851 // This is the "vector base, scalar offset" case. In order to uniquely
7852 // map this built-in to an LLVM IR intrinsic, we need both the return type
7853 // and the type of the vector base.
7854 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
7855 else
7856 // This is the "scalar base, vector offset case". The type of the offset
7857 // is encoded in the name of the intrinsic. We only need to specify the
7858 // return type in order to uniquely map this built-in to an LLVM IR
7859 // intrinsic.
7860 F = CGM.getIntrinsic(IntID, OverloadedTy);
7861
7862 // Pass 0 when the offset is missing. This can only be applied when using
7863 // the "vector base" addressing mode for which ACLE allows no offset. The
7864 // corresponding LLVM IR always requires an offset.
7865 if (Ops.size() == 2) {
7866    assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
7867 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7868 }
7869
7870 // For "vector base, scalar index" scale the index so that it becomes a
7871 // scalar offset.
7872 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
7873 unsigned BytesPerElt =
7874 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
7875 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7876 Ops[2] = Builder.CreateMul(Ops[2], Scale);
7877 }
7878
7879 Value *Call = Builder.CreateCall(F, Ops);
7880
7881 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
7882 // other cases it's folded into a nop.
7883 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
7884 : Builder.CreateSExt(Call, ResultTy);
7885}
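// Illustrative example of the index scaling above: a gather of 32-bit
// elements using the "vector base, scalar index" form multiplies the index
// by 4 (BytesPerElt), since the underlying intrinsic expects a byte offset.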
7886
7887Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
7888 SmallVectorImpl<Value *> &Ops,
7889 unsigned IntID) {
7890 auto *SrcDataTy = getSVEType(TypeFlags);
7891 auto *OverloadedTy =
7892 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
7893
7894 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
7895 // it's the first argument. Move it accordingly.
7896 Ops.insert(Ops.begin(), Ops.pop_back_val());
7897
7898 Function *F = nullptr;
7899 if (Ops[2]->getType()->isVectorTy())
7900 // This is the "vector base, scalar offset" case. In order to uniquely
7901 // map this built-in to an LLVM IR intrinsic, we need both the return type
7902 // and the type of the vector base.
7903 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
7904 else
7905 // This is the "scalar base, vector offset case". The type of the offset
7906 // is encoded in the name of the intrinsic. We only need to specify the
7907 // return type in order to uniquely map this built-in to an LLVM IR
7908 // intrinsic.
7909 F = CGM.getIntrinsic(IntID, OverloadedTy);
7910
7911 // Pass 0 when the offset is missing. This can only be applied when using
7912 // the "vector base" addressing mode for which ACLE allows no offset. The
7913 // corresponding LLVM IR always requires an offset.
7914 if (Ops.size() == 3) {
7915    assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
7916 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7917 }
7918
7919 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
7920 // folded into a nop.
7921 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
7922
7923 // At the ACLE level there's only one predicate type, svbool_t, which is
7924 // mapped to <n x 16 x i1>. However, this might be incompatible with the
7925 // actual type being stored. For example, when storing doubles (i64) the
7926 // predicate should be <n x 2 x i1> instead. At the IR level the type of
7927 // the predicate and the data being stored must match. Cast accordingly.
7928 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
7929
7930 // For "vector base, scalar index" scale the index so that it becomes a
7931 // scalar offset.
7932 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
7933 unsigned BytesPerElt =
7934 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
7935 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7936 Ops[3] = Builder.CreateMul(Ops[3], Scale);
7937 }
7938
7939 return Builder.CreateCall(F, Ops);
7940}
7941
7942Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
7943 SmallVectorImpl<Value *> &Ops,
7944 unsigned IntID) {
7945 // The gather prefetches are overloaded on the vector input - this can either
7946 // be the vector of base addresses or vector of offsets.
7947 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
7948 if (!OverloadedTy)
7949 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
7950
7951 // Cast the predicate from svbool_t to the right number of elements.
7952 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
7953
7954 // vector + imm addressing modes
7955 if (Ops[1]->getType()->isVectorTy()) {
7956 if (Ops.size() == 3) {
7957 // Pass 0 for 'vector+imm' when the index is omitted.
7958 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7959
7960 // The sv_prfop is the last operand in the builtin and IR intrinsic.
7961 std::swap(Ops[2], Ops[3]);
7962 } else {
7963 // Index needs to be passed as scaled offset.
7964 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
7965 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
7966 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7967 Ops[2] = Builder.CreateMul(Ops[2], Scale);
7968 }
7969 }
7970
7971 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
7972 return Builder.CreateCall(F, Ops);
7973}
7974
7975Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
7976 SmallVectorImpl<Value*> &Ops,
7977 unsigned IntID) {
7978 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
7979 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
7980 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
7981
7982 unsigned N;
7983 switch (IntID) {
7984 case Intrinsic::aarch64_sve_ld2:
7985 N = 2;
7986 break;
7987 case Intrinsic::aarch64_sve_ld3:
7988 N = 3;
7989 break;
7990 case Intrinsic::aarch64_sve_ld4:
7991 N = 4;
7992 break;
7993 default:
7994 llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7994)
;
7995 }
7996 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
7997 VTy->getElementCount() * N);
7998
7999 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8000  Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8001 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8002 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8003 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8004
8005 Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8006 return Builder.CreateCall(F, { Predicate, BasePtr });
8007}
8008
8009Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
8010 SmallVectorImpl<Value*> &Ops,
8011 unsigned IntID) {
8012 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8013 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8014 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8015
8016 unsigned N;
8017 switch (IntID) {
8018 case Intrinsic::aarch64_sve_st2:
8019 N = 2;
8020 break;
8021 case Intrinsic::aarch64_sve_st3:
8022 N = 3;
8023 break;
8024 case Intrinsic::aarch64_sve_st4:
8025 N = 4;
8026 break;
8027 default:
8028 llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8028)
;
8029 }
8030 auto TupleTy =
8031 llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8032
8033 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8034 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8035 Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8036 Value *Val = Ops.back();
8037 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8038 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8039
8040 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8041 // need to break up the tuple vector.
8042 SmallVector<llvm::Value*, 5> Operands;
8043 Function *FExtr =
8044 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8045 for (unsigned I = 0; I < N; ++I)
8046 Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8047 Operands.append({Predicate, BasePtr});
8048
8049 Function *F = CGM.getIntrinsic(IntID, { VTy });
8050 return Builder.CreateCall(F, Operands);
8051}
8052
8053// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8054// svpmullt_pair intrinsics, with the exception that their results are bitcast
8055// to a wider type.
8056Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
8057 SmallVectorImpl<Value *> &Ops,
8058 unsigned BuiltinID) {
8059 // Splat scalar operand to vector (intrinsics with _n infix)
8060 if (TypeFlags.hasSplatOperand()) {
8061 unsigned OpNo = TypeFlags.getSplatOperand();
8062 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8063 }
8064
8065 // The pair-wise function has a narrower overloaded type.
8066 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8067 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8068
8069 // Now bitcast to the wider result type.
8070 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8071 return EmitSVEReinterpret(Call, Ty);
8072}
8073
8074Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
8075 ArrayRef<Value *> Ops, unsigned BuiltinID) {
8076 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8077 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8078 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8079}
8080
8081Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
8082 SmallVectorImpl<Value *> &Ops,
8083 unsigned BuiltinID) {
8084 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8085 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8086 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8087
8088 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8089 Value *BasePtr = Ops[1];
8090
8091 // Implement the index operand if not omitted.
8092 if (Ops.size() > 3) {
8093 BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8094 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8095 }
8096
8097  // Prefetch intrinsics always expect an i8*
8098 BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8099 Value *PrfOp = Ops.back();
8100
8101 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8102 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8103}
8104
8105Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8106 llvm::Type *ReturnTy,
8107 SmallVectorImpl<Value *> &Ops,
8108 unsigned BuiltinID,
8109 bool IsZExtReturn) {
8110 QualType LangPTy = E->getArg(1)->getType();
8111 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8112 LangPTy->getAs<PointerType>()->getPointeeType());
8113
8114 // The vector type that is returned may be different from the
8115 // eventual type loaded from memory.
8116 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8117 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8118
8119 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8120 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8121 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8122 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8123
8124 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8125 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8126 Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8127
8128 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
8129 : Builder.CreateSExt(Load, VectorTy);
8130}
8131
8132Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8133 SmallVectorImpl<Value *> &Ops,
8134 unsigned BuiltinID) {
8135 QualType LangPTy = E->getArg(1)->getType();
8136 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8137 LangPTy->getAs<PointerType>()->getPointeeType());
8138
8139 // The vector type that is stored may be different from the
8140 // eventual type stored to memory.
8141 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8142 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8143
8144 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8145 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8146 Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8147 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8148
8149 // Last value is always the data
8150 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8151
8152 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8153 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8154 return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8155}
8156
8157// Limit the usage of scalable llvm IR generated by the ACLE by using the
8158// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
8159Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8160 auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8161 return Builder.CreateCall(F, Scalar);
8162}
8163
8164Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
8165 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8166}
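// Illustrative example: splatting an i32 scalar through EmitSVEDupX emits a
// call to the aarch64.sve.dup.x intrinsic overloaded on <vscale x 4 x i32>
// instead of the generic insertelement + shufflevector splat pattern,
// keeping the amount of scalable-vector IR small.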
8167
8168Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8169 // FIXME: For big endian this needs an additional REV, or needs a separate
8170 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
8171 // instruction is defined as 'bitwise' equivalent from memory point of
8172 // view (when storing/reloading), whereas the svreinterpret builtin
8173 // implements bitwise equivalent cast from register point of view.
8174 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8175 return Builder.CreateBitCast(Val, Ty);
8176}
8177
8178static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8179 SmallVectorImpl<Value *> &Ops) {
8180 auto *SplatZero = Constant::getNullValue(Ty);
8181 Ops.insert(Ops.begin(), SplatZero);
8182}
8183
8184static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8185 SmallVectorImpl<Value *> &Ops) {
8186 auto *SplatUndef = UndefValue::get(Ty);
8187 Ops.insert(Ops.begin(), SplatUndef);
8188}
8189
8190SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
8191 SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
8192 if (TypeFlags.isOverloadNone())
8193 return {};
8194
8195 llvm::Type *DefaultType = getSVEType(TypeFlags);
8196
8197 if (TypeFlags.isOverloadWhile())
8198 return {DefaultType, Ops[1]->getType()};
8199
8200 if (TypeFlags.isOverloadWhileRW())
8201 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
8202
8203 if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
8204 return {Ops[0]->getType(), Ops.back()->getType()};
8205
8206 if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
8207 return {ResultType, Ops[0]->getType()};
8208
8209  assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
8210 return {DefaultType};
8211}
8212
8213Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
8214 const CallExpr *E) {
8215 // Find out if any arguments are required to be integer constant expressions.
8216 unsigned ICEArguments = 0;
8217 ASTContext::GetBuiltinTypeError Error;
8218 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8219  assert(Error == ASTContext::GE_None && "Should not codegen an error");
8220
8221 llvm::Type *Ty = ConvertType(E->getType());
8222 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
8223 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
8224 Value *Val = EmitScalarExpr(E->getArg(0));
8225 return EmitSVEReinterpret(Val, Ty);
8226 }
8227
8228 llvm::SmallVector<Value *, 4> Ops;
8229 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
8230 if ((ICEArguments & (1 << i)) == 0)
8231 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8232 else {
8233 // If this is required to be a constant, constant fold it so that we know
8234 // that the generated intrinsic gets a ConstantInt.
8235 Optional<llvm::APSInt> Result =
8236 E->getArg(i)->getIntegerConstantExpr(getContext());
8237      assert(Result && "Expected argument to be a constant");
8238
8239 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
8240 // truncate because the immediate has been range checked and no valid
8241 // immediate requires more than a handful of bits.
8242 *Result = Result->extOrTrunc(32);
8243 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
8244 }
8245 }
8246
8247 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
8248 AArch64SVEIntrinsicsProvenSorted);
8249 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8250 if (TypeFlags.isLoad())
8251 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
8252 TypeFlags.isZExtReturn());
8253 else if (TypeFlags.isStore())
8254 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
8255 else if (TypeFlags.isGatherLoad())
8256 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8257 else if (TypeFlags.isScatterStore())
8258 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8259 else if (TypeFlags.isPrefetch())
8260 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8261 else if (TypeFlags.isGatherPrefetch())
8262 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8263 else if (TypeFlags.isStructLoad())
8264 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8265 else if (TypeFlags.isStructStore())
8266 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8267 else if (TypeFlags.isUndef())
8268 return UndefValue::get(Ty);
8269 else if (Builtin->LLVMIntrinsic != 0) {
8270 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
8271 InsertExplicitZeroOperand(Builder, Ty, Ops);
8272
8273 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
8274 InsertExplicitUndefOperand(Builder, Ty, Ops);
8275
8276 // Some ACLE builtins leave out the argument to specify the predicate
8277 // pattern, which is expected to be expanded to an SV_ALL pattern.
8278 if (TypeFlags.isAppendSVALL())
8279 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
8280 if (TypeFlags.isInsertOp1SVALL())
8281 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
8282
8283 // Predicates must match the main datatype.
8284 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
8285 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
8286 if (PredTy->getElementType()->isIntegerTy(1))
8287 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
8288
8289 // Splat scalar operand to vector (intrinsics with _n infix)
8290 if (TypeFlags.hasSplatOperand()) {
8291 unsigned OpNo = TypeFlags.getSplatOperand();
8292 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8293 }
8294
8295 if (TypeFlags.isReverseCompare())
8296 std::swap(Ops[1], Ops[2]);
8297
8298 if (TypeFlags.isReverseUSDOT())
8299 std::swap(Ops[1], Ops[2]);
8300
8301 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
8302 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
8303 llvm::Type *OpndTy = Ops[1]->getType();
8304 auto *SplatZero = Constant::getNullValue(OpndTy);
8305 Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
8306 Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
8307 }
8308
8309 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
8310 getSVEOverloadTypes(TypeFlags, Ty, Ops));
8311 Value *Call = Builder.CreateCall(F, Ops);
8312
8313 // Predicate results must be converted to svbool_t.
8314 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
8315 if (PredTy->getScalarType()->isIntegerTy(1))
8316 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8317
8318 return Call;
8319 }
8320
8321 switch (BuiltinID) {
8322 default:
8323 return nullptr;
8324
8325 case SVE::BI__builtin_sve_svmov_b_z: {
8326 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
8327 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8328 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8329 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
8330 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
8331 }
8332
8333 case SVE::BI__builtin_sve_svnot_b_z: {
8334 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
8335 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8336 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8337 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
8338 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
8339 }
8340
8341 case SVE::BI__builtin_sve_svmovlb_u16:
8342 case SVE::BI__builtin_sve_svmovlb_u32:
8343 case SVE::BI__builtin_sve_svmovlb_u64:
8344 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
8345
8346 case SVE::BI__builtin_sve_svmovlb_s16:
8347 case SVE::BI__builtin_sve_svmovlb_s32:
8348 case SVE::BI__builtin_sve_svmovlb_s64:
8349 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
8350
8351 case SVE::BI__builtin_sve_svmovlt_u16:
8352 case SVE::BI__builtin_sve_svmovlt_u32:
8353 case SVE::BI__builtin_sve_svmovlt_u64:
8354 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
8355
8356 case SVE::BI__builtin_sve_svmovlt_s16:
8357 case SVE::BI__builtin_sve_svmovlt_s32:
8358 case SVE::BI__builtin_sve_svmovlt_s64:
8359 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
8360
8361 case SVE::BI__builtin_sve_svpmullt_u16:
8362 case SVE::BI__builtin_sve_svpmullt_u64:
8363 case SVE::BI__builtin_sve_svpmullt_n_u16:
8364 case SVE::BI__builtin_sve_svpmullt_n_u64:
8365 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
8366
8367 case SVE::BI__builtin_sve_svpmullb_u16:
8368 case SVE::BI__builtin_sve_svpmullb_u64:
8369 case SVE::BI__builtin_sve_svpmullb_n_u16:
8370 case SVE::BI__builtin_sve_svpmullb_n_u64:
8371 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
8372
8373 case SVE::BI__builtin_sve_svdup_n_b8:
8374 case SVE::BI__builtin_sve_svdup_n_b16:
8375 case SVE::BI__builtin_sve_svdup_n_b32:
8376 case SVE::BI__builtin_sve_svdup_n_b64: {
8377 Value *CmpNE =
8378 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
8379 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
8380 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
8381 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
8382 }
8383
8384 case SVE::BI__builtin_sve_svdupq_n_b8:
8385 case SVE::BI__builtin_sve_svdupq_n_b16:
8386 case SVE::BI__builtin_sve_svdupq_n_b32:
8387 case SVE::BI__builtin_sve_svdupq_n_b64:
8388 case SVE::BI__builtin_sve_svdupq_n_u8:
8389 case SVE::BI__builtin_sve_svdupq_n_s8:
8390 case SVE::BI__builtin_sve_svdupq_n_u64:
8391 case SVE::BI__builtin_sve_svdupq_n_f64:
8392 case SVE::BI__builtin_sve_svdupq_n_s64:
8393 case SVE::BI__builtin_sve_svdupq_n_u16:
8394 case SVE::BI__builtin_sve_svdupq_n_f16:
8395 case SVE::BI__builtin_sve_svdupq_n_bf16:
8396 case SVE::BI__builtin_sve_svdupq_n_s16:
8397 case SVE::BI__builtin_sve_svdupq_n_u32:
8398 case SVE::BI__builtin_sve_svdupq_n_f32:
8399 case SVE::BI__builtin_sve_svdupq_n_s32: {
8400 // These builtins are implemented by storing each element to an array and using
8401 // ld1rq to materialize a vector.
8402 unsigned NumOpnds = Ops.size();
8403
8404 bool IsBoolTy =
8405 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
8406
8407    // For svdupq_n_b* the element type is an integer of width 128/numelts,
8408 // so that the compare can use the width that is natural for the expected
8409 // number of predicate lanes.
8410 llvm::Type *EltTy = Ops[0]->getType();
8411 if (IsBoolTy)
8412 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
8413
8414 Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
8415 CharUnits::fromQuantity(16));
8416 for (unsigned I = 0; I < NumOpnds; ++I)
8417 Builder.CreateDefaultAlignedStore(
8418 IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
8419 Builder.CreateGEP(Alloca.getPointer(),
8420 {Builder.getInt64(0), Builder.getInt64(I)}));
8421
8422 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8423 Value *Pred = EmitSVEAllTruePred(TypeFlags);
8424
8425 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
8426 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
8427 Value *Alloca0 = Builder.CreateGEP(
8428 Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
8429 Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
8430
8431 if (!IsBoolTy)
8432 return LD1RQ;
8433
8434 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
8435 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
8436 : Intrinsic::aarch64_sve_cmpne_wide,
8437 OverloadedTy);
8438 Value *Call =
8439 Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
8440 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8441 }
8442
8443 case SVE::BI__builtin_sve_svpfalse_b:
8444 return ConstantInt::getFalse(Ty);
8445
8446 case SVE::BI__builtin_sve_svlen_bf16:
8447 case SVE::BI__builtin_sve_svlen_f16:
8448 case SVE::BI__builtin_sve_svlen_f32:
8449 case SVE::BI__builtin_sve_svlen_f64:
8450 case SVE::BI__builtin_sve_svlen_s8:
8451 case SVE::BI__builtin_sve_svlen_s16:
8452 case SVE::BI__builtin_sve_svlen_s32:
8453 case SVE::BI__builtin_sve_svlen_s64:
8454 case SVE::BI__builtin_sve_svlen_u8:
8455 case SVE::BI__builtin_sve_svlen_u16:
8456 case SVE::BI__builtin_sve_svlen_u32:
8457 case SVE::BI__builtin_sve_svlen_u64: {
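// svlen returns the number of elements in the vector: the type's known
// minimum element count scaled by vscale. E.g. svlen_u32 yields
// 4 * vscale, which would be 8 on a hypothetical 256-bit implementation.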
8458 SVETypeFlags TF(Builtin->TypeModifier);
8459 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8460 auto *NumEls =
8461 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
8462
8463 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
8464 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
8465 }
8466
8467 case SVE::BI__builtin_sve_svtbl2_u8:
8468 case SVE::BI__builtin_sve_svtbl2_s8:
8469 case SVE::BI__builtin_sve_svtbl2_u16:
8470 case SVE::BI__builtin_sve_svtbl2_s16:
8471 case SVE::BI__builtin_sve_svtbl2_u32:
8472 case SVE::BI__builtin_sve_svtbl2_s32:
8473 case SVE::BI__builtin_sve_svtbl2_u64:
8474 case SVE::BI__builtin_sve_svtbl2_s64:
8475 case SVE::BI__builtin_sve_svtbl2_f16:
8476 case SVE::BI__builtin_sve_svtbl2_bf16:
8477 case SVE::BI__builtin_sve_svtbl2_f32:
8478 case SVE::BI__builtin_sve_svtbl2_f64: {
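// svtbl2 takes a two-vector tuple plus an index vector: extract both
// halves of the tuple with tuple_get and forward them, together with the
// indices, to the tbl2 intrinsic.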
8479 SVETypeFlags TF(Builtin->TypeModifier);
8480 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8481 auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
8482 VTy->getElementCount() * 2);
8483 Function *FExtr =
8484 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8485 Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
8486 Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
8487 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
8488 return Builder.CreateCall(F, {V0, V1, Ops[1]});
8489 }
8490 }
8491
8492 /// Should not happen
8493 return nullptr;
8494}
8495
8496Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
8497 const CallExpr *E,
8498 llvm::Triple::ArchType Arch) {
8499 if (BuiltinID >= AArch64::FirstSVEBuiltin &&
8500 BuiltinID <= AArch64::LastSVEBuiltin)
8501 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
8502
8503 unsigned HintID = static_cast<unsigned>(-1);
8504 switch (BuiltinID) {
8505 default: break;
8506 case AArch64::BI__builtin_arm_nop:
8507 HintID = 0;
8508 break;
8509 case AArch64::BI__builtin_arm_yield:
8510 case AArch64::BI__yield:
8511 HintID = 1;
8512 break;
8513 case AArch64::BI__builtin_arm_wfe:
8514 case AArch64::BI__wfe:
8515 HintID = 2;
8516 break;
8517 case AArch64::BI__builtin_arm_wfi:
8518 case AArch64::BI__wfi:
8519 HintID = 3;
8520 break;
8521 case AArch64::BI__builtin_arm_sev:
8522 case AArch64::BI__sev:
8523 HintID = 4;
8524 break;
8525 case AArch64::BI__builtin_arm_sevl:
8526 case AArch64::BI__sevl:
8527 HintID = 5;
8528 break;
8529 }
8530
8531 if (HintID != static_cast<unsigned>(-1)) {
8532 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
8533 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
8534 }
8535
8536 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
8537 Value *Address = EmitScalarExpr(E->getArg(0));
8538 Value *RW = EmitScalarExpr(E->getArg(1));
8539 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
8540 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
8541 Value *IsData = EmitScalarExpr(E->getArg(4));
8542
8543 Value *Locality = nullptr;
8544 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
8545 // Temporal fetch, needs to convert cache level to locality.
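// E.g. cache level 0 (closest cache) maps to locality 3 and level 2 maps
// to locality 1, following LLVM's convention that 3 is the highest
// temporal locality.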
8546 Locality = llvm::ConstantInt::get(Int32Ty,
8547 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
8548 } else {
8549 // Streaming fetch.
8550 Locality = llvm::ConstantInt::get(Int32Ty, 0);
8551 }
8552
8553 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
8554 // PLDL3STRM or PLDL2STRM.
8555 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
8556 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
8557 }
8558
8559 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
8560 assert((getContext().getTypeSize(E->getType()) == 32) &&
8561 "rbit of unusual size!");
8562 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8563 return Builder.CreateCall(
8564 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8565 }
8566 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
8567 assert((getContext().getTypeSize(E->getType()) == 64) &&
8568 "rbit of unusual size!");
8569 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8570 return Builder.CreateCall(
8571 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8572 }
8573
8574 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
8575 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8576 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
8577 "cls");
8578 }
8579 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
8580 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8581 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
8582 "cls");
8583 }
8584
8585 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
8586 assert((getContext().getTypeSize(E->getType()) == 32) &&
8587 "__jcvt of unusual size!");
8588 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8589 return Builder.CreateCall(
8590 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
8591 }
8592
8593 if (BuiltinID == AArch64::BI__clear_cache) {
8594 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
8595 const FunctionDecl *FD = E->getDirectCallee();
8596 Value *Ops[2];
8597 for (unsigned i = 0; i < 2; i++)
8598 Ops[i] = EmitScalarExpr(E->getArg(i));
8599 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
8600 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
8601 StringRef Name = FD->getName();
8602 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8603 }
8604
8605 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8606 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
8607 getContext().getTypeSize(E->getType()) == 128) {
8608 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8609 ? Intrinsic::aarch64_ldaxp
8610 : Intrinsic::aarch64_ldxp);
8611
8612 Value *LdPtr = EmitScalarExpr(E->getArg(0));
8613 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
8614 "ldxp");
8615
8616 Value *Val0 = Builder.CreateExtractValue(Val, 1);
8617 Value *Val1 = Builder.CreateExtractValue(Val, 0);
8618 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
8619 Val0 = Builder.CreateZExt(Val0, Int128Ty);
8620 Val1 = Builder.CreateZExt(Val1, Int128Ty);
8621
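// Reassemble the 128-bit value: element 1 of the returned pair supplies
// the high 64 bits and element 0 the low 64 bits.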
8622 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
8623 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
8624 Val = Builder.CreateOr(Val, Val1);
8625 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
8626 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8627 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
8628 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
8629
8630 QualType Ty = E->getType();
8631 llvm::Type *RealResTy = ConvertType(Ty);
8632 llvm::Type *PtrTy = llvm::IntegerType::get(
8633 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
8634 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
8635
8636 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8637 ? Intrinsic::aarch64_ldaxr
8638 : Intrinsic::aarch64_ldxr,
8639 PtrTy);
8640 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
8641
8642 if (RealResTy->isPointerTy())
8643 return Builder.CreateIntToPtr(Val, RealResTy);
8644
8645 llvm::Type *IntResTy = llvm::IntegerType::get(
8646 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
8647 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
8648 return Builder.CreateBitCast(Val, RealResTy);
8649 }
8650
8651 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
8652 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
8653 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
8654 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8655 ? Intrinsic::aarch64_stlxp
8656 : Intrinsic::aarch64_stxp);
8657 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
8658
8659 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
8660 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
8661
8662 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
8663 llvm::Value *Val = Builder.CreateLoad(Tmp);
8664
8665 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
8666 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
8667 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
8668 Int8PtrTy);
8669 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
8670 }
8671
8672 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
8673 BuiltinID == AArch64::BI__builtin_arm_stlex) {
8674 Value *StoreVal = EmitScalarExpr(E->getArg(0));
8675 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
8676
8677 QualType Ty = E->getArg(0)->getType();
8678 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
8679 getContext().getTypeSize(Ty));
8680 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
8681
8682 if (StoreVal->getType()->isPointerTy())
8683 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
8684 else {
8685 llvm::Type *IntTy = llvm::IntegerType::get(
8686 getLLVMContext(),
8687 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
8688 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
8689 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
8690 }
8691
8692 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8693 ? Intrinsic::aarch64_stlxr
8694 : Intrinsic::aarch64_stxr,
8695 StoreAddr->getType());
8696 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
8697 }
8698
8699 if (BuiltinID == AArch64::BI__getReg) {
8700 Expr::EvalResult Result;
8701 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
8702 llvm_unreachable("Sema will ensure that the parameter is constant")::llvm::llvm_unreachable_internal("Sema will ensure that the parameter is constant"
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8702)
;
8703
8704 llvm::APSInt Value = Result.Val.getInt();
8705 LLVMContext &Context = CGM.getLLVMContext();
8706 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
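// E.g. __getReg(29) reads x29 and __getReg(31) reads sp; the register
// name is passed to llvm.read_register as metadata.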
8707
8708 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
8709 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8710 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
8711
8712 llvm::Function *F =
8713 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
8714 return Builder.CreateCall(F, Metadata);
8715 }
8716
8717 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
8718 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
8719 return Builder.CreateCall(F);
8720 }
8721
8722 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
8723 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
8724 llvm::SyncScope::SingleThread);
8725
8726 // CRC32
8727 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
8728 switch (BuiltinID) {
8729 case AArch64::BI__builtin_arm_crc32b:
8730 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
8731 case AArch64::BI__builtin_arm_crc32cb:
8732 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
8733 case AArch64::BI__builtin_arm_crc32h:
8734 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
8735 case AArch64::BI__builtin_arm_crc32ch:
8736 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
8737 case AArch64::BI__builtin_arm_crc32w:
8738 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
8739 case AArch64::BI__builtin_arm_crc32cw:
8740 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
8741 case AArch64::BI__builtin_arm_crc32d:
8742 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
8743 case AArch64::BI__builtin_arm_crc32cd:
8744 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
8745 }
8746
8747 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
8748 Value *Arg0 = EmitScalarExpr(E->getArg(0));
8749 Value *Arg1 = EmitScalarExpr(E->getArg(1));
8750 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8751
8752 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
8753 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
8754
8755 return Builder.CreateCall(F, {Arg0, Arg1});
8756 }
8757
8758 // Memory Tagging Extensions (MTE) Intrinsics
8759 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
8760 switch (BuiltinID) {
8761 case AArch64::BI__builtin_arm_irg:
8762 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
8763 case AArch64::BI__builtin_arm_addg:
8764 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
8765 case AArch64::BI__builtin_arm_gmi:
8766 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
8767 case AArch64::BI__builtin_arm_ldg:
8768 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
8769 case AArch64::BI__builtin_arm_stg:
8770 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
8771 case AArch64::BI__builtin_arm_subp:
8772 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
8773 }
8774
8775 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
8776 llvm::Type *T = ConvertType(E->getType());
8777
8778 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
8779 Value *Pointer = EmitScalarExpr(E->getArg(0));
8780 Value *Mask = EmitScalarExpr(E->getArg(1));
8781
8782 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8783 Mask = Builder.CreateZExt(Mask, Int64Ty);
8784 Value *RV = Builder.CreateCall(
8785 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
8786 return Builder.CreatePointerCast(RV, T);
8787 }
8788 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
8789 Value *Pointer = EmitScalarExpr(E->getArg(0));
8790 Value *TagOffset = EmitScalarExpr(E->getArg(1));
8791
8792 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8793 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
8794 Value *RV = Builder.CreateCall(
8795 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
8796 return Builder.CreatePointerCast(RV, T);
8797 }
8798 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
8799 Value *Pointer = EmitScalarExpr(E->getArg(0));
8800 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
8801
8802 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
8803 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8804 return Builder.CreateCall(
8805 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
8806 }
8807 // Although it is possible to supply a different return
8808 // address (first arg) to this intrinsic, for now we set the
8809 // return address to be the same as the input address.
8810 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
8811 Value *TagAddress = EmitScalarExpr(E->getArg(0));
8812 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
8813 Value *RV = Builder.CreateCall(
8814 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
8815 return Builder.CreatePointerCast(RV, T);
8816 }
8817 // Although it is possible to supply a different tag (to set)
8818 // to this intrinsic (as the first arg), for now we supply
8819 // the tag that is in the input address arg (the common use case).
8820 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
8821 Value *TagAddress = EmitScalarExpr(E->getArg(0));
8822 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
8823 return Builder.CreateCall(
8824 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
8825 }
8826 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
8827 Value *PointerA = EmitScalarExpr(E->getArg(0));
8828 Value *PointerB = EmitScalarExpr(E->getArg(1));
8829 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
8830 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
8831 return Builder.CreateCall(
8832 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
8833 }
8834 }
8835
8836 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
8837 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
8838 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
8839 BuiltinID == AArch64::BI__builtin_arm_wsr ||
8840 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
8841 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
8842
8843 SpecialRegisterAccessKind AccessKind = Write;
8844 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
8845 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
8846 BuiltinID == AArch64::BI__builtin_arm_rsrp)
8847 AccessKind = VolatileRead;
8848
8849 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
8850 BuiltinID == AArch64::BI__builtin_arm_wsrp;
8851
8852 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
8853 BuiltinID != AArch64::BI__builtin_arm_wsr;
8854
8855 llvm::Type *ValueType;
8856 llvm::Type *RegisterType = Int64Ty;
8857 if (IsPointerBuiltin) {
8858 ValueType = VoidPtrTy;
8859 } else if (Is64Bit) {
8860 ValueType = Int64Ty;
8861 } else {
8862 ValueType = Int32Ty;
8863 }
8864
8865 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
8866 AccessKind);
8867 }
8868
8869 if (BuiltinID == AArch64::BI_ReadStatusReg ||
8870 BuiltinID == AArch64::BI_WriteStatusReg) {
8871 LLVMContext &Context = CGM.getLLVMContext();
8872
8873 unsigned SysReg =
8874 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
8875
8876 std::string SysRegStr;
8877 llvm::raw_string_ostream(SysRegStr) <<
8878 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
8879 ((SysReg >> 11) & 7) << ":" <<
8880 ((SysReg >> 7) & 15) << ":" <<
8881 ((SysReg >> 3) & 15) << ":" <<
8882 ( SysReg & 7);
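// The constant system-register operand is decoded into the MRS/MSR
// encoding fields op0:op1:CRn:CRm:op2, where op0 = 2 | bit 14,
// op1 = bits 13:11, CRn = bits 10:7, CRm = bits 6:3 and op2 = bits 2:0.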
8883
8884 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
8885 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8886 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
8887
8888 llvm::Type *RegisterType = Int64Ty;
8889 llvm::Type *Types[] = { RegisterType };
8890
8891 if (BuiltinID == AArch64::BI_ReadStatusReg) {
8892 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
8893
8894 return Builder.CreateCall(F, Metadata);
8895 }
8896
8897 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
8898 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
8899
8900 return Builder.CreateCall(F, { Metadata, ArgValue });
8901 }
8902
8903 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
8904 llvm::Function *F =
8905 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
8906 return Builder.CreateCall(F);
8907 }
8908
8909 if (BuiltinID == AArch64::BI__builtin_sponentry) {
8910 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
8911 return Builder.CreateCall(F);
8912 }
8913
8914 // Find out if any arguments are required to be integer constant
8915 // expressions.
8916 unsigned ICEArguments = 0;
8917 ASTContext::GetBuiltinTypeError Error;
8918 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
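// ICEArguments is a bit mask: if bit i is set, argument i must be an
// integer constant expression and is folded to a ConstantInt below
// instead of being emitted as a scalar expression.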
8919 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8920
8921 llvm::SmallVector<Value*, 4> Ops;
8922 Address PtrOp0 = Address::invalid();
8923 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
8924 if (i == 0) {
8925 switch (BuiltinID) {
8926 case NEON::BI__builtin_neon_vld1_v:
8927 case NEON::BI__builtin_neon_vld1q_v:
8928 case NEON::BI__builtin_neon_vld1_dup_v:
8929 case NEON::BI__builtin_neon_vld1q_dup_v:
8930 case NEON::BI__builtin_neon_vld1_lane_v:
8931 case NEON::BI__builtin_neon_vld1q_lane_v:
8932 case NEON::BI__builtin_neon_vst1_v:
8933 case NEON::BI__builtin_neon_vst1q_v:
8934 case NEON::BI__builtin_neon_vst1_lane_v:
8935 case NEON::BI__builtin_neon_vst1q_lane_v:
8936 // Get the alignment for the argument in addition to the value;
8937 // we'll use it later.
8938 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
8939 Ops.push_back(PtrOp0.getPointer());
8940 continue;
8941 }
8942 }
8943 if ((ICEArguments & (1 << i)) == 0) {
8944 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8945 } else {
8946 // If this is required to be a constant, constant fold it so that we know
8947 // that the generated intrinsic gets a ConstantInt.
8948 Ops.push_back(llvm::ConstantInt::get(
8949 getLLVMContext(),
8950 *E->getArg(i)->getIntegerConstantExpr(getContext())));
8951 }
8952 }
8953
8954 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
8955 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
8956 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
8957
8958 if (Builtin) {
8959 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
8960 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
8961 assert(Result && "SISD intrinsic should have been handled");
8962 return Result;
8963 }
8964
8965 const Expr *Arg = E->getArg(E->getNumArgs()-1);
8966 NeonTypeFlags Type(0);
8967 if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
8968 // Determine the type of this overloaded NEON intrinsic.
8969 Type = NeonTypeFlags(Result->getZExtValue());
8970
8971 bool usgn = Type.isUnsigned();
8972 bool quad = Type.isQuad();
8973
8974 // Handle non-overloaded intrinsics first.
8975 switch (BuiltinID) {
8976 default: break;
8977 case NEON::BI__builtin_neon_vabsh_f16:
8978 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8979 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
8980 case NEON::BI__builtin_neon_vldrq_p128: {
8981 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
8982 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
8983 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
8984 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
8985 CharUnits::fromQuantity(16));
8986 }
8987 case NEON::BI__builtin_neon_vstrq_p128: {
8988 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
8989 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
8990 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
8991 }
8992 case NEON::BI__builtin_neon_vcvts_f32_u32:
8993 case NEON::BI__builtin_neon_vcvtd_f64_u64:
8994 usgn = true;
8995 LLVM_FALLTHROUGH;
8996 case NEON::BI__builtin_neon_vcvts_f32_s32:
8997 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
8998 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8999 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9000 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9001 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9002 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9003 if (usgn)
9004 return Builder.CreateUIToFP(Ops[0], FTy);
9005 return Builder.CreateSIToFP(Ops[0], FTy);
9006 }
9007 case NEON::BI__builtin_neon_vcvth_f16_u16:
9008 case NEON::BI__builtin_neon_vcvth_f16_u32:
9009 case NEON::BI__builtin_neon_vcvth_f16_u64:
9010 usgn = true;
9011 LLVM_FALLTHROUGH;
9012 case NEON::BI__builtin_neon_vcvth_f16_s16:
9013 case NEON::BI__builtin_neon_vcvth_f16_s32:
9014 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9015 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9016 llvm::Type *FTy = HalfTy;
9017 llvm::Type *InTy;
9018 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9019 InTy = Int64Ty;
9020 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9021 InTy = Int32Ty;
9022 else
9023 InTy = Int16Ty;
9024 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9025 if (usgn)
9026 return Builder.CreateUIToFP(Ops[0], FTy);
9027 return Builder.CreateSIToFP(Ops[0], FTy);
9028 }
9029 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9030 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9031 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9032 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9033 case NEON::BI__builtin_neon_vcvth_u16_f16:
9034 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9035 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9036 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9037 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9038 case NEON::BI__builtin_neon_vcvth_s16_f16: {
9039 unsigned Int;
9040 llvm::Type* InTy = Int32Ty;
9041 llvm::Type* FTy = HalfTy;
9042 llvm::Type *Tys[2] = {InTy, FTy};
9043 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9044 switch (BuiltinID) {
9045 default: llvm_unreachable("missing builtin ID in switch!");
9046 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9047 Int = Intrinsic::aarch64_neon_fcvtau; break;
9048 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9049 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9050 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9051 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9052 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9053 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9054 case NEON::BI__builtin_neon_vcvth_u16_f16:
9055 Int = Intrinsic::aarch64_neon_fcvtzu; break;
9056 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9057 Int = Intrinsic::aarch64_neon_fcvtas; break;
9058 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9059 Int = Intrinsic::aarch64_neon_fcvtms; break;
9060 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9061 Int = Intrinsic::aarch64_neon_fcvtns; break;
9062 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9063 Int = Intrinsic::aarch64_neon_fcvtps; break;
9064 case NEON::BI__builtin_neon_vcvth_s16_f16:
9065 Int = Intrinsic::aarch64_neon_fcvtzs; break;
9066 }
9067 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
9068 return Builder.CreateTrunc(Ops[0], Int16Ty);
9069 }
9070 case NEON::BI__builtin_neon_vcaleh_f16:
9071 case NEON::BI__builtin_neon_vcalth_f16:
9072 case NEON::BI__builtin_neon_vcageh_f16:
9073 case NEON::BI__builtin_neon_vcagth_f16: {
9074 unsigned Int;
9075 llvm::Type* InTy = Int32Ty;
9076 llvm::Type* FTy = HalfTy;
9077 llvm::Type *Tys[2] = {InTy, FTy};
9078 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9079 switch (BuiltinID) {
9080 default: llvm_unreachable("missing builtin ID in switch!");
9081 case NEON::BI__builtin_neon_vcageh_f16:
9082 Int = Intrinsic::aarch64_neon_facge; break;
9083 case NEON::BI__builtin_neon_vcagth_f16:
9084 Int = Intrinsic::aarch64_neon_facgt; break;
9085 case NEON::BI__builtin_neon_vcaleh_f16:
9086 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
9087 case NEON::BI__builtin_neon_vcalth_f16:
9088 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
9089 }
9090 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
9091 return Builder.CreateTrunc(Ops[0], Int16Ty);
9092 }
9093 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9094 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
9095 unsigned Int;
9096 llvm::Type* InTy = Int32Ty;
9097 llvm::Type* FTy = HalfTy;
9098 llvm::Type *Tys[2] = {InTy, FTy};
9099 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9100 switch (BuiltinID) {
9101 default: llvm_unreachable("missing builtin ID in switch!");
9102 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9103 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
9104 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
9105 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
9106 }
9107 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9108 return Builder.CreateTrunc(Ops[0], Int16Ty);
9109 }
9110 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9111 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
9112 unsigned Int;
9113 llvm::Type* FTy = HalfTy;
9114 llvm::Type* InTy = Int32Ty;
9115 llvm::Type *Tys[2] = {FTy, InTy};
9116 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9117 switch (BuiltinID) {
9118 default: llvm_unreachable("missing builtin ID in switch!");
9119 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9120 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
9121 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
9122 break;
9123 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
9124 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
9125 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
9126 break;
9127 }
9128 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9129 }
9130 case NEON::BI__builtin_neon_vpaddd_s64: {
9131 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
9132 Value *Vec = EmitScalarExpr(E->getArg(0));
9134 // The vector is v2i64, so make sure it's bitcast to that.
9134 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
9135 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9136 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9137 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9138 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9139 // Pairwise addition of a v2i64 into a scalar i64.
9140 return Builder.CreateAdd(Op0, Op1, "vpaddd");
9141 }
9142 case NEON::BI__builtin_neon_vpaddd_f64: {
9143 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
9144 Value *Vec = EmitScalarExpr(E->getArg(0));
9145 // The vector is v2f64, so make sure it's bitcast to that.
9146 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
9147 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9148 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9149 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9150 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9151 // Pairwise addition of a v2f64 into a scalar f64.
9152 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9153 }
9154 case NEON::BI__builtin_neon_vpadds_f32: {
9155 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
9156 Value *Vec = EmitScalarExpr(E->getArg(0));
9157 // The vector is v2f32, so make sure it's bitcast to that.
9158 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
9159 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9160 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9161 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9162 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9163 // Pairwise addition of a v2f32 into a scalar f32.
9164 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9165 }
9166 case NEON::BI__builtin_neon_vceqzd_s64:
9167 case NEON::BI__builtin_neon_vceqzd_f64:
9168 case NEON::BI__builtin_neon_vceqzs_f32:
9169 case NEON::BI__builtin_neon_vceqzh_f16:
9170 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9171 return EmitAArch64CompareBuiltinExpr(
9172 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9173 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
9174 case NEON::BI__builtin_neon_vcgezd_s64:
9175 case NEON::BI__builtin_neon_vcgezd_f64:
9176 case NEON::BI__builtin_neon_vcgezs_f32:
9177 case NEON::BI__builtin_neon_vcgezh_f16:
9178 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9179 return EmitAArch64CompareBuiltinExpr(
9180 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9181 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
9182 case NEON::BI__builtin_neon_vclezd_s64:
9183 case NEON::BI__builtin_neon_vclezd_f64:
9184 case NEON::BI__builtin_neon_vclezs_f32:
9185 case NEON::BI__builtin_neon_vclezh_f16:
9186 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9187 return EmitAArch64CompareBuiltinExpr(
9188 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9189 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
9190 case NEON::BI__builtin_neon_vcgtzd_s64:
9191 case NEON::BI__builtin_neon_vcgtzd_f64:
9192 case NEON::BI__builtin_neon_vcgtzs_f32:
9193 case NEON::BI__builtin_neon_vcgtzh_f16:
9194 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9195 return EmitAArch64CompareBuiltinExpr(
9196 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9197 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
9198 case NEON::BI__builtin_neon_vcltzd_s64:
9199 case NEON::BI__builtin_neon_vcltzd_f64:
9200 case NEON::BI__builtin_neon_vcltzs_f32:
9201 case NEON::BI__builtin_neon_vcltzh_f16:
9202 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9203 return EmitAArch64CompareBuiltinExpr(
9204 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9205 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
9206
9207 case NEON::BI__builtin_neon_vceqzd_u64: {
9208 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9209 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9210 Ops[0] =
9211 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
9212 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
9213 }
9214 case NEON::BI__builtin_neon_vceqd_f64:
9215 case NEON::BI__builtin_neon_vcled_f64:
9216 case NEON::BI__builtin_neon_vcltd_f64:
9217 case NEON::BI__builtin_neon_vcged_f64:
9218 case NEON::BI__builtin_neon_vcgtd_f64: {
9219 llvm::CmpInst::Predicate P;
9220 switch (BuiltinID) {
9221 default: llvm_unreachable("missing builtin ID in switch!");
9222 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
9223 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
9224 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
9225 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
9226 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
9227 }
9228 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9229 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9230 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9231 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9232 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
9233 }
9234 case NEON::BI__builtin_neon_vceqs_f32:
9235 case NEON::BI__builtin_neon_vcles_f32:
9236 case NEON::BI__builtin_neon_vclts_f32:
9237 case NEON::BI__builtin_neon_vcges_f32:
9238 case NEON::BI__builtin_neon_vcgts_f32: {
9239 llvm::CmpInst::Predicate P;
9240 switch (BuiltinID) {
9241 default: llvm_unreachable("missing builtin ID in switch!");
9242 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
9243 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
9244 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
9245 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
9246 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
9247 }
9248 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9249 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
9250 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
9251 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9252 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
9253 }
9254 case NEON::BI__builtin_neon_vceqh_f16:
9255 case NEON::BI__builtin_neon_vcleh_f16:
9256 case NEON::BI__builtin_neon_vclth_f16:
9257 case NEON::BI__builtin_neon_vcgeh_f16:
9258 case NEON::BI__builtin_neon_vcgth_f16: {
9259 llvm::CmpInst::Predicate P;
9260 switch (BuiltinID) {
9261 default: llvm_unreachable("missing builtin ID in switch!");
9262 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
9263 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
9264 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
9265 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
9266 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
9267 }
9268 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9269 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9270 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
9271 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9272 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
9273 }
9274 case NEON::BI__builtin_neon_vceqd_s64:
9275 case NEON::BI__builtin_neon_vceqd_u64:
9276 case NEON::BI__builtin_neon_vcgtd_s64:
9277 case NEON::BI__builtin_neon_vcgtd_u64:
9278 case NEON::BI__builtin_neon_vcltd_s64:
9279 case NEON::BI__builtin_neon_vcltd_u64:
9280 case NEON::BI__builtin_neon_vcged_u64:
9281 case NEON::BI__builtin_neon_vcged_s64:
9282 case NEON::BI__builtin_neon_vcled_u64:
9283 case NEON::BI__builtin_neon_vcled_s64: {
9284 llvm::CmpInst::Predicate P;
9285 switch (BuiltinID) {
9286 default: llvm_unreachable("missing builtin ID in switch!");
9287 case NEON::BI__builtin_neon_vceqd_s64:
9288 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
9289 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
9290 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
9291 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
9292 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
9293 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
9294 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
9295 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
9296 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
9297 }
9298 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9299 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9300 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9301 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
9302 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
9303 }
9304 case NEON::BI__builtin_neon_vtstd_s64:
9305 case NEON::BI__builtin_neon_vtstd_u64: {
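// vtst is a bitwise test: AND the two operands, compare against zero, and
// sign-extend the i1 result into an all-ones or all-zeros i64 mask.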
9306 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9307 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9308 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9309 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
9310 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
9311 llvm::Constant::getNullValue(Int64Ty));
9312 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
9313 }
9314 case NEON::BI__builtin_neon_vset_lane_i8:
9315 case NEON::BI__builtin_neon_vset_lane_i16:
9316 case NEON::BI__builtin_neon_vset_lane_i32:
9317 case NEON::BI__builtin_neon_vset_lane_i64:
9318 case NEON::BI__builtin_neon_vset_lane_bf16:
9319 case NEON::BI__builtin_neon_vset_lane_f32:
9320 case NEON::BI__builtin_neon_vsetq_lane_i8:
9321 case NEON::BI__builtin_neon_vsetq_lane_i16:
9322 case NEON::BI__builtin_neon_vsetq_lane_i32:
9323 case NEON::BI__builtin_neon_vsetq_lane_i64:
9324 case NEON::BI__builtin_neon_vsetq_lane_bf16:
9325 case NEON::BI__builtin_neon_vsetq_lane_f32:
9326 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9327 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9328 case NEON::BI__builtin_neon_vset_lane_f64:
9329 // The vector type needs a cast for the v1f64 variant.
9330 Ops[1] =
9331 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
9332 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9333 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9334 case NEON::BI__builtin_neon_vsetq_lane_f64:
9335 // The vector type needs a cast for the v2f64 variant.
9336 Ops[1] =
9337 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
9338 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9339 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9340
9341 case NEON::BI__builtin_neon_vget_lane_i8:
9342 case NEON::BI__builtin_neon_vdupb_lane_i8:
9343 Ops[0] =
9344 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
9345 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9346 "vget_lane");
9347 case NEON::BI__builtin_neon_vgetq_lane_i8:
9348 case NEON::BI__builtin_neon_vdupb_laneq_i8:
9349 Ops[0] =
9350 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
9351 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9352 "vgetq_lane");
9353 case NEON::BI__builtin_neon_vget_lane_i16:
9354 case NEON::BI__builtin_neon_vduph_lane_i16:
9355 Ops[0] =
9356 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
9357 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9358 "vget_lane");
9359 case NEON::BI__builtin_neon_vgetq_lane_i16:
9360 case NEON::BI__builtin_neon_vduph_laneq_i16:
9361 Ops[0] =
9362 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
9363 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9364 "vgetq_lane");
9365 case NEON::BI__builtin_neon_vget_lane_i32:
9366 case NEON::BI__builtin_neon_vdups_lane_i32:
9367 Ops[0] =
9368 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
9369 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9370 "vget_lane");
9371 case NEON::BI__builtin_neon_vdups_lane_f32:
9372 Ops[0] =
9373 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9374 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9375 "vdups_lane");
9376 case NEON::BI__builtin_neon_vgetq_lane_i32:
9377 case NEON::BI__builtin_neon_vdups_laneq_i32:
9378 Ops[0] =
9379 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
9380 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9381 "vgetq_lane");
9382 case NEON::BI__builtin_neon_vget_lane_i64:
9383 case NEON::BI__builtin_neon_vdupd_lane_i64:
9384 Ops[0] =
9385 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
9386 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9387 "vget_lane");
9388 case NEON::BI__builtin_neon_vdupd_lane_f64:
9389 Ops[0] =
9390 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9391 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9392 "vdupd_lane");
9393 case NEON::BI__builtin_neon_vgetq_lane_i64:
9394 case NEON::BI__builtin_neon_vdupd_laneq_i64:
9395 Ops[0] =
9396 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
9397 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9398 "vgetq_lane");
9399 case NEON::BI__builtin_neon_vget_lane_f32:
9400 Ops[0] =
9401 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9402 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9403 "vget_lane");
9404 case NEON::BI__builtin_neon_vget_lane_f64:
9405 Ops[0] =
9406 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9407 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9408 "vget_lane");
9409 case NEON::BI__builtin_neon_vgetq_lane_f32:
9410 case NEON::BI__builtin_neon_vdups_laneq_f32:
9411 Ops[0] =
9412 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
9413 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9414 "vgetq_lane");
9415 case NEON::BI__builtin_neon_vgetq_lane_f64:
9416 case NEON::BI__builtin_neon_vdupd_laneq_f64:
9417 Ops[0] =
9418 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
9419 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9420 "vgetq_lane");
9421 case NEON::BI__builtin_neon_vaddh_f16:
9422 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9423 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
9424 case NEON::BI__builtin_neon_vsubh_f16:
9425 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9426 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
9427 case NEON::BI__builtin_neon_vmulh_f16:
9428 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9429 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
9430 case NEON::BI__builtin_neon_vdivh_f16:
9431 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9432 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
9433 case NEON::BI__builtin_neon_vfmah_f16:
9434 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9435 return emitCallMaybeConstrainedFPBuiltin(
9436 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9437 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
9438 case NEON::BI__builtin_neon_vfmsh_f16: {
9439 // FIXME: This should be an fneg instruction:
9440 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
9441 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
9442
9443 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9444 return emitCallMaybeConstrainedFPBuiltin(
9445 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9446 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
9447 }
9448 case NEON::BI__builtin_neon_vaddd_s64:
9449 case NEON::BI__builtin_neon_vaddd_u64:
9450 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
9451 case NEON::BI__builtin_neon_vsubd_s64:
9452 case NEON::BI__builtin_neon_vsubd_u64:
9453 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
9454 case NEON::BI__builtin_neon_vqdmlalh_s16:
9455 case NEON::BI__builtin_neon_vqdmlslh_s16: {
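// The widening saturating multiply is performed on vectors: wrap the
// 16-bit scalars into v4i16, take lane 0 of the v4i32 product, then
// accumulate it with a scalar saturating add or subtract.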
9456 SmallVector<Value *, 2> ProductOps;
9457 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9458 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
9459 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9460 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9461 ProductOps, "vqdmlXl");
9462 Constant *CI = ConstantInt::get(SizeTy, 0);
9463 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9464
9465 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
9466 ? Intrinsic::aarch64_neon_sqadd
9467 : Intrinsic::aarch64_neon_sqsub;
9468 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
9469 }
9470 case NEON::BI__builtin_neon_vqshlud_n_s64: {
9471 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9472 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9473 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
9474 Ops, "vqshlu_n");
9475 }
9476 case NEON::BI__builtin_neon_vqshld_n_u64:
9477 case NEON::BI__builtin_neon_vqshld_n_s64: {
9478 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
9479 ? Intrinsic::aarch64_neon_uqshl
9480 : Intrinsic::aarch64_neon_sqshl;
9481 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9482 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9483 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
9484 }
9485 case NEON::BI__builtin_neon_vrshrd_n_u64:
9486 case NEON::BI__builtin_neon_vrshrd_n_s64: {
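// The rounding right shift is emitted as the rounding left shift
// intrinsic (urshl/srshl) with the constant shift amount negated.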
9487 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
9488 ? Intrinsic::aarch64_neon_urshl
9489 : Intrinsic::aarch64_neon_srshl;
9490 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9491 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
9492 Ops[1] = ConstantInt::get(Int64Ty, -SV);
9493 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
9494 }
9495 case NEON::BI__builtin_neon_vrsrad_n_u64:
9496 case NEON::BI__builtin_neon_vrsrad_n_s64: {
9497 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
9498 ? Intrinsic::aarch64_neon_urshl
9499 : Intrinsic::aarch64_neon_srshl;
9500 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9501 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
9502 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
9503 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
9504 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
9505 }
9506 case NEON::BI__builtin_neon_vshld_n_s64:
9507 case NEON::BI__builtin_neon_vshld_n_u64: {
9508 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9509 return Builder.CreateShl(
9510 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
9511 }
9512 case NEON::BI__builtin_neon_vshrd_n_s64: {
9513 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9514 return Builder.CreateAShr(
9515 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9516 Amt->getZExtValue())),
9517 "shrd_n");
9518 }
9519 case NEON::BI__builtin_neon_vshrd_n_u64: {
9520 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9521 uint64_t ShiftAmt = Amt->getZExtValue();
9522 // Right-shifting an unsigned value by its size yields 0.
9523 if (ShiftAmt == 64)
9524 return ConstantInt::get(Int64Ty, 0);
9525 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
9526 "shrd_n");
9527 }
9528 case NEON::BI__builtin_neon_vsrad_n_s64: {
9529 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9530 Ops[1] = Builder.CreateAShr(
9531 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9532 Amt->getZExtValue())),
9533 "shrd_n");
9534 return Builder.CreateAdd(Ops[0], Ops[1]);
9535 }
9536 case NEON::BI__builtin_neon_vsrad_n_u64: {
9537 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9538 uint64_t ShiftAmt = Amt->getZExtValue();
9539 // Right-shifting an unsigned value by its size yields 0.
9540 // As Op + 0 = Op, return Ops[0] directly.
9541 if (ShiftAmt == 64)
9542 return Ops[0];
9543 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
9544 "shrd_n");
9545 return Builder.CreateAdd(Ops[0], Ops[1]);
9546 }
9547 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
9548 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
9549 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
9550 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
9551 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9552 "lane");
9553 SmallVector<Value *, 2> ProductOps;
9554 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9555 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
9556 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9557 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9558 ProductOps, "vqdmlXl");
9559 Constant *CI = ConstantInt::get(SizeTy, 0);
9560 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9561 Ops.pop_back();
9562
9563 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
9564 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
9565 ? Intrinsic::aarch64_neon_sqadd
9566 : Intrinsic::aarch64_neon_sqsub;
9567 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
9568 }
9569 case NEON::BI__builtin_neon_vqdmlals_s32:
9570 case NEON::BI__builtin_neon_vqdmlsls_s32: {
9571 SmallVector<Value *, 2> ProductOps;
9572 ProductOps.push_back(Ops[1]);
9573 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
9574 Ops[1] =
9575 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9576 ProductOps, "vqdmlXl");
9577
9578 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
9579 ? Intrinsic::aarch64_neon_sqadd
9580 : Intrinsic::aarch64_neon_sqsub;
9581 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
9582 }
9583 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
9584 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
9585 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
9586 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
9587 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9588 "lane");
9589 SmallVector<Value *, 2> ProductOps;
9590 ProductOps.push_back(Ops[1]);
9591 ProductOps.push_back(Ops[2]);
9592 Ops[1] =
9593 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9594 ProductOps, "vqdmlXl");
9595 Ops.pop_back();
9596
9597 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
9598 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
9599 ? Intrinsic::aarch64_neon_sqadd
9600 : Intrinsic::aarch64_neon_sqsub;
9601 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
9602 }
9603 case NEON::BI__builtin_neon_vget_lane_bf16:
9604 case NEON::BI__builtin_neon_vduph_lane_bf16:
9605 case NEON::BI__builtin_neon_vduph_lane_f16: {
9606 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9607 "vget_lane");
9608 }
9609 case NEON::BI__builtin_neon_vgetq_lane_bf16:
9610 case NEON::BI__builtin_neon_vduph_laneq_bf16:
9611 case NEON::BI__builtin_neon_vduph_laneq_f16: {
9612 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9613 "vgetq_lane");
9614 }
9615 case AArch64::BI_BitScanForward:
9616 case AArch64::BI_BitScanForward64:
9617 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
9618 case AArch64::BI_BitScanReverse:
9619 case AArch64::BI_BitScanReverse64:
9620 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
9621 case AArch64::BI_InterlockedAnd64:
9622 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
9623 case AArch64::BI_InterlockedExchange64:
9624 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
9625 case AArch64::BI_InterlockedExchangeAdd64:
9626 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
9627 case AArch64::BI_InterlockedExchangeSub64:
9628 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
9629 case AArch64::BI_InterlockedOr64:
9630 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
9631 case AArch64::BI_InterlockedXor64:
9632 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
9633 case AArch64::BI_InterlockedDecrement64:
9634 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
9635 case AArch64::BI_InterlockedIncrement64:
9636 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
9637 case AArch64::BI_InterlockedExchangeAdd8_acq:
9638 case AArch64::BI_InterlockedExchangeAdd16_acq:
9639 case AArch64::BI_InterlockedExchangeAdd_acq:
9640 case AArch64::BI_InterlockedExchangeAdd64_acq:
9641 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
9642 case AArch64::BI_InterlockedExchangeAdd8_rel:
9643 case AArch64::BI_InterlockedExchangeAdd16_rel:
9644 case AArch64::BI_InterlockedExchangeAdd_rel:
9645 case AArch64::BI_InterlockedExchangeAdd64_rel:
9646 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
9647 case AArch64::BI_InterlockedExchangeAdd8_nf:
9648 case AArch64::BI_InterlockedExchangeAdd16_nf:
9649 case AArch64::BI_InterlockedExchangeAdd_nf:
9650 case AArch64::BI_InterlockedExchangeAdd64_nf:
9651 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
9652 case AArch64::BI_InterlockedExchange8_acq:
9653 case AArch64::BI_InterlockedExchange16_acq:
9654 case AArch64::BI_InterlockedExchange_acq:
9655 case AArch64::BI_InterlockedExchange64_acq:
9656 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
9657 case AArch64::BI_InterlockedExchange8_rel:
9658 case AArch64::BI_InterlockedExchange16_rel:
9659 case AArch64::BI_InterlockedExchange_rel:
9660 case AArch64::BI_InterlockedExchange64_rel:
9661 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
9662 case AArch64::BI_InterlockedExchange8_nf:
9663 case AArch64::BI_InterlockedExchange16_nf:
9664 case AArch64::BI_InterlockedExchange_nf:
9665 case AArch64::BI_InterlockedExchange64_nf:
9666 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
9667 case AArch64::BI_InterlockedCompareExchange8_acq:
9668 case AArch64::BI_InterlockedCompareExchange16_acq:
9669 case AArch64::BI_InterlockedCompareExchange_acq:
9670 case AArch64::BI_InterlockedCompareExchange64_acq:
9671 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
9672 case AArch64::BI_InterlockedCompareExchange8_rel:
9673 case AArch64::BI_InterlockedCompareExchange16_rel:
9674 case AArch64::BI_InterlockedCompareExchange_rel:
9675 case AArch64::BI_InterlockedCompareExchange64_rel:
9676 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
9677 case AArch64::BI_InterlockedCompareExchange8_nf:
9678 case AArch64::BI_InterlockedCompareExchange16_nf:
9679 case AArch64::BI_InterlockedCompareExchange_nf:
9680 case AArch64::BI_InterlockedCompareExchange64_nf:
9681 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
9682 case AArch64::BI_InterlockedOr8_acq:
9683 case AArch64::BI_InterlockedOr16_acq:
9684 case AArch64::BI_InterlockedOr_acq:
9685 case AArch64::BI_InterlockedOr64_acq:
9686 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
9687 case AArch64::BI_InterlockedOr8_rel:
9688 case AArch64::BI_InterlockedOr16_rel:
9689 case AArch64::BI_InterlockedOr_rel:
9690 case AArch64::BI_InterlockedOr64_rel:
9691 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
9692 case AArch64::BI_InterlockedOr8_nf:
9693 case AArch64::BI_InterlockedOr16_nf:
9694 case AArch64::BI_InterlockedOr_nf:
9695 case AArch64::BI_InterlockedOr64_nf:
9696 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
9697 case AArch64::BI_InterlockedXor8_acq:
9698 case AArch64::BI_InterlockedXor16_acq:
9699 case AArch64::BI_InterlockedXor_acq:
9700 case AArch64::BI_InterlockedXor64_acq:
9701 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
9702 case AArch64::BI_InterlockedXor8_rel:
9703 case AArch64::BI_InterlockedXor16_rel:
9704 case AArch64::BI_InterlockedXor_rel:
9705 case AArch64::BI_InterlockedXor64_rel:
9706 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
9707 case AArch64::BI_InterlockedXor8_nf:
9708 case AArch64::BI_InterlockedXor16_nf:
9709 case AArch64::BI_InterlockedXor_nf:
9710 case AArch64::BI_InterlockedXor64_nf:
9711 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
9712 case AArch64::BI_InterlockedAnd8_acq:
9713 case AArch64::BI_InterlockedAnd16_acq:
9714 case AArch64::BI_InterlockedAnd_acq:
9715 case AArch64::BI_InterlockedAnd64_acq:
9716 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
9717 case AArch64::BI_InterlockedAnd8_rel:
9718 case AArch64::BI_InterlockedAnd16_rel:
9719 case AArch64::BI_InterlockedAnd_rel:
9720 case AArch64::BI_InterlockedAnd64_rel:
9721 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
9722 case AArch64::BI_InterlockedAnd8_nf:
9723 case AArch64::BI_InterlockedAnd16_nf:
9724 case AArch64::BI_InterlockedAnd_nf:
9725 case AArch64::BI_InterlockedAnd64_nf:
9726 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
9727 case AArch64::BI_InterlockedIncrement16_acq:
9728 case AArch64::BI_InterlockedIncrement_acq:
9729 case AArch64::BI_InterlockedIncrement64_acq:
9730 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
9731 case AArch64::BI_InterlockedIncrement16_rel:
9732 case AArch64::BI_InterlockedIncrement_rel:
9733 case AArch64::BI_InterlockedIncrement64_rel:
9734 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
9735 case AArch64::BI_InterlockedIncrement16_nf:
9736 case AArch64::BI_InterlockedIncrement_nf:
9737 case AArch64::BI_InterlockedIncrement64_nf:
9738 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
9739 case AArch64::BI_InterlockedDecrement16_acq:
9740 case AArch64::BI_InterlockedDecrement_acq:
9741 case AArch64::BI_InterlockedDecrement64_acq:
9742 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
9743 case AArch64::BI_InterlockedDecrement16_rel:
9744 case AArch64::BI_InterlockedDecrement_rel:
9745 case AArch64::BI_InterlockedDecrement64_rel:
9746 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
9747 case AArch64::BI_InterlockedDecrement16_nf:
9748 case AArch64::BI_InterlockedDecrement_nf:
9749 case AArch64::BI_InterlockedDecrement64_nf:
9750 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
9751
9752 case AArch64::BI_InterlockedAdd: {
9753 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9754 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9755 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
9756 AtomicRMWInst::Add, Arg0, Arg1,
9757 llvm::AtomicOrdering::SequentiallyConsistent);
9758 return Builder.CreateAdd(RMWI, Arg1);
9759 }
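// Editorial note (not part of CGBuiltin.cpp): a sketch of why the IR emitted
// above adds Arg1 to the atomicrmw result. LLVM's 'atomicrmw add' yields the
// value *before* the addition, while _InterlockedAdd is expected to return the
// value *after* it, so the addend is applied once more to the returned value.
// interlocked_add_reference is a hypothetical name used only for illustration.
#include <atomic>
#include <cstdint>

static int32_t interlocked_add_reference(std::atomic<int32_t> &Dest,
                                         int32_t Value) {
  int32_t Old = Dest.fetch_add(Value, std::memory_order_seq_cst); // old value
  return Old + Value;                                             // new value
}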
9760 }
9761
9762 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
9763 llvm::Type *Ty = VTy;
9764 if (!Ty)
9765 return nullptr;
9766
9767 // Not all intrinsics handled by the common case work for AArch64 yet, so only
9768 // defer to common code if it's been added to our special map.
9769 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
9770 AArch64SIMDIntrinsicsProvenSorted);
9771
9772 if (Builtin)
9773 return EmitCommonNeonBuiltinExpr(
9774 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
9775 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
9776 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
9777
9778 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
9779 return V;
9780
9781 unsigned Int;
9782 switch (BuiltinID) {
9783 default: return nullptr;
9784 case NEON::BI__builtin_neon_vbsl_v:
9785 case NEON::BI__builtin_neon_vbslq_v: {
9786 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
9787 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
9788 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
9789 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
9790
9791 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
9792 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
9793 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
9794 return Builder.CreateBitCast(Ops[0], Ty);
9795 }
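// Editorial note (not part of CGBuiltin.cpp): per-bit reference semantics of
// the vbsl lowering above, shown on a single 64-bit word; the IR performs the
// same (mask & a) | (~mask & b) select on the integer bitcast of the whole
// vector. bsl_reference is a hypothetical name used only for illustration.
#include <cstdint>

static uint64_t bsl_reference(uint64_t Mask, uint64_t A, uint64_t B) {
  return (Mask & A) | (~Mask & B); // take bits of A where Mask is 1, else B
}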
9796 case NEON::BI__builtin_neon_vfma_lane_v:
9797 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
9798 // The ARM builtins (and instructions) have the addend as the first
9799 // operand, but the 'fma' intrinsics have it last. Swap it around here.
9800 Value *Addend = Ops[0];
9801 Value *Multiplicand = Ops[1];
9802 Value *LaneSource = Ops[2];
9803 Ops[0] = Multiplicand;
9804 Ops[1] = LaneSource;
9805 Ops[2] = Addend;
9806
9807 // Now adjust things to handle the lane access.
9808 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
9809 ? llvm::FixedVectorType::get(VTy->getElementType(),
9810 VTy->getNumElements() / 2)
9811 : VTy;
9812 llvm::Constant *cst = cast<Constant>(Ops[3]);
9813 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
9814 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
9815 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
9816
9817 Ops.pop_back();
9818 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
9819 : Intrinsic::fma;
9820 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
9821 }
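// Editorial note (not part of CGBuiltin.cpp): scalar view of the fmla-by-lane
// lowering above. The NEON builtin takes the addend as the first operand while
// llvm.fma takes it last, hence the operand rotation before the lane splat.
// fma_lane_reference is a hypothetical name used only for illustration.
#include <cmath>

static double fma_lane_reference(double Addend, double Multiplicand,
                                 const double *LaneSource, unsigned Lane) {
  return std::fma(Multiplicand, LaneSource[Lane], Addend); // mul * lane + addend
}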
9822 case NEON::BI__builtin_neon_vfma_laneq_v: {
9823 auto *VTy = cast<llvm::FixedVectorType>(Ty);
9824 // v1f64 fma should be mapped to Neon scalar f64 fma
9825 if (VTy && VTy->getElementType() == DoubleTy) {
9826 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9827 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9828 llvm::FixedVectorType *VTy =
9829 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
9830 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
9831 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
9832 Value *Result;
9833 Result = emitCallMaybeConstrainedFPBuiltin(
9834 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
9835 DoubleTy, {Ops[1], Ops[2], Ops[0]});
9836 return Builder.CreateBitCast(Result, Ty);
9837 }
9838 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9839 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9840
9841 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
9842 VTy->getNumElements() * 2);
9843 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
9844 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
9845 cast<ConstantInt>(Ops[3]));
9846 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
9847
9848 return emitCallMaybeConstrainedFPBuiltin(
9849 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9850 {Ops[2], Ops[1], Ops[0]});
9851 }
9852 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
9853 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9854 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9855
9856 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9857 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
9858 return emitCallMaybeConstrainedFPBuiltin(
9859 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9860 {Ops[2], Ops[1], Ops[0]});
9861 }
9862 case NEON::BI__builtin_neon_vfmah_lane_f16:
9863 case NEON::BI__builtin_neon_vfmas_lane_f32:
9864 case NEON::BI__builtin_neon_vfmah_laneq_f16:
9865 case NEON::BI__builtin_neon_vfmas_laneq_f32:
9866 case NEON::BI__builtin_neon_vfmad_lane_f64:
9867 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
9868 Ops.push_back(EmitScalarExpr(E->getArg(3)));
9869 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
9870 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
9871 return emitCallMaybeConstrainedFPBuiltin(
9872 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9873 {Ops[1], Ops[2], Ops[0]});
9874 }
9875 case NEON::BI__builtin_neon_vmull_v:
9876 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9877 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
9878 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
9879 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
9880 case NEON::BI__builtin_neon_vmax_v:
9881 case NEON::BI__builtin_neon_vmaxq_v:
9882 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9883 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
9884 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
9885 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
9886 case NEON::BI__builtin_neon_vmaxh_f16: {
9887 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9888 Int = Intrinsic::aarch64_neon_fmax;
9889 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
9890 }
9891 case NEON::BI__builtin_neon_vmin_v:
9892 case NEON::BI__builtin_neon_vminq_v:
9893 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9894 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
9895 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
9896 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
9897 case NEON::BI__builtin_neon_vminh_f16: {
9898 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9899 Int = Intrinsic::aarch64_neon_fmin;
9900 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
9901 }
9902 case NEON::BI__builtin_neon_vabd_v:
9903 case NEON::BI__builtin_neon_vabdq_v:
9904 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9905 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
9906 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
9907 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
9908 case NEON::BI__builtin_neon_vpadal_v:
9909 case NEON::BI__builtin_neon_vpadalq_v: {
9910 unsigned ArgElts = VTy->getNumElements();
9911 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
9912 unsigned BitWidth = EltTy->getBitWidth();
9913 auto *ArgTy = llvm::FixedVectorType::get(
9914 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
9915 llvm::Type* Tys[2] = { VTy, ArgTy };
9916 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
9917 SmallVector<llvm::Value*, 1> TmpOps;
9918 TmpOps.push_back(Ops[1]);
9919 Function *F = CGM.getIntrinsic(Int, Tys);
9920 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
9921 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
9922 return Builder.CreateAdd(tmp, addend);
9923 }
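// Editorial note (not part of CGBuiltin.cpp): reference semantics of the
// vpadal lowering above for one concrete shape (4 x i32 accumulator, 8 x i16
// input, i.e. the vpadalq_s16 case); the IR calls aarch64.neon.[us]addlp on
// the narrow operand and then emits a plain vector add with the accumulator.
// padal_reference is a hypothetical name used only for illustration.
#include <cstdint>

static void padal_reference(int32_t Acc[4], const int16_t In[8]) {
  for (unsigned i = 0; i != 4; ++i)
    Acc[i] += int32_t(In[2 * i]) + int32_t(In[2 * i + 1]); // pairwise add-long, accumulate
}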
9924 case NEON::BI__builtin_neon_vpmin_v:
9925 case NEON::BI__builtin_neon_vpminq_v:
9926 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9927 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
9928 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
9929 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
9930 case NEON::BI__builtin_neon_vpmax_v:
9931 case NEON::BI__builtin_neon_vpmaxq_v:
9932 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9933 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
9934 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
9935 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
9936 case NEON::BI__builtin_neon_vminnm_v:
9937 case NEON::BI__builtin_neon_vminnmq_v:
9938 Int = Intrinsic::aarch64_neon_fminnm;
9939 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
9940 case NEON::BI__builtin_neon_vminnmh_f16:
9941 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9942 Int = Intrinsic::aarch64_neon_fminnm;
9943 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
9944 case NEON::BI__builtin_neon_vmaxnm_v:
9945 case NEON::BI__builtin_neon_vmaxnmq_v:
9946 Int = Intrinsic::aarch64_neon_fmaxnm;
9947 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
9948 case NEON::BI__builtin_neon_vmaxnmh_f16:
9949 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9950 Int = Intrinsic::aarch64_neon_fmaxnm;
9951 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
9952 case NEON::BI__builtin_neon_vrecpss_f32: {
9953 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9954 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
9955 Ops, "vrecps");
9956 }
9957 case NEON::BI__builtin_neon_vrecpsd_f64:
9958 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9959 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
9960 Ops, "vrecps");
9961 case NEON::BI__builtin_neon_vrecpsh_f16:
9962 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9963 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
9964 Ops, "vrecps");
9965 case NEON::BI__builtin_neon_vqshrun_n_v:
9966 Int = Intrinsic::aarch64_neon_sqshrun;
9967 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
9968 case NEON::BI__builtin_neon_vqrshrun_n_v:
9969 Int = Intrinsic::aarch64_neon_sqrshrun;
9970 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
9971 case NEON::BI__builtin_neon_vqshrn_n_v:
9972 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
9973 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
9974 case NEON::BI__builtin_neon_vrshrn_n_v:
9975 Int = Intrinsic::aarch64_neon_rshrn;
9976 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
9977 case NEON::BI__builtin_neon_vqrshrn_n_v:
9978 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
9979 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
9980 case NEON::BI__builtin_neon_vrndah_f16: {
9981 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9982 Int = Builder.getIsFPConstrained()
9983 ? Intrinsic::experimental_constrained_round
9984 : Intrinsic::round;
9985 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
9986 }
9987 case NEON::BI__builtin_neon_vrnda_v:
9988 case NEON::BI__builtin_neon_vrndaq_v: {
9989 Int = Builder.getIsFPConstrained()
9990 ? Intrinsic::experimental_constrained_round
9991 : Intrinsic::round;
9992 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
9993 }
9994 case NEON::BI__builtin_neon_vrndih_f16: {
9995 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9996 Int = Builder.getIsFPConstrained()
9997 ? Intrinsic::experimental_constrained_nearbyint
9998 : Intrinsic::nearbyint;
9999 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10000 }
10001 case NEON::BI__builtin_neon_vrndmh_f16: {
10002 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10003 Int = Builder.getIsFPConstrained()
10004 ? Intrinsic::experimental_constrained_floor
10005 : Intrinsic::floor;
10006 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10007 }
10008 case NEON::BI__builtin_neon_vrndm_v:
10009 case NEON::BI__builtin_neon_vrndmq_v: {
10010 Int = Builder.getIsFPConstrained()
10011 ? Intrinsic::experimental_constrained_floor
10012 : Intrinsic::floor;
10013 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10014 }
10015 case NEON::BI__builtin_neon_vrndnh_f16: {
10016 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10017 Int = Intrinsic::aarch64_neon_frintn;
10018 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10019 }
10020 case NEON::BI__builtin_neon_vrndn_v:
10021 case NEON::BI__builtin_neon_vrndnq_v: {
10022 Int = Intrinsic::aarch64_neon_frintn;
10023 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10024 }
10025 case NEON::BI__builtin_neon_vrndns_f32: {
10026 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10027 Int = Intrinsic::aarch64_neon_frintn;
10028 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10029 }
10030 case NEON::BI__builtin_neon_vrndph_f16: {
10031 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10032 Int = Builder.getIsFPConstrained()
10033 ? Intrinsic::experimental_constrained_ceil
10034 : Intrinsic::ceil;
10035 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10036 }
10037 case NEON::BI__builtin_neon_vrndp_v:
10038 case NEON::BI__builtin_neon_vrndpq_v: {
10039 Int = Builder.getIsFPConstrained()
10040 ? Intrinsic::experimental_constrained_ceil
10041 : Intrinsic::ceil;
10042 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10043 }
10044 case NEON::BI__builtin_neon_vrndxh_f16: {
10045 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10046 Int = Builder.getIsFPConstrained()
10047 ? Intrinsic::experimental_constrained_rint
10048 : Intrinsic::rint;
10049 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10050 }
10051 case NEON::BI__builtin_neon_vrndx_v:
10052 case NEON::BI__builtin_neon_vrndxq_v: {
10053 Int = Builder.getIsFPConstrained()
10054 ? Intrinsic::experimental_constrained_rint
10055 : Intrinsic::rint;
10056 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10057 }
10058 case NEON::BI__builtin_neon_vrndh_f16: {
10059 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10060 Int = Builder.getIsFPConstrained()
10061 ? Intrinsic::experimental_constrained_trunc
10062 : Intrinsic::trunc;
10063 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10064 }
10065 case NEON::BI__builtin_neon_vrnd_v:
10066 case NEON::BI__builtin_neon_vrndq_v: {
10067 Int = Builder.getIsFPConstrained()
10068 ? Intrinsic::experimental_constrained_trunc
10069 : Intrinsic::trunc;
10070 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10071 }
10072 case NEON::BI__builtin_neon_vcvt_f64_v:
10073 case NEON::BI__builtin_neon_vcvtq_f64_v:
10074 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10075 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10076 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10077 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10078 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10079 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10080        "unexpected vcvt_f64_f32 builtin");
10081 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10082 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10083
10084 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10085 }
10086 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10087 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10088        "unexpected vcvt_f32_f64 builtin");
10089 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10090 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10091
10092 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10093 }
10094 case NEON::BI__builtin_neon_vcvt_s32_v:
10095 case NEON::BI__builtin_neon_vcvt_u32_v:
10096 case NEON::BI__builtin_neon_vcvt_s64_v:
10097 case NEON::BI__builtin_neon_vcvt_u64_v:
10098 case NEON::BI__builtin_neon_vcvt_s16_v:
10099 case NEON::BI__builtin_neon_vcvt_u16_v:
10100 case NEON::BI__builtin_neon_vcvtq_s32_v:
10101 case NEON::BI__builtin_neon_vcvtq_u32_v:
10102 case NEON::BI__builtin_neon_vcvtq_s64_v:
10103 case NEON::BI__builtin_neon_vcvtq_u64_v:
10104 case NEON::BI__builtin_neon_vcvtq_s16_v:
10105 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10106 Int =
10107 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10108 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10109 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10110 }
10111 case NEON::BI__builtin_neon_vcvta_s16_v:
10112 case NEON::BI__builtin_neon_vcvta_u16_v:
10113 case NEON::BI__builtin_neon_vcvta_s32_v:
10114 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10115 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10116 case NEON::BI__builtin_neon_vcvta_u32_v:
10117 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10118 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10119 case NEON::BI__builtin_neon_vcvta_s64_v:
10120 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10121 case NEON::BI__builtin_neon_vcvta_u64_v:
10122 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10123 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10124 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10125 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10126 }
10127 case NEON::BI__builtin_neon_vcvtm_s16_v:
10128 case NEON::BI__builtin_neon_vcvtm_s32_v:
10129 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10130 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10131 case NEON::BI__builtin_neon_vcvtm_u16_v:
10132 case NEON::BI__builtin_neon_vcvtm_u32_v:
10133 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10134 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10135 case NEON::BI__builtin_neon_vcvtm_s64_v:
10136 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10137 case NEON::BI__builtin_neon_vcvtm_u64_v:
10138 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10139 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10140 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10141 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10142 }
10143 case NEON::BI__builtin_neon_vcvtn_s16_v:
10144 case NEON::BI__builtin_neon_vcvtn_s32_v:
10145 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10146 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10147 case NEON::BI__builtin_neon_vcvtn_u16_v:
10148 case NEON::BI__builtin_neon_vcvtn_u32_v:
10149 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10150 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10151 case NEON::BI__builtin_neon_vcvtn_s64_v:
10152 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10153 case NEON::BI__builtin_neon_vcvtn_u64_v:
10154 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10155 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10156 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10157 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10158 }
10159 case NEON::BI__builtin_neon_vcvtp_s16_v:
10160 case NEON::BI__builtin_neon_vcvtp_s32_v:
10161 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10162 case NEON::BI__builtin_neon_vcvtpq_s32_v:
10163 case NEON::BI__builtin_neon_vcvtp_u16_v:
10164 case NEON::BI__builtin_neon_vcvtp_u32_v:
10165 case NEON::BI__builtin_neon_vcvtpq_u16_v:
10166 case NEON::BI__builtin_neon_vcvtpq_u32_v:
10167 case NEON::BI__builtin_neon_vcvtp_s64_v:
10168 case NEON::BI__builtin_neon_vcvtpq_s64_v:
10169 case NEON::BI__builtin_neon_vcvtp_u64_v:
10170 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
10171 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
10172 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10173 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
10174 }
10175 case NEON::BI__builtin_neon_vmulx_v:
10176 case NEON::BI__builtin_neon_vmulxq_v: {
10177 Int = Intrinsic::aarch64_neon_fmulx;
10178 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
10179 }
10180 case NEON::BI__builtin_neon_vmulxh_lane_f16:
10181 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
10182 // vmulx_lane should be mapped to Neon scalar mulx after
10183 // extracting the scalar element
10184 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10185 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10186 Ops.pop_back();
10187 Int = Intrinsic::aarch64_neon_fmulx;
10188 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
10189 }
10190 case NEON::BI__builtin_neon_vmul_lane_v:
10191 case NEON::BI__builtin_neon_vmul_laneq_v: {
10192 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
10193 bool Quad = false;
10194 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
10195 Quad = true;
10196 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10197 llvm::FixedVectorType *VTy =
10198 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
10199 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10200 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10201 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
10202 return Builder.CreateBitCast(Result, Ty);
10203 }
10204 case NEON::BI__builtin_neon_vnegd_s64:
10205 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
10206 case NEON::BI__builtin_neon_vnegh_f16:
10207 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
10208 case NEON::BI__builtin_neon_vpmaxnm_v:
10209 case NEON::BI__builtin_neon_vpmaxnmq_v: {
10210 Int = Intrinsic::aarch64_neon_fmaxnmp;
10211 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
10212 }
10213 case NEON::BI__builtin_neon_vpminnm_v:
10214 case NEON::BI__builtin_neon_vpminnmq_v: {
10215 Int = Intrinsic::aarch64_neon_fminnmp;
10216 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
10217 }
10218 case NEON::BI__builtin_neon_vsqrth_f16: {
10219 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10220 Int = Builder.getIsFPConstrained()
10221 ? Intrinsic::experimental_constrained_sqrt
10222 : Intrinsic::sqrt;
10223 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
10224 }
10225 case NEON::BI__builtin_neon_vsqrt_v:
10226 case NEON::BI__builtin_neon_vsqrtq_v: {
10227 Int = Builder.getIsFPConstrained()
10228 ? Intrinsic::experimental_constrained_sqrt
10229 : Intrinsic::sqrt;
10230 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10231 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
10232 }
10233 case NEON::BI__builtin_neon_vrbit_v:
10234 case NEON::BI__builtin_neon_vrbitq_v: {
10235 Int = Intrinsic::aarch64_neon_rbit;
10236 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
10237 }
10238 case NEON::BI__builtin_neon_vaddv_u8:
10239 // FIXME: These are handled by the AArch64 scalar code.
10240 usgn = true;
10241 LLVM_FALLTHROUGH;
10242 case NEON::BI__builtin_neon_vaddv_s8: {
10243 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10244 Ty = Int32Ty;
10245 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10246 llvm::Type *Tys[2] = { Ty, VTy };
10247 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10248 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10249 return Builder.CreateTrunc(Ops[0], Int8Ty);
10250 }
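// Editorial note (not part of CGBuiltin.cpp): reference semantics of the
// vaddv_s8 lowering above. The aarch64.neon.saddv intrinsic reduces the eight
// lanes into an i32, which the code then truncates back to the 8-bit element
// type. addv_s8_reference is a hypothetical name used only for illustration.
#include <cstdint>

static int8_t addv_s8_reference(const int8_t V[8]) {
  int32_t Sum = 0;
  for (unsigned i = 0; i != 8; ++i)
    Sum += V[i];
  return static_cast<int8_t>(Sum); // mirrors the CreateTrunc back to Int8Ty
}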
10251 case NEON::BI__builtin_neon_vaddv_u16:
10252 usgn = true;
10253 LLVM_FALLTHROUGH;
10254 case NEON::BI__builtin_neon_vaddv_s16: {
10255 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10256 Ty = Int32Ty;
10257 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10258 llvm::Type *Tys[2] = { Ty, VTy };
10259 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10260 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10261 return Builder.CreateTrunc(Ops[0], Int16Ty);
10262 }
10263 case NEON::BI__builtin_neon_vaddvq_u8:
10264 usgn = true;
10265 LLVM_FALLTHROUGH;
10266 case NEON::BI__builtin_neon_vaddvq_s8: {
10267 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10268 Ty = Int32Ty;
10269 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10270 llvm::Type *Tys[2] = { Ty, VTy };
10271 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10272 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10273 return Builder.CreateTrunc(Ops[0], Int8Ty);
10274 }
10275 case NEON::BI__builtin_neon_vaddvq_u16:
10276 usgn = true;
10277 LLVM_FALLTHROUGH;
10278 case NEON::BI__builtin_neon_vaddvq_s16: {
10279 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10280 Ty = Int32Ty;
10281 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10282 llvm::Type *Tys[2] = { Ty, VTy };
10283 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10284 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10285 return Builder.CreateTrunc(Ops[0], Int16Ty);
10286 }
10287 case NEON::BI__builtin_neon_vmaxv_u8: {
10288 Int = Intrinsic::aarch64_neon_umaxv;
10289 Ty = Int32Ty;
10290 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10291 llvm::Type *Tys[2] = { Ty, VTy };
10292 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10293 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10294 return Builder.CreateTrunc(Ops[0], Int8Ty);
10295 }
10296 case NEON::BI__builtin_neon_vmaxv_u16: {
10297 Int = Intrinsic::aarch64_neon_umaxv;
10298 Ty = Int32Ty;
10299 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10300 llvm::Type *Tys[2] = { Ty, VTy };
10301 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10302 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10303 return Builder.CreateTrunc(Ops[0], Int16Ty);
10304 }
10305 case NEON::BI__builtin_neon_vmaxvq_u8: {
10306 Int = Intrinsic::aarch64_neon_umaxv;
10307 Ty = Int32Ty;
10308 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10309 llvm::Type *Tys[2] = { Ty, VTy };
10310 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10311 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10312 return Builder.CreateTrunc(Ops[0], Int8Ty);
10313 }
10314 case NEON::BI__builtin_neon_vmaxvq_u16: {
10315 Int = Intrinsic::aarch64_neon_umaxv;
10316 Ty = Int32Ty;
10317 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10318 llvm::Type *Tys[2] = { Ty, VTy };
10319 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10320 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10321 return Builder.CreateTrunc(Ops[0], Int16Ty);
10322 }
10323 case NEON::BI__builtin_neon_vmaxv_s8: {
10324 Int = Intrinsic::aarch64_neon_smaxv;
10325 Ty = Int32Ty;
10326 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10327 llvm::Type *Tys[2] = { Ty, VTy };
10328 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10329 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10330 return Builder.CreateTrunc(Ops[0], Int8Ty);
10331 }
10332 case NEON::BI__builtin_neon_vmaxv_s16: {
10333 Int = Intrinsic::aarch64_neon_smaxv;
10334 Ty = Int32Ty;
10335 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10336 llvm::Type *Tys[2] = { Ty, VTy };
10337 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10338 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10339 return Builder.CreateTrunc(Ops[0], Int16Ty);
10340 }
10341 case NEON::BI__builtin_neon_vmaxvq_s8: {
10342 Int = Intrinsic::aarch64_neon_smaxv;
10343 Ty = Int32Ty;
10344 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10345 llvm::Type *Tys[2] = { Ty, VTy };
10346 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10347 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10348 return Builder.CreateTrunc(Ops[0], Int8Ty);
10349 }
10350 case NEON::BI__builtin_neon_vmaxvq_s16: {
10351 Int = Intrinsic::aarch64_neon_smaxv;
10352 Ty = Int32Ty;
10353 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10354 llvm::Type *Tys[2] = { Ty, VTy };
10355 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10356 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10357 return Builder.CreateTrunc(Ops[0], Int16Ty);
10358 }
10359 case NEON::BI__builtin_neon_vmaxv_f16: {
10360 Int = Intrinsic::aarch64_neon_fmaxv;
10361 Ty = HalfTy;
10362 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10363 llvm::Type *Tys[2] = { Ty, VTy };
10364 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10365 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10366 return Builder.CreateTrunc(Ops[0], HalfTy);
10367 }
10368 case NEON::BI__builtin_neon_vmaxvq_f16: {
10369 Int = Intrinsic::aarch64_neon_fmaxv;
10370 Ty = HalfTy;
10371 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10372 llvm::Type *Tys[2] = { Ty, VTy };
10373 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10374 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10375 return Builder.CreateTrunc(Ops[0], HalfTy);
10376 }
10377 case NEON::BI__builtin_neon_vminv_u8: {
10378 Int = Intrinsic::aarch64_neon_uminv;
10379 Ty = Int32Ty;
10380 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10381 llvm::Type *Tys[2] = { Ty, VTy };
10382 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10383 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10384 return Builder.CreateTrunc(Ops[0], Int8Ty);
10385 }
10386 case NEON::BI__builtin_neon_vminv_u16: {
10387 Int = Intrinsic::aarch64_neon_uminv;
10388 Ty = Int32Ty;
10389 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10390 llvm::Type *Tys[2] = { Ty, VTy };
10391 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10392 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10393 return Builder.CreateTrunc(Ops[0], Int16Ty);
10394 }
10395 case NEON::BI__builtin_neon_vminvq_u8: {
10396 Int = Intrinsic::aarch64_neon_uminv;
10397 Ty = Int32Ty;
10398 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10399 llvm::Type *Tys[2] = { Ty, VTy };
10400 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10401 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10402 return Builder.CreateTrunc(Ops[0], Int8Ty);
10403 }
10404 case NEON::BI__builtin_neon_vminvq_u16: {
10405 Int = Intrinsic::aarch64_neon_uminv;
10406 Ty = Int32Ty;
10407 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10408 llvm::Type *Tys[2] = { Ty, VTy };
10409 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10410 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10411 return Builder.CreateTrunc(Ops[0], Int16Ty);
10412 }
10413 case NEON::BI__builtin_neon_vminv_s8: {
10414 Int = Intrinsic::aarch64_neon_sminv;
10415 Ty = Int32Ty;
10416 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10417 llvm::Type *Tys[2] = { Ty, VTy };
10418 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10419 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10420 return Builder.CreateTrunc(Ops[0], Int8Ty);
10421 }
10422 case NEON::BI__builtin_neon_vminv_s16: {
10423 Int = Intrinsic::aarch64_neon_sminv;
10424 Ty = Int32Ty;
10425 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10426 llvm::Type *Tys[2] = { Ty, VTy };
10427 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10428 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10429 return Builder.CreateTrunc(Ops[0], Int16Ty);
10430 }
10431 case NEON::BI__builtin_neon_vminvq_s8: {
10432 Int = Intrinsic::aarch64_neon_sminv;
10433 Ty = Int32Ty;
10434 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10435 llvm::Type *Tys[2] = { Ty, VTy };
10436 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10437 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10438 return Builder.CreateTrunc(Ops[0], Int8Ty);
10439 }
10440 case NEON::BI__builtin_neon_vminvq_s16: {
10441 Int = Intrinsic::aarch64_neon_sminv;
10442 Ty = Int32Ty;
10443 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10444 llvm::Type *Tys[2] = { Ty, VTy };
10445 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10446 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10447 return Builder.CreateTrunc(Ops[0], Int16Ty);
10448 }
10449 case NEON::BI__builtin_neon_vminv_f16: {
10450 Int = Intrinsic::aarch64_neon_fminv;
10451 Ty = HalfTy;
10452 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10453 llvm::Type *Tys[2] = { Ty, VTy };
10454 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10455 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10456 return Builder.CreateTrunc(Ops[0], HalfTy);
10457 }
10458 case NEON::BI__builtin_neon_vminvq_f16: {
10459 Int = Intrinsic::aarch64_neon_fminv;
10460 Ty = HalfTy;
10461 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10462 llvm::Type *Tys[2] = { Ty, VTy };
10463 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10464 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10465 return Builder.CreateTrunc(Ops[0], HalfTy);
10466 }
10467 case NEON::BI__builtin_neon_vmaxnmv_f16: {
10468 Int = Intrinsic::aarch64_neon_fmaxnmv;
10469 Ty = HalfTy;
10470 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10471 llvm::Type *Tys[2] = { Ty, VTy };
10472 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10473 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10474 return Builder.CreateTrunc(Ops[0], HalfTy);
10475 }
10476 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
10477 Int = Intrinsic::aarch64_neon_fmaxnmv;
10478 Ty = HalfTy;
10479 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10480 llvm::Type *Tys[2] = { Ty, VTy };
10481 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10482 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10483 return Builder.CreateTrunc(Ops[0], HalfTy);
10484 }
10485 case NEON::BI__builtin_neon_vminnmv_f16: {
10486 Int = Intrinsic::aarch64_neon_fminnmv;
10487 Ty = HalfTy;
10488 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10489 llvm::Type *Tys[2] = { Ty, VTy };
10490 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10491 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10492 return Builder.CreateTrunc(Ops[0], HalfTy);
10493 }
10494 case NEON::BI__builtin_neon_vminnmvq_f16: {
10495 Int = Intrinsic::aarch64_neon_fminnmv;
10496 Ty = HalfTy;
10497 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10498 llvm::Type *Tys[2] = { Ty, VTy };
10499 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10500 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10501 return Builder.CreateTrunc(Ops[0], HalfTy);
10502 }
10503 case NEON::BI__builtin_neon_vmul_n_f64: {
10504 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10505 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
10506 return Builder.CreateFMul(Ops[0], RHS);
10507 }
10508 case NEON::BI__builtin_neon_vaddlv_u8: {
10509 Int = Intrinsic::aarch64_neon_uaddlv;
10510 Ty = Int32Ty;
10511 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10512 llvm::Type *Tys[2] = { Ty, VTy };
10513 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10514 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10515 return Builder.CreateTrunc(Ops[0], Int16Ty);
10516 }
10517 case NEON::BI__builtin_neon_vaddlv_u16: {
10518 Int = Intrinsic::aarch64_neon_uaddlv;
10519 Ty = Int32Ty;
10520 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10521 llvm::Type *Tys[2] = { Ty, VTy };
10522 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10523 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10524 }
10525 case NEON::BI__builtin_neon_vaddlvq_u8: {
10526 Int = Intrinsic::aarch64_neon_uaddlv;
10527 Ty = Int32Ty;
10528 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10529 llvm::Type *Tys[2] = { Ty, VTy };
10530 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10531 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10532 return Builder.CreateTrunc(Ops[0], Int16Ty);
10533 }
10534 case NEON::BI__builtin_neon_vaddlvq_u16: {
10535 Int = Intrinsic::aarch64_neon_uaddlv;
10536 Ty = Int32Ty;
10537 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10538 llvm::Type *Tys[2] = { Ty, VTy };
10539 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10540 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10541 }
10542 case NEON::BI__builtin_neon_vaddlv_s8: {
10543 Int = Intrinsic::aarch64_neon_saddlv;
10544 Ty = Int32Ty;
10545 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10546 llvm::Type *Tys[2] = { Ty, VTy };
10547 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10548 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10549 return Builder.CreateTrunc(Ops[0], Int16Ty);
10550 }
10551 case NEON::BI__builtin_neon_vaddlv_s16: {
10552 Int = Intrinsic::aarch64_neon_saddlv;
10553 Ty = Int32Ty;
10554 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10555 llvm::Type *Tys[2] = { Ty, VTy };
10556 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10557 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10558 }
10559 case NEON::BI__builtin_neon_vaddlvq_s8: {
10560 Int = Intrinsic::aarch64_neon_saddlv;
10561 Ty = Int32Ty;
10562 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10563 llvm::Type *Tys[2] = { Ty, VTy };
10564 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10565 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10566 return Builder.CreateTrunc(Ops[0], Int16Ty);
10567 }
10568 case NEON::BI__builtin_neon_vaddlvq_s16: {
10569 Int = Intrinsic::aarch64_neon_saddlv;
10570 Ty = Int32Ty;
10571 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10572 llvm::Type *Tys[2] = { Ty, VTy };
10573 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10574 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10575 }
10576 case NEON::BI__builtin_neon_vsri_n_v:
10577 case NEON::BI__builtin_neon_vsriq_n_v: {
10578 Int = Intrinsic::aarch64_neon_vsri;
10579 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10580 return EmitNeonCall(Intrin, Ops, "vsri_n");
10581 }
10582 case NEON::BI__builtin_neon_vsli_n_v:
10583 case NEON::BI__builtin_neon_vsliq_n_v: {
10584 Int = Intrinsic::aarch64_neon_vsli;
10585 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10586 return EmitNeonCall(Intrin, Ops, "vsli_n");
10587 }
10588 case NEON::BI__builtin_neon_vsra_n_v:
10589 case NEON::BI__builtin_neon_vsraq_n_v:
10590 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10591 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
10592 return Builder.CreateAdd(Ops[0], Ops[1]);
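// Editorial note (not part of CGBuiltin.cpp): the vsra_n lowering above is an
// element-wise shift right (logical or arithmetic, chosen in
// EmitNeonRShiftImm) followed by an ordinary add. Per-lane reference, assuming
// unsigned 32-bit lanes; sra_n_reference is a hypothetical illustrative name.
#include <cstdint>

static uint32_t sra_n_reference(uint32_t Acc, uint32_t V, unsigned Shift) {
  return Acc + (V >> Shift); // shift right, then accumulate
}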
10593 case NEON::BI__builtin_neon_vrsra_n_v:
10594 case NEON::BI__builtin_neon_vrsraq_n_v: {
10595 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
10596 SmallVector<llvm::Value*,2> TmpOps;
10597 TmpOps.push_back(Ops[1]);
10598 TmpOps.push_back(Ops[2]);
10599 Function* F = CGM.getIntrinsic(Int, Ty);
10600 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
10601 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
10602 return Builder.CreateAdd(Ops[0], tmp);
10603 }
10604 case NEON::BI__builtin_neon_vld1_v:
10605 case NEON::BI__builtin_neon_vld1q_v: {
10606 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10607 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
10608 }
10609 case NEON::BI__builtin_neon_vst1_v:
10610 case NEON::BI__builtin_neon_vst1q_v:
10611 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10612 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10613 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
10614 case NEON::BI__builtin_neon_vld1_lane_v:
10615 case NEON::BI__builtin_neon_vld1q_lane_v: {
10616 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10617 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10618 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10619 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10620 PtrOp0.getAlignment());
10621 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
10622 }
10623 case NEON::BI__builtin_neon_vld1_dup_v:
10624 case NEON::BI__builtin_neon_vld1q_dup_v: {
10625 Value *V = UndefValue::get(Ty);
10626 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10627 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10628 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10629 PtrOp0.getAlignment());
10630 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
10631 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
10632 return EmitNeonSplat(Ops[0], CI);
10633 }
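// Editorial note (not part of CGBuiltin.cpp): the vld1_dup lowering above
// loads a single scalar, inserts it into lane 0 of an undef vector, and splats
// it with EmitNeonSplat. Reference for a 4-lane float vector; ld1_dup_reference
// is a hypothetical name used only for illustration.
static void ld1_dup_reference(float Out[4], const float *Ptr) {
  float S = *Ptr;   // scalar load at the pointer's natural alignment
  for (unsigned i = 0; i != 4; ++i)
    Out[i] = S;     // broadcast (splat) of lane 0
}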
10634 case NEON::BI__builtin_neon_vst1_lane_v:
10635 case NEON::BI__builtin_neon_vst1q_lane_v:
10636 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10637 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
10638 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10639 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
10640 PtrOp0.getAlignment());
10641 case NEON::BI__builtin_neon_vld2_v:
10642 case NEON::BI__builtin_neon_vld2q_v: {
10643 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10644 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10645 llvm::Type *Tys[2] = { VTy, PTy };
10646 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
10647 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10648 Ops[0] = Builder.CreateBitCast(Ops[0],
10649 llvm::PointerType::getUnqual(Ops[1]->getType()));
10650 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10651 }
10652 case NEON::BI__builtin_neon_vld3_v:
10653 case NEON::BI__builtin_neon_vld3q_v: {
10654 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10655 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10656 llvm::Type *Tys[2] = { VTy, PTy };
10657 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
10658 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10659 Ops[0] = Builder.CreateBitCast(Ops[0],
10660 llvm::PointerType::getUnqual(Ops[1]->getType()));
10661 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10662 }
10663 case NEON::BI__builtin_neon_vld4_v:
10664 case NEON::BI__builtin_neon_vld4q_v: {
10665 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10666 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10667 llvm::Type *Tys[2] = { VTy, PTy };
10668 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
10669 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10670 Ops[0] = Builder.CreateBitCast(Ops[0],
10671 llvm::PointerType::getUnqual(Ops[1]->getType()));
10672 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10673 }
10674 case NEON::BI__builtin_neon_vld2_dup_v:
10675 case NEON::BI__builtin_neon_vld2q_dup_v: {
10676 llvm::Type *PTy =
10677 llvm::PointerType::getUnqual(VTy->getElementType());
10678 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10679 llvm::Type *Tys[2] = { VTy, PTy };
10680 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
10681 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10682 Ops[0] = Builder.CreateBitCast(Ops[0],
10683 llvm::PointerType::getUnqual(Ops[1]->getType()));
10684 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10685 }
10686 case NEON::BI__builtin_neon_vld3_dup_v:
10687 case NEON::BI__builtin_neon_vld3q_dup_v: {
10688 llvm::Type *PTy =
10689 llvm::PointerType::getUnqual(VTy->getElementType());
10690 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10691 llvm::Type *Tys[2] = { VTy, PTy };
10692 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
10693 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10694 Ops[0] = Builder.CreateBitCast(Ops[0],
10695 llvm::PointerType::getUnqual(Ops[1]->getType()));
10696 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10697 }
10698 case NEON::BI__builtin_neon_vld4_dup_v:
10699 case NEON::BI__builtin_neon_vld4q_dup_v: {
10700 llvm::Type *PTy =
10701 llvm::PointerType::getUnqual(VTy->getElementType());
10702 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10703 llvm::Type *Tys[2] = { VTy, PTy };
10704 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
10705 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10706 Ops[0] = Builder.CreateBitCast(Ops[0],
10707 llvm::PointerType::getUnqual(Ops[1]->getType()));
10708 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10709 }
10710 case NEON::BI__builtin_neon_vld2_lane_v:
10711 case NEON::BI__builtin_neon_vld2q_lane_v: {
10712 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10713 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
10714 Ops.push_back(Ops[1]);
10715 Ops.erase(Ops.begin()+1);
10716 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10717 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10718 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10719 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
10720 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10721 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10722 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10723 }
10724 case NEON::BI__builtin_neon_vld3_lane_v:
10725 case NEON::BI__builtin_neon_vld3q_lane_v: {
10726 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10727 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
10728 Ops.push_back(Ops[1]);
10729 Ops.erase(Ops.begin()+1);
10730 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10731 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10732 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10733 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10734 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
10735 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10736 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10737 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10738 }
10739 case NEON::BI__builtin_neon_vld4_lane_v:
10740 case NEON::BI__builtin_neon_vld4q_lane_v: {
10741 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10742 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
10743 Ops.push_back(Ops[1]);
10744 Ops.erase(Ops.begin()+1);
10745 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10746 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10747 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10748 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
10749 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
10750 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
10751 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10752 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10753 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10754 }
10755 case NEON::BI__builtin_neon_vst2_v:
10756 case NEON::BI__builtin_neon_vst2q_v: {
10757 Ops.push_back(Ops[0]);
10758 Ops.erase(Ops.begin());
10759 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
10760 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
10761 Ops, "");
10762 }
10763 case NEON::BI__builtin_neon_vst2_lane_v:
10764 case NEON::BI__builtin_neon_vst2q_lane_v: {
10765 Ops.push_back(Ops[0]);
10766 Ops.erase(Ops.begin());
10767 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
10768 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10769 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
10770 Ops, "");
10771 }
10772 case NEON::BI__builtin_neon_vst3_v:
10773 case NEON::BI__builtin_neon_vst3q_v: {
10774 Ops.push_back(Ops[0]);
10775 Ops.erase(Ops.begin());
10776 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10777 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
10778 Ops, "");
10779 }
10780 case NEON::BI__builtin_neon_vst3_lane_v:
10781 case NEON::BI__builtin_neon_vst3q_lane_v: {
10782 Ops.push_back(Ops[0]);
10783 Ops.erase(Ops.begin());
10784 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10785 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10786 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
10787 Ops, "");
10788 }
10789 case NEON::BI__builtin_neon_vst4_v:
10790 case NEON::BI__builtin_neon_vst4q_v: {
10791 Ops.push_back(Ops[0]);
10792 Ops.erase(Ops.begin());
10793 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10794 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
10795 Ops, "");
10796 }
10797 case NEON::BI__builtin_neon_vst4_lane_v:
10798 case NEON::BI__builtin_neon_vst4q_lane_v: {
10799 Ops.push_back(Ops[0]);
10800 Ops.erase(Ops.begin());
10801 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10802 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
10803 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
10804 Ops, "");
10805 }
10806 case NEON::BI__builtin_neon_vtrn_v:
10807 case NEON::BI__builtin_neon_vtrnq_v: {
10808 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10809 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10810 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10811 Value *SV = nullptr;
10812
10813 for (unsigned vi = 0; vi != 2; ++vi) {
10814 SmallVector<int, 16> Indices;
10815 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
10816 Indices.push_back(i+vi);
10817 Indices.push_back(i+e+vi);
10818 }
10819 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10820 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
10821 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10822 }
10823 return SV;
10824 }
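// Editorial note (not part of CGBuiltin.cpp): the vtrn lowering above emits
// two shuffles whose masks pair matching even (vi = 0) or odd (vi = 1) lanes
// of the two inputs, storing each result to consecutive slots of the result
// pointer. A sketch of the index construction for e lanes, mirroring the loop;
// trn_indices is a hypothetical name used only for illustration.
#include <vector>

static std::vector<int> trn_indices(unsigned e, unsigned vi) {
  std::vector<int> Indices;
  for (unsigned i = 0; i != e; i += 2) {
    Indices.push_back(i + vi);     // lane from the first input
    Indices.push_back(i + e + vi); // matching lane from the second input
  }
  return Indices;
}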
10825 case NEON::BI__builtin_neon_vuzp_v:
10826 case NEON::BI__builtin_neon_vuzpq_v: {
10827 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10828 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10829 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10830 Value *SV = nullptr;
10831
10832 for (unsigned vi = 0; vi != 2; ++vi) {
10833 SmallVector<int, 16> Indices;
10834 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
10835 Indices.push_back(2*i+vi);
10836
10837 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10838 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
10839 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10840 }
10841 return SV;
10842 }
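// Editorial note (not part of CGBuiltin.cpp): the vuzp lowering above
// deinterleaves -- the first shuffle (vi = 0) gathers the even lanes and the
// second (vi = 1) the odd lanes across the concatenation of the two inputs.
// uzp_indices is a hypothetical name used only for illustration.
#include <vector>

static std::vector<int> uzp_indices(unsigned e, unsigned vi) {
  std::vector<int> Indices;
  for (unsigned i = 0; i != e; ++i)
    Indices.push_back(2 * i + vi); // even lanes for vi = 0, odd lanes for vi = 1
  return Indices;
}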
10843 case NEON::BI__builtin_neon_vzip_v:
10844 case NEON::BI__builtin_neon_vzipq_v: {
10845 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10846 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10847 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10848 Value *SV = nullptr;
10849
10850 for (unsigned vi = 0; vi != 2; ++vi) {
10851 SmallVector<int, 16> Indices;
10852 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
10853 Indices.push_back((i + vi*e) >> 1);
10854 Indices.push_back(((i + vi*e) >> 1)+e);
10855 }
10856 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10857 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
10858 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10859 }
10860 return SV;
10861 }
10862 case NEON::BI__builtin_neon_vqtbl1q_v: {
10863 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
10864 Ops, "vtbl1");
10865 }
10866 case NEON::BI__builtin_neon_vqtbl2q_v: {
10867 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
10868 Ops, "vtbl2");
10869 }
10870 case NEON::BI__builtin_neon_vqtbl3q_v: {
10871 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
10872 Ops, "vtbl3");
10873 }
10874 case NEON::BI__builtin_neon_vqtbl4q_v: {
10875 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
10876 Ops, "vtbl4");
10877 }
10878 case NEON::BI__builtin_neon_vqtbx1q_v: {
10879 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
10880 Ops, "vtbx1");
10881 }
10882 case NEON::BI__builtin_neon_vqtbx2q_v: {
10883 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
10884 Ops, "vtbx2");
10885 }
10886 case NEON::BI__builtin_neon_vqtbx3q_v: {
10887 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
10888 Ops, "vtbx3");
10889 }
10890 case NEON::BI__builtin_neon_vqtbx4q_v: {
10891 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
10892 Ops, "vtbx4");
10893 }
10894 case NEON::BI__builtin_neon_vsqadd_v:
10895 case NEON::BI__builtin_neon_vsqaddq_v: {
10896 Int = Intrinsic::aarch64_neon_usqadd;
10897 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
10898 }
10899 case NEON::BI__builtin_neon_vuqadd_v:
10900 case NEON::BI__builtin_neon_vuqaddq_v: {
10901 Int = Intrinsic::aarch64_neon_suqadd;
10902 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
10903 }
10904 }
10905}
10906
10907Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
10908 const CallExpr *E) {
10909 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
10910 BuiltinID == BPF::BI__builtin_btf_type_id ||
10911 BuiltinID == BPF::BI__builtin_preserve_type_info ||
10912 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
10913 "unexpected BPF builtin");
10914
10915 // A sequence number, injected into IR builtin functions, to
10916 // prevent CSE, since the only difference between the functions
10917 // may just be the debuginfo metadata.
10918 static uint32_t BuiltinSeqNum;
10919
10920 switch (BuiltinID) {
10921 default:
10922 llvm_unreachable("Unexpected BPF builtin");
10923 case BPF::BI__builtin_preserve_field_info: {
10924 const Expr *Arg = E->getArg(0);
10925 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
10926
10927 if (!getDebugInfo()) {
10928 CGM.Error(E->getExprLoc(),
10929 "using __builtin_preserve_field_info() without -g");
10930 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
10931 : EmitLValue(Arg).getPointer(*this);
10932 }
10933
10934 // Enable underlying preserve_*_access_index() generation.
10935 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
10936 IsInPreservedAIRegion = true;
10937 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
10938 : EmitLValue(Arg).getPointer(*this);
10939 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
10940
10941 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10942 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
10943
10944 // Build the IR for the preserve_field_info intrinsic.
10945 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
10946 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
10947 {FieldAddr->getType()});
10948 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
10949 }
10950 case BPF::BI__builtin_btf_type_id:
10951 case BPF::BI__builtin_preserve_type_info: {
10952 if (!getDebugInfo()) {
10953 CGM.Error(E->getExprLoc(), "using builtin function without -g");
10954 return nullptr;
10955 }
10956
10957 const Expr *Arg0 = E->getArg(0);
10958 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
10959 Arg0->getType(), Arg0->getExprLoc());
10960
10961 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10962 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
10963 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
10964
10965 llvm::Function *FnDecl;
10966 if (BuiltinID == BPF::BI__builtin_btf_type_id)
10967 FnDecl = llvm::Intrinsic::getDeclaration(
10968 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
10969 else
10970 FnDecl = llvm::Intrinsic::getDeclaration(
10971 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
10972 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
10973 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
10974 return Fn;
10975 }
10976 case BPF::BI__builtin_preserve_enum_value: {
10977 if (!getDebugInfo()) {
10978 CGM.Error(E->getExprLoc(), "using builtin function without -g");
10979 return nullptr;
10980 }
10981
10982 const Expr *Arg0 = E->getArg(0);
10983 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
10984 Arg0->getType(), Arg0->getExprLoc());
10985
10986 // Find enumerator
10987 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
10988 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
10989 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
10990 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
10991
10992 auto &InitVal = Enumerator->getInitVal();
10993 std::string InitValStr;
10994 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
10995 InitValStr = std::to_string(InitVal.getSExtValue());
10996 else
10997 InitValStr = std::to_string(InitVal.getZExtValue());
10998 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
10999 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11000
11001 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11002 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11003 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11004
11005 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11006 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11007 CallInst *Fn =
11008 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11009 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11010 return Fn;
11011 }
11012 }
11013}
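// Annotation (added for clarity, not part of the original file): the static
// sequence number above is baked into each emitted intrinsic call as a
// distinct ConstantInt, so two otherwise identical calls, schematically
//   call @llvm.bpf.btf.type.id(i32 0, i64 %flag)   ; first use
//   call @llvm.bpf.btf.type.id(i32 1, i64 %flag)   ; second use
// can no longer be CSE'd even though their only other difference is the
// attached !llvm.preserve.access.index metadata. Exact operand/return types
// are omitted here and shown only schematically.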
11014
11015llvm::Value *CodeGenFunction::
11016BuildVector(ArrayRef<llvm::Value*> Ops) {
11017 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11018 "Not a power-of-two sized vector!");
11019 bool AllConstants = true;
11020 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11021 AllConstants &= isa<Constant>(Ops[i]);
11022
11023 // If this is a constant vector, create a ConstantVector.
11024 if (AllConstants) {
11025 SmallVector<llvm::Constant*, 16> CstOps;
11026 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11027 CstOps.push_back(cast<Constant>(Ops[i]));
11028 return llvm::ConstantVector::get(CstOps);
11029 }
11030
11031 // Otherwise, insertelement the values to build the vector.
11032 Value *Result = llvm::UndefValue::get(
11033 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11034
11035 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11036 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11037
11038 return Result;
11039}
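// Annotation (added for clarity, not part of the original file): a sketch of
// the two paths above. With all-constant operands, BuildVector({0,1,2,3})
// folds directly into
//   <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// whereas with at least one non-constant operand it emits an insertelement
// chain, schematically:
//   %v0 = insertelement <4 x i32> undef, i32 %a, i32 0
//   %v1 = insertelement <4 x i32> %v0,   i32 %b, i32 1
//   ...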
11040
11041// Convert the mask from an integer type to a vector of i1.
11042static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11043 unsigned NumElts) {
11044
11045 auto *MaskTy = llvm::FixedVectorType::get(
11046 CGF.Builder.getInt1Ty(),
11047 cast<IntegerType>(Mask->getType())->getBitWidth());
11048 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11049
11050 // If we have fewer than 8 elements, then the starting mask was an i8 and
11051 // we need to extract down to the right number of elements.
11052 if (NumElts < 8) {
11053 int Indices[4];
11054 for (unsigned i = 0; i != NumElts; ++i)
11055 Indices[i] = i;
11056 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11057 makeArrayRef(Indices, NumElts),
11058 "extract");
11059 }
11060 return MaskVec;
11061}
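// Annotation (added for clarity, not part of the original file): for a
// __mmask8 operand that only covers 4 vector elements, the helper above
// produces IR along these lines (value names are made up for the example):
//   %vec     = bitcast i8 %mask to <8 x i1>
//   %extract = shufflevector <8 x i1> %vec, <8 x i1> %vec,
//                            <4 x i32> <i32 0, i32 1, i32 2, i32 3>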
11062
11063static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11064 Align Alignment) {
11065 // Cast the pointer to the right type.
11066 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11067 llvm::PointerType::getUnqual(Ops[1]->getType()));
11068
11069 Value *MaskVec = getMaskVecValue(
11070 CGF, Ops[2],
11071 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11072
11073 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11074}
11075
11076static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11077 Align Alignment) {
11078 // Cast the pointer to the right type.
11079 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11080 llvm::PointerType::getUnqual(Ops[1]->getType()));
11081
11082 Value *MaskVec = getMaskVecValue(
11083 CGF, Ops[2],
11084 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11085
11086 return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
11087}
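// Annotation (added for clarity, not part of the original file): for a
// 128-bit float load this lowers to the generic masked-load intrinsic,
// schematically:
//   %r = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(
//            <4 x float>* %ptr, i32 1, <4 x i1> %mask, <4 x float> %passthru)
// where the i32 1 is the Align(1) passed in for the unaligned loadu forms.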
11088
11089static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11090 ArrayRef<Value *> Ops) {
11091 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11092 llvm::Type *PtrTy = ResultTy->getElementType();
11093
11094 // Cast the pointer to the element type.
11095 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11096 llvm::PointerType::getUnqual(PtrTy));
11097
11098 Value *MaskVec = getMaskVecValue(
11099 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11100
11101 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11102 ResultTy);
11103 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11104}
11105
11106static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11107 ArrayRef<Value *> Ops,
11108 bool IsCompress) {
11109 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11110
11111 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11112
11113 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11114 : Intrinsic::x86_avx512_mask_expand;
11115 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11116 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11117}
11118
11119static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11120 ArrayRef<Value *> Ops) {
11121 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11122 llvm::Type *PtrTy = ResultTy->getElementType();
11123
11124 // Cast the pointer to the element type.
11125 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11126 llvm::PointerType::getUnqual(PtrTy));
11127
11128 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11129
11130 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11131 ResultTy);
11132 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11133}
11134
11135static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11136 ArrayRef<Value *> Ops,
11137 bool InvertLHS = false) {
11138 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11139 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11140 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11141
11142 if (InvertLHS)
11143 LHS = CGF.Builder.CreateNot(LHS);
11144
11145 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11146 Ops[0]->getType());
11147}
11148
11149static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11150 Value *Amt, bool IsRight) {
11151 llvm::Type *Ty = Op0->getType();
11152
11153 // The amount may be a scalar immediate, in which case create a splat vector.
11154 // Funnel shift amounts are treated as modulo, and the types are all power-of-2
11155 // sized, so we only care about the lowest log2 bits anyway.
11156 if (Amt->getType() != Ty) {
11157 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11158 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11159 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11160 }
11161
11162 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11163 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11164 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11165}
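// Annotation (added for clarity, not part of the original file): e.g. a left
// funnel shift of v4i32 operands with a scalar amount becomes, schematically:
//   %amt.splat = the truncated amount splatted to <4 x i32>
//   %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                        <4 x i32> %amt.splat)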
11166
11167static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11168 bool IsSigned) {
11169 Value *Op0 = Ops[0];
11170 Value *Op1 = Ops[1];
11171 llvm::Type *Ty = Op0->getType();
11172 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11173
11174 CmpInst::Predicate Pred;
11175 switch (Imm) {
11176 case 0x0:
11177 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11178 break;
11179 case 0x1:
11180 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
11181 break;
11182 case 0x2:
11183 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11184 break;
11185 case 0x3:
11186 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11187 break;
11188 case 0x4:
11189 Pred = ICmpInst::ICMP_EQ;
11190 break;
11191 case 0x5:
11192 Pred = ICmpInst::ICMP_NE;
11193 break;
11194 case 0x6:
11195 return llvm::Constant::getNullValue(Ty); // FALSE
11196 case 0x7:
11197 return llvm::Constant::getAllOnesValue(Ty); // TRUE
11198 default:
11199 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
11200 }
11201
11202 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
11203 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
11204 return Res;
11205}
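// Annotation (added for clarity, not part of the original file): e.g. the
// signed byte form with immediate 0x2 (greater-than) comes out as,
// schematically:
//   %cmp = icmp sgt <16 x i8> %a, %b
//   %res = sext <16 x i1> %cmp to <16 x i8>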
11206
11207static Value *EmitX86Select(CodeGenFunction &CGF,
11208 Value *Mask, Value *Op0, Value *Op1) {
11209
11210 // If the mask is all ones just return first argument.
11211 if (const auto *C = dyn_cast<Constant>(Mask))
11212 if (C->isAllOnesValue())
11213 return Op0;
11214
11215 Mask = getMaskVecValue(
11216 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
11217
11218 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11219}
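// Annotation (added for clarity, not part of the original file): with a
// non-constant mask this is just a vector select after the mask conversion
// above, schematically:
//   %m   = bitcast i16 %mask to <16 x i1>
//   %sel = select <16 x i1> %m, <16 x float> %op0, <16 x float> %op1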
11220
11221static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
11222 Value *Mask, Value *Op0, Value *Op1) {
11223 // If the mask is all ones just return first argument.
11224 if (const auto *C = dyn_cast<Constant>(Mask))
11225 if (C->isAllOnesValue())
11226 return Op0;
11227
11228 auto *MaskTy = llvm::FixedVectorType::get(
11229 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
11230 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
11231 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
11232 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11233}
11234
11235static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
11236 unsigned NumElts, Value *MaskIn) {
11237 if (MaskIn) {
11238 const auto *C = dyn_cast<Constant>(MaskIn);
11239 if (!C || !C->isAllOnesValue())
11240 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
11241 }
11242
11243 if (NumElts < 8) {
11244 int Indices[8];
11245 for (unsigned i = 0; i != NumElts; ++i)
11246 Indices[i] = i;
11247 for (unsigned i = NumElts; i != 8; ++i)
11248 Indices[i] = i % NumElts + NumElts;
11249 Cmp = CGF.Builder.CreateShuffleVector(
11250 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
11251 }
11252
11253 return CGF.Builder.CreateBitCast(Cmp,
11254 IntegerType::get(CGF.getLLVMContext(),
11255 std::max(NumElts, 8U)));
11256}
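// Annotation (added for clarity, not part of the original file): a 4-element
// compare result is widened to 8 i1s (upper lanes taken from the zero vector)
// before the final bitcast so it can round-trip through a __mmask8,
// schematically:
//   %pad = shufflevector <4 x i1> %cmp, <4 x i1> zeroinitializer,
//                        <8 x i32> <i32 0, i32 1, i32 2, i32 3,
//                                   i32 4, i32 5, i32 6, i32 7>
//   %res = bitcast <8 x i1> %pad to i8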
11257
11258static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
11259 bool Signed, ArrayRef<Value *> Ops) {
11260 assert((Ops.size() == 2 || Ops.size() == 4) &&
11261 "Unexpected number of arguments");
11262 unsigned NumElts =
11263 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11264 Value *Cmp;
11265
11266 if (CC == 3) {
11267 Cmp = Constant::getNullValue(
11268 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11269 } else if (CC == 7) {
11270 Cmp = Constant::getAllOnesValue(
11271 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11272 } else {
11273 ICmpInst::Predicate Pred;
11274 switch (CC) {
11275 default: llvm_unreachable("Unknown condition code");
11276 case 0: Pred = ICmpInst::ICMP_EQ; break;
11277 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
11278 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
11279 case 4: Pred = ICmpInst::ICMP_NE; break;
11280 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
11281 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
11282 }
11283 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11284 }
11285
11286 Value *MaskIn = nullptr;
11287 if (Ops.size() == 4)
11288 MaskIn = Ops[3];
11289
11290 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
11291}
11292
11293static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
11294 Value *Zero = Constant::getNullValue(In->getType());
11295 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
11296}
11297
11298static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
11299 ArrayRef<Value *> Ops, bool IsSigned) {
11300 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
11301 llvm::Type *Ty = Ops[1]->getType();
11302
11303 Value *Res;
11304 if (Rnd != 4) {
11305 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
11306 : Intrinsic::x86_avx512_uitofp_round;
11307 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
11308 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
11309 } else {
11310 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
11311 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
11312 }
11313
11314 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11315}
11316
11317// Lowers X86 FMA intrinsics to IR.
11318static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11319 unsigned BuiltinID, bool IsAddSub) {
11320
11321 bool Subtract = false;
11322 Intrinsic::ID IID = Intrinsic::not_intrinsic;
11323 switch (BuiltinID) {
11324 default: break;
11325 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11326 Subtract = true;
11327 LLVM_FALLTHROUGH;
11328 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11329 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11330 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11331 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
11332 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11333 Subtract = true;
11334 LLVM_FALLTHROUGH;
11335 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11336 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11337 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11338 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
11339 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11340 Subtract = true;
11341 LLVM_FALLTHROUGH;
11342 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11343 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11344 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11345 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
11346 break;
11347 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11348 Subtract = true;
11349 LLVM_FALLTHROUGH;
11350 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11351 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11352 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11353 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
11354 break;
11355 }
11356
11357 Value *A = Ops[0];
11358 Value *B = Ops[1];
11359 Value *C = Ops[2];
11360
11361 if (Subtract)
11362 C = CGF.Builder.CreateFNeg(C);
11363
11364 Value *Res;
11365
11366 // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
11367 if (IID != Intrinsic::not_intrinsic &&
11368 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
11369 IsAddSub)) {
11370 Function *Intr = CGF.CGM.getIntrinsic(IID);
11371 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
11372 } else {
11373 llvm::Type *Ty = A->getType();
11374 Function *FMA;
11375 if (CGF.Builder.getIsFPConstrained()) {
11376 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
11377 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
11378 } else {
11379 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
11380 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
11381 }
11382 }
11383
11384 // Handle any required masking.
11385 Value *MaskFalseVal = nullptr;
11386 switch (BuiltinID) {
11387 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11388 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11389 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11390 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11391 MaskFalseVal = Ops[0];
11392 break;
11393 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11394 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11395 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11396 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11397 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
11398 break;
11399 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11400 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11401 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11402 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11403 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11404 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11405 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11406 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11407 MaskFalseVal = Ops[2];
11408 break;
11409 }
11410
11411 if (MaskFalseVal)
11412 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
11413
11414 return Res;
11415}
11416
11417static Value *
11418EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
11419 Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
11420 bool NegAcc = false) {
11421 unsigned Rnd = 4;
11422 if (Ops.size() > 4)
11423 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11424
11425 if (NegAcc)
11426 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
11427
11428 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11429 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11430 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11431 Value *Res;
11432 if (Rnd != 4) {
11433 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
11434 Intrinsic::x86_avx512_vfmadd_f32 :
11435 Intrinsic::x86_avx512_vfmadd_f64;
11436 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11437 {Ops[0], Ops[1], Ops[2], Ops[4]});
11438 } else if (CGF.Builder.getIsFPConstrained()) {
11439 Function *FMA = CGF.CGM.getIntrinsic(
11440 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
11441 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
11442 } else {
11443 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
11444 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
11445 }
11446 // If we have more than 3 arguments, we need to do masking.
11447 if (Ops.size() > 3) {
11448 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
11449 : Ops[PTIdx];
11450
11451 // If we negated the accumulator and it's the PassThru value, we need to
11452 // bypass the negate. Conveniently Upper should be the same thing in this
11453 // case.
11454 if (NegAcc && PTIdx == 2)
11455 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
11456
11457 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
11458 }
11459 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
11460}
11461
11462static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
11463 ArrayRef<Value *> Ops) {
11464 llvm::Type *Ty = Ops[0]->getType();
11465 // Arguments have a vXi32 type so cast to vXi64.
11466 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
11467 Ty->getPrimitiveSizeInBits() / 64);
11468 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
11469 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
11470
11471 if (IsSigned) {
11472 // Shift left then arithmetic shift right.
11473 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
11474 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
11475 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
11476 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
11477 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
11478 } else {
11479 // Clear the upper bits.
11480 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
11481 LHS = CGF.Builder.CreateAnd(LHS, Mask);
11482 RHS = CGF.Builder.CreateAnd(RHS, Mask);
11483 }
11484
11485 return CGF.Builder.CreateMul(LHS, RHS);
11486}
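// Annotation (added for clarity, not part of the original file): the shl/ashr
// pair above is the usual sign-extension-in-place trick. E.g. if the low
// 32 bits of a lane hold 0xFFFFFFFF (-1), then after `shl 32` the lane is
// 0xFFFFFFFF00000000 and after `ashr 32` it is 0xFFFFFFFFFFFFFFFF, i.e. -1
// sign-extended to 64 bits, ready for the full 64-bit multiply.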
11487
11488// Emit a masked pternlog intrinsic. This only exists because the header has to
11489// use a macro and we aren't able to pass the input argument to a pternlog
11490// builtin and a select builtin without evaluating it twice.
11491static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
11492 ArrayRef<Value *> Ops) {
11493 llvm::Type *Ty = Ops[0]->getType();
11494
11495 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
11496 unsigned EltWidth = Ty->getScalarSizeInBits();
11497 Intrinsic::ID IID;
11498 if (VecWidth == 128 && EltWidth == 32)
11499 IID = Intrinsic::x86_avx512_pternlog_d_128;
11500 else if (VecWidth == 256 && EltWidth == 32)
11501 IID = Intrinsic::x86_avx512_pternlog_d_256;
11502 else if (VecWidth == 512 && EltWidth == 32)
11503 IID = Intrinsic::x86_avx512_pternlog_d_512;
11504 else if (VecWidth == 128 && EltWidth == 64)
11505 IID = Intrinsic::x86_avx512_pternlog_q_128;
11506 else if (VecWidth == 256 && EltWidth == 64)
11507 IID = Intrinsic::x86_avx512_pternlog_q_256;
11508 else if (VecWidth == 512 && EltWidth == 64)
11509 IID = Intrinsic::x86_avx512_pternlog_q_512;
11510 else
11511 llvm_unreachable("Unexpected intrinsic");
11512
11513 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11514 Ops.drop_back());
11515 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
11516 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
11517}
11518
11519static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
11520 llvm::Type *DstTy) {
11521 unsigned NumberOfElements =
11522 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11523 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
11524 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
11525}
11526
11527// Emit binary intrinsic with the same type used in result/args.
11528static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
11529 ArrayRef<Value *> Ops, Intrinsic::ID IID) {
11530 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
11531 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
11532}
11533
11534Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
11535 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
11536 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
11537 return EmitX86CpuIs(CPUStr);
11538}
11539
11540// Convert F16 halfs to floats.
11541static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
11542 ArrayRef<Value *> Ops,
11543 llvm::Type *DstTy) {
11544 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
11545 "Unknown cvtph2ps intrinsic");
11546
11547 // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
11548 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
11549 Function *F =
11550 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
11551 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
11552 }
11553
11554 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11555 Value *Src = Ops[0];
11556
11557 // Extract the subvector.
11558 if (NumDstElts !=
11559 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
11560 assert(NumDstElts == 4 && "Unexpected vector size");
11561 Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
11562 ArrayRef<int>{0, 1, 2, 3});
11563 }
11564
11565 // Bitcast from vXi16 to vXf16.
11566 auto *HalfTy = llvm::FixedVectorType::get(
11567 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
11568 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
11569
11570 // Perform the fp-extension.
11571 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
11572
11573 if (Ops.size() >= 3)
11574 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11575 return Res;
11576}
11577
11578// Convert a BF16 to a float.
11579static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
11580 const CallExpr *E,
11581 ArrayRef<Value *> Ops) {
11582 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
11583 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
11584 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
11585 llvm::Type *ResultType = CGF.ConvertType(E->getType());
11586 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
11587 return BitCast;
11588}
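// Annotation (added for clarity, not part of the original file): bf16 is just
// the top 16 bits of an IEEE-754 single-precision float, so widening is a
// 16-bit left shift. E.g. the bf16 bit pattern 0x3F80 becomes 0x3F800000
// after the zext + `shl 16`, which bitcasts to the float 1.0.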
11589
11590Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
11591
11592 llvm::Type *Int32Ty = Builder.getInt32Ty();
11593
11594 // Matching the struct layout from the compiler-rt/libgcc structure that is
11595 // filled in:
11596 // unsigned int __cpu_vendor;
11597 // unsigned int __cpu_type;
11598 // unsigned int __cpu_subtype;
11599 // unsigned int __cpu_features[1];
11600 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11601 llvm::ArrayType::get(Int32Ty, 1));
11602
11603 // Grab the global __cpu_model.
11604 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11605 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11606
11607 // Calculate the index needed to access the correct field based on the
11608 // range. Also adjust the expected value.
11609 unsigned Index;
11610 unsigned Value;
11611 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
11612#define X86_VENDOR(ENUM, STRING) \
11613 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
11614#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
11615 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11616#define X86_CPU_TYPE(ENUM, STR) \
11617 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11618#define X86_CPU_SUBTYPE(ENUM, STR) \
11619 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
11620#include "llvm/Support/X86TargetParser.def"
11621 .Default({0, 0});
11622 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
11623
11624 // Grab the appropriate field from __cpu_model.
11625 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
11626 ConstantInt::get(Int32Ty, Index)};
11627 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
11628 CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
11629
11630 // Check the value of the field against the requested value.
11631 return Builder.CreateICmpEQ(CpuValue,
11632 llvm::ConstantInt::get(Int32Ty, Value));
11633}
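// Annotation (added for clarity, not part of the original file): e.g.
// __builtin_cpu_is("intel") selects Index 0, so the code above loads the
// __cpu_vendor field of __cpu_model and emits an icmp eq against the matching
// llvm::X86 vendor enum value pulled in from X86TargetParser.def.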
11634
11635Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
11636 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
11637 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
11638 return EmitX86CpuSupports(FeatureStr);
11639}
11640
11641uint64_t
11642CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
11643 // Processor features and mapping to processor feature value.
11644 uint64_t FeaturesMask = 0;
11645 for (const StringRef &FeatureStr : FeatureStrs) {
11646 unsigned Feature =
11647 StringSwitch<unsigned>(FeatureStr)
11648#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
11649#include "llvm/Support/X86TargetParser.def"
11650 ;
11651 FeaturesMask |= (1ULL << Feature);
11652 }
11653 return FeaturesMask;
11654}
11655
11656Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
11657 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
11658}
11659
11660llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
11661 uint32_t Features1 = Lo_32(FeaturesMask);
11662 uint32_t Features2 = Hi_32(FeaturesMask);
11663
11664 Value *Result = Builder.getTrue();
11665
11666 if (Features1 != 0) {
11667 // Matching the struct layout from the compiler-rt/libgcc structure that is
11668 // filled in:
11669 // unsigned int __cpu_vendor;
11670 // unsigned int __cpu_type;
11671 // unsigned int __cpu_subtype;
11672 // unsigned int __cpu_features[1];
11673 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11674 llvm::ArrayType::get(Int32Ty, 1));
11675
11676 // Grab the global __cpu_model.
11677 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11678 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11679
11680 // Grab the first (0th) element from the field __cpu_features off of the
11681 // global in the struct STy.
11682 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
11683 Builder.getInt32(0)};
11684 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
11685 Value *Features =
11686 Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
11687
11688 // Check the value of the bit corresponding to the feature requested.
11689 Value *Mask = Builder.getInt32(Features1);
11690 Value *Bitset = Builder.CreateAnd(Features, Mask);
11691 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11692 Result = Builder.CreateAnd(Result, Cmp);
11693 }
11694
11695 if (Features2 != 0) {
11696 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
11697 "__cpu_features2");
11698 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
11699
11700 Value *Features =
11701 Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
11702
11703 // Check the value of the bit corresponding to the feature requested.
11704 Value *Mask = Builder.getInt32(Features2);
11705 Value *Bitset = Builder.CreateAnd(Features, Mask);
11706 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11707 Result = Builder.CreateAnd(Result, Cmp);
11708 }
11709
11710 return Result;
11711}
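// Annotation (added for clarity, not part of the original file): a feature
// whose bit falls in the low 32 bits of the mask is tested against
// __cpu_model.__cpu_features[0], and one whose bit falls in the high 32 bits
// against __cpu_features2, each with the same and-then-compare pattern,
// schematically:
//   %bits = and i32 %features, <mask>
//   %ok   = icmp eq i32 %bits, <mask>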
11712
11713Value *CodeGenFunction::EmitX86CpuInit() {
11714 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
11715 /*Variadic*/ false);
11716 llvm::FunctionCallee Func =
11717 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
11718 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
11719 cast<llvm::GlobalValue>(Func.getCallee())
11720 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
11721 return Builder.CreateCall(Func);
11722}
11723
11724Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
11725 const CallExpr *E) {
11726 if (BuiltinID == X86::BI__builtin_cpu_is)
11727 return EmitX86CpuIs(E);
11728 if (BuiltinID == X86::BI__builtin_cpu_supports)
11729 return EmitX86CpuSupports(E);
11730 if (BuiltinID == X86::BI__builtin_cpu_init)
11731 return EmitX86CpuInit();
11732
11733 SmallVector<Value*, 4> Ops;
11734 bool IsMaskFCmp = false;
11735
11736 // Find out if any arguments are required to be integer constant expressions.
11737 unsigned ICEArguments = 0;
11738 ASTContext::GetBuiltinTypeError Error;
11739 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
11740 assert(Error == ASTContext::GE_None && "Should not codegen an error");
11741
11742 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
11743 // If this is a normal argument, just emit it as a scalar.
11744 if ((ICEArguments & (1 << i)) == 0) {
11745 Ops.push_back(EmitScalarExpr(E->getArg(i)));
11746 continue;
11747 }
11748
11749 // If this is required to be a constant, constant fold it so that we know
11750 // that the generated intrinsic gets a ConstantInt.
11751 Ops.push_back(llvm::ConstantInt::get(
11752 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
11753 }
11754
11755 // These exist so that the builtin that takes an immediate can be bounds
11756 // checked by clang to avoid passing bad immediates to the backend. Since
11757 // AVX has a larger immediate than SSE we would need separate builtins to
11758 // do the different bounds checking. Rather than create a clang-specific
11759 // SSE-only builtin, this implements eight separate builtins to match the
11760 // gcc implementation.
11761 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
11762 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
11763 llvm::Function *F = CGM.getIntrinsic(ID);
11764 return Builder.CreateCall(F, Ops);
11765 };
11766
11767 // For the vector forms of FP comparisons, translate the builtins directly to
11768 // IR.
11769 // TODO: The builtins could be removed if the SSE header files used vector
11770 // extension comparisons directly (vector ordered/unordered may need
11771 // additional support via __builtin_isnan()).
11772 auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
11773 bool IsSignaling) {
11774 Value *Cmp;
11775 if (IsSignaling)
11776 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
11777 else
11778 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
11779 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
11780 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
11781 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
11782 return Builder.CreateBitCast(Sext, FPVecTy);
11783 };
11784
11785 switch (BuiltinID) {
11786 default: return nullptr;
11787 case X86::BI_mm_prefetch: {
11788 Value *Address = Ops[0];
11789 ConstantInt *C = cast<ConstantInt>(Ops[1]);
11790 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
11791 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
11792 Value *Data = ConstantInt::get(Int32Ty, 1);
11793 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
11794 return Builder.CreateCall(F, {Address, RW, Locality, Data});
11795 }
11796 case X86::BI_mm_clflush: {
11797 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
11798 Ops[0]);
11799 }
11800 case X86::BI_mm_lfence: {
11801 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
11802 }
11803 case X86::BI_mm_mfence: {
11804 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
11805 }
11806 case X86::BI_mm_sfence: {
11807 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
11808 }
11809 case X86::BI_mm_pause: {
11810 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
11811 }
11812 case X86::BI__rdtsc: {
11813 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
11814 }
11815 case X86::BI__builtin_ia32_rdtscp: {
11816 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
11817 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
11818 Ops[0]);
11819 return Builder.CreateExtractValue(Call, 0);
11820 }
11821 case X86::BI__builtin_ia32_lzcnt_u16:
11822 case X86::BI__builtin_ia32_lzcnt_u32:
11823 case X86::BI__builtin_ia32_lzcnt_u64: {
11824 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
11825 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11826 }
11827 case X86::BI__builtin_ia32_tzcnt_u16:
11828 case X86::BI__builtin_ia32_tzcnt_u32:
11829 case X86::BI__builtin_ia32_tzcnt_u64: {
11830 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
11831 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11832 }
11833 case X86::BI__builtin_ia32_undef128:
11834 case X86::BI__builtin_ia32_undef256:
11835 case X86::BI__builtin_ia32_undef512:
11836 // The x86 definition of "undef" is not the same as the LLVM definition
11837 // (PR32176). We leave optimizing away an unnecessary zero constant to the
11838 // IR optimizer and backend.
11839 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
11840 // value, we should use that here instead of a zero.
11841 return llvm::Constant::getNullValue(ConvertType(E->getType()));
11842 case X86::BI__builtin_ia32_vec_init_v8qi:
11843 case X86::BI__builtin_ia32_vec_init_v4hi:
11844 case X86::BI__builtin_ia32_vec_init_v2si:
11845 return Builder.CreateBitCast(BuildVector(Ops),
11846 llvm::Type::getX86_MMXTy(getLLVMContext()));
11847 case X86::BI__builtin_ia32_vec_ext_v2si:
11848 case X86::BI__builtin_ia32_vec_ext_v16qi:
11849 case X86::BI__builtin_ia32_vec_ext_v8hi:
11850 case X86::BI__builtin_ia32_vec_ext_v4si:
11851 case X86::BI__builtin_ia32_vec_ext_v4sf:
11852 case X86::BI__builtin_ia32_vec_ext_v2di:
11853 case X86::BI__builtin_ia32_vec_ext_v32qi:
11854 case X86::BI__builtin_ia32_vec_ext_v16hi:
11855 case X86::BI__builtin_ia32_vec_ext_v8si:
11856 case X86::BI__builtin_ia32_vec_ext_v4di: {
11857 unsigned NumElts =
11858 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11859 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
11860 Index &= NumElts - 1;
11861 // These builtins exist so we can ensure the index is an ICE and in range.
11862 // Otherwise we could just do this in the header file.
11863 return Builder.CreateExtractElement(Ops[0], Index);
11864 }
11865 case X86::BI__builtin_ia32_vec_set_v16qi:
11866 case X86::BI__builtin_ia32_vec_set_v8hi:
11867 case X86::BI__builtin_ia32_vec_set_v4si:
11868 case X86::BI__builtin_ia32_vec_set_v2di:
11869 case X86::BI__builtin_ia32_vec_set_v32qi:
11870 case X86::BI__builtin_ia32_vec_set_v16hi:
11871 case X86::BI__builtin_ia32_vec_set_v8si:
11872 case X86::BI__builtin_ia32_vec_set_v4di: {
11873 unsigned NumElts =
11874 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11875 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
11876 Index &= NumElts - 1;
11877 // These builtins exist so we can ensure the index is an ICE and in range.
11878 // Otherwise we could just do this in the header file.
11879 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
11880 }
11881 case X86::BI_mm_setcsr:
11882 case X86::BI__builtin_ia32_ldmxcsr: {
11883 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
11884 Builder.CreateStore(Ops[0], Tmp);
11885 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
11886 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
11887 }
11888 case X86::BI_mm_getcsr:
11889 case X86::BI__builtin_ia32_stmxcsr: {
11890 Address Tmp = CreateMemTemp(E->getType());
11891 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
11892 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
11893 return Builder.CreateLoad(Tmp, "stmxcsr");
11894 }
11895 case X86::BI__builtin_ia32_xsave:
11896 case X86::BI__builtin_ia32_xsave64:
11897 case X86::BI__builtin_ia32_xrstor:
11898 case X86::BI__builtin_ia32_xrstor64:
11899 case X86::BI__builtin_ia32_xsaveopt:
11900 case X86::BI__builtin_ia32_xsaveopt64:
11901 case X86::BI__builtin_ia32_xrstors:
11902 case X86::BI__builtin_ia32_xrstors64:
11903 case X86::BI__builtin_ia32_xsavec:
11904 case X86::BI__builtin_ia32_xsavec64:
11905 case X86::BI__builtin_ia32_xsaves:
11906 case X86::BI__builtin_ia32_xsaves64:
11907 case X86::BI__builtin_ia32_xsetbv:
11908 case X86::BI_xsetbv: {
11909 Intrinsic::ID ID;
11910#define INTRINSIC_X86_XSAVE_ID(NAME) \
11911 case X86::BI__builtin_ia32_##NAME: \
11912 ID = Intrinsic::x86_##NAME; \
11913 break
11914 switch (BuiltinID) {
11915 default: llvm_unreachable("Unsupported intrinsic!");
11916 INTRINSIC_X86_XSAVE_ID(xsave);
11917 INTRINSIC_X86_XSAVE_ID(xsave64);
11918 INTRINSIC_X86_XSAVE_ID(xrstor);
11919 INTRINSIC_X86_XSAVE_ID(xrstor64);
11920 INTRINSIC_X86_XSAVE_ID(xsaveopt);
11921 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
11922 INTRINSIC_X86_XSAVE_ID(xrstors);
11923 INTRINSIC_X86_XSAVE_ID(xrstors64);
11924 INTRINSIC_X86_XSAVE_ID(xsavec);
11925 INTRINSIC_X86_XSAVE_ID(xsavec64);
11926 INTRINSIC_X86_XSAVE_ID(xsaves);
11927 INTRINSIC_X86_XSAVE_ID(xsaves64);
11928 INTRINSIC_X86_XSAVE_ID(xsetbv);
11929 case X86::BI_xsetbv:
11930 ID = Intrinsic::x86_xsetbv;
11931 break;
11932 }
11933#undef INTRINSIC_X86_XSAVE_ID
11934 Value *Mhi = Builder.CreateTrunc(
11935 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
11936 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
11937 Ops[1] = Mhi;
11938 Ops.push_back(Mlo);
11939 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
11940 }
11941 case X86::BI__builtin_ia32_xgetbv:
11942 case X86::BI_xgetbv:
11943 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
11944 case X86::BI__builtin_ia32_storedqudi128_mask:
11945 case X86::BI__builtin_ia32_storedqusi128_mask:
11946 case X86::BI__builtin_ia32_storedquhi128_mask:
11947 case X86::BI__builtin_ia32_storedquqi128_mask:
11948 case X86::BI__builtin_ia32_storeupd128_mask:
11949 case X86::BI__builtin_ia32_storeups128_mask:
11950 case X86::BI__builtin_ia32_storedqudi256_mask:
11951 case X86::BI__builtin_ia32_storedqusi256_mask:
11952 case X86::BI__builtin_ia32_storedquhi256_mask:
11953 case X86::BI__builtin_ia32_storedquqi256_mask:
11954 case X86::BI__builtin_ia32_storeupd256_mask:
11955 case X86::BI__builtin_ia32_storeups256_mask:
11956 case X86::BI__builtin_ia32_storedqudi512_mask:
11957 case X86::BI__builtin_ia32_storedqusi512_mask:
11958 case X86::BI__builtin_ia32_storedquhi512_mask:
11959 case X86::BI__builtin_ia32_storedquqi512_mask:
11960 case X86::BI__builtin_ia32_storeupd512_mask:
11961 case X86::BI__builtin_ia32_storeups512_mask:
11962 return EmitX86MaskedStore(*this, Ops, Align(1));
11963
11964 case X86::BI__builtin_ia32_storess128_mask:
11965 case X86::BI__builtin_ia32_storesd128_mask:
11966 return EmitX86MaskedStore(*this, Ops, Align(1));
11967
11968 case X86::BI__builtin_ia32_vpopcntb_128:
11969 case X86::BI__builtin_ia32_vpopcntd_128:
11970 case X86::BI__builtin_ia32_vpopcntq_128:
11971 case X86::BI__builtin_ia32_vpopcntw_128:
11972 case X86::BI__builtin_ia32_vpopcntb_256:
11973 case X86::BI__builtin_ia32_vpopcntd_256:
11974 case X86::BI__builtin_ia32_vpopcntq_256:
11975 case X86::BI__builtin_ia32_vpopcntw_256:
11976 case X86::BI__builtin_ia32_vpopcntb_512:
11977 case X86::BI__builtin_ia32_vpopcntd_512:
11978 case X86::BI__builtin_ia32_vpopcntq_512:
11979 case X86::BI__builtin_ia32_vpopcntw_512: {
11980 llvm::Type *ResultType = ConvertType(E->getType());
11981 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
11982 return Builder.CreateCall(F, Ops);
11983 }
11984 case X86::BI__builtin_ia32_cvtmask2b128:
11985 case X86::BI__builtin_ia32_cvtmask2b256:
11986 case X86::BI__builtin_ia32_cvtmask2b512:
11987 case X86::BI__builtin_ia32_cvtmask2w128:
11988 case X86::BI__builtin_ia32_cvtmask2w256:
11989 case X86::BI__builtin_ia32_cvtmask2w512:
11990 case X86::BI__builtin_ia32_cvtmask2d128:
11991 case X86::BI__builtin_ia32_cvtmask2d256:
11992 case X86::BI__builtin_ia32_cvtmask2d512:
11993 case X86::BI__builtin_ia32_cvtmask2q128:
11994 case X86::BI__builtin_ia32_cvtmask2q256:
11995 case X86::BI__builtin_ia32_cvtmask2q512:
11996 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
11997
11998 case X86::BI__builtin_ia32_cvtb2mask128:
11999 case X86::BI__builtin_ia32_cvtb2mask256:
12000 case X86::BI__builtin_ia32_cvtb2mask512:
12001 case X86::BI__builtin_ia32_cvtw2mask128:
12002 case X86::BI__builtin_ia32_cvtw2mask256:
12003 case X86::BI__builtin_ia32_cvtw2mask512:
12004 case X86::BI__builtin_ia32_cvtd2mask128:
12005 case X86::BI__builtin_ia32_cvtd2mask256:
12006 case X86::BI__builtin_ia32_cvtd2mask512:
12007 case X86::BI__builtin_ia32_cvtq2mask128:
12008 case X86::BI__builtin_ia32_cvtq2mask256:
12009 case X86::BI__builtin_ia32_cvtq2mask512:
12010 return EmitX86ConvertToMask(*this, Ops[0]);
12011
12012 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12013 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12014 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12015 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
12016 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12017 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12018 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12019 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
12020
12021 case X86::BI__builtin_ia32_vfmaddss3:
12022 case X86::BI__builtin_ia32_vfmaddsd3:
12023 case X86::BI__builtin_ia32_vfmaddss3_mask:
12024 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12025 return EmitScalarFMAExpr(*this, Ops, Ops[0]);
12026 case X86::BI__builtin_ia32_vfmaddss:
12027 case X86::BI__builtin_ia32_vfmaddsd:
12028 return EmitScalarFMAExpr(*this, Ops,
12029 Constant::getNullValue(Ops[0]->getType()));
12030 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12031 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12032 return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
12033 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12034 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12035 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
12036 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12037 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12038 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
12039 /*NegAcc*/true);
12040 case X86::BI__builtin_ia32_vfmaddps:
12041 case X86::BI__builtin_ia32_vfmaddpd:
12042 case X86::BI__builtin_ia32_vfmaddps256:
12043 case X86::BI__builtin_ia32_vfmaddpd256:
12044 case X86::BI__builtin_ia32_vfmaddps512_mask:
12045 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12046 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12047 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12048 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12049 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12050 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12051 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12052 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
12053 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12054 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12055 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12056 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12057 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12058 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12059 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12060 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12061 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
12062
12063 case X86::BI__builtin_ia32_movdqa32store128_mask:
12064 case X86::BI__builtin_ia32_movdqa64store128_mask:
12065 case X86::BI__builtin_ia32_storeaps128_mask:
12066 case X86::BI__builtin_ia32_storeapd128_mask:
12067 case X86::BI__builtin_ia32_movdqa32store256_mask:
12068 case X86::BI__builtin_ia32_movdqa64store256_mask:
12069 case X86::BI__builtin_ia32_storeaps256_mask:
12070 case X86::BI__builtin_ia32_storeapd256_mask:
12071 case X86::BI__builtin_ia32_movdqa32store512_mask:
12072 case X86::BI__builtin_ia32_movdqa64store512_mask:
12073 case X86::BI__builtin_ia32_storeaps512_mask:
12074 case X86::BI__builtin_ia32_storeapd512_mask:
12075 return EmitX86MaskedStore(
12076 *this, Ops,
12077 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12078
12079 case X86::BI__builtin_ia32_loadups128_mask:
12080 case X86::BI__builtin_ia32_loadups256_mask:
12081 case X86::BI__builtin_ia32_loadups512_mask:
12082 case X86::BI__builtin_ia32_loadupd128_mask:
12083 case X86::BI__builtin_ia32_loadupd256_mask:
12084 case X86::BI__builtin_ia32_loadupd512_mask:
12085 case X86::BI__builtin_ia32_loaddquqi128_mask:
12086 case X86::BI__builtin_ia32_loaddquqi256_mask:
12087 case X86::BI__builtin_ia32_loaddquqi512_mask:
12088 case X86::BI__builtin_ia32_loaddquhi128_mask:
12089 case X86::BI__builtin_ia32_loaddquhi256_mask:
12090 case X86::BI__builtin_ia32_loaddquhi512_mask:
12091 case X86::BI__builtin_ia32_loaddqusi128_mask:
12092 case X86::BI__builtin_ia32_loaddqusi256_mask:
12093 case X86::BI__builtin_ia32_loaddqusi512_mask:
12094 case X86::BI__builtin_ia32_loaddqudi128_mask:
12095 case X86::BI__builtin_ia32_loaddqudi256_mask:
12096 case X86::BI__builtin_ia32_loaddqudi512_mask:
12097 return EmitX86MaskedLoad(*this, Ops, Align(1));
12098
12099 case X86::BI__builtin_ia32_loadss128_mask:
12100 case X86::BI__builtin_ia32_loadsd128_mask:
12101 return EmitX86MaskedLoad(*this, Ops, Align(1));
12102
12103 case X86::BI__builtin_ia32_loadaps128_mask:
12104 case X86::BI__builtin_ia32_loadaps256_mask:
12105 case X86::BI__builtin_ia32_loadaps512_mask:
12106 case X86::BI__builtin_ia32_loadapd128_mask:
12107 case X86::BI__builtin_ia32_loadapd256_mask:
12108 case X86::BI__builtin_ia32_loadapd512_mask:
12109 case X86::BI__builtin_ia32_movdqa32load128_mask:
12110 case X86::BI__builtin_ia32_movdqa32load256_mask:
12111 case X86::BI__builtin_ia32_movdqa32load512_mask:
12112 case X86::BI__builtin_ia32_movdqa64load128_mask:
12113 case X86::BI__builtin_ia32_movdqa64load256_mask:
12114 case X86::BI__builtin_ia32_movdqa64load512_mask:
12115 return EmitX86MaskedLoad(
12116 *this, Ops,
12117 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12118
12119 case X86::BI__builtin_ia32_expandloaddf128_mask:
12120 case X86::BI__builtin_ia32_expandloaddf256_mask:
12121 case X86::BI__builtin_ia32_expandloaddf512_mask:
12122 case X86::BI__builtin_ia32_expandloadsf128_mask:
12123 case X86::BI__builtin_ia32_expandloadsf256_mask:
12124 case X86::BI__builtin_ia32_expandloadsf512_mask:
12125 case X86::BI__builtin_ia32_expandloaddi128_mask:
12126 case X86::BI__builtin_ia32_expandloaddi256_mask:
12127 case X86::BI__builtin_ia32_expandloaddi512_mask:
12128 case X86::BI__builtin_ia32_expandloadsi128_mask:
12129 case X86::BI__builtin_ia32_expandloadsi256_mask:
12130 case X86::BI__builtin_ia32_expandloadsi512_mask:
12131 case X86::BI__builtin_ia32_expandloadhi128_mask:
12132 case X86::BI__builtin_ia32_expandloadhi256_mask:
12133 case X86::BI__builtin_ia32_expandloadhi512_mask:
12134 case X86::BI__builtin_ia32_expandloadqi128_mask:
12135 case X86::BI__builtin_ia32_expandloadqi256_mask:
12136 case X86::BI__builtin_ia32_expandloadqi512_mask:
12137 return EmitX86ExpandLoad(*this, Ops);
12138
12139 case X86::BI__builtin_ia32_compressstoredf128_mask:
12140 case X86::BI__builtin_ia32_compressstoredf256_mask:
12141 case X86::BI__builtin_ia32_compressstoredf512_mask:
12142 case X86::BI__builtin_ia32_compressstoresf128_mask:
12143 case X86::BI__builtin_ia32_compressstoresf256_mask:
12144 case X86::BI__builtin_ia32_compressstoresf512_mask:
12145 case X86::BI__builtin_ia32_compressstoredi128_mask:
12146 case X86::BI__builtin_ia32_compressstoredi256_mask:
12147 case X86::BI__builtin_ia32_compressstoredi512_mask:
12148 case X86::BI__builtin_ia32_compressstoresi128_mask:
12149 case X86::BI__builtin_ia32_compressstoresi256_mask:
12150 case X86::BI__builtin_ia32_compressstoresi512_mask:
12151 case X86::BI__builtin_ia32_compressstorehi128_mask:
12152 case X86::BI__builtin_ia32_compressstorehi256_mask:
12153 case X86::BI__builtin_ia32_compressstorehi512_mask:
12154 case X86::BI__builtin_ia32_compressstoreqi128_mask:
12155 case X86::BI__builtin_ia32_compressstoreqi256_mask:
12156 case X86::BI__builtin_ia32_compressstoreqi512_mask:
12157 return EmitX86CompressStore(*this, Ops);
12158
12159 case X86::BI__builtin_ia32_expanddf128_mask:
12160 case X86::BI__builtin_ia32_expanddf256_mask:
12161 case X86::BI__builtin_ia32_expanddf512_mask:
12162 case X86::BI__builtin_ia32_expandsf128_mask:
12163 case X86::BI__builtin_ia32_expandsf256_mask:
12164 case X86::BI__builtin_ia32_expandsf512_mask:
12165 case X86::BI__builtin_ia32_expanddi128_mask:
12166 case X86::BI__builtin_ia32_expanddi256_mask:
12167 case X86::BI__builtin_ia32_expanddi512_mask:
12168 case X86::BI__builtin_ia32_expandsi128_mask:
12169 case X86::BI__builtin_ia32_expandsi256_mask:
12170 case X86::BI__builtin_ia32_expandsi512_mask:
12171 case X86::BI__builtin_ia32_expandhi128_mask:
12172 case X86::BI__builtin_ia32_expandhi256_mask:
12173 case X86::BI__builtin_ia32_expandhi512_mask:
12174 case X86::BI__builtin_ia32_expandqi128_mask:
12175 case X86::BI__builtin_ia32_expandqi256_mask:
12176 case X86::BI__builtin_ia32_expandqi512_mask:
12177 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
12178
12179 case X86::BI__builtin_ia32_compressdf128_mask:
12180 case X86::BI__builtin_ia32_compressdf256_mask:
12181 case X86::BI__builtin_ia32_compressdf512_mask:
12182 case X86::BI__builtin_ia32_compresssf128_mask:
12183 case X86::BI__builtin_ia32_compresssf256_mask:
12184 case X86::BI__builtin_ia32_compresssf512_mask:
12185 case X86::BI__builtin_ia32_compressdi128_mask:
12186 case X86::BI__builtin_ia32_compressdi256_mask:
12187 case X86::BI__builtin_ia32_compressdi512_mask:
12188 case X86::BI__builtin_ia32_compresssi128_mask:
12189 case X86::BI__builtin_ia32_compresssi256_mask:
12190 case X86::BI__builtin_ia32_compresssi512_mask:
12191 case X86::BI__builtin_ia32_compresshi128_mask:
12192 case X86::BI__builtin_ia32_compresshi256_mask:
12193 case X86::BI__builtin_ia32_compresshi512_mask:
12194 case X86::BI__builtin_ia32_compressqi128_mask:
12195 case X86::BI__builtin_ia32_compressqi256_mask:
12196 case X86::BI__builtin_ia32_compressqi512_mask:
12197 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
12198
12199 case X86::BI__builtin_ia32_gather3div2df:
12200 case X86::BI__builtin_ia32_gather3div2di:
12201 case X86::BI__builtin_ia32_gather3div4df:
12202 case X86::BI__builtin_ia32_gather3div4di:
12203 case X86::BI__builtin_ia32_gather3div4sf:
12204 case X86::BI__builtin_ia32_gather3div4si:
12205 case X86::BI__builtin_ia32_gather3div8sf:
12206 case X86::BI__builtin_ia32_gather3div8si:
12207 case X86::BI__builtin_ia32_gather3siv2df:
12208 case X86::BI__builtin_ia32_gather3siv2di:
12209 case X86::BI__builtin_ia32_gather3siv4df:
12210 case X86::BI__builtin_ia32_gather3siv4di:
12211 case X86::BI__builtin_ia32_gather3siv4sf:
12212 case X86::BI__builtin_ia32_gather3siv4si:
12213 case X86::BI__builtin_ia32_gather3siv8sf:
12214 case X86::BI__builtin_ia32_gather3siv8si:
12215 case X86::BI__builtin_ia32_gathersiv8df:
12216 case X86::BI__builtin_ia32_gathersiv16sf:
12217 case X86::BI__builtin_ia32_gatherdiv8df:
12218 case X86::BI__builtin_ia32_gatherdiv16sf:
12219 case X86::BI__builtin_ia32_gathersiv8di:
12220 case X86::BI__builtin_ia32_gathersiv16si:
12221 case X86::BI__builtin_ia32_gatherdiv8di:
12222 case X86::BI__builtin_ia32_gatherdiv16si: {
12223 Intrinsic::ID IID;
12224 switch (BuiltinID) {
12225 default: llvm_unreachable("Unexpected builtin");
12226 case X86::BI__builtin_ia32_gather3div2df:
12227 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
12228 break;
12229 case X86::BI__builtin_ia32_gather3div2di:
12230 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
12231 break;
12232 case X86::BI__builtin_ia32_gather3div4df:
12233 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
12234 break;
12235 case X86::BI__builtin_ia32_gather3div4di:
12236 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
12237 break;
12238 case X86::BI__builtin_ia32_gather3div4sf:
12239 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
12240 break;
12241 case X86::BI__builtin_ia32_gather3div4si:
12242 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
12243 break;
12244 case X86::BI__builtin_ia32_gather3div8sf:
12245 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
12246 break;
12247 case X86::BI__builtin_ia32_gather3div8si:
12248 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
12249 break;
12250 case X86::BI__builtin_ia32_gather3siv2df:
12251 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
12252 break;
12253 case X86::BI__builtin_ia32_gather3siv2di:
12254 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
12255 break;
12256 case X86::BI__builtin_ia32_gather3siv4df:
12257 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
12258 break;
12259 case X86::BI__builtin_ia32_gather3siv4di:
12260 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
12261 break;
12262 case X86::BI__builtin_ia32_gather3siv4sf:
12263 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
12264 break;
12265 case X86::BI__builtin_ia32_gather3siv4si:
12266 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
12267 break;
12268 case X86::BI__builtin_ia32_gather3siv8sf:
12269 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
12270 break;
12271 case X86::BI__builtin_ia32_gather3siv8si:
12272 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
12273 break;
12274 case X86::BI__builtin_ia32_gathersiv8df:
12275 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
12276 break;
12277 case X86::BI__builtin_ia32_gathersiv16sf:
12278 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
12279 break;
12280 case X86::BI__builtin_ia32_gatherdiv8df:
12281 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
12282 break;
12283 case X86::BI__builtin_ia32_gatherdiv16sf:
12284 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
12285 break;
12286 case X86::BI__builtin_ia32_gathersiv8di:
12287 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
12288 break;
12289 case X86::BI__builtin_ia32_gathersiv16si:
12290 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
12291 break;
12292 case X86::BI__builtin_ia32_gatherdiv8di:
12293 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
12294 break;
12295 case X86::BI__builtin_ia32_gatherdiv16si:
12296 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
12297 break;
12298 }
12299
12300 unsigned MinElts = std::min(
12301 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
12302 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
12303 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
12304 Function *Intr = CGM.getIntrinsic(IID);
12305 return Builder.CreateCall(Intr, Ops);
12306 }
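    // For illustration: with __builtin_ia32_gather3div2df the passthrough
    // vector (Ops[0]) and the index vector (Ops[2]) each have two elements,
    // so MinElts is 2 and the integer mask in Ops[3] is converted to a
    // <2 x i1> vector before the masked-gather intrinsic call.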
12307
12308 case X86::BI__builtin_ia32_scattersiv8df:
12309 case X86::BI__builtin_ia32_scattersiv16sf:
12310 case X86::BI__builtin_ia32_scatterdiv8df:
12311 case X86::BI__builtin_ia32_scatterdiv16sf:
12312 case X86::BI__builtin_ia32_scattersiv8di:
12313 case X86::BI__builtin_ia32_scattersiv16si:
12314 case X86::BI__builtin_ia32_scatterdiv8di:
12315 case X86::BI__builtin_ia32_scatterdiv16si:
12316 case X86::BI__builtin_ia32_scatterdiv2df:
12317 case X86::BI__builtin_ia32_scatterdiv2di:
12318 case X86::BI__builtin_ia32_scatterdiv4df:
12319 case X86::BI__builtin_ia32_scatterdiv4di:
12320 case X86::BI__builtin_ia32_scatterdiv4sf:
12321 case X86::BI__builtin_ia32_scatterdiv4si:
12322 case X86::BI__builtin_ia32_scatterdiv8sf:
12323 case X86::BI__builtin_ia32_scatterdiv8si:
12324 case X86::BI__builtin_ia32_scattersiv2df:
12325 case X86::BI__builtin_ia32_scattersiv2di:
12326 case X86::BI__builtin_ia32_scattersiv4df:
12327 case X86::BI__builtin_ia32_scattersiv4di:
12328 case X86::BI__builtin_ia32_scattersiv4sf:
12329 case X86::BI__builtin_ia32_scattersiv4si:
12330 case X86::BI__builtin_ia32_scattersiv8sf:
12331 case X86::BI__builtin_ia32_scattersiv8si: {
12332 Intrinsic::ID IID;
12333 switch (BuiltinID) {
12334 default: llvm_unreachable("Unexpected builtin");
12335 case X86::BI__builtin_ia32_scattersiv8df:
12336 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
12337 break;
12338 case X86::BI__builtin_ia32_scattersiv16sf:
12339 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
12340 break;
12341 case X86::BI__builtin_ia32_scatterdiv8df:
12342 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
12343 break;
12344 case X86::BI__builtin_ia32_scatterdiv16sf:
12345 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
12346 break;
12347 case X86::BI__builtin_ia32_scattersiv8di:
12348 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
12349 break;
12350 case X86::BI__builtin_ia32_scattersiv16si:
12351 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
12352 break;
12353 case X86::BI__builtin_ia32_scatterdiv8di:
12354 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
12355 break;
12356 case X86::BI__builtin_ia32_scatterdiv16si:
12357 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
12358 break;
12359 case X86::BI__builtin_ia32_scatterdiv2df:
12360 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
12361 break;
12362 case X86::BI__builtin_ia32_scatterdiv2di:
12363 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
12364 break;
12365 case X86::BI__builtin_ia32_scatterdiv4df:
12366 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
12367 break;
12368 case X86::BI__builtin_ia32_scatterdiv4di:
12369 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
12370 break;
12371 case X86::BI__builtin_ia32_scatterdiv4sf:
12372 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
12373 break;
12374 case X86::BI__builtin_ia32_scatterdiv4si:
12375 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
12376 break;
12377 case X86::BI__builtin_ia32_scatterdiv8sf:
12378 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
12379 break;
12380 case X86::BI__builtin_ia32_scatterdiv8si:
12381 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
12382 break;
12383 case X86::BI__builtin_ia32_scattersiv2df:
12384 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
12385 break;
12386 case X86::BI__builtin_ia32_scattersiv2di:
12387 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
12388 break;
12389 case X86::BI__builtin_ia32_scattersiv4df:
12390 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
12391 break;
12392 case X86::BI__builtin_ia32_scattersiv4di:
12393 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
12394 break;
12395 case X86::BI__builtin_ia32_scattersiv4sf:
12396 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
12397 break;
12398 case X86::BI__builtin_ia32_scattersiv4si:
12399 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
12400 break;
12401 case X86::BI__builtin_ia32_scattersiv8sf:
12402 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
12403 break;
12404 case X86::BI__builtin_ia32_scattersiv8si:
12405 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
12406 break;
12407 }
12408
12409 unsigned MinElts = std::min(
12410 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
12411 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
12412 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
12413 Function *Intr = CGM.getIntrinsic(IID);
12414 return Builder.CreateCall(Intr, Ops);
12415 }
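    // Likewise for the scatters: MinElts is taken from the index vector
    // (Ops[2]) and the data vector (Ops[3]), and the integer mask in Ops[1]
    // is converted to a <MinElts x i1> vector before calling the intrinsic.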
12416
12417 case X86::BI__builtin_ia32_vextractf128_pd256:
12418 case X86::BI__builtin_ia32_vextractf128_ps256:
12419 case X86::BI__builtin_ia32_vextractf128_si256:
12420 case X86::BI__builtin_ia32_extract128i256:
12421 case X86::BI__builtin_ia32_extractf64x4_mask:
12422 case X86::BI__builtin_ia32_extractf32x4_mask:
12423 case X86::BI__builtin_ia32_extracti64x4_mask:
12424 case X86::BI__builtin_ia32_extracti32x4_mask:
12425 case X86::BI__builtin_ia32_extractf32x8_mask:
12426 case X86::BI__builtin_ia32_extracti32x8_mask:
12427 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12428 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12429 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12430 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12431 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12432 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
12433 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
12434 unsigned NumElts = DstTy->getNumElements();
12435 unsigned SrcNumElts =
12436 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12437 unsigned SubVectors = SrcNumElts / NumElts;
12438 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12439 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12440 Index &= SubVectors - 1; // Remove any extra bits.
12441 Index *= NumElts;
12442
12443 int Indices[16];
12444 for (unsigned i = 0; i != NumElts; ++i)
12445 Indices[i] = i + Index;
12446
12447 Value *Res = Builder.CreateShuffleVector(Ops[0],
12448 UndefValue::get(Ops[0]->getType()),
12449 makeArrayRef(Indices, NumElts),
12450 "extract");
12451
12452 if (Ops.size() == 4)
12453 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
12454
12455 return Res;
12456 }
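    // For illustration: extracting a 128-bit float subvector from a 512-bit
    // source gives SrcNumElts = 16, NumElts = 4 and SubVectors = 4. With
    // Index = 2 the shuffle mask becomes {8, 9, 10, 11}, and for the masked
    // forms (four operands) the result is blended with Ops[2] under Ops[3].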
12457 case X86::BI__builtin_ia32_vinsertf128_pd256:
12458 case X86::BI__builtin_ia32_vinsertf128_ps256:
12459 case X86::BI__builtin_ia32_vinsertf128_si256:
12460 case X86::BI__builtin_ia32_insert128i256:
12461 case X86::BI__builtin_ia32_insertf64x4:
12462 case X86::BI__builtin_ia32_insertf32x4:
12463 case X86::BI__builtin_ia32_inserti64x4:
12464 case X86::BI__builtin_ia32_inserti32x4:
12465 case X86::BI__builtin_ia32_insertf32x8:
12466 case X86::BI__builtin_ia32_inserti32x8:
12467 case X86::BI__builtin_ia32_insertf32x4_256:
12468 case X86::BI__builtin_ia32_inserti32x4_256:
12469 case X86::BI__builtin_ia32_insertf64x2_256:
12470 case X86::BI__builtin_ia32_inserti64x2_256:
12471 case X86::BI__builtin_ia32_insertf64x2_512:
12472 case X86::BI__builtin_ia32_inserti64x2_512: {
12473 unsigned DstNumElts =
12474 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12475 unsigned SrcNumElts =
12476 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
12477 unsigned SubVectors = DstNumElts / SrcNumElts;
12478 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12479 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12480 Index &= SubVectors - 1; // Remove any extra bits.
12481 Index *= SrcNumElts;
12482
12483 int Indices[16];
12484 for (unsigned i = 0; i != DstNumElts; ++i)
12485 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
12486
12487 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
12488 UndefValue::get(Ops[1]->getType()),
12489 makeArrayRef(Indices, DstNumElts),
12490 "widen");
12491
12492 for (unsigned i = 0; i != DstNumElts; ++i) {
12493 if (i >= Index && i < (Index + SrcNumElts))
12494 Indices[i] = (i - Index) + DstNumElts;
12495 else
12496 Indices[i] = i;
12497 }
12498
12499 return Builder.CreateShuffleVector(Ops[0], Op1,
12500 makeArrayRef(Indices, DstNumElts),
12501 "insert");
12502 }
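    // For illustration: inserting a 128-bit vector into a 512-bit one gives
    // DstNumElts = 16, SrcNumElts = 4 and SubVectors = 4. The first shuffle
    // widens Ops[1] to 16 elements; with Index = 1 the second shuffle mask is
    // {0..3, 16..19, 8..15}, i.e. elements 4-7 of the result come from the
    // widened source and the rest from Ops[0].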
12503 case X86::BI__builtin_ia32_pmovqd512_mask:
12504 case X86::BI__builtin_ia32_pmovwb512_mask: {
12505 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12506 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12507 }
12508 case X86::BI__builtin_ia32_pmovdb512_mask:
12509 case X86::BI__builtin_ia32_pmovdw512_mask:
12510 case X86::BI__builtin_ia32_pmovqw512_mask: {
12511 if (const auto *C = dyn_cast<Constant>(Ops[2]))
12512 if (C->isAllOnesValue())
12513 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12514
12515 Intrinsic::ID IID;
12516 switch (BuiltinID) {
12517 default: llvm_unreachable("Unsupported intrinsic!");
12518 case X86::BI__builtin_ia32_pmovdb512_mask:
12519 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
12520 break;
12521 case X86::BI__builtin_ia32_pmovdw512_mask:
12522 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
12523 break;
12524 case X86::BI__builtin_ia32_pmovqw512_mask:
12525 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
12526 break;
12527 }
12528
12529 Function *Intr = CGM.getIntrinsic(IID);
12530 return Builder.CreateCall(Intr, Ops);
12531 }
12532 case X86::BI__builtin_ia32_pblendw128:
12533 case X86::BI__builtin_ia32_blendpd:
12534 case X86::BI__builtin_ia32_blendps:
12535 case X86::BI__builtin_ia32_blendpd256:
12536 case X86::BI__builtin_ia32_blendps256:
12537 case X86::BI__builtin_ia32_pblendw256:
12538 case X86::BI__builtin_ia32_pblendd128:
12539 case X86::BI__builtin_ia32_pblendd256: {
12540 unsigned NumElts =
12541 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12542 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12543
12544 int Indices[16];
12545 // If there are more than 8 elements, the immediate is used twice so make
12546 // sure we handle that.
12547 for (unsigned i = 0; i != NumElts; ++i)
12548 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
12549
12550 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12551 makeArrayRef(Indices, NumElts),
12552 "blend");
12553 }
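    // For illustration: for the 256-bit pblendw NumElts is 16, so bit (i % 8)
    // of the immediate controls both 128-bit halves. Imm = 0x01 yields the
    // shuffle mask {16, 1..7, 24, 9..15}, i.e. elements 0 and 8 come from
    // Ops[1] and everything else from Ops[0].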
12554 case X86::BI__builtin_ia32_pshuflw:
12555 case X86::BI__builtin_ia32_pshuflw256:
12556 case X86::BI__builtin_ia32_pshuflw512: {
12557 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12558 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12559 unsigned NumElts = Ty->getNumElements();
12560
12561 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12562 Imm = (Imm & 0xff) * 0x01010101;
12563
12564 int Indices[32];
12565 for (unsigned l = 0; l != NumElts; l += 8) {
12566 for (unsigned i = 0; i != 4; ++i) {
12567 Indices[l + i] = l + (Imm & 3);
12568 Imm >>= 2;
12569 }
12570 for (unsigned i = 4; i != 8; ++i)
12571 Indices[l + i] = l + i;
12572 }
12573
12574 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12575 makeArrayRef(Indices, NumElts),
12576 "pshuflw");
12577 }
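    // For illustration: with Imm = 0x1B on a <8 x i16> vector the low half is
    // reordered to {3, 2, 1, 0} and elements 4-7 pass through unchanged.
    // Splatting the immediate to 0x1B1B1B1B lets the same 8 control bits be
    // reused for every 128-bit lane of the 256-bit and 512-bit forms.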
12578 case X86::BI__builtin_ia32_pshufhw:
12579 case X86::BI__builtin_ia32_pshufhw256:
12580 case X86::BI__builtin_ia32_pshufhw512: {
12581 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12582 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12583 unsigned NumElts = Ty->getNumElements();
12584
12585 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12586 Imm = (Imm & 0xff) * 0x01010101;
12587
12588 int Indices[32];
12589 for (unsigned l = 0; l != NumElts; l += 8) {
12590 for (unsigned i = 0; i != 4; ++i)
12591 Indices[l + i] = l + i;
12592 for (unsigned i = 4; i != 8; ++i) {
12593 Indices[l + i] = l + 4 + (Imm & 3);
12594 Imm >>= 2;
12595 }
12596 }
12597
12598 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12599 makeArrayRef(Indices, NumElts),
12600 "pshufhw");
12601 }
12602 case X86::BI__builtin_ia32_pshufd:
12603 case X86::BI__builtin_ia32_pshufd256:
12604 case X86::BI__builtin_ia32_pshufd512:
12605 case X86::BI__builtin_ia32_vpermilpd:
12606 case X86::BI__builtin_ia32_vpermilps:
12607 case X86::BI__builtin_ia32_vpermilpd256:
12608 case X86::BI__builtin_ia32_vpermilps256:
12609 case X86::BI__builtin_ia32_vpermilpd512:
12610 case X86::BI__builtin_ia32_vpermilps512: {
12611 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12612 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12613 unsigned NumElts = Ty->getNumElements();
12614 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12615 unsigned NumLaneElts = NumElts / NumLanes;
12616
12617 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12618 Imm = (Imm & 0xff) * 0x01010101;
12619
12620 int Indices[16];
12621 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12622 for (unsigned i = 0; i != NumLaneElts; ++i) {
12623 Indices[i + l] = (Imm % NumLaneElts) + l;
12624 Imm /= NumLaneElts;
12625 }
12626 }
12627
12628 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12629 makeArrayRef(Indices, NumElts),
12630 "permil");
12631 }
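    // For illustration: for vpermilps on a <8 x float> vector NumLanes is 2
    // and NumLaneElts is 4, and (Imm % 4) picks an element within each lane.
    // Imm = 0x1B gives the per-lane pattern {3, 2, 1, 0} and the full shuffle
    // mask {3, 2, 1, 0, 7, 6, 5, 4}.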
12632 case X86::BI__builtin_ia32_shufpd:
12633 case X86::BI__builtin_ia32_shufpd256:
12634 case X86::BI__builtin_ia32_shufpd512:
12635 case X86::BI__builtin_ia32_shufps:
12636 case X86::BI__builtin_ia32_shufps256:
12637 case X86::BI__builtin_ia32_shufps512: {
12638 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12639 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12640 unsigned NumElts = Ty->getNumElements();
12641 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12642 unsigned NumLaneElts = NumElts / NumLanes;
12643
12644 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12645 Imm = (Imm & 0xff) * 0x01010101;
12646
12647 int Indices[16];
12648 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12649 for (unsigned i = 0; i != NumLaneElts; ++i) {
12650 unsigned Index = Imm % NumLaneElts;
12651 Imm /= NumLaneElts;
12652 if (i >= (NumLaneElts / 2))
12653 Index += NumElts;
12654 Indices[l + i] = l + Index;
12655 }
12656 }
12657
12658 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12659 makeArrayRef(Indices, NumElts),
12660 "shufp");
12661 }
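    // For illustration: for shufps on <4 x float> vectors the first two
    // result elements come from Ops[0] and the last two from Ops[1], since
    // Index is bumped by NumElts once i reaches NumLaneElts / 2. Imm = 0x1B
    // gives the shuffle mask {3, 2, 5, 4}.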
12662 case X86::BI__builtin_ia32_permdi256:
12663 case X86::BI__builtin_ia32_permdf256:
12664 case X86::BI__builtin_ia32_permdi512:
12665 case X86::BI__builtin_ia32_permdf512: {
12666 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12667 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12668 unsigned NumElts = Ty->getNumElements();
12669
12670 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
12671 int Indices[8];
12672 for (unsigned l = 0; l != NumElts; l += 4)
12673 for (unsigned i = 0; i != 4; ++i)
12674 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
12675
12676 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12677 makeArrayRef(Indices, NumElts),
12678 "perm");
12679 }
12680 case X86::BI__builtin_ia32_palignr128:
12681 case X86::BI__builtin_ia32_palignr256:
12682 case X86::BI__builtin_ia32_palignr512: {
12683 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12684
12685 unsigned NumElts =
12686 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12687 assert(NumElts % 16 == 0);
12688
12689 // If palignr is shifting the pair of vectors more than the size of two
12690 // lanes, emit zero.
12691 if (ShiftVal >= 32)
12692 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12693
12694 // If palignr is shifting the pair of input vectors more than one lane,
12695 // but less than two lanes, convert to shifting in zeroes.
12696 if (ShiftVal > 16) {
12697 ShiftVal -= 16;
12698 Ops[1] = Ops[0];
12699 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
12700 }
12701
12702 int Indices[64];
12703 // 256-bit palignr operates on 128-bit lanes so we need to handle that
12704 for (unsigned l = 0; l != NumElts; l += 16) {
12705 for (unsigned i = 0; i != 16; ++i) {
12706 unsigned Idx = ShiftVal + i;
12707 if (Idx >= 16)
12708 Idx += NumElts - 16; // End of lane, switch operand.
12709 Indices[l + i] = Idx + l;
12710 }
12711 }
12712
12713 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12714 makeArrayRef(Indices, NumElts),
12715 "palignr");
12716 }
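    // For illustration: a 128-bit palignr with ShiftVal = 4 produces indices
    // {4..15, 16..19} over the shuffle operands (Ops[1], Ops[0]), i.e. bytes
    // 4-15 of Ops[1] followed by bytes 0-3 of Ops[0] -- the concatenation
    // Ops[0]:Ops[1] shifted right by four bytes.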
12717 case X86::BI__builtin_ia32_alignd128:
12718 case X86::BI__builtin_ia32_alignd256:
12719 case X86::BI__builtin_ia32_alignd512:
12720 case X86::BI__builtin_ia32_alignq128:
12721 case X86::BI__builtin_ia32_alignq256:
12722 case X86::BI__builtin_ia32_alignq512: {
12723 unsigned NumElts =
12724 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12725 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12726
12727 // Mask the shift amount to width of two vectors.
12728 ShiftVal &= (2 * NumElts) - 1;
12729
12730 int Indices[16];
12731 for (unsigned i = 0; i != NumElts; ++i)
12732 Indices[i] = i + ShiftVal;
12733
12734 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12735 makeArrayRef(Indices, NumElts),
12736 "valign");
12737 }
12738 case X86::BI__builtin_ia32_shuf_f32x4_256:
12739 case X86::BI__builtin_ia32_shuf_f64x2_256:
12740 case X86::BI__builtin_ia32_shuf_i32x4_256:
12741 case X86::BI__builtin_ia32_shuf_i64x2_256:
12742 case X86::BI__builtin_ia32_shuf_f32x4:
12743 case X86::BI__builtin_ia32_shuf_f64x2:
12744 case X86::BI__builtin_ia32_shuf_i32x4:
12745 case X86::BI__builtin_ia32_shuf_i64x2: {
12746 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12747 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12748 unsigned NumElts = Ty->getNumElements();
12749 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
12750 unsigned NumLaneElts = NumElts / NumLanes;
12751
12752 int Indices[16];
12753 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12754 unsigned Index = (Imm % NumLanes) * NumLaneElts;
12755 Imm /= NumLanes; // Discard the bits we just used.
12756 if (l >= (NumElts / 2))
12757 Index += NumElts; // Switch to other source.
12758 for (unsigned i = 0; i != NumLaneElts; ++i) {
12759 Indices[l + i] = Index + i;
12760 }
12761 }
12762
12763 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12764 makeArrayRef(Indices, NumElts),
12765 "shuf");
12766 }
12767
12768 case X86::BI__builtin_ia32_vperm2f128_pd256:
12769 case X86::BI__builtin_ia32_vperm2f128_ps256:
12770 case X86::BI__builtin_ia32_vperm2f128_si256:
12771 case X86::BI__builtin_ia32_permti256: {
12772 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12773 unsigned NumElts =
12774 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12775
12776 // This takes a very simple approach since there are two lanes and a
12777 // shuffle can have 2 inputs. So we reserve the first input for the first
12778 // lane and the second input for the second lane. This may result in
12779 // duplicate sources, but this can be dealt with in the backend.
12780
12781 Value *OutOps[2];
12782 int Indices[8];
12783 for (unsigned l = 0; l != 2; ++l) {
12784 // Determine the source for this lane.
12785 if (Imm & (1 << ((l * 4) + 3)))
12786 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
12787 else if (Imm & (1 << ((l * 4) + 1)))
12788 OutOps[l] = Ops[1];
12789 else
12790 OutOps[l] = Ops[0];
12791
12792 for (unsigned i = 0; i != NumElts/2; ++i) {
12793 // Start with ith element of the source for this lane.
12794 unsigned Idx = (l * NumElts) + i;
12795 // If bit 0 of the immediate half is set, switch to the high half of
12796 // the source.
12797 if (Imm & (1 << (l * 4)))
12798 Idx += NumElts/2;
12799 Indices[(l * (NumElts/2)) + i] = Idx;
12800 }
12801 }
12802
12803 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
12804 makeArrayRef(Indices, NumElts),
12805 "vperm");
12806 }
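    // For illustration: Imm = 0x21 selects the high half of Ops[0] for the
    // low result lane (nibble 0x1) and the low half of Ops[1] for the high
    // result lane (nibble 0x2); a set bit 3 in either nibble zeroes that lane
    // via the ConstantAggregateZero source.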
12807
12808 case X86::BI__builtin_ia32_pslldqi128_byteshift:
12809 case X86::BI__builtin_ia32_pslldqi256_byteshift:
12810 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
12811 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12812 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
12813 // Builtin type is vXi64 so multiply by 8 to get bytes.
12814 unsigned NumElts = ResultType->getNumElements() * 8;
12815
12816 // If pslldq is shifting the vector more than 15 bytes, emit zero.
12817 if (ShiftVal >= 16)
12818 return llvm::Constant::getNullValue(ResultType);
12819
12820 int Indices[64];
12821 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
12822 for (unsigned l = 0; l != NumElts; l += 16) {
12823 for (unsigned i = 0; i != 16; ++i) {
12824 unsigned Idx = NumElts + i - ShiftVal;
12825 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
12826 Indices[l + i] = Idx + l;
12827 }
12828 }
12829
12830 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
12831 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
12832 Value *Zero = llvm::Constant::getNullValue(VecTy);
12833 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
12834 makeArrayRef(Indices, NumElts),
12835 "pslldq");
12836 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
12837 }
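    // For illustration: a 128-bit pslldq with ShiftVal = 3 shuffles the zero
    // vector and the bitcast input with mask {13, 14, 15, 16..28}, so three
    // zero bytes are shifted in at the low end of each 128-bit lane -- a byte
    // shift left by three.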
12838 case X86::BI__builtin_ia32_psrldqi128_byteshift:
12839 case X86::BI__builtin_ia32_psrldqi256_byteshift:
12840 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
12841 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12842 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
12843 // Builtin type is vXi64 so multiply by 8 to get bytes.
12844 unsigned NumElts = ResultType->getNumElements() * 8;
12845
12846 // If psrldq is shifting the vector more than 15 bytes, emit zero.
12847 if (ShiftVal >= 16)
12848 return llvm::Constant::getNullValue(ResultType);
12849
12850 int Indices[64];
12851 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
12852 for (unsigned l = 0; l != NumElts; l += 16) {
12853 for (unsigned i = 0; i != 16; ++i) {
12854 unsigned Idx = i + ShiftVal;
12855 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
12856 Indices[l + i] = Idx + l;
12857 }
12858 }
12859
12860 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
12861 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
12862 Value *Zero = llvm::Constant::getNullValue(VecTy);
12863 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
12864 makeArrayRef(Indices, NumElts),
12865 "psrldq");
12866 return Builder.CreateBitCast(SV, ResultType, "cast");
12867 }
12868 case X86::BI__builtin_ia32_kshiftliqi:
12869 case X86::BI__builtin_ia32_kshiftlihi:
12870 case X86::BI__builtin_ia32_kshiftlisi:
12871 case X86::BI__builtin_ia32_kshiftlidi: {
12872 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12873 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12874
12875 if (ShiftVal >= NumElts)
12876 return llvm::Constant::getNullValue(Ops[0]->getType());
12877
12878 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
12879
12880 int Indices[64];
12881 for (unsigned i = 0; i != NumElts; ++i)
12882 Indices[i] = NumElts + i - ShiftVal;
12883
12884 Value *Zero = llvm::Constant::getNullValue(In->getType());
12885 Value *SV = Builder.CreateShuffleVector(Zero, In,
12886 makeArrayRef(Indices, NumElts),
12887 "kshiftl");
12888 return Builder.CreateBitCast(SV, Ops[0]->getType());
12889 }
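    // For illustration: kshiftli on a 16-bit mask with ShiftVal = 3 uses the
    // shuffle mask {13, 14, 15, 16..28} over (Zero, In), so the low three
    // mask bits become zero and bit i of the input lands at bit i + 3, i.e.
    // the mask shifted left by three.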
12890 case X86::BI__builtin_ia32_kshiftriqi:
12891 case X86::BI__builtin_ia32_kshiftrihi:
12892 case X86::BI__builtin_ia32_kshiftrisi:
12893 case X86::BI__builtin_ia32_kshiftridi: {
12894 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12895 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12896
12897 if (ShiftVal >= NumElts)
12898 return llvm::Constant::getNullValue(Ops[0]->getType());
12899
12900 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
12901
12902 int Indices[64];
12903 for (unsigned i = 0; i != NumElts; ++i)
12904 Indices[i] = i + ShiftVal;
12905
12906 Value *Zero = llvm::Constant::getNullValue(In->getType());
12907 Value *SV = Builder.CreateShuffleVector(In, Zero,
12908 makeArrayRef(Indices, NumElts),
12909 "kshiftr");
12910 return Builder.CreateBitCast(SV, Ops[0]->getType());
12911 }
12912 case X86::BI__builtin_ia32_movnti:
12913 case X86::BI__builtin_ia32_movnti64:
12914 case X86::BI__builtin_ia32_movntsd:
12915 case X86::BI__builtin_ia32_movntss: {
12916 llvm::MDNode *Node = llvm::MDNode::get(
12917 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
12918
12919 Value *Ptr = Ops[0];
12920 Value *Src = Ops[1];
12921
12922 // Extract the 0'th element of the source vector.
12923 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
12924 BuiltinID == X86::BI__builtin_ia32_movntss)
12925 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
12926
12927 // Convert the type of the pointer to a pointer to the stored type.
12928 Value *BC = Builder.CreateBitCast(
12929 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
12930
12931 // Unaligned nontemporal store of the scalar value.
12932 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
12933 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
12934 SI->setAlignment(llvm::Align(1));
12935 return SI;
12936 }
12937 // Rotate is a special case of funnel shift - 1st 2 args are the same.
12938 case X86::BI__builtin_ia32_vprotb:
12939 case X86::BI__builtin_ia32_vprotw:
12940 case X86::BI__builtin_ia32_vprotd:
12941 case X86::BI__builtin_ia32_vprotq:
12942 case X86::BI__builtin_ia32_vprotbi:
12943 case X86::BI__builtin_ia32_vprotwi:
12944 case X86::BI__builtin_ia32_vprotdi:
12945 case X86::BI__builtin_ia32_vprotqi:
12946 case X86::BI__builtin_ia32_prold128:
12947 case X86::BI__builtin_ia32_prold256:
12948 case X86::BI__builtin_ia32_prold512:
12949 case X86::BI__builtin_ia32_prolq128:
12950 case X86::BI__builtin_ia32_prolq256:
12951 case X86::BI__builtin_ia32_prolq512:
12952 case X86::BI__builtin_ia32_prolvd128:
12953 case X86::BI__builtin_ia32_prolvd256:
12954 case X86::BI__builtin_ia32_prolvd512:
12955 case X86::BI__builtin_ia32_prolvq128:
12956 case X86::BI__builtin_ia32_prolvq256:
12957 case X86::BI__builtin_ia32_prolvq512:
12958 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
12959 case X86::BI__builtin_ia32_prord128:
12960 case X86::BI__builtin_ia32_prord256:
12961 case X86::BI__builtin_ia32_prord512:
12962 case X86::BI__builtin_ia32_prorq128:
12963 case X86::BI__builtin_ia32_prorq256:
12964 case X86::BI__builtin_ia32_prorq512:
12965 case X86::BI__builtin_ia32_prorvd128:
12966 case X86::BI__builtin_ia32_prorvd256:
12967 case X86::BI__builtin_ia32_prorvd512:
12968 case X86::BI__builtin_ia32_prorvq128:
12969 case X86::BI__builtin_ia32_prorvq256:
12970 case X86::BI__builtin_ia32_prorvq512:
12971 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
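    // These rotates rely on the identity rol(x, n) == fshl(x, x, n) and
    // ror(x, n) == fshr(x, x, n): passing Ops[0] as both funnel-shift data
    // operands turns the funnel shift into a rotate, which EmitX86FunnelShift
    // presumably lowers to the llvm.fshl / llvm.fshr intrinsics.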
12972 case X86::BI__builtin_ia32_selectb_128:
12973 case X86::BI__builtin_ia32_selectb_256:
12974 case X86::BI__builtin_ia32_selectb_512:
12975 case X86::BI__builtin_ia32_selectw_128:
12976 case X86::BI__builtin_ia32_selectw_256:
12977 case X86::BI__builtin_ia32_selectw_512:
12978 case X86::BI__builtin_ia32_selectd_128:
12979 case X86::BI__builtin_ia32_selectd_256:
12980 case X86::BI__builtin_ia32_selectd_512:
12981 case X86::BI__builtin_ia32_selectq_128:
12982 case X86::BI__builtin_ia32_selectq_256:
12983 case X86::BI__builtin_ia32_selectq_512:
12984 case X86::BI__builtin_ia32_selectps_128:
12985 case X86::BI__builtin_ia32_selectps_256:
12986 case X86::BI__builtin_ia32_selectps_512:
12987 case X86::BI__builtin_ia32_selectpd_128:
12988 case X86::BI__builtin_ia32_selectpd_256:
12989 case X86::BI__builtin_ia32_selectpd_512:
12990 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
12991 case X86::BI__builtin_ia32_selectss_128:
12992 case X86::BI__builtin_ia32_selectsd_128: {
12993 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
12994 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
12995 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
12996 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
12997 }
12998 case X86::BI__builtin_ia32_cmpb128_mask:
12999 case X86::BI__builtin_ia32_cmpb256_mask:
13000 case X86::BI__builtin_ia32_cmpb512_mask:
13001 case X86::BI__builtin_ia32_cmpw128_mask:
13002 case X86::BI__builtin_ia32_cmpw256_mask:
13003 case X86::BI__builtin_ia32_cmpw512_mask:
13004 case X86::BI__builtin_ia32_cmpd128_mask:
13005 case X86::BI__builtin_ia32_cmpd256_mask:
13006 case X86::BI__builtin_ia32_cmpd512_mask:
13007 case X86::BI__builtin_ia32_cmpq128_mask:
13008 case X86::BI__builtin_ia32_cmpq256_mask:
13009 case X86::BI__builtin_ia32_cmpq512_mask: {
13010 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13011 return EmitX86MaskedCompare(*this, CC, true, Ops);
13012 }
13013 case X86::BI__builtin_ia32_ucmpb128_mask:
13014 case X86::BI__builtin_ia32_ucmpb256_mask:
13015 case X86::BI__builtin_ia32_ucmpb512_mask:
13016 case X86::BI__builtin_ia32_ucmpw128_mask:
13017 case X86::BI__builtin_ia32_ucmpw256_mask:
13018 case X86::BI__builtin_ia32_ucmpw512_mask:
13019 case X86::BI__builtin_ia32_ucmpd128_mask:
13020 case X86::BI__builtin_ia32_ucmpd256_mask:
13021 case X86::BI__builtin_ia32_ucmpd512_mask:
13022 case X86::BI__builtin_ia32_ucmpq128_mask:
13023 case X86::BI__builtin_ia32_ucmpq256_mask:
13024 case X86::BI__builtin_ia32_ucmpq512_mask: {
13025 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13026 return EmitX86MaskedCompare(*this, CC, false, Ops);
13027 }
13028 case X86::BI__builtin_ia32_vpcomb:
13029 case X86::BI__builtin_ia32_vpcomw:
13030 case X86::BI__builtin_ia32_vpcomd:
13031 case X86::BI__builtin_ia32_vpcomq:
13032 return EmitX86vpcom(*this, Ops, true);
13033 case X86::BI__builtin_ia32_vpcomub:
13034 case X86::BI__builtin_ia32_vpcomuw:
13035 case X86::BI__builtin_ia32_vpcomud:
13036 case X86::BI__builtin_ia32_vpcomuq:
13037 return EmitX86vpcom(*this, Ops, false);
13038
13039 case X86::BI__builtin_ia32_kortestcqi:
13040 case X86::BI__builtin_ia32_kortestchi:
13041 case X86::BI__builtin_ia32_kortestcsi:
13042 case X86::BI__builtin_ia32_kortestcdi: {
13043 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13044 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13045 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13046 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13047 }
13048 case X86::BI__builtin_ia32_kortestzqi:
13049 case X86::BI__builtin_ia32_kortestzhi:
13050 case X86::BI__builtin_ia32_kortestzsi:
13051 case X86::BI__builtin_ia32_kortestzdi: {
13052 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13053 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13054 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13055 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13056 }
13057
13058 case X86::BI__builtin_ia32_ktestcqi:
13059 case X86::BI__builtin_ia32_ktestzqi:
13060 case X86::BI__builtin_ia32_ktestchi:
13061 case X86::BI__builtin_ia32_ktestzhi:
13062 case X86::BI__builtin_ia32_ktestcsi:
13063 case X86::BI__builtin_ia32_ktestzsi:
13064 case X86::BI__builtin_ia32_ktestcdi:
13065 case X86::BI__builtin_ia32_ktestzdi: {
13066 Intrinsic::ID IID;
13067 switch (BuiltinID) {
13068 default: llvm_unreachable("Unsupported intrinsic!");
13069 case X86::BI__builtin_ia32_ktestcqi:
13070 IID = Intrinsic::x86_avx512_ktestc_b;
13071 break;
13072 case X86::BI__builtin_ia32_ktestzqi:
13073 IID = Intrinsic::x86_avx512_ktestz_b;
13074 break;
13075 case X86::BI__builtin_ia32_ktestchi:
13076 IID = Intrinsic::x86_avx512_ktestc_w;
13077 break;
13078 case X86::BI__builtin_ia32_ktestzhi:
13079 IID = Intrinsic::x86_avx512_ktestz_w;
13080 break;
13081 case X86::BI__builtin_ia32_ktestcsi:
13082 IID = Intrinsic::x86_avx512_ktestc_d;
13083 break;
13084 case X86::BI__builtin_ia32_ktestzsi:
13085 IID = Intrinsic::x86_avx512_ktestz_d;
13086 break;
13087 case X86::BI__builtin_ia32_ktestcdi:
13088 IID = Intrinsic::x86_avx512_ktestc_q;
13089 break;
13090 case X86::BI__builtin_ia32_ktestzdi:
13091 IID = Intrinsic::x86_avx512_ktestz_q;
13092 break;
13093 }
13094
13095 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13096 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13097 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13098 Function *Intr = CGM.getIntrinsic(IID);
13099 return Builder.CreateCall(Intr, {LHS, RHS});
13100 }
13101
13102 case X86::BI__builtin_ia32_kaddqi:
13103 case X86::BI__builtin_ia32_kaddhi:
13104 case X86::BI__builtin_ia32_kaddsi:
13105 case X86::BI__builtin_ia32_kadddi: {
13106 Intrinsic::ID IID;
13107 switch (BuiltinID) {
13108 default: llvm_unreachable("Unsupported intrinsic!");
13109 case X86::BI__builtin_ia32_kaddqi:
13110 IID = Intrinsic::x86_avx512_kadd_b;
13111 break;
13112 case X86::BI__builtin_ia32_kaddhi:
13113 IID = Intrinsic::x86_avx512_kadd_w;
13114 break;
13115 case X86::BI__builtin_ia32_kaddsi:
13116 IID = Intrinsic::x86_avx512_kadd_d;
13117 break;
13118 case X86::BI__builtin_ia32_kadddi:
13119 IID = Intrinsic::x86_avx512_kadd_q;
13120 break;
13121 }
13122
13123 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13124 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13125 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13126 Function *Intr = CGM.getIntrinsic(IID);
13127 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
13128 return Builder.CreateBitCast(Res, Ops[0]->getType());
13129 }
13130 case X86::BI__builtin_ia32_kandqi:
13131 case X86::BI__builtin_ia32_kandhi:
13132 case X86::BI__builtin_ia32_kandsi:
13133 case X86::BI__builtin_ia32_kanddi:
13134 return EmitX86MaskLogic(*this, Instruction::And, Ops);
13135 case X86::BI__builtin_ia32_kandnqi:
13136 case X86::BI__builtin_ia32_kandnhi:
13137 case X86::BI__builtin_ia32_kandnsi:
13138 case X86::BI__builtin_ia32_kandndi:
13139 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
13140 case X86::BI__builtin_ia32_korqi:
13141 case X86::BI__builtin_ia32_korhi:
13142 case X86::BI__builtin_ia32_korsi:
13143 case X86::BI__builtin_ia32_kordi:
13144 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
13145 case X86::BI__builtin_ia32_kxnorqi:
13146 case X86::BI__builtin_ia32_kxnorhi:
13147 case X86::BI__builtin_ia32_kxnorsi:
13148 case X86::BI__builtin_ia32_kxnordi:
13149 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
13150 case X86::BI__builtin_ia32_kxorqi:
13151 case X86::BI__builtin_ia32_kxorhi:
13152 case X86::BI__builtin_ia32_kxorsi:
13153 case X86::BI__builtin_ia32_kxordi:
13154 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
13155 case X86::BI__builtin_ia32_knotqi:
13156 case X86::BI__builtin_ia32_knothi:
13157 case X86::BI__builtin_ia32_knotsi:
13158 case X86::BI__builtin_ia32_knotdi: {
13159 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13160 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13161 return Builder.CreateBitCast(Builder.CreateNot(Res),
13162 Ops[0]->getType());
13163 }
13164 case X86::BI__builtin_ia32_kmovb:
13165 case X86::BI__builtin_ia32_kmovw:
13166 case X86::BI__builtin_ia32_kmovd:
13167 case X86::BI__builtin_ia32_kmovq: {
13168 // Bitcast to vXi1 type and then back to integer. This gets the mask
13169 // register type into the IR, but might be optimized out depending on
13170 // what's around it.
13171 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13172 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13173 return Builder.CreateBitCast(Res, Ops[0]->getType());
13174 }
13175
13176 case X86::BI__builtin_ia32_kunpckdi:
13177 case X86::BI__builtin_ia32_kunpcksi:
13178 case X86::BI__builtin_ia32_kunpckhi: {
13179 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13180 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13181 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13182 int Indices[64];
13183 for (unsigned i = 0; i != NumElts; ++i)
13184 Indices[i] = i;
13185
13186 // First extract half of each vector. This gives better codegen than
13187 // doing it in a single shuffle.
13188 LHS = Builder.CreateShuffleVector(LHS, LHS,
13189 makeArrayRef(Indices, NumElts / 2));
13190 RHS = Builder.CreateShuffleVector(RHS, RHS,
13191 makeArrayRef(Indices, NumElts / 2));
13192 // Concat the vectors.
13193 // NOTE: Operands are swapped to match the intrinsic definition.
13194 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
13195 makeArrayRef(Indices, NumElts));
13196 return Builder.CreateBitCast(Res, Ops[0]->getType());
13197 }
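    // For illustration: for kunpckhi both operands are 16-bit masks, so each
    // is first narrowed to its low 8 elements and the halves are concatenated
    // with Ops[1] in the low half and Ops[0] in the high half (hence the
    // swapped shuffle operands), matching the documented kunpckbw behaviour
    // of dst[7:0] = b[7:0], dst[15:8] = a[7:0].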
13198
13199 case X86::BI__builtin_ia32_vplzcntd_128:
13200 case X86::BI__builtin_ia32_vplzcntd_256:
13201 case X86::BI__builtin_ia32_vplzcntd_512:
13202 case X86::BI__builtin_ia32_vplzcntq_128:
13203 case X86::BI__builtin_ia32_vplzcntq_256:
13204 case X86::BI__builtin_ia32_vplzcntq_512: {
13205 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13206 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
13207 }
13208 case X86::BI__builtin_ia32_sqrtss:
13209 case X86::BI__builtin_ia32_sqrtsd: {
13210 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13211 Function *F;
13212 if (Builder.getIsFPConstrained()) {
13213 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13214 A->getType());
13215 A = Builder.CreateConstrainedFPCall(F, {A});
13216 } else {
13217 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13218 A = Builder.CreateCall(F, {A});
13219 }
13220 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13221 }
13222 case X86::BI__builtin_ia32_sqrtsd_round_mask:
13223 case X86::BI__builtin_ia32_sqrtss_round_mask: {
13224 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13225 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
13226 // otherwise keep the intrinsic.
13227 if (CC != 4) {
13228 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
13229 Intrinsic::x86_avx512_mask_sqrt_sd :
13230 Intrinsic::x86_avx512_mask_sqrt_ss;
13231 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13232 }
13233 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13234 Function *F;
13235 if (Builder.getIsFPConstrained()) {
13236 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13237 A->getType());
13238 A = Builder.CreateConstrainedFPCall(F, A);
13239 } else {
13240 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13241 A = Builder.CreateCall(F, A);
13242 }
13243 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13244 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
13245 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13246 }
13247 case X86::BI__builtin_ia32_sqrtpd256:
13248 case X86::BI__builtin_ia32_sqrtpd:
13249 case X86::BI__builtin_ia32_sqrtps256:
13250 case X86::BI__builtin_ia32_sqrtps:
13251 case X86::BI__builtin_ia32_sqrtps512:
13252 case X86::BI__builtin_ia32_sqrtpd512: {
13253 if (Ops.size() == 2) {
13254 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13255 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
13256 // otherwise keep the intrinsic.
13257 if (CC != 4) {
13258 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
13259 Intrinsic::x86_avx512_sqrt_ps_512 :
13260 Intrinsic::x86_avx512_sqrt_pd_512;
13261 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13262 }
13263 }
13264 if (Builder.getIsFPConstrained()) {
13265 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13266 Ops[0]->getType());
13267 return Builder.CreateConstrainedFPCall(F, Ops[0]);
13268 } else {
13269 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
13270 return Builder.CreateCall(F, Ops[0]);
13271 }
13272 }
13273 case X86::BI__builtin_ia32_pabsb128:
13274 case X86::BI__builtin_ia32_pabsw128:
13275 case X86::BI__builtin_ia32_pabsd128:
13276 case X86::BI__builtin_ia32_pabsb256:
13277 case X86::BI__builtin_ia32_pabsw256:
13278 case X86::BI__builtin_ia32_pabsd256:
13279 case X86::BI__builtin_ia32_pabsq128:
13280 case X86::BI__builtin_ia32_pabsq256:
13281 case X86::BI__builtin_ia32_pabsb512:
13282 case X86::BI__builtin_ia32_pabsw512:
13283 case X86::BI__builtin_ia32_pabsd512:
13284 case X86::BI__builtin_ia32_pabsq512: {
13285 Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
13286 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13287 }
13288 case X86::BI__builtin_ia32_pmaxsb128:
13289 case X86::BI__builtin_ia32_pmaxsw128:
13290 case X86::BI__builtin_ia32_pmaxsd128:
13291 case X86::BI__builtin_ia32_pmaxsq128:
13292 case X86::BI__builtin_ia32_pmaxsb256:
13293 case X86::BI__builtin_ia32_pmaxsw256:
13294 case X86::BI__builtin_ia32_pmaxsd256:
13295 case X86::BI__builtin_ia32_pmaxsq256:
13296 case X86::BI__builtin_ia32_pmaxsb512:
13297 case X86::BI__builtin_ia32_pmaxsw512:
13298 case X86::BI__builtin_ia32_pmaxsd512:
13299 case X86::BI__builtin_ia32_pmaxsq512:
13300 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
13301 case X86::BI__builtin_ia32_pmaxub128:
13302 case X86::BI__builtin_ia32_pmaxuw128:
13303 case X86::BI__builtin_ia32_pmaxud128:
13304 case X86::BI__builtin_ia32_pmaxuq128:
13305 case X86::BI__builtin_ia32_pmaxub256:
13306 case X86::BI__builtin_ia32_pmaxuw256:
13307 case X86::BI__builtin_ia32_pmaxud256:
13308 case X86::BI__builtin_ia32_pmaxuq256:
13309 case X86::BI__builtin_ia32_pmaxub512:
13310 case X86::BI__builtin_ia32_pmaxuw512:
13311 case X86::BI__builtin_ia32_pmaxud512:
13312 case X86::BI__builtin_ia32_pmaxuq512:
13313 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
13314 case X86::BI__builtin_ia32_pminsb128:
13315 case X86::BI__builtin_ia32_pminsw128:
13316 case X86::BI__builtin_ia32_pminsd128:
13317 case X86::BI__builtin_ia32_pminsq128:
13318 case X86::BI__builtin_ia32_pminsb256:
13319 case X86::BI__builtin_ia32_pminsw256:
13320 case X86::BI__builtin_ia32_pminsd256:
13321 case X86::BI__builtin_ia32_pminsq256:
13322 case X86::BI__builtin_ia32_pminsb512:
13323 case X86::BI__builtin_ia32_pminsw512:
13324 case X86::BI__builtin_ia32_pminsd512:
13325 case X86::BI__builtin_ia32_pminsq512:
13326 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
13327 case X86::BI__builtin_ia32_pminub128:
13328 case X86::BI__builtin_ia32_pminuw128:
13329 case X86::BI__builtin_ia32_pminud128:
13330 case X86::BI__builtin_ia32_pminuq128:
13331 case X86::BI__builtin_ia32_pminub256:
13332 case X86::BI__builtin_ia32_pminuw256:
13333 case X86::BI__builtin_ia32_pminud256:
13334 case X86::BI__builtin_ia32_pminuq256:
13335 case X86::BI__builtin_ia32_pminub512:
13336 case X86::BI__builtin_ia32_pminuw512:
13337 case X86::BI__builtin_ia32_pminud512:
13338 case X86::BI__builtin_ia32_pminuq512:
13339 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
13340
13341 case X86::BI__builtin_ia32_pmuludq128:
13342 case X86::BI__builtin_ia32_pmuludq256:
13343 case X86::BI__builtin_ia32_pmuludq512:
13344 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
13345
13346 case X86::BI__builtin_ia32_pmuldq128:
13347 case X86::BI__builtin_ia32_pmuldq256:
13348 case X86::BI__builtin_ia32_pmuldq512:
13349 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
13350
13351 case X86::BI__builtin_ia32_pternlogd512_mask:
13352 case X86::BI__builtin_ia32_pternlogq512_mask:
13353 case X86::BI__builtin_ia32_pternlogd128_mask:
13354 case X86::BI__builtin_ia32_pternlogd256_mask:
13355 case X86::BI__builtin_ia32_pternlogq128_mask:
13356 case X86::BI__builtin_ia32_pternlogq256_mask:
13357 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
13358
13359 case X86::BI__builtin_ia32_pternlogd512_maskz:
13360 case X86::BI__builtin_ia32_pternlogq512_maskz:
13361 case X86::BI__builtin_ia32_pternlogd128_maskz:
13362 case X86::BI__builtin_ia32_pternlogd256_maskz:
13363 case X86::BI__builtin_ia32_pternlogq128_maskz:
13364 case X86::BI__builtin_ia32_pternlogq256_maskz:
13365 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
13366
13367 case X86::BI__builtin_ia32_vpshldd128:
13368 case X86::BI__builtin_ia32_vpshldd256:
13369 case X86::BI__builtin_ia32_vpshldd512:
13370 case X86::BI__builtin_ia32_vpshldq128:
13371 case X86::BI__builtin_ia32_vpshldq256:
13372 case X86::BI__builtin_ia32_vpshldq512:
13373 case X86::BI__builtin_ia32_vpshldw128:
13374 case X86::BI__builtin_ia32_vpshldw256:
13375 case X86::BI__builtin_ia32_vpshldw512:
13376 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13377
13378 case X86::BI__builtin_ia32_vpshrdd128:
13379 case X86::BI__builtin_ia32_vpshrdd256:
13380 case X86::BI__builtin_ia32_vpshrdd512:
13381 case X86::BI__builtin_ia32_vpshrdq128:
13382 case X86::BI__builtin_ia32_vpshrdq256:
13383 case X86::BI__builtin_ia32_vpshrdq512:
13384 case X86::BI__builtin_ia32_vpshrdw128:
13385 case X86::BI__builtin_ia32_vpshrdw256:
13386 case X86::BI__builtin_ia32_vpshrdw512:
13387 // Ops 0 and 1 are swapped.
13388 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13389
13390 case X86::BI__builtin_ia32_vpshldvd128:
13391 case X86::BI__builtin_ia32_vpshldvd256:
13392 case X86::BI__builtin_ia32_vpshldvd512:
13393 case X86::BI__builtin_ia32_vpshldvq128:
13394 case X86::BI__builtin_ia32_vpshldvq256:
13395 case X86::BI__builtin_ia32_vpshldvq512:
13396 case X86::BI__builtin_ia32_vpshldvw128:
13397 case X86::BI__builtin_ia32_vpshldvw256:
13398 case X86::BI__builtin_ia32_vpshldvw512:
13399 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13400
13401 case X86::BI__builtin_ia32_vpshrdvd128:
13402 case X86::BI__builtin_ia32_vpshrdvd256:
13403 case X86::BI__builtin_ia32_vpshrdvd512:
13404 case X86::BI__builtin_ia32_vpshrdvq128:
13405 case X86::BI__builtin_ia32_vpshrdvq256:
13406 case X86::BI__builtin_ia32_vpshrdvq512:
13407 case X86::BI__builtin_ia32_vpshrdvw128:
13408 case X86::BI__builtin_ia32_vpshrdvw256:
13409 case X86::BI__builtin_ia32_vpshrdvw512:
13410 // Ops 0 and 1 are swapped.
13411 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13412
13413 // 3DNow!
13414 case X86::BI__builtin_ia32_pswapdsf:
13415 case X86::BI__builtin_ia32_pswapdsi: {
13416 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
13417 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
13418 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
13419 return Builder.CreateCall(F, Ops, "pswapd");
13420 }
13421 case X86::BI__builtin_ia32_rdrand16_step:
13422 case X86::BI__builtin_ia32_rdrand32_step:
13423 case X86::BI__builtin_ia32_rdrand64_step:
13424 case X86::BI__builtin_ia32_rdseed16_step:
13425 case X86::BI__builtin_ia32_rdseed32_step:
13426 case X86::BI__builtin_ia32_rdseed64_step: {
13427 Intrinsic::ID ID;
13428 switch (BuiltinID) {
13429 default: llvm_unreachable("Unsupported intrinsic!");
13430 case X86::BI__builtin_ia32_rdrand16_step:
13431 ID = Intrinsic::x86_rdrand_16;
13432 break;
13433 case X86::BI__builtin_ia32_rdrand32_step:
13434 ID = Intrinsic::x86_rdrand_32;
13435 break;
13436 case X86::BI__builtin_ia32_rdrand64_step:
13437 ID = Intrinsic::x86_rdrand_64;
13438 break;
13439 case X86::BI__builtin_ia32_rdseed16_step:
13440 ID = Intrinsic::x86_rdseed_16;
13441 break;
13442 case X86::BI__builtin_ia32_rdseed32_step:
13443 ID = Intrinsic::x86_rdseed_32;
13444 break;
13445 case X86::BI__builtin_ia32_rdseed64_step:
13446 ID = Intrinsic::x86_rdseed_64;
13447 break;
13448 }
13449
13450 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
13451 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
13452 Ops[0]);
13453 return Builder.CreateExtractValue(Call, 1);
13454 }
13455 case X86::BI__builtin_ia32_addcarryx_u32:
13456 case X86::BI__builtin_ia32_addcarryx_u64:
13457 case X86::BI__builtin_ia32_subborrow_u32:
13458 case X86::BI__builtin_ia32_subborrow_u64: {
13459 Intrinsic::ID IID;
13460 switch (BuiltinID) {
13461 default: llvm_unreachable("Unsupported intrinsic!");
13462 case X86::BI__builtin_ia32_addcarryx_u32:
13463 IID = Intrinsic::x86_addcarry_32;
13464 break;
13465 case X86::BI__builtin_ia32_addcarryx_u64:
13466 IID = Intrinsic::x86_addcarry_64;
13467 break;
13468 case X86::BI__builtin_ia32_subborrow_u32:
13469 IID = Intrinsic::x86_subborrow_32;
13470 break;
13471 case X86::BI__builtin_ia32_subborrow_u64:
13472 IID = Intrinsic::x86_subborrow_64;
13473 break;
13474 }
13475
13476 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
13477 { Ops[0], Ops[1], Ops[2] });
13478 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13479 Ops[3]);
13480 return Builder.CreateExtractValue(Call, 0);
13481 }
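A usage sketch for the carry-chain builtins handled above (assuming x86 and the _addcarry_u32 wrapper from <immintrin.h>; function and variable names are illustrative):
#include <immintrin.h>
unsigned char AddWithCarry(unsigned A, unsigned B, unsigned *Sum) {
  return _addcarry_u32(/*carry-in*/ 0, A, B, Sum);
}
// The builtin lowers to llvm.x86.addcarry.32: element 1 of the returned
// {carry, sum} pair is stored through the out pointer (Ops[3]) and element 0,
// the carry-out, becomes the return value.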
13482
13483 case X86::BI__builtin_ia32_fpclassps128_mask:
13484 case X86::BI__builtin_ia32_fpclassps256_mask:
13485 case X86::BI__builtin_ia32_fpclassps512_mask:
13486 case X86::BI__builtin_ia32_fpclasspd128_mask:
13487 case X86::BI__builtin_ia32_fpclasspd256_mask:
13488 case X86::BI__builtin_ia32_fpclasspd512_mask: {
13489 unsigned NumElts =
13490 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13491 Value *MaskIn = Ops[2];
13492 Ops.erase(&Ops[2]);
13493
13494 Intrinsic::ID ID;
13495 switch (BuiltinID) {
13496 default: llvm_unreachable("Unsupported intrinsic!");
13497 case X86::BI__builtin_ia32_fpclassps128_mask:
13498 ID = Intrinsic::x86_avx512_fpclass_ps_128;
13499 break;
13500 case X86::BI__builtin_ia32_fpclassps256_mask:
13501 ID = Intrinsic::x86_avx512_fpclass_ps_256;
13502 break;
13503 case X86::BI__builtin_ia32_fpclassps512_mask:
13504 ID = Intrinsic::x86_avx512_fpclass_ps_512;
13505 break;
13506 case X86::BI__builtin_ia32_fpclasspd128_mask:
13507 ID = Intrinsic::x86_avx512_fpclass_pd_128;
13508 break;
13509 case X86::BI__builtin_ia32_fpclasspd256_mask:
13510 ID = Intrinsic::x86_avx512_fpclass_pd_256;
13511 break;
13512 case X86::BI__builtin_ia32_fpclasspd512_mask:
13513 ID = Intrinsic::x86_avx512_fpclass_pd_512;
13514 break;
13515 }
13516
13517 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13518 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
13519 }
13520
13521 case X86::BI__builtin_ia32_vp2intersect_q_512:
13522 case X86::BI__builtin_ia32_vp2intersect_q_256:
13523 case X86::BI__builtin_ia32_vp2intersect_q_128:
13524 case X86::BI__builtin_ia32_vp2intersect_d_512:
13525 case X86::BI__builtin_ia32_vp2intersect_d_256:
13526 case X86::BI__builtin_ia32_vp2intersect_d_128: {
13527 unsigned NumElts =
13528 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13529 Intrinsic::ID ID;
13530
13531 switch (BuiltinID) {
13532 default: llvm_unreachable("Unsupported intrinsic!");
13533 case X86::BI__builtin_ia32_vp2intersect_q_512:
13534 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
13535 break;
13536 case X86::BI__builtin_ia32_vp2intersect_q_256:
13537 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
13538 break;
13539 case X86::BI__builtin_ia32_vp2intersect_q_128:
13540 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
13541 break;
13542 case X86::BI__builtin_ia32_vp2intersect_d_512:
13543 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
13544 break;
13545 case X86::BI__builtin_ia32_vp2intersect_d_256:
13546 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
13547 break;
13548 case X86::BI__builtin_ia32_vp2intersect_d_128:
13549 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
13550 break;
13551 }
13552
13553 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
13554 Value *Result = Builder.CreateExtractValue(Call, 0);
13555 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13556 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
13557
13558 Result = Builder.CreateExtractValue(Call, 1);
13559 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13560 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
13561 }
13562
13563 case X86::BI__builtin_ia32_vpmultishiftqb128:
13564 case X86::BI__builtin_ia32_vpmultishiftqb256:
13565 case X86::BI__builtin_ia32_vpmultishiftqb512: {
13566 Intrinsic::ID ID;
13567 switch (BuiltinID) {
13568 default: llvm_unreachable("Unsupported intrinsic!");
13569 case X86::BI__builtin_ia32_vpmultishiftqb128:
13570 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
13571 break;
13572 case X86::BI__builtin_ia32_vpmultishiftqb256:
13573 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
13574 break;
13575 case X86::BI__builtin_ia32_vpmultishiftqb512:
13576 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
13577 break;
13578 }
13579
13580 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13581 }
13582
13583 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13584 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13585 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
13586 unsigned NumElts =
13587 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13588 Value *MaskIn = Ops[2];
13589 Ops.erase(&Ops[2]);
13590
13591 Intrinsic::ID ID;
13592 switch (BuiltinID) {
13593 default: llvm_unreachable("Unsupported intrinsic!");
13594 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13595 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
13596 break;
13597 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13598 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
13599 break;
13600 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
13601 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
13602 break;
13603 }
13604
13605 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13606 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
13607 }
13608
13609 // packed comparison intrinsics
13610 case X86::BI__builtin_ia32_cmpeqps:
13611 case X86::BI__builtin_ia32_cmpeqpd:
13612 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
13613 case X86::BI__builtin_ia32_cmpltps:
13614 case X86::BI__builtin_ia32_cmpltpd:
13615 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
13616 case X86::BI__builtin_ia32_cmpleps:
13617 case X86::BI__builtin_ia32_cmplepd:
13618 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
13619 case X86::BI__builtin_ia32_cmpunordps:
13620 case X86::BI__builtin_ia32_cmpunordpd:
13621 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
13622 case X86::BI__builtin_ia32_cmpneqps:
13623 case X86::BI__builtin_ia32_cmpneqpd:
13624 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
13625 case X86::BI__builtin_ia32_cmpnltps:
13626 case X86::BI__builtin_ia32_cmpnltpd:
13627 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
13628 case X86::BI__builtin_ia32_cmpnleps:
13629 case X86::BI__builtin_ia32_cmpnlepd:
13630 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
13631 case X86::BI__builtin_ia32_cmpordps:
13632 case X86::BI__builtin_ia32_cmpordpd:
13633 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
13634 case X86::BI__builtin_ia32_cmpps128_mask:
13635 case X86::BI__builtin_ia32_cmpps256_mask:
13636 case X86::BI__builtin_ia32_cmpps512_mask:
13637 case X86::BI__builtin_ia32_cmppd128_mask:
13638 case X86::BI__builtin_ia32_cmppd256_mask:
13639 case X86::BI__builtin_ia32_cmppd512_mask:
13640 IsMaskFCmp = true;
13641 LLVM_FALLTHROUGH;
13642 case X86::BI__builtin_ia32_cmpps:
13643 case X86::BI__builtin_ia32_cmpps256:
13644 case X86::BI__builtin_ia32_cmppd:
13645 case X86::BI__builtin_ia32_cmppd256: {
13646 // Lowering vector comparisons to fcmp instructions, while
13647 // ignoring the requested signalling behaviour and
13648 // the requested rounding mode.
13649 // This is only possible as long as FENV_ACCESS is not implemented.
13650 // See also: https://reviews.llvm.org/D45616
13651
13652 // The third argument is the comparison condition, an integer in the
13653 // range [0, 31].
13654 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
13655
13656 // Lower to an IR fcmp instruction,
13657 // ignoring the requested signaling behaviour;
13658 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
13659 FCmpInst::Predicate Pred;
13660 bool IsSignaling;
13661 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
13662 // behavior is inverted. We'll handle that after the switch.
13663 switch (CC & 0xf) {
13664 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
13665 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
13666 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
13667 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
13668 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
13669 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
13670 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
13671 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
13672 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
13673 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
13674 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
13675 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
13676 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
13677 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
13678 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
13679 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
13680 default: llvm_unreachable("Unhandled CC");
13681 }
13682
13683 // Invert the signalling behavior for 16-31.
13684 if (CC & 0x10)
13685 IsSignaling = !IsSignaling;
13686
13687 // If the predicate is true or false and we're using constrained intrinsics,
13688 // we don't have a compare intrinsic we can use. Just use the legacy X86
13689 // specific intrinsic.
13690 // If the intrinsic is mask enabled and we're using constrained intrinsics,
13691 // use the legacy X86 specific intrinsic.
13692 if (Builder.getIsFPConstrained() &&
13693 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
13694 IsMaskFCmp)) {
13695
13696 Intrinsic::ID IID;
13697 switch (BuiltinID) {
13698 default: llvm_unreachable("Unexpected builtin");
13699 case X86::BI__builtin_ia32_cmpps:
13700 IID = Intrinsic::x86_sse_cmp_ps;
13701 break;
13702 case X86::BI__builtin_ia32_cmpps256:
13703 IID = Intrinsic::x86_avx_cmp_ps_256;
13704 break;
13705 case X86::BI__builtin_ia32_cmppd:
13706 IID = Intrinsic::x86_sse2_cmp_pd;
13707 break;
13708 case X86::BI__builtin_ia32_cmppd256:
13709 IID = Intrinsic::x86_avx_cmp_pd_256;
13710 break;
13711 case X86::BI__builtin_ia32_cmpps512_mask:
13712 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
13713 break;
13714 case X86::BI__builtin_ia32_cmppd512_mask:
13715 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
13716 break;
13717 case X86::BI__builtin_ia32_cmpps128_mask:
13718 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
13719 break;
13720 case X86::BI__builtin_ia32_cmpps256_mask:
13721 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
13722 break;
13723 case X86::BI__builtin_ia32_cmppd128_mask:
13724 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
13725 break;
13726 case X86::BI__builtin_ia32_cmppd256_mask:
13727 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
13728 break;
13729 }
13730
13731 Function *Intr = CGM.getIntrinsic(IID);
13732 if (IsMaskFCmp) {
13733 unsigned NumElts =
13734 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13735 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
13736 Value *Cmp = Builder.CreateCall(Intr, Ops);
13737 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
13738 }
13739
13740 return Builder.CreateCall(Intr, Ops);
13741 }
13742
13743 // Builtins without the _mask suffix return a vector of integers
13744 // of the same width as the input vectors
13745 if (IsMaskFCmp) {
13746 // We ignore SAE if strict FP is disabled. We only keep precise
13747 // exception behavior under strict FP.
13748 unsigned NumElts =
13749 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13750 Value *Cmp;
13751 if (IsSignaling)
13752 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
13753 else
13754 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
13755 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
13756 }
13757
13758 return getVectorFCmpIR(Pred, IsSignaling);
13759 }
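A worked instance of the CC-to-predicate mapping above (a sketch assuming AVX and the _CMP_* immediates from <immintrin.h>):
#include <immintrin.h>
__m128 GtSignaling(__m128 A, __m128 B) { return _mm_cmp_ps(A, B, _CMP_GT_OS); } // CC = 0x0e
__m128 GtQuiet(__m128 A, __m128 B)     { return _mm_cmp_ps(A, B, _CMP_GT_OQ); } // CC = 0x1e
// Both take the 0x0e arm of the switch and pick FCMP_OGT; bit 0x10 only flips
// IsSignaling, so with strict FP disabled both emit the same 'fcmp ogt' IR.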
13760
13761 // SSE scalar comparison intrinsics
13762 case X86::BI__builtin_ia32_cmpeqss:
13763 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
13764 case X86::BI__builtin_ia32_cmpltss:
13765 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
13766 case X86::BI__builtin_ia32_cmpless:
13767 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
13768 case X86::BI__builtin_ia32_cmpunordss:
13769 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
13770 case X86::BI__builtin_ia32_cmpneqss:
13771 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
13772 case X86::BI__builtin_ia32_cmpnltss:
13773 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
13774 case X86::BI__builtin_ia32_cmpnless:
13775 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
13776 case X86::BI__builtin_ia32_cmpordss:
13777 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
13778 case X86::BI__builtin_ia32_cmpeqsd:
13779 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
13780 case X86::BI__builtin_ia32_cmpltsd:
13781 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
13782 case X86::BI__builtin_ia32_cmplesd:
13783 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
13784 case X86::BI__builtin_ia32_cmpunordsd:
13785 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
13786 case X86::BI__builtin_ia32_cmpneqsd:
13787 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
13788 case X86::BI__builtin_ia32_cmpnltsd:
13789 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
13790 case X86::BI__builtin_ia32_cmpnlesd:
13791 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
13792 case X86::BI__builtin_ia32_cmpordsd:
13793 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
13794
13795 // f16c half2float intrinsics
13796 case X86::BI__builtin_ia32_vcvtph2ps:
13797 case X86::BI__builtin_ia32_vcvtph2ps256:
13798 case X86::BI__builtin_ia32_vcvtph2ps_mask:
13799 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
13800 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
13801 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
13802
13803// AVX512 bf16 intrinsics
13804 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
13805 Ops[2] = getMaskVecValue(
13806 *this, Ops[2],
13807 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
13808 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
13809 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13810 }
13811 case X86::BI__builtin_ia32_cvtsbf162ss_32:
13812 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
13813
13814 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
13815 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
13816 Intrinsic::ID IID;
13817 switch (BuiltinID) {
13818 default: llvm_unreachable("Unsupported intrinsic!");
13819 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
13820 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
13821 break;
13822 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
13823 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
13824 break;
13825 }
13826 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
13827 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
13828 }
13829
13830 case X86::BI__emul:
13831 case X86::BI__emulu: {
13832 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
13833 bool isSigned = (BuiltinID == X86::BI__emul);
13834 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
13835 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
13836 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
13837 }
13838 case X86::BI__mulh:
13839 case X86::BI__umulh:
13840 case X86::BI_mul128:
13841 case X86::BI_umul128: {
13842 llvm::Type *ResType = ConvertType(E->getType());
13843 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
13844
13845 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
13846 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
13847 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
13848
13849 Value *MulResult, *HigherBits;
13850 if (IsSigned) {
13851 MulResult = Builder.CreateNSWMul(LHS, RHS);
13852 HigherBits = Builder.CreateAShr(MulResult, 64);
13853 } else {
13854 MulResult = Builder.CreateNUWMul(LHS, RHS);
13855 HigherBits = Builder.CreateLShr(MulResult, 64);
13856 }
13857 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
13858
13859 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
13860 return HigherBits;
13861
13862 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
13863 Builder.CreateStore(HigherBits, HighBitsAddress);
13864 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
13865 }
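A sketch of the MSVC-style usage these multiply builtins back (assuming an environment where <intrin.h> declares _umul128; names are illustrative):
#include <intrin.h>
unsigned __int64 MulFull(unsigned __int64 A, unsigned __int64 B,
                         unsigned __int64 *High) {
  return _umul128(A, B, High); // X86::BI_umul128
}
// Both operands are widened to i128 and multiplied with no-unsigned-wrap; the
// top 64 bits are stored through High (arg 2) and the low 64 bits are returned,
// while __umulh/__mulh return only the high half and take no out pointer.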
13866
13867 case X86::BI__faststorefence: {
13868 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
13869 llvm::SyncScope::System);
13870 }
13871 case X86::BI__shiftleft128:
13872 case X86::BI__shiftright128: {
13873 // FIXME: Once fshl/fshr no longer add an unneeded and and cmov, do this:
13874 // llvm::Function *F = CGM.getIntrinsic(
13875 // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
13876 // Int64Ty);
13877 // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
13878 // return Builder.CreateCall(F, Ops);
13879 llvm::Type *Int128Ty = Builder.getInt128Ty();
13880 Value *HighPart128 =
13881 Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
13882 Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
13883 Value *Val = Builder.CreateOr(HighPart128, LowPart128);
13884 Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
13885 llvm::ConstantInt::get(Int128Ty, 0x3f));
13886 Value *Res;
13887 if (BuiltinID == X86::BI__shiftleft128)
13888 Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
13889 else
13890 Res = Builder.CreateLShr(Val, Amt);
13891 return Builder.CreateTrunc(Res, Int64Ty);
13892 }
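A usage sketch for the 128-bit shift builtins above (assuming <intrin.h> provides the MSVC declarations):
#include <intrin.h>
unsigned __int64 NewHigh(unsigned __int64 Low, unsigned __int64 High) {
  return __shiftleft128(Low, High, 4); // e.g. Low = 0, High = 1 yields 16
}
// Low and High are packed into an i128 with High in the upper 64 bits, the
// shift amount is masked to 6 bits, and the upper 64 bits of the shifted value
// are truncated back to i64, matching the IR emitted above.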
13893 case X86::BI_ReadWriteBarrier:
13894 case X86::BI_ReadBarrier:
13895 case X86::BI_WriteBarrier: {
13896 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
13897 llvm::SyncScope::SingleThread);
13898 }
13899 case X86::BI_BitScanForward:
13900 case X86::BI_BitScanForward64:
13901 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
13902 case X86::BI_BitScanReverse:
13903 case X86::BI_BitScanReverse64:
13904 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
13905
13906 case X86::BI_InterlockedAnd64:
13907 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
13908 case X86::BI_InterlockedExchange64:
13909 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
13910 case X86::BI_InterlockedExchangeAdd64:
13911 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
13912 case X86::BI_InterlockedExchangeSub64:
13913 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
13914 case X86::BI_InterlockedOr64:
13915 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
13916 case X86::BI_InterlockedXor64:
13917 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
13918 case X86::BI_InterlockedDecrement64:
13919 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
13920 case X86::BI_InterlockedIncrement64:
13921 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
13922 case X86::BI_InterlockedCompareExchange128: {
13923 // InterlockedCompareExchange128 doesn't directly refer to 128bit ints;
13924 // instead it takes pointers to 64bit ints for Destination and
13925 // ComparandResult, and the exchange value is passed as two 64bit ints
13926 // (high & low). The previous value is written to ComparandResult, and
13927 // success is returned.
13928
13929 llvm::Type *Int128Ty = Builder.getInt128Ty();
13930 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
13931
13932 Value *Destination =
13933 Builder.CreateBitCast(Ops[0], Int128PtrTy);
13934 Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
13935 Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
13936 Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
13937 getContext().toCharUnitsFromBits(128));
13938
13939 Value *Exchange = Builder.CreateOr(
13940 Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
13941 ExchangeLow128);
13942
13943 Value *Comparand = Builder.CreateLoad(ComparandResult);
13944
13945 AtomicCmpXchgInst *CXI =
13946 Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
13947 AtomicOrdering::SequentiallyConsistent,
13948 AtomicOrdering::SequentiallyConsistent);
13949 CXI->setVolatile(true);
13950
13951 // Write the result back to the inout pointer.
13952 Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
13953
13954 // Get the success boolean and zero extend it to i8.
13955 Value *Success = Builder.CreateExtractValue(CXI, 1);
13956 return Builder.CreateZExt(Success, ConvertType(E->getType()));
13957 }
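A caller-side sketch of _InterlockedCompareExchange128 (assuming the MSVC signature from <intrin.h>; function and variable names are illustrative):
#include <intrin.h>
bool TryPublish(__int64 volatile *Dest, __int64 NewHigh, __int64 NewLow,
                __int64 *Expected /* in/out: 2 x __int64 */) {
  return _InterlockedCompareExchange128(Dest, NewHigh, NewLow, Expected);
}
// The two exchange halves are merged into one i128, a volatile seq_cst cmpxchg
// is issued, the old value is written back through Expected, and the success
// flag is zero-extended into the return value.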
13958
13959 case X86::BI_AddressOfReturnAddress: {
13960 Function *F =
13961 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
13962 return Builder.CreateCall(F);
13963 }
13964 case X86::BI__stosb: {
13965 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
13966 // instruction, but it will create a memset that won't be optimized away.
13967 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
13968 }
13969 case X86::BI__ud2:
13970 // llvm.trap makes a ud2a instruction on x86.
13971 return EmitTrapCall(Intrinsic::trap);
13972 case X86::BI__int2c: {
13973 // This syscall signals a driver assertion failure in x86 NT kernels.
13974 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
13975 llvm::InlineAsm *IA =
13976 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
13977 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
13978 getLLVMContext(), llvm::AttributeList::FunctionIndex,
13979 llvm::Attribute::NoReturn);
13980 llvm::CallInst *CI = Builder.CreateCall(IA);
13981 CI->setAttributes(NoReturnAttr);
13982 return CI;
13983 }
13984 case X86::BI__readfsbyte:
13985 case X86::BI__readfsword:
13986 case X86::BI__readfsdword:
13987 case X86::BI__readfsqword: {
13988 llvm::Type *IntTy = ConvertType(E->getType());
13989 Value *Ptr =
13990 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
13991 LoadInst *Load = Builder.CreateAlignedLoad(
13992 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
13993 Load->setVolatile(true);
13994 return Load;
13995 }
13996 case X86::BI__readgsbyte:
13997 case X86::BI__readgsword:
13998 case X86::BI__readgsdword:
13999 case X86::BI__readgsqword: {
14000 llvm::Type *IntTy = ConvertType(E->getType());
14001 Value *Ptr =
14002 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14003 LoadInst *Load = Builder.CreateAlignedLoad(
14004 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14005 Load->setVolatile(true);
14006 return Load;
14007 }
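A usage sketch for the segment-register reads above (a guess at a typical Windows x64 use; the 0x30 TEB offset is an assumption, not from this file):
#include <intrin.h>
void *CurrentTeb(void) {
  return (void *)__readgsqword(0x30); // TEB self pointer on Windows x64
}
// The offset becomes a pointer in LLVM address space 256 (the GS segment) and is
// read with a volatile load; the __readfs* builtins above use address space 257.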
14008 case X86::BI__builtin_ia32_paddsb512:
14009 case X86::BI__builtin_ia32_paddsw512:
14010 case X86::BI__builtin_ia32_paddsb256:
14011 case X86::BI__builtin_ia32_paddsw256:
14012 case X86::BI__builtin_ia32_paddsb128:
14013 case X86::BI__builtin_ia32_paddsw128:
14014 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14015 case X86::BI__builtin_ia32_paddusb512:
14016 case X86::BI__builtin_ia32_paddusw512:
14017 case X86::BI__builtin_ia32_paddusb256:
14018 case X86::BI__builtin_ia32_paddusw256:
14019 case X86::BI__builtin_ia32_paddusb128:
14020 case X86::BI__builtin_ia32_paddusw128:
14021 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14022 case X86::BI__builtin_ia32_psubsb512:
14023 case X86::BI__builtin_ia32_psubsw512:
14024 case X86::BI__builtin_ia32_psubsb256:
14025 case X86::BI__builtin_ia32_psubsw256:
14026 case X86::BI__builtin_ia32_psubsb128:
14027 case X86::BI__builtin_ia32_psubsw128:
14028 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14029 case X86::BI__builtin_ia32_psubusb512:
14030 case X86::BI__builtin_ia32_psubusw512:
14031 case X86::BI__builtin_ia32_psubusb256:
14032 case X86::BI__builtin_ia32_psubusw256:
14033 case X86::BI__builtin_ia32_psubusb128:
14034 case X86::BI__builtin_ia32_psubusw128:
14035 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
14036 }
14037}
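A sketch of the user-facing side of the saturating-arithmetic cases that close the X86 switch above (assuming SSE2 and <emmintrin.h>):
#include <emmintrin.h>
__m128i SatAddBytes(__m128i A, __m128i B) {
  return _mm_adds_epi8(A, B); // __builtin_ia32_paddsb128
}
// Rather than an x86-specific intrinsic this lowers to the generic
// llvm.sadd.sat.v16i8; the unsigned and subtracting variants map to
// uadd.sat, ssub.sat and usub.sat in the same way.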
14038
14039Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
14040 const CallExpr *E) {
14041 SmallVector<Value*, 4> Ops;
14042
14043 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
14044 Ops.push_back(EmitScalarExpr(E->getArg(i)));
14045
14046 Intrinsic::ID ID = Intrinsic::not_intrinsic;
14047
14048 switch (BuiltinID) {
14049 default: return nullptr;
14050
14051 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
14052 // call __builtin_readcyclecounter.
14053 case PPC::BI__builtin_ppc_get_timebase:
14054 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
14055
14056 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
14057 case PPC::BI__builtin_altivec_lvx:
14058 case PPC::BI__builtin_altivec_lvxl:
14059 case PPC::BI__builtin_altivec_lvebx:
14060 case PPC::BI__builtin_altivec_lvehx:
14061 case PPC::BI__builtin_altivec_lvewx:
14062 case PPC::BI__builtin_altivec_lvsl:
14063 case PPC::BI__builtin_altivec_lvsr:
14064 case PPC::BI__builtin_vsx_lxvd2x:
14065 case PPC::BI__builtin_vsx_lxvw4x:
14066 case PPC::BI__builtin_vsx_lxvd2x_be:
14067 case PPC::BI__builtin_vsx_lxvw4x_be:
14068 case PPC::BI__builtin_vsx_lxvl:
14069 case PPC::BI__builtin_vsx_lxvll:
14070 {
14071 if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
14072 BuiltinID == PPC::BI__builtin_vsx_lxvll){
14073 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
14074 }else {
14075 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14076 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14077 Ops.pop_back();
14078 }
14079
14080 switch (BuiltinID) {
14081 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
14082 case PPC::BI__builtin_altivec_lvx:
14083 ID = Intrinsic::ppc_altivec_lvx;
14084 break;
14085 case PPC::BI__builtin_altivec_lvxl:
14086 ID = Intrinsic::ppc_altivec_lvxl;
14087 break;
14088 case PPC::BI__builtin_altivec_lvebx:
14089 ID = Intrinsic::ppc_altivec_lvebx;
14090 break;
14091 case PPC::BI__builtin_altivec_lvehx:
14092 ID = Intrinsic::ppc_altivec_lvehx;
14093 break;
14094 case PPC::BI__builtin_altivec_lvewx:
14095 ID = Intrinsic::ppc_altivec_lvewx;
14096 break;
14097 case PPC::BI__builtin_altivec_lvsl:
14098 ID = Intrinsic::ppc_altivec_lvsl;
14099 break;
14100 case PPC::BI__builtin_altivec_lvsr:
14101 ID = Intrinsic::ppc_altivec_lvsr;
14102 break;
14103 case PPC::BI__builtin_vsx_lxvd2x:
14104 ID = Intrinsic::ppc_vsx_lxvd2x;
14105 break;
14106 case PPC::BI__builtin_vsx_lxvw4x:
14107 ID = Intrinsic::ppc_vsx_lxvw4x;
14108 break;
14109 case PPC::BI__builtin_vsx_lxvd2x_be:
14110 ID = Intrinsic::ppc_vsx_lxvd2x_be;
14111 break;
14112 case PPC::BI__builtin_vsx_lxvw4x_be:
14113 ID = Intrinsic::ppc_vsx_lxvw4x_be;
14114 break;
14115 case PPC::BI__builtin_vsx_lxvl:
14116 ID = Intrinsic::ppc_vsx_lxvl;
14117 break;
14118 case PPC::BI__builtin_vsx_lxvll:
14119 ID = Intrinsic::ppc_vsx_lxvll;
14120 break;
14121 }
14122 llvm::Function *F = CGM.getIntrinsic(ID);
14123 return Builder.CreateCall(F, Ops, "");
14124 }
14125
14126 // vec_st, vec_xst_be
14127 case PPC::BI__builtin_altivec_stvx:
14128 case PPC::BI__builtin_altivec_stvxl:
14129 case PPC::BI__builtin_altivec_stvebx:
14130 case PPC::BI__builtin_altivec_stvehx:
14131 case PPC::BI__builtin_altivec_stvewx:
14132 case PPC::BI__builtin_vsx_stxvd2x:
14133 case PPC::BI__builtin_vsx_stxvw4x:
14134 case PPC::BI__builtin_vsx_stxvd2x_be:
14135 case PPC::BI__builtin_vsx_stxvw4x_be:
14136 case PPC::BI__builtin_vsx_stxvl:
14137 case PPC::BI__builtin_vsx_stxvll:
14138 {
14139 if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
14140 BuiltinID == PPC::BI__builtin_vsx_stxvll ){
14141 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14142 }else {
14143 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14144 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14145 Ops.pop_back();
14146 }
14147
14148 switch (BuiltinID) {
14149 default: llvm_unreachable("Unsupported st intrinsic!");
14150 case PPC::BI__builtin_altivec_stvx:
14151 ID = Intrinsic::ppc_altivec_stvx;
14152 break;
14153 case PPC::BI__builtin_altivec_stvxl:
14154 ID = Intrinsic::ppc_altivec_stvxl;
14155 break;
14156 case PPC::BI__builtin_altivec_stvebx:
14157 ID = Intrinsic::ppc_altivec_stvebx;
14158 break;
14159 case PPC::BI__builtin_altivec_stvehx:
14160 ID = Intrinsic::ppc_altivec_stvehx;
14161 break;
14162 case PPC::BI__builtin_altivec_stvewx:
14163 ID = Intrinsic::ppc_altivec_stvewx;
14164 break;
14165 case PPC::BI__builtin_vsx_stxvd2x:
14166 ID = Intrinsic::ppc_vsx_stxvd2x;
14167 break;
14168 case PPC::BI__builtin_vsx_stxvw4x:
14169 ID = Intrinsic::ppc_vsx_stxvw4x;
14170 break;
14171 case PPC::BI__builtin_vsx_stxvd2x_be:
14172 ID = Intrinsic::ppc_vsx_stxvd2x_be;
14173 break;
14174 case PPC::BI__builtin_vsx_stxvw4x_be:
14175 ID = Intrinsic::ppc_vsx_stxvw4x_be;
14176 break;
14177 case PPC::BI__builtin_vsx_stxvl:
14178 ID = Intrinsic::ppc_vsx_stxvl;
14179 break;
14180 case PPC::BI__builtin_vsx_stxvll:
14181 ID = Intrinsic::ppc_vsx_stxvll;
14182 break;
14183 }
14184 llvm::Function *F = CGM.getIntrinsic(ID);
14185 return Builder.CreateCall(F, Ops, "");
14186 }
14187 // Square root
14188 case PPC::BI__builtin_vsx_xvsqrtsp:
14189 case PPC::BI__builtin_vsx_xvsqrtdp: {
14190 llvm::Type *ResultType = ConvertType(E->getType());
14191 Value *X = EmitScalarExpr(E->getArg(0));
14192 if (Builder.getIsFPConstrained()) {
14193 llvm::Function *F = CGM.getIntrinsic(
14194 Intrinsic::experimental_constrained_sqrt, ResultType);
14195 return Builder.CreateConstrainedFPCall(F, X);
14196 } else {
14197 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14198 return Builder.CreateCall(F, X);
14199 }
14200 }
14201 // Count leading zeros
14202 case PPC::BI__builtin_altivec_vclzb:
14203 case PPC::BI__builtin_altivec_vclzh:
14204 case PPC::BI__builtin_altivec_vclzw:
14205 case PPC::BI__builtin_altivec_vclzd: {
14206 llvm::Type *ResultType = ConvertType(E->getType());
14207 Value *X = EmitScalarExpr(E->getArg(0));
14208 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14209 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14210 return Builder.CreateCall(F, {X, Undef});
14211 }
14212 case PPC::BI__builtin_altivec_vctzb:
14213 case PPC::BI__builtin_altivec_vctzh:
14214 case PPC::BI__builtin_altivec_vctzw:
14215 case PPC::BI__builtin_altivec_vctzd: {
14216 llvm::Type *ResultType = ConvertType(E->getType());
14217 Value *X = EmitScalarExpr(E->getArg(0));
14218 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14219 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14220 return Builder.CreateCall(F, {X, Undef});
14221 }
14222 case PPC::BI__builtin_altivec_vpopcntb:
14223 case PPC::BI__builtin_altivec_vpopcnth:
14224 case PPC::BI__builtin_altivec_vpopcntw:
14225 case PPC::BI__builtin_altivec_vpopcntd: {
14226 llvm::Type *ResultType = ConvertType(E->getType());
14227 Value *X = EmitScalarExpr(E->getArg(0));
14228 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14229 return Builder.CreateCall(F, X);
14230 }
14231 // Copy sign
14232 case PPC::BI__builtin_vsx_xvcpsgnsp:
14233 case PPC::BI__builtin_vsx_xvcpsgndp: {
14234 llvm::Type *ResultType = ConvertType(E->getType());
14235 Value *X = EmitScalarExpr(E->getArg(0));
14236 Value *Y = EmitScalarExpr(E->getArg(1));
14237 ID = Intrinsic::copysign;
14238 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14239 return Builder.CreateCall(F, {X, Y});
14240 }
14241 // Rounding/truncation
14242 case PPC::BI__builtin_vsx_xvrspip:
14243 case PPC::BI__builtin_vsx_xvrdpip:
14244 case PPC::BI__builtin_vsx_xvrdpim:
14245 case PPC::BI__builtin_vsx_xvrspim:
14246 case PPC::BI__builtin_vsx_xvrdpi:
14247 case PPC::BI__builtin_vsx_xvrspi:
14248 case PPC::BI__builtin_vsx_xvrdpic:
14249 case PPC::BI__builtin_vsx_xvrspic:
14250 case PPC::BI__builtin_vsx_xvrdpiz:
14251 case PPC::BI__builtin_vsx_xvrspiz: {
14252 llvm::Type *ResultType = ConvertType(E->getType());
14253 Value *X = EmitScalarExpr(E->getArg(0));
14254 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
14255 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
14256 ID = Builder.getIsFPConstrained()
14257 ? Intrinsic::experimental_constrained_floor
14258 : Intrinsic::floor;
14259 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
14260 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
14261 ID = Builder.getIsFPConstrained()
14262 ? Intrinsic::experimental_constrained_round
14263 : Intrinsic::round;
14264 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
14265 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
14266 ID = Builder.getIsFPConstrained()
14267 ? Intrinsic::experimental_constrained_rint
14268 : Intrinsic::rint;
14269 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
14270 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
14271 ID = Builder.getIsFPConstrained()
14272 ? Intrinsic::experimental_constrained_ceil
14273 : Intrinsic::ceil;
14274 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
14275 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
14276 ID = Builder.getIsFPConstrained()
14277 ? Intrinsic::experimental_constrained_trunc
14278 : Intrinsic::trunc;
14279 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14280 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
14281 : Builder.CreateCall(F, X);
14282 }
14283
14284 // Absolute value
14285 case PPC::BI__builtin_vsx_xvabsdp:
14286 case PPC::BI__builtin_vsx_xvabssp: {
14287 llvm::Type *ResultType = ConvertType(E->getType());
14288 Value *X = EmitScalarExpr(E->getArg(0));
14289 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
14290 return Builder.CreateCall(F, X);
14291 }
14292
14293 // FMA variations
14294 case PPC::BI__builtin_vsx_xvmaddadp:
14295 case PPC::BI__builtin_vsx_xvmaddasp:
14296 case PPC::BI__builtin_vsx_xvnmaddadp:
14297 case PPC::BI__builtin_vsx_xvnmaddasp:
14298 case PPC::BI__builtin_vsx_xvmsubadp:
14299 case PPC::BI__builtin_vsx_xvmsubasp:
14300 case PPC::BI__builtin_vsx_xvnmsubadp:
14301 case PPC::BI__builtin_vsx_xvnmsubasp: {
14302 llvm::Type *ResultType = ConvertType(E->getType());
14303 Value *X = EmitScalarExpr(E->getArg(0));
14304 Value *Y = EmitScalarExpr(E->getArg(1));
14305 Value *Z = EmitScalarExpr(E->getArg(2));
14306 llvm::Function *F;
14307 if (Builder.getIsFPConstrained())
14308 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14309 else
14310 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14311 switch (BuiltinID) {
14312 case PPC::BI__builtin_vsx_xvmaddadp:
14313 case PPC::BI__builtin_vsx_xvmaddasp:
14314 if (Builder.getIsFPConstrained())
14315 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
14316 else
14317 return Builder.CreateCall(F, {X, Y, Z});
14318 case PPC::BI__builtin_vsx_xvnmaddadp:
14319 case PPC::BI__builtin_vsx_xvnmaddasp:
14320 if (Builder.getIsFPConstrained())
14321 return Builder.CreateFNeg(
14322 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
14323 else
14324 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
14325 case PPC::BI__builtin_vsx_xvmsubadp:
14326 case PPC::BI__builtin_vsx_xvmsubasp:
14327 if (Builder.getIsFPConstrained())
14328 return Builder.CreateConstrainedFPCall(
14329 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14330 else
14331 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14332 case PPC::BI__builtin_vsx_xvnmsubadp:
14333 case PPC::BI__builtin_vsx_xvnmsubasp:
14334 if (Builder.getIsFPConstrained())
14335 return Builder.CreateFNeg(
14336 Builder.CreateConstrainedFPCall(
14337 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14338 "neg");
14339 else
14340 return Builder.CreateFNeg(
14341 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14342 "neg");
14343 }
14344 llvm_unreachable("Unknown FMA operation");
14345 return nullptr; // Suppress no-return warning
14346 }
14347
14348 case PPC::BI__builtin_vsx_insertword: {
14349 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
14350
14351 // The third argument is a compile time constant int. It must be clamped
14352 // to the range [0, 12].
14353 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14354 assert(ArgCI &&
14355 "Third arg to xxinsertw intrinsic must be constant integer");
14356 const int64_t MaxIndex = 12;
14357 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14358
14359 // The builtin semantics don't exactly match the xxinsertw instruction's
14360 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
14361 // word from the first argument, and inserts it in the second argument. The
14362 // instruction extracts the word from its second input register and inserts
14363 // it into its first input register, so swap the first and second arguments.
14364 std::swap(Ops[0], Ops[1]);
14365
14366 // Need to cast the second argument from a vector of unsigned int to a
14367 // vector of long long.
14368 Ops[1] =
14369 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14370
14371 if (getTarget().isLittleEndian()) {
14372 // Reverse the double words in the vector we will extract from.
14373 Ops[0] =
14374 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14375 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
14376
14377 // Reverse the index.
14378 Index = MaxIndex - Index;
14379 }
14380
14381 // Intrinsic expects the first arg to be a vector of int.
14382 Ops[0] =
14383 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14384 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
14385 return Builder.CreateCall(F, Ops);
14386 }
14387
14388 case PPC::BI__builtin_vsx_extractuword: {
14389 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
14390
14391 // Intrinsic expects the first argument to be a vector of doublewords.
14392 Ops[0] =
14393 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14394
14395 // The second argument is a compile time constant int that needs to
14396 // be clamped to the range [0, 12].
14397 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
14398 assert(ArgCI &&
14399 "Second Arg to xxextractuw intrinsic must be a constant integer!");
14400 const int64_t MaxIndex = 12;
14401 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14402
14403 if (getTarget().isLittleEndian()) {
14404 // Reverse the index.
14405 Index = MaxIndex - Index;
14406 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14407
14408 // Emit the call, then reverse the double words of the results vector.
14409 Value *Call = Builder.CreateCall(F, Ops);
14410
14411 Value *ShuffleCall =
14412 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
14413 return ShuffleCall;
14414 } else {
14415 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14416 return Builder.CreateCall(F, Ops);
14417 }
14418 }
14419
14420 case PPC::BI__builtin_vsx_xxpermdi: {
14421 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14422 assert(ArgCI && "Third arg must be constant integer!")((ArgCI && "Third arg must be constant integer!") ? static_cast
<void> (0) : __assert_fail ("ArgCI && \"Third arg must be constant integer!\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14422, __PRETTY_FUNCTION__))
;
14423
14424 unsigned Index = ArgCI->getZExtValue();
14425 Ops[0] =
14426 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14427 Ops[1] =
14428 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14429
14430 // Account for endianness by treating this as just a shuffle. So we use the
14431 // same indices for both LE and BE in order to produce expected results in
14432 // both cases.
14433 int ElemIdx0 = (Index & 2) >> 1;
14434 int ElemIdx1 = 2 + (Index & 1);
14435
14436 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
14437 Value *ShuffleCall =
14438 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14439 QualType BIRetType = E->getType();
14440 auto RetTy = ConvertType(BIRetType);
14441 return Builder.CreateBitCast(ShuffleCall, RetTy);
14442 }
14443
14444 case PPC::BI__builtin_vsx_xxsldwi: {
14445 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14446 assert(ArgCI && "Third argument must be a compile time constant")((ArgCI && "Third argument must be a compile time constant"
) ? static_cast<void> (0) : __assert_fail ("ArgCI && \"Third argument must be a compile time constant\""
, "/build/llvm-toolchain-snapshot-12~++20200917111122+b03c2b8395b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14446, __PRETTY_FUNCTION__))
;
14447 unsigned Index = ArgCI->getZExtValue() & 0x3;
14448 Ops[0] =
14449 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14450 Ops[1] =
14451 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
14452
14453 // Create a shuffle mask
14454 int ElemIdx0;
14455 int ElemIdx1;
14456 int ElemIdx2;
14457 int ElemIdx3;
14458 if (getTarget().isLittleEndian()) {
14459 // Little endian element N comes from element 8+N-Index of the
14460 // concatenated wide vector (of course, using modulo arithmetic on
14461 // the total number of elements).
14462 ElemIdx0 = (8 - Index) % 8;
14463 ElemIdx1 = (9 - Index) % 8;
14464 ElemIdx2 = (10 - Index) % 8;
14465 ElemIdx3 = (11 - Index) % 8;
14466 } else {
14467 // Big endian ElemIdx<N> = Index + N
14468 ElemIdx0 = Index;
14469 ElemIdx1 = Index + 1;
14470 ElemIdx2 = Index + 2;
14471 ElemIdx3 = Index + 3;
14472 }
14473
14474 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
14475 Value *ShuffleCall =
14476 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14477 QualType BIRetType = E->getType();
14478 auto RetTy = ConvertType(BIRetType);
14479 return Builder.CreateBitCast(ShuffleCall, RetTy);
14480 }
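A worked example of the shuffle-mask arithmetic above, using Index = 1 (the values follow directly from the formulas in the source):
little endian: {(8-1)%8, (9-1)%8, (10-1)%8, (11-1)%8} == {7, 0, 1, 2}
big endian:    {1, 2, 3, 4}
Either mask is then handed to CreateShuffleVector as ShuffleElts to select four words from the concatenated 8 x i32 vector.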
14481
14482 case PPC::BI__builtin_pack_vector_int128: {
14483 bool isLittleEndian = getTarget().isLittleEndian();
14484 Value *UndefValue =
14485 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
14486 Value *Res = Builder.CreateInsertElement(
14487 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
14488 Res = Builder.CreateInsertElement(Res, Ops[1],
14489 (uint64_t)(isLittleEndian ? 0 : 1));
14490 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
14491 }
14492
14493 case PPC::BI__builtin_unpack_vector_int128: {
14494 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
14495 Value *Unpacked = Builder.CreateBitCast(
14496 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
14497
14498 if (getTarget().isLittleEndian())
14499 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
14500
14501 return Builder.CreateExtractElement(Unpacked, Index);
14502 }
14503 }
14504}
14505
14506namespace {
14507// If \p E is not a null pointer, insert an address space cast to match the
14508// return type of \p E if necessary.
14509Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
14510 const CallExpr *E = nullptr) {
14511 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
14512 auto *Call = CGF.Builder.CreateCall(F);
14513 Call->addAttribute(
14514 AttributeList::ReturnIndex,
14515 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
14516 Call->addAttribute(AttributeList::ReturnIndex,
14517 Attribute::getWithAlignment(Call->getContext(), Align(4)));
14518 if (!E)
14519 return Call;
14520 QualType BuiltinRetType = E->getType();
14521 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
14522 if (RetTy == Call->getType())
14523 return Call;
14524 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
14525}
14526
14527// \p Index is 0, 1, or 2 for the x, y, and z dimensions, respectively.
14528Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
14529 const unsigned XOffset = 4;
14530 auto *DP = EmitAMDGPUDispatchPtr(CGF);
14531 // Indexing the HSA kernel_dispatch_packet struct.
14532 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
14533 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
14534 auto *DstTy =
14535 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
14536 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
14537 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
14538 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
14539 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
14540 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
14541 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
14542 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
14543 llvm::MDNode::get(CGF.getLLVMContext(), None));
14544 return LD;
14545}
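The XOffset + Index * 2 arithmetic above matches the leading fields of the HSA kernel dispatch packet (a sketch of the assumed layout; field names follow the HSA runtime headers, not this file):
// offset 0: uint16_t header
// offset 2: uint16_t setup
// offset 4: uint16_t workgroup_size_x   (Index == 0)
// offset 6: uint16_t workgroup_size_y   (Index == 1)
// offset 8: uint16_t workgroup_size_z   (Index == 2)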
14546} // namespace
14547
14548// For processing memory ordering and memory scope arguments of various
14549// amdgcn builtins.
14550// \p Order takes a C++11-compatible memory-ordering specifier and converts
14551// it into LLVM's memory ordering specifier using the atomic C ABI, and writes
14552// it to \p AO. \p Scope takes a const char * and converts it into an
14553// AMDGCN-specific SyncScopeID and writes it to \p SSID.
14554bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
14555 llvm::AtomicOrdering &AO,
14556 llvm::SyncScope::ID &SSID) {
14557 if (isa<llvm::ConstantInt>(Order)) {
3) Assuming 'Order' is not a 'ConstantInt'
4) Taking false branch
14558 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
14559
14560 // Map C11/C++11 memory ordering to LLVM memory ordering
14561 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
14562 case llvm::AtomicOrderingCABI::acquire:
14563 AO = llvm::AtomicOrdering::Acquire;
14564 break;
14565 case llvm::AtomicOrderingCABI::release:
14566 AO = llvm::AtomicOrdering::Release;
14567 break;
14568 case llvm::AtomicOrderingCABI::acq_rel:
14569 AO = llvm::AtomicOrdering::AcquireRelease;
14570 break;
14571 case llvm::AtomicOrderingCABI::seq_cst:
14572 AO = llvm::AtomicOrdering::SequentiallyConsistent;
14573 break;
14574 case llvm::AtomicOrderingCABI::consume:
14575 case llvm::AtomicOrderingCABI::relaxed:
14576 break;
14577 }
14578
14579 StringRef scp;
14580 llvm::getConstantStringInfo(Scope, scp);
14581 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
14582 return true;
14583 }
14584 return false;
5) Returning zero, which participates in a condition later
14585}
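A caller-side sketch of the arguments this helper consumes (assuming an amdgcn target; the __ATOMIC_* constants come from the C11/C++11 atomics ABI):
void ReleaseToWorkgroup(void) {
  __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
}
// A constant __ATOMIC_RELEASE maps to llvm::AtomicOrdering::Release and the
// scope string is interned as a sync-scope ID. When Order is not a ConstantInt
// the helper returns false without writing AO or SSID, which is the path the
// numbered analyzer notes above describe.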
14586
14587Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
14588 const CallExpr *E) {
14589 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
14590 llvm::SyncScope::ID SSID;
14591 switch (BuiltinID) {
1) Control jumps to 'case BI__builtin_amdgcn_fence:' at line 14798
14592 case AMDGPU::BI__builtin_amdgcn_div_scale:
14593 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
14594 // Translate from the intrinsic's struct return to the builtin's out
14595 // argument.
14596
14597 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
14598
14599 llvm::Value *X = EmitScalarExpr(E->getArg(0));
14600 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
14601 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
14602
14603 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
14604 X->getType());
14605
14606 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
14607
14608 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
14609 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
14610
14611 llvm::Type *RealFlagType
14612 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
14613
14614 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
14615 Builder.CreateStore(FlagExt, FlagOutPtr);
14616 return Result;
14617 }
14618 case AMDGPU::BI__builtin_amdgcn_div_fmas:
14619 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
14620 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14621 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14622 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14623 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
14624
14625 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
14626 Src0->getType());
14627 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
14628 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
14629 }
14630
14631 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
14632 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
14633 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
14634 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
14635 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
14636 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
14637 llvm::SmallVector<llvm::Value *, 6> Args;
14638 for (unsigned I = 0; I != E->getNumArgs(); ++I)
14639 Args.push_back(EmitScalarExpr(E->getArg(I)));
14640 assert(Args.size() == 5 || Args.size() == 6);
14641 if (Args.size() == 5)
14642 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
14643 Function *F =
14644 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
14645 return Builder.CreateCall(F, Args);
14646 }
14647 case AMDGPU::BI__builtin_amdgcn_div_fixup:
14648 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
14649 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
14650 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
14651 case AMDGPU::BI__builtin_amdgcn_trig_preop:
14652 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
14653 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
14654 case AMDGPU::BI__builtin_amdgcn_rcp:
14655 case AMDGPU::BI__builtin_amdgcn_rcpf:
14656 case AMDGPU::BI__builtin_amdgcn_rcph:
14657 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
14658 case AMDGPU::BI__builtin_amdgcn_sqrt:
14659 case AMDGPU::BI__builtin_amdgcn_sqrtf:
14660 case AMDGPU::BI__builtin_amdgcn_sqrth:
14661 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
14662 case AMDGPU::BI__builtin_amdgcn_rsq:
14663 case AMDGPU::BI__builtin_amdgcn_rsqf:
14664 case AMDGPU::BI__builtin_amdgcn_rsqh:
14665 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
14666 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
14667 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
14668 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
14669 case AMDGPU::BI__builtin_amdgcn_sinf:
14670 case AMDGPU::BI__builtin_amdgcn_sinh:
14671 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
14672 case AMDGPU::BI__builtin_amdgcn_cosf:
14673 case AMDGPU::BI__builtin_amdgcn_cosh:
14674 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
14675 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
14676 return EmitAMDGPUDispatchPtr(*this, E);
14677 case AMDGPU::BI__builtin_amdgcn_log_clampf:
14678 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
14679 case AMDGPU::BI__builtin_amdgcn_ldexp:
14680 case AMDGPU::BI__builtin_amdgcn_ldexpf:
14681 case AMDGPU::BI__builtin_amdgcn_ldexph:
14682 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
14683 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
14684 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
14685 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
14686 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
14687 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
14688 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
14689 Value *Src0 = EmitScalarExpr(E->getArg(0));
14690 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
14691 { Builder.getInt32Ty(), Src0->getType() });
14692 return Builder.CreateCall(F, Src0);
14693 }
14694 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
14695 Value *Src0 = EmitScalarExpr(E->getArg(0));
14696 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
14697 { Builder.getInt16Ty(), Src0->getType() });
14698 return Builder.CreateCall(F, Src0);
14699 }
14700 case AMDGPU::BI__builtin_amdgcn_fract:
14701 case AMDGPU::BI__builtin_amdgcn_fractf:
14702 case AMDGPU::BI__builtin_amdgcn_fracth:
14703 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
14704 case AMDGPU::BI__builtin_amdgcn_lerp:
14705 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
14706 case AMDGPU::BI__builtin_amdgcn_ubfe:
14707 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
14708 case AMDGPU::BI__builtin_amdgcn_sbfe:
14709 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
14710 case AMDGPU::BI__builtin_amdgcn_uicmp:
14711 case AMDGPU::BI__builtin_amdgcn_uicmpl:
14712 case AMDGPU::BI__builtin_amdgcn_sicmp:
14713 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
14714 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14715 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14716 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14717
14718 // FIXME-GFX10: How should a 32-bit mask be handled?
14719 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
14720 { Builder.getInt64Ty(), Src0->getType() });
14721 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14722 }
14723 case AMDGPU::BI__builtin_amdgcn_fcmp:
14724 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
14725 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14726 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14727 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14728
14729 // FIXME-GFX10: How should a 32-bit mask be handled?
14730 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
14731 { Builder.getInt64Ty(), Src0->getType() });
14732 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14733 }
14734 case AMDGPU::BI__builtin_amdgcn_class:
14735 case AMDGPU::BI__builtin_amdgcn_classf:
14736 case AMDGPU::BI__builtin_amdgcn_classh:
14737 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
14738 case AMDGPU::BI__builtin_amdgcn_fmed3f:
14739 case AMDGPU::BI__builtin_amdgcn_fmed3h:
14740 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
14741 case AMDGPU::BI__builtin_amdgcn_ds_append:
14742 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
14743 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
14744 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
14745 Value *Src0 = EmitScalarExpr(E->getArg(0));
14746 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
14747 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
14748 }
14749 case AMDGPU::BI__builtin_amdgcn_read_exec: {
14750 CallInst *CI = cast<CallInst>(
14751 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
14752 CI->setConvergent();
14753 return CI;
14754 }
14755 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
14756 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
14757 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
14758 "exec_lo" : "exec_hi";
14759 CallInst *CI = cast<CallInst>(
14760 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
14761 CI->setConvergent();
14762 return CI;
14763 }
14764 // amdgcn workitem
14765 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
14766 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
14767 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
14768 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
14769 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
14770 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
14771
14772 // amdgcn workgroup size
14773 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
14774 return EmitAMDGPUWorkGroupSize(*this, 0);
14775 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
14776 return EmitAMDGPUWorkGroupSize(*this, 1);
14777 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
14778 return EmitAMDGPUWorkGroupSize(*this, 2);
14779
14780 // r600 intrinsics
14781 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
14782 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
14783 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
14784 case AMDGPU::BI__builtin_r600_read_tidig_x:
14785 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
14786 case AMDGPU::BI__builtin_r600_read_tidig_y:
14787 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
14788 case AMDGPU::BI__builtin_r600_read_tidig_z:
14789 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
14790 case AMDGPU::BI__builtin_amdgcn_alignbit: {
14791 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14792 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14793 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14794 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
14795 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14796 }
14797
14798 case AMDGPU::BI__builtin_amdgcn_fence: {
14799 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
2. Calling 'CodeGenFunction::ProcessOrderScopeAMDGCN'
6. Returning from 'CodeGenFunction::ProcessOrderScopeAMDGCN'
7. Taking false branch
14800 EmitScalarExpr(E->getArg(1)), AO, SSID))
14801 return Builder.CreateFence(AO, SSID);
14802 LLVM_FALLTHROUGH;
14803 }
14804 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
14805 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
14806 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
14807 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
14808 unsigned BuiltinAtomicOp;
8. 'BuiltinAtomicOp' declared without an initial value
14809 llvm::Type *ResultType = ConvertType(E->getType());
14810
14811 switch (BuiltinID) {
9. 'Default' branch taken. Execution continues on line 14822
14812 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
14813 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
14814 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
14815 break;
14816 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
14817 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
14818 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
14819 break;
14820 }
14821
14822 Value *Ptr = EmitScalarExpr(E->getArg(0));
14823 Value *Val = EmitScalarExpr(E->getArg(1));
14824
14825 llvm::Function *F =
14826 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
10. 1st function call argument is an uninitialized value
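One minimal way to close this path (a sketch under the assumption that the fall-through from __builtin_amdgcn_fence is intentional; the actual upstream fix may differ) is to start BuiltinAtomicOp from a sentinel and guard the getIntrinsic call:

    unsigned BuiltinAtomicOp = Intrinsic::not_intrinsic; // hypothetical: known sentinel instead of an uninitialized value
    // ... existing switch over BuiltinID selecting amdgcn_atomic_inc / amdgcn_atomic_dec ...
    if (BuiltinAtomicOp == Intrinsic::not_intrinsic)
      return nullptr; // hypothetical guard so CGM.getIntrinsic never sees garbage on the fence fall-through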
14827
14828 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
14829 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
14830
14831 // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
14832 // scope as unsigned values.
14833 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
14834 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
14835
14836 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
14837 bool Volatile =
14838 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
14839 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
14840
14841 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
14842 }
14843 LLVM_FALLTHROUGH;
14844 }
14845 default:
14846 return nullptr;
14847 }
14848}
14849
14850/// Handle a SystemZ function in which the final argument is a pointer
14851/// to an int that receives the post-instruction CC value. At the LLVM level
14852/// this is represented as a function that returns a {result, cc} pair.
14853static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
14854 unsigned IntrinsicID,
14855 const CallExpr *E) {
14856 unsigned NumArgs = E->getNumArgs() - 1;
14857 SmallVector<Value *, 8> Args(NumArgs);
14858 for (unsigned I = 0; I < NumArgs; ++I)
14859 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
14860 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
14861 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
14862 Value *Call = CGF.Builder.CreateCall(F, Args);
14863 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
14864 CGF.Builder.CreateStore(CC, CCPtr);
14865 return CGF.Builder.CreateExtractValue(Call, 0);
14866}
14867
14868Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
14869 const CallExpr *E) {
14870 switch (BuiltinID) {
14871 case SystemZ::BI__builtin_tbegin: {
14872 Value *TDB = EmitScalarExpr(E->getArg(0));
14873 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
14874 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
14875 return Builder.CreateCall(F, {TDB, Control});
14876 }
14877 case SystemZ::BI__builtin_tbegin_nofloat: {
14878 Value *TDB = EmitScalarExpr(E->getArg(0));
14879 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
14880 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
14881 return Builder.CreateCall(F, {TDB, Control});
14882 }
14883 case SystemZ::BI__builtin_tbeginc: {
14884 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
14885 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
14886 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
14887 return Builder.CreateCall(F, {TDB, Control});
14888 }
14889 case SystemZ::BI__builtin_tabort: {
14890 Value *Data = EmitScalarExpr(E->getArg(0));
14891 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
14892 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
14893 }
14894 case SystemZ::BI__builtin_non_tx_store: {
14895 Value *Address = EmitScalarExpr(E->getArg(0));
14896 Value *Data = EmitScalarExpr(E->getArg(1));
14897 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
14898 return Builder.CreateCall(F, {Data, Address});
14899 }
14900
14901 // Vector builtins. Note that most vector builtins are mapped automatically
14902 // to target-specific LLVM intrinsics. The ones handled specially here can
14903 // be represented via standard LLVM IR, which is preferable because it enables
14904 // common LLVM optimizations.
14905
14906 case SystemZ::BI__builtin_s390_vpopctb:
14907 case SystemZ::BI__builtin_s390_vpopcth:
14908 case SystemZ::BI__builtin_s390_vpopctf:
14909 case SystemZ::BI__builtin_s390_vpopctg: {
14910 llvm::Type *ResultType = ConvertType(E->getType());
14911 Value *X = EmitScalarExpr(E->getArg(0));
14912 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14913 return Builder.CreateCall(F, X);
14914 }
14915
14916 case SystemZ::BI__builtin_s390_vclzb:
14917 case SystemZ::BI__builtin_s390_vclzh:
14918 case SystemZ::BI__builtin_s390_vclzf:
14919 case SystemZ::BI__builtin_s390_vclzg: {
14920 llvm::Type *ResultType = ConvertType(E->getType());
14921 Value *X = EmitScalarExpr(E->getArg(0));
14922 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14923 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14924 return Builder.CreateCall(F, {X, Undef});
14925 }
14926
14927 case SystemZ::BI__builtin_s390_vctzb:
14928 case SystemZ::BI__builtin_s390_vctzh:
14929 case SystemZ::BI__builtin_s390_vctzf:
14930 case SystemZ::BI__builtin_s390_vctzg: {
14931 llvm::Type *ResultType = ConvertType(E->getType());
14932 Value *X = EmitScalarExpr(E->getArg(0));
14933 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14934 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14935 return Builder.CreateCall(F, {X, Undef});
14936 }
14937
14938 case SystemZ::BI__builtin_s390_vfsqsb:
14939 case SystemZ::BI__builtin_s390_vfsqdb: {
14940 llvm::Type *ResultType = ConvertType(E->getType());
14941 Value *X = EmitScalarExpr(E->getArg(0));
14942 if (Builder.getIsFPConstrained()) {
14943 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
14944 return Builder.CreateConstrainedFPCall(F, { X });
14945 } else {
14946 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14947 return Builder.CreateCall(F, X);
14948 }
14949 }
14950 case SystemZ::BI__builtin_s390_vfmasb:
14951 case SystemZ::BI__builtin_s390_vfmadb: {
14952 llvm::Type *ResultType = ConvertType(E->getType());
14953 Value *X = EmitScalarExpr(E->getArg(0));
14954 Value *Y = EmitScalarExpr(E->getArg(1));
14955 Value *Z = EmitScalarExpr(E->getArg(2));
14956 if (Builder.getIsFPConstrained()) {
14957 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14958 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
14959 } else {
14960 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14961 return Builder.CreateCall(F, {X, Y, Z});
14962 }
14963 }
14964 case SystemZ::BI__builtin_s390_vfmssb:
14965 case SystemZ::BI__builtin_s390_vfmsdb: {
14966 llvm::Type *ResultType = ConvertType(E->getType());
14967 Value *X = EmitScalarExpr(E->getArg(0));
14968 Value *Y = EmitScalarExpr(E->getArg(1));
14969 Value *Z = EmitScalarExpr(E->getArg(2));
14970 if (Builder.getIsFPConstrained()) {
14971 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14972 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14973 } else {
14974 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14975 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14976 }
14977 }
14978 case SystemZ::BI__builtin_s390_vfnmasb:
14979 case SystemZ::BI__builtin_s390_vfnmadb: {
14980 llvm::Type *ResultType = ConvertType(E->getType());
14981 Value *X = EmitScalarExpr(E->getArg(0));
14982 Value *Y = EmitScalarExpr(E->getArg(1));
14983 Value *Z = EmitScalarExpr(E->getArg(2));
14984 if (Builder.getIsFPConstrained()) {
14985 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14986 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
14987 } else {
14988 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14989 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
14990 }
14991 }
14992 case SystemZ::BI__builtin_s390_vfnmssb:
14993 case SystemZ::BI__builtin_s390_vfnmsdb: {
14994 llvm::Type *ResultType = ConvertType(E->getType());
14995 Value *X = EmitScalarExpr(E->getArg(0));
14996 Value *Y = EmitScalarExpr(E->getArg(1));
14997 Value *Z = EmitScalarExpr(E->getArg(2));
14998 if (Builder.getIsFPConstrained()) {
14999 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15000 Value *NegZ = Builder.CreateFNeg(Z, "sub");
15001 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
15002 } else {
15003 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15004 Value *NegZ = Builder.CreateFNeg(Z, "neg");
15005 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
15006 }
15007 }
15008 case SystemZ::BI__builtin_s390_vflpsb:
15009 case SystemZ::BI__builtin_s390_vflpdb: {
15010 llvm::Type *ResultType = ConvertType(E->getType());
15011 Value *X = EmitScalarExpr(E->getArg(0));
15012 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15013 return Builder.CreateCall(F, X);
15014 }
15015 case SystemZ::BI__builtin_s390_vflnsb:
15016 case SystemZ::BI__builtin_s390_vflndb: {
15017 llvm::Type *ResultType = ConvertType(E->getType());
15018 Value *X = EmitScalarExpr(E->getArg(0));
15019 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15020 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
15021 }
15022 case SystemZ::BI__builtin_s390_vfisb:
15023 case SystemZ::BI__builtin_s390_vfidb: {
15024 llvm::Type *ResultType = ConvertType(E->getType());
15025 Value *X = EmitScalarExpr(E->getArg(0));
15026 // Constant-fold the M4 and M5 mask arguments.
15027 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
15028 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15029 // Check whether this instance can be represented via an LLVM standard
15030 // intrinsic. We only support some combinations of M4 and M5.
15031 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15032 Intrinsic::ID CI;
15033 switch (M4.getZExtValue()) {
15034 default: break;
15035 case 0: // IEEE-inexact exception allowed
15036 switch (M5.getZExtValue()) {
15037 default: break;
15038 case 0: ID = Intrinsic::rint;
15039 CI = Intrinsic::experimental_constrained_rint; break;
15040 }
15041 break;
15042 case 4: // IEEE-inexact exception suppressed
15043 switch (M5.getZExtValue()) {
15044 default: break;
15045 case 0: ID = Intrinsic::nearbyint;
15046 CI = Intrinsic::experimental_constrained_nearbyint; break;
15047 case 1: ID = Intrinsic::round;
15048 CI = Intrinsic::experimental_constrained_round; break;
15049 case 5: ID = Intrinsic::trunc;
15050 CI = Intrinsic::experimental_constrained_trunc; break;
15051 case 6: ID = Intrinsic::ceil;
15052 CI = Intrinsic::experimental_constrained_ceil; break;
15053 case 7: ID = Intrinsic::floor;
15054 CI = Intrinsic::experimental_constrained_floor; break;
15055 }
15056 break;
15057 }
15058 if (ID != Intrinsic::not_intrinsic) {
15059 if (Builder.getIsFPConstrained()) {
15060 Function *F = CGM.getIntrinsic(CI, ResultType);
15061 return Builder.CreateConstrainedFPCall(F, X);
15062 } else {
15063 Function *F = CGM.getIntrinsic(ID, ResultType);
15064 return Builder.CreateCall(F, X);
15065 }
15066 }
15067 switch (BuiltinID) { // FIXME: constrained version?
15068 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
15069 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
15070 default: llvm_unreachable("Unknown BuiltinID");
15071 }
15072 Function *F = CGM.getIntrinsic(ID);
15073 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15074 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
15075 return Builder.CreateCall(F, {X, M4Value, M5Value});
15076 }
15077 case SystemZ::BI__builtin_s390_vfmaxsb:
15078 case SystemZ::BI__builtin_s390_vfmaxdb: {
15079 llvm::Type *ResultType = ConvertType(E->getType());
15080 Value *X = EmitScalarExpr(E->getArg(0));
15081 Value *Y = EmitScalarExpr(E->getArg(1));
15082 // Constant-fold the M4 mask argument.
15083 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15084 // Check whether this instance can be represented via an LLVM standard
15085 // intrinsic. We only support some values of M4.
15086 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15087 Intrinsic::ID CI;
15088 switch (M4.getZExtValue()) {
15089 default: break;
15090 case 4: ID = Intrinsic::maxnum;
15091 CI = Intrinsic::experimental_constrained_maxnum; break;
15092 }
15093 if (ID != Intrinsic::not_intrinsic) {
15094 if (Builder.getIsFPConstrained()) {
15095 Function *F = CGM.getIntrinsic(CI, ResultType);
15096 return Builder.CreateConstrainedFPCall(F, {X, Y});
15097 } else {
15098 Function *F = CGM.getIntrinsic(ID, ResultType);
15099 return Builder.CreateCall(F, {X, Y});
15100 }
15101 }
15102 switch (BuiltinID) {
15103 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
15104 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
15105 default: llvm_unreachable("Unknown BuiltinID");
15106 }
15107 Function *F = CGM.getIntrinsic(ID);
15108 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15109 return Builder.CreateCall(F, {X, Y, M4Value});
15110 }
15111 case SystemZ::BI__builtin_s390_vfminsb:
15112 case SystemZ::BI__builtin_s390_vfmindb: {
15113 llvm::Type *ResultType = ConvertType(E->getType());
15114 Value *X = EmitScalarExpr(E->getArg(0));
15115 Value *Y = EmitScalarExpr(E->getArg(1));
15116 // Constant-fold the M4 mask argument.
15117 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15118 // Check whether this instance can be represented via an LLVM standard
15119 // intrinsic. We only support some values of M4.
15120 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15121 Intrinsic::ID CI;
15122 switch (M4.getZExtValue()) {
15123 default: break;
15124 case 4: ID = Intrinsic::minnum;
15125 CI = Intrinsic::experimental_constrained_minnum; break;
15126 }
15127 if (ID != Intrinsic::not_intrinsic) {
15128 if (Builder.getIsFPConstrained()) {
15129 Function *F = CGM.getIntrinsic(CI, ResultType);
15130 return Builder.CreateConstrainedFPCall(F, {X, Y});
15131 } else {
15132 Function *F = CGM.getIntrinsic(ID, ResultType);
15133 return Builder.CreateCall(F, {X, Y});
15134 }
15135 }
15136 switch (BuiltinID) {
15137 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
15138 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
15139 default: llvm_unreachable("Unknown BuiltinID");
15140 }
15141 Function *F = CGM.getIntrinsic(ID);
15142 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15143 return Builder.CreateCall(F, {X, Y, M4Value});
15144 }
15145
15146 case SystemZ::BI__builtin_s390_vlbrh:
15147 case SystemZ::BI__builtin_s390_vlbrf:
15148 case SystemZ::BI__builtin_s390_vlbrg: {
15149 llvm::Type *ResultType = ConvertType(E->getType());
15150 Value *X = EmitScalarExpr(E->getArg(0));
15151 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
15152 return Builder.CreateCall(F, X);
15153 }
15154
15155 // Vector intrinsics that output the post-instruction CC value.
15156
15157#define INTRINSIC_WITH_CC(NAME) \
15158 case SystemZ::BI__builtin_##NAME: \
15159 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
15160
15161 INTRINSIC_WITH_CC(s390_vpkshs);
15162 INTRINSIC_WITH_CC(s390_vpksfs);
15163 INTRINSIC_WITH_CC(s390_vpksgs);
15164
15165 INTRINSIC_WITH_CC(s390_vpklshs);
15166 INTRINSIC_WITH_CC(s390_vpklsfs);
15167 INTRINSIC_WITH_CC(s390_vpklsgs);
15168
15169 INTRINSIC_WITH_CC(s390_vceqbs);
15170 INTRINSIC_WITH_CC(s390_vceqhs);
15171 INTRINSIC_WITH_CC(s390_vceqfs);
15172 INTRINSIC_WITH_CC(s390_vceqgs);
15173
15174 INTRINSIC_WITH_CC(s390_vchbs);
15175 INTRINSIC_WITH_CC(s390_vchhs);
15176 INTRINSIC_WITH_CC(s390_vchfs);
15177 INTRINSIC_WITH_CC(s390_vchgs);
15178
15179 INTRINSIC_WITH_CC(s390_vchlbs);
15180 INTRINSIC_WITH_CC(s390_vchlhs);
15181 INTRINSIC_WITH_CC(s390_vchlfs);
15182 INTRINSIC_WITH_CC(s390_vchlgs);
15183
15184 INTRINSIC_WITH_CC(s390_vfaebs);
15185 INTRINSIC_WITH_CC(s390_vfaehs);
15186 INTRINSIC_WITH_CC(s390_vfaefs);
15187
15188 INTRINSIC_WITH_CC(s390_vfaezbs);
15189 INTRINSIC_WITH_CC(s390_vfaezhs);
15190 INTRINSIC_WITH_CC(s390_vfaezfs);
15191
15192 INTRINSIC_WITH_CC(s390_vfeebs);
15193 INTRINSIC_WITH_CC(s390_vfeehs);
15194 INTRINSIC_WITH_CC(s390_vfeefs);
15195
15196 INTRINSIC_WITH_CC(s390_vfeezbs);
15197 INTRINSIC_WITH_CC(s390_vfeezhs);
15198 INTRINSIC_WITH_CC(s390_vfeezfs);
15199
15200 INTRINSIC_WITH_CC(s390_vfenebs);
15201 INTRINSIC_WITH_CC(s390_vfenehs);
15202 INTRINSIC_WITH_CC(s390_vfenefs);
15203
15204 INTRINSIC_WITH_CC(s390_vfenezbs);
15205 INTRINSIC_WITH_CC(s390_vfenezhs);
15206 INTRINSIC_WITH_CC(s390_vfenezfs);
15207
15208 INTRINSIC_WITH_CC(s390_vistrbs);
15209 INTRINSIC_WITH_CC(s390_vistrhs);
15210 INTRINSIC_WITH_CC(s390_vistrfs);
15211
15212 INTRINSIC_WITH_CC(s390_vstrcbs);
15213 INTRINSIC_WITH_CC(s390_vstrchs);
15214 INTRINSIC_WITH_CC(s390_vstrcfs);
15215
15216 INTRINSIC_WITH_CC(s390_vstrczbs);
15217 INTRINSIC_WITH_CC(s390_vstrczhs);
15218 INTRINSIC_WITH_CC(s390_vstrczfs);
15219
15220 INTRINSIC_WITH_CC(s390_vfcesbs);
15221 INTRINSIC_WITH_CC(s390_vfcedbs);
15222 INTRINSIC_WITH_CC(s390_vfchsbs);
15223 INTRINSIC_WITH_CC(s390_vfchdbs);
15224 INTRINSIC_WITH_CC(s390_vfchesbs);
15225 INTRINSIC_WITH_CC(s390_vfchedbs);
15226
15227 INTRINSIC_WITH_CC(s390_vftcisb);
15228 INTRINSIC_WITH_CC(s390_vftcidb);
15229
15230 INTRINSIC_WITH_CC(s390_vstrsb);
15231 INTRINSIC_WITH_CC(s390_vstrsh);
15232 INTRINSIC_WITH_CC(s390_vstrsf);
15233
15234 INTRINSIC_WITH_CC(s390_vstrszb);
15235 INTRINSIC_WITH_CC(s390_vstrszh);
15236 INTRINSIC_WITH_CC(s390_vstrszf);
15237
15238#undef INTRINSIC_WITH_CC
15239
15240 default:
15241 return nullptr;
15242 }
15243}
15244
15245namespace {
15246// Helper classes for mapping MMA builtins to a particular LLVM intrinsic variant.
15247struct NVPTXMmaLdstInfo {
15248 unsigned NumResults; // Number of elements to load/store
15249 // Intrinsic IDs for row/col variants. 0 if the particular layout is unsupported.
15250 unsigned IID_col;
15251 unsigned IID_row;
15252};
15253
15254#define MMA_INTR(geom_op_type, layout) \
15255 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
15256#define MMA_LDST(n, geom_op_type) \
15257 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
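// For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
// { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//      Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }.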
15258
15259static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
15260 switch (BuiltinID) {
15261 // FP MMA loads
15262 case NVPTX::BI__hmma_m16n16k16_ld_a:
15263 return MMA_LDST(8, m16n16k16_load_a_f16);
15264 case NVPTX::BI__hmma_m16n16k16_ld_b:
15265 return MMA_LDST(8, m16n16k16_load_b_f16);
15266 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15267 return MMA_LDST(4, m16n16k16_load_c_f16);
15268 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15269 return MMA_LDST(8, m16n16k16_load_c_f32);
15270 case NVPTX::BI__hmma_m32n8k16_ld_a:
15271 return MMA_LDST(8, m32n8k16_load_a_f16);
15272 case NVPTX::BI__hmma_m32n8k16_ld_b:
15273 return MMA_LDST(8, m32n8k16_load_b_f16);
15274 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15275 return MMA_LDST(4, m32n8k16_load_c_f16);
15276 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15277 return MMA_LDST(8, m32n8k16_load_c_f32);
15278 case NVPTX::BI__hmma_m8n32k16_ld_a:
15279 return MMA_LDST(8, m8n32k16_load_a_f16);
15280 case NVPTX::BI__hmma_m8n32k16_ld_b:
15281 return MMA_LDST(8, m8n32k16_load_b_f16);
15282 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15283 return MMA_LDST(4, m8n32k16_load_c_f16);
15284 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15285 return MMA_LDST(8, m8n32k16_load_c_f32);
15286
15287 // Integer MMA loads
15288 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15289 return MMA_LDST(2, m16n16k16_load_a_s8);
15290 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15291 return MMA_LDST(2, m16n16k16_load_a_u8);
15292 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15293 return MMA_LDST(2, m16n16k16_load_b_s8);
15294 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15295 return MMA_LDST(2, m16n16k16_load_b_u8);
15296 case NVPTX::BI__imma_m16n16k16_ld_c:
15297 return MMA_LDST(8, m16n16k16_load_c_s32);
15298 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15299 return MMA_LDST(4, m32n8k16_load_a_s8);
15300 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15301 return MMA_LDST(4, m32n8k16_load_a_u8);
15302 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15303 return MMA_LDST(1, m32n8k16_load_b_s8);
15304 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15305 return MMA_LDST(1, m32n8k16_load_b_u8);
15306 case NVPTX::BI__imma_m32n8k16_ld_c:
15307 return MMA_LDST(8, m32n8k16_load_c_s32);
15308 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15309 return MMA_LDST(1, m8n32k16_load_a_s8);
15310 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15311 return MMA_LDST(1, m8n32k16_load_a_u8);
15312 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15313 return MMA_LDST(4, m8n32k16_load_b_s8);
15314 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15315 return MMA_LDST(4, m8n32k16_load_b_u8);
15316 case NVPTX::BI__imma_m8n32k16_ld_c:
15317 return MMA_LDST(8, m8n32k16_load_c_s32);
15318
15319 // Sub-integer MMA loads.
15320 // Only row/col layout is supported by A/B fragments.
15321 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15322 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
15323 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15324 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
15325 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15326 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
15327 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15328 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
15329 case NVPTX::BI__imma_m8n8k32_ld_c:
15330 return MMA_LDST(2, m8n8k32_load_c_s32);
15331 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15332 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
15333 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15334 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
15335 case NVPTX::BI__bmma_m8n8k128_ld_c:
15336 return MMA_LDST(2, m8n8k128_load_c_s32);
15337
15338 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
15339 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
15340 // use fragment C for both loads and stores.
15341 // FP MMA stores.
15342 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
15343 return MMA_LDST(4, m16n16k16_store_d_f16);
15344 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
15345 return MMA_LDST(8, m16n16k16_store_d_f32);
15346 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
15347 return MMA_LDST(4, m32n8k16_store_d_f16);
15348 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
15349 return MMA_LDST(8, m32n8k16_store_d_f32);
15350 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
15351 return MMA_LDST(4, m8n32k16_store_d_f16);
15352 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
15353 return MMA_LDST(8, m8n32k16_store_d_f32);
15354
15355 // Integer and sub-integer MMA stores.
15356 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
15357 // name, integer loads/stores use LLVM's i32.
15358 case NVPTX::BI__imma_m16n16k16_st_c_i32:
15359 return MMA_LDST(8, m16n16k16_store_d_s32);
15360 case NVPTX::BI__imma_m32n8k16_st_c_i32:
15361 return MMA_LDST(8, m32n8k16_store_d_s32);
15362 case NVPTX::BI__imma_m8n32k16_st_c_i32:
15363 return MMA_LDST(8, m8n32k16_store_d_s32);
15364 case NVPTX::BI__imma_m8n8k32_st_c_i32:
15365 return MMA_LDST(2, m8n8k32_store_d_s32);
15366 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
15367 return MMA_LDST(2, m8n8k128_store_d_s32);
15368
15369 default:
15370 llvm_unreachable("Unknown MMA builtin");
15371 }
15372}
15373#undef MMA_LDST
15374#undef MMA_INTR
15375
15376
15377struct NVPTXMmaInfo {
15378 unsigned NumEltsA;
15379 unsigned NumEltsB;
15380 unsigned NumEltsC;
15381 unsigned NumEltsD;
15382 std::array<unsigned, 8> Variants;
15383
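  // Variants is ordered row_row, row_col, col_row, col_col, each immediately
  // followed by its _satfinite form (see MMA_VARIANTS below), so
  // Layout * 2 + Satf indexes the table directly; a 0 entry marks an
  // unsupported combination.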
15384 unsigned getMMAIntrinsic(int Layout, bool Satf) {
15385 unsigned Index = Layout * 2 + Satf;
15386 if (Index >= Variants.size())
15387 return 0;
15388 return Variants[Index];
15389 }
15390};
15391
15392 // Returns the intrinsic that matches Layout and Satf for valid combinations,
15393 // or 0 otherwise.
15394static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
15395 // clang-format off
15396#define MMA_VARIANTS(geom, type) {{ \
15397 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
15398 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
15399 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15400 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15401 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
15402 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
15403 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
15404 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
15405 }}
15406// Sub-integer MMA only supports row.col layout.
15407#define MMA_VARIANTS_I4(geom, type) {{ \
15408 0, \
15409 0, \
15410 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15411 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15412 0, \
15413 0, \
15414 0, \
15415 0 \
15416 }}
15417// b1 MMA does not support .satfinite.
15418#define MMA_VARIANTS_B1(geom, type) {{ \
15419 0, \
15420 0, \
15421 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15422 0, \
15423 0, \
15424 0, \
15425 0, \
15426 0 \
15427 }}
15428 // clang-format on
15429 switch (BuiltinID) {
15430 // FP MMA
15431 // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while the
15432 // NumElts fields of the return value are ordered as A, B, C, D.
15433 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
15434 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
15435 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
15436 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
15437 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
15438 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
15439 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
15440 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
15441 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
15442 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
15443 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
15444 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
15445 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
15446 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
15447 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
15448 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
15449 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
15450 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
15451 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
15452 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
15453 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
15454 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
15455 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
15456 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
15457
15458 // Integer MMA
15459 case NVPTX::BI__imma_m16n16k16_mma_s8:
15460 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
15461 case NVPTX::BI__imma_m16n16k16_mma_u8:
15462 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
15463 case NVPTX::BI__imma_m32n8k16_mma_s8:
15464 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
15465 case NVPTX::BI__imma_m32n8k16_mma_u8:
15466 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
15467 case NVPTX::BI__imma_m8n32k16_mma_s8:
15468 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
15469 case NVPTX::BI__imma_m8n32k16_mma_u8:
15470 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
15471
15472 // Sub-integer MMA
15473 case NVPTX::BI__imma_m8n8k32_mma_s4:
15474 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
15475 case NVPTX::BI__imma_m8n8k32_mma_u4:
15476 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
15477 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
15478 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
15479 default:
15480 llvm_unreachable("Unexpected builtin ID.");
15481 }
15482#undef MMA_VARIANTS
15483#undef MMA_VARIANTS_I4
15484#undef MMA_VARIANTS_B1
15485}
15486
15487} // namespace
15488
15489Value *
15490CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
15491 auto MakeLdg = [&](unsigned IntrinsicID) {
15492 Value *Ptr = EmitScalarExpr(E->getArg(0));
15493 clang::CharUnits Align =
15494 CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
15495 return Builder.CreateCall(
15496 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15497 Ptr->getType()}),
15498 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
15499 };
15500 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
15501 Value *Ptr = EmitScalarExpr(E->getArg(0));
15502 return Builder.CreateCall(
15503 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15504 Ptr->getType()}),
15505 {Ptr, EmitScalarExpr(E->getArg(1))});
15506 };
15507 switch (BuiltinID) {
15508 case NVPTX::BI__nvvm_atom_add_gen_i:
15509 case NVPTX::BI__nvvm_atom_add_gen_l:
15510 case NVPTX::BI__nvvm_atom_add_gen_ll:
15511 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
15512
15513 case NVPTX::BI__nvvm_atom_sub_gen_i:
15514 case NVPTX::BI__nvvm_atom_sub_gen_l:
15515 case NVPTX::BI__nvvm_atom_sub_gen_ll:
15516 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
15517
15518 case NVPTX::BI__nvvm_atom_and_gen_i:
15519 case NVPTX::BI__nvvm_atom_and_gen_l:
15520 case NVPTX::BI__nvvm_atom_and_gen_ll:
15521 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
15522
15523 case NVPTX::BI__nvvm_atom_or_gen_i:
15524 case NVPTX::BI__nvvm_atom_or_gen_l:
15525 case NVPTX::BI__nvvm_atom_or_gen_ll:
15526 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
15527
15528 case NVPTX::BI__nvvm_atom_xor_gen_i:
15529 case NVPTX::BI__nvvm_atom_xor_gen_l:
15530 case NVPTX::BI__nvvm_atom_xor_gen_ll:
15531 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
15532
15533 case NVPTX::BI__nvvm_atom_xchg_gen_i:
15534 case NVPTX::BI__nvvm_atom_xchg_gen_l:
15535 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
15536 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
15537
15538 case NVPTX::BI__nvvm_atom_max_gen_i:
15539 case NVPTX::BI__nvvm_atom_max_gen_l:
15540 case NVPTX::BI__nvvm_atom_max_gen_ll:
15541 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
15542
15543 case NVPTX::BI__nvvm_atom_max_gen_ui:
15544 case NVPTX::BI__nvvm_atom_max_gen_ul:
15545 case NVPTX::BI__nvvm_atom_max_gen_ull:
15546 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
15547
15548 case NVPTX::BI__nvvm_atom_min_gen_i:
15549 case NVPTX::BI__nvvm_atom_min_gen_l:
15550 case NVPTX::BI__nvvm_atom_min_gen_ll:
15551 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
15552
15553 case NVPTX::BI__nvvm_atom_min_gen_ui:
15554 case NVPTX::BI__nvvm_atom_min_gen_ul:
15555 case NVPTX::BI__nvvm_atom_min_gen_ull:
15556 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
15557
15558 case NVPTX::BI__nvvm_atom_cas_gen_i:
15559 case NVPTX::BI__nvvm_atom_cas_gen_l:
15560 case NVPTX::BI__nvvm_atom_cas_gen_ll:
15561 // __nvvm_atom_cas_gen_* should return the old value rather than the
15562 // success flag.
15563 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
15564
15565 case NVPTX::BI__nvvm_atom_add_gen_f:
15566 case NVPTX::BI__nvvm_atom_add_gen_d: {
15567 Value *Ptr = EmitScalarExpr(E->getArg(0));
15568 Value *Val = EmitScalarExpr(E->getArg(1));
15569 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
15570 AtomicOrdering::SequentiallyConsistent);
15571 }
15572
15573 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
15574 Value *Ptr = EmitScalarExpr(E->getArg(0));
15575 Value *Val = EmitScalarExpr(E->getArg(1));
15576 Function *FnALI32 =
15577 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
15578 return Builder.CreateCall(FnALI32, {Ptr, Val});
15579 }
15580
15581 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
15582 Value *Ptr = EmitScalarExpr(E->getArg(0));
15583 Value *Val = EmitScalarExpr(E->getArg(1));
15584 Function *FnALD32 =
15585 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
15586 return Builder.CreateCall(FnALD32, {Ptr, Val});
15587 }
15588
15589 case NVPTX::BI__nvvm_ldg_c:
15590 case NVPTX::BI__nvvm_ldg_c2:
15591 case NVPTX::BI__nvvm_ldg_c4:
15592 case NVPTX::BI__nvvm_ldg_s:
15593 case NVPTX::BI__nvvm_ldg_s2:
15594 case NVPTX::BI__nvvm_ldg_s4:
15595 case NVPTX::BI__nvvm_ldg_i:
15596 case NVPTX::BI__nvvm_ldg_i2:
15597 case NVPTX::BI__nvvm_ldg_i4:
15598 case NVPTX::BI__nvvm_ldg_l:
15599 case NVPTX::BI__nvvm_ldg_ll:
15600 case NVPTX::BI__nvvm_ldg_ll2:
15601 case NVPTX::BI__nvvm_ldg_uc:
15602 case NVPTX::BI__nvvm_ldg_uc2:
15603 case NVPTX::BI__nvvm_ldg_uc4:
15604 case NVPTX::BI__nvvm_ldg_us:
15605 case NVPTX::BI__nvvm_ldg_us2:
15606 case NVPTX::BI__nvvm_ldg_us4:
15607 case NVPTX::BI__nvvm_ldg_ui:
15608 case NVPTX::BI__nvvm_ldg_ui2:
15609 case NVPTX::BI__nvvm_ldg_ui4:
15610 case NVPTX::BI__nvvm_ldg_ul:
15611 case NVPTX::BI__nvvm_ldg_ull:
15612 case NVPTX::BI__nvvm_ldg_ull2:
15613 // PTX Interoperability section 2.2: "For a vector with an even number of
15614 // elements, its alignment is set to number of elements times the alignment
15615 // of its member: n*alignof(t)."
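 // (Illustrative application of that rule: a two-element float vector gets
 // 2 * alignof(float) = 8-byte alignment.)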
15616 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
15617 case NVPTX::BI__nvvm_ldg_f:
15618 case NVPTX::BI__nvvm_ldg_f2:
15619 case NVPTX::BI__nvvm_ldg_f4:
15620 case NVPTX::BI__nvvm_ldg_d:
15621 case NVPTX::BI__nvvm_ldg_d2:
15622 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
15623
15624 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
15625 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
15626 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
15627 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
15628 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
15629 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
15630 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
15631 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
15632 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
15633 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
15634 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
15635 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
15636 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
15637 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
15638 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
15639 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
15640 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
15641 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
15642 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
15643 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
15644 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
15645 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
15646 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
15647 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
15648 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
15649 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
15650 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
15651 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
15652 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
15653 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
15654 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
15655 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
15656 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
15657 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
15658 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
15659 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
15660 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
15661 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
15662 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
15663 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
15664 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
15665 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
15666 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
15667 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
15668 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
15669 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
15670 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
15671 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
15672 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
15673 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
15674 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
15675 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
15676 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
15677 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
15678 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
15679 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
15680 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
15681 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
15682 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
15683 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
15684 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
15685 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
15686 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
15687 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
15688 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
15689 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
15690 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
15691 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
15692 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
15693 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
15694 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
15695 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
15696 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
15697 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
15698 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
15699 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
15700 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
15701 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
15702 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
15703 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
15704 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
15705 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
15706 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
15707 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
15708 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
15709 Value *Ptr = EmitScalarExpr(E->getArg(0));
15710 return Builder.CreateCall(
15711 CGM.getIntrinsic(
15712 Intrinsic::nvvm_atomic_cas_gen_i_cta,
15713 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
15714 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
15715 }
15716 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
15717 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
15718 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
15719 Value *Ptr = EmitScalarExpr(E->getArg(0));
15720 return Builder.CreateCall(
15721 CGM.getIntrinsic(
15722 Intrinsic::nvvm_atomic_cas_gen_i_sys,
15723 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
15724 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
15725 }
15726 case NVPTX::BI__nvvm_match_all_sync_i32p:
15727 case NVPTX::BI__nvvm_match_all_sync_i64p: {
15728 Value *Mask = EmitScalarExpr(E->getArg(0));
15729 Value *Val = EmitScalarExpr(E->getArg(1));
15730 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
15731 Value *ResultPair = Builder.CreateCall(
15732 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
15733 ? Intrinsic::nvvm_match_all_sync_i32p
15734 : Intrinsic::nvvm_match_all_sync_i64p),
15735 {Mask, Val});
15736 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
15737 PredOutPtr.getElementType());
15738 Builder.CreateStore(Pred, PredOutPtr);
15739 return Builder.CreateExtractValue(ResultPair, 0);
15740 }
15741
15742 // FP MMA loads
15743 case NVPTX::BI__hmma_m16n16k16_ld_a:
15744 case NVPTX::BI__hmma_m16n16k16_ld_b:
15745 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15746 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15747 case NVPTX::BI__hmma_m32n8k16_ld_a:
15748 case NVPTX::BI__hmma_m32n8k16_ld_b:
15749 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15750 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15751 case NVPTX::BI__hmma_m8n32k16_ld_a:
15752 case NVPTX::BI__hmma_m8n32k16_ld_b:
15753 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15754 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15755 // Integer MMA loads.
15756 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15757 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15758 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15759 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15760 case NVPTX::BI__imma_m16n16k16_ld_c:
15761 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15762 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15763 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15764 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15765 case NVPTX::BI__imma_m32n8k16_ld_c:
15766 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15767 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15768 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15769 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15770 case NVPTX::BI__imma_m8n32k16_ld_c:
15771 // Sub-integer MMA loads.
15772 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15773 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15774 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15775 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15776 case NVPTX::BI__imma_m8n8k32_ld_c:
15777 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15778 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15779 case NVPTX::BI__bmma_m8n8k128_ld_c:
15780 {
15781 Address Dst = EmitPointerWithAlignment(E->getArg(0));
15782 Value *Src = EmitScalarExpr(E->getArg(1));
15783 Value *Ldm = EmitScalarExpr(E->getArg(2));
15784 Optional<llvm::APSInt> isColMajorArg =
15785 E->getArg(3)->getIntegerConstantExpr(getContext());
15786 if (!isColMajorArg)
15787 return nullptr;
15788 bool isColMajor = isColMajorArg->getSExtValue();
15789 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
15790 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
15791 if (IID == 0)
15792 return nullptr;
15793
15794 Value *Result =
15795 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
15796
15797 // Save returned values.
15798 assert(II.NumResults);
15799 if (II.NumResults == 1) {
15800 Builder.CreateAlignedStore(Result, Dst.getPointer(),
15801 CharUnits::fromQuantity(4));
15802 } else {
15803 for (unsigned i = 0; i < II.NumResults; ++i) {
15804 Builder.CreateAlignedStore(
15805 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
15806 Dst.getElementType()),
15807 Builder.CreateGEP(Dst.getPointer(),
15808 llvm::ConstantInt::get(IntTy, i)),
15809 CharUnits::fromQuantity(4));
15810 }
15811 }
15812 return Result;
15813 }
15814
15815 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
15816 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
15817 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
15818 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
15819 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
15820 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
15821 case NVPTX::BI__imma_m16n16k16_st_c_i32:
15822 case NVPTX::BI__imma_m32n8k16_st_c_i32:
15823 case NVPTX::BI__imma_m8n32k16_st_c_i32:
15824 case NVPTX::BI__imma_m8n8k32_st_c_i32:
15825 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
15826 Value *Dst = EmitScalarExpr(E->getArg(0));
15827 Address Src = EmitPointerWithAlignment(E->getArg(1));
15828 Value *Ldm = EmitScalarExpr(E->getArg(2));
15829 Optional<llvm::APSInt> isColMajorArg =
15830 E->getArg(3)->getIntegerConstantExpr(getContext());
15831 if (!isColMajorArg)
15832 return nullptr;
15833 bool isColMajor = isColMajorArg->getSExtValue();
15834 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
15835 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
15836 if (IID == 0)
15837 return nullptr;
15838 Function *Intrinsic =
15839 CGM.getIntrinsic(IID, Dst->getType());
15840 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
15841 SmallVector<Value *, 10> Values = {Dst};
15842 for (unsigned i = 0; i < II.NumResults; ++i) {
15843 Value *V = Builder.CreateAlignedLoad(
15844 Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
15845 CharUnits::fromQuantity(4));
15846 Values.push_back(Builder.CreateBitCast(V, ParamType));
15847 }
15848 Values.push_back(Ldm);
15849 Value *Result = Builder.CreateCall(Intrinsic, Values);
15850 return Result;
15851 }
15852
15853 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
15854 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
15855 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
15856 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
15857 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
15858 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
15859 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
15860 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
15861 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
15862 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
15863 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
15864 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
15865 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
15866 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
15867 case NVPTX::BI__imma_m16n16k16_mma_s8:
15868 case NVPTX::BI__imma_m16n16k16_mma_u8:
15869 case NVPTX::BI__imma_m32n8k16_mma_s8:
15870 case NVPTX::BI__imma_m32n8k16_mma_u8:
15871 case NVPTX::BI__imma_m8n32k16_mma_s8:
15872 case NVPTX::BI__imma_m8n32k16_mma_u8:
15873 case NVPTX::BI__imma_m8n8k32_mma_s4:
15874 case NVPTX::BI__imma_m8n8k32_mma_u4:
15875 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
15876 Address Dst = EmitPointerWithAlignment(E->getArg(0));
15877 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
15878 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
15879 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
15880 Optional<llvm::APSInt> LayoutArg =
15881 E->getArg(4)->getIntegerConstantExpr(getContext());
15882 if (!LayoutArg)
15883 return nullptr;
15884 int Layout = LayoutArg->getSExtValue();
15885 if (Layout < 0 || Layout > 3)
15886 return nullptr;
15887 llvm::APSInt SatfArg;
15888 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
15889 SatfArg = 0; // .b1 does not have satf argument.
15890 else if (Optional<llvm::APSInt> OptSatfArg =
15891 E->getArg(5)->getIntegerConstantExpr(getContext()))
15892 SatfArg = *OptSatfArg;
15893 else
15894 return nullptr;
15895 bool Satf = SatfArg.getSExtValue();
15896 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
15897 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
15898 if (IID == 0) // Unsupported combination of Layout/Satf.
15899 return nullptr;
15900
15901 SmallVector<Value *, 24> Values;
15902 Function *Intrinsic = CGM.getIntrinsic(IID);
15903 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
15904 // Load A
15905 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
15906 Value *V = Builder.CreateAlignedLoad(
15907 Builder.CreateGEP(SrcA.getPointer(),
15908 llvm::ConstantInt::get(IntTy, i)),
15909 CharUnits::fromQuantity(4));
15910 Values.push_back(Builder.CreateBitCast(V, AType));
15911 }
15912 // Load B
15913 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
15914 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
15915 Value *V = Builder.CreateAlignedLoad(
15916 Builder.CreateGEP(SrcB.getPointer(),
15917 llvm::ConstantInt::get(IntTy, i)),
15918 CharUnits::fromQuantity(4));
15919 Values.push_back(Builder.CreateBitCast(V, BType));
15920 }
15921 // Load C
15922 llvm::Type *CType =
15923 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
15924 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
15925 Value *V = Builder.CreateAlignedLoad(
15926 Builder.CreateGEP(SrcC.getPointer(),
15927 llvm::ConstantInt::get(IntTy, i)),
15928 CharUnits::fromQuantity(4));
15929 Values.push_back(Builder.CreateBitCast(V, CType));
15930 }
15931 Value *Result = Builder.CreateCall(Intrinsic, Values);
15932 llvm::Type *DType = Dst.getElementType();
15933 for (unsigned i = 0; i < MI.NumEltsD; ++i)
15934 Builder.CreateAlignedStore(
15935 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
15936 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
15937 CharUnits::fromQuantity(4));
15938 return Result;
15939 }
15940 default:
15941 return nullptr;
15942 }
15943}
15944
15945namespace {
15946struct BuiltinAlignArgs {
15947 llvm::Value *Src = nullptr;
15948 llvm::Type *SrcType = nullptr;
15949 llvm::Value *Alignment = nullptr;
15950 llvm::Value *Mask = nullptr;
15951 llvm::IntegerType *IntType = nullptr;
15952
15953 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
15954 QualType AstType = E->getArg(0)->getType();
15955 if (AstType->isArrayType())
15956 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
15957 else
15958 Src = CGF.EmitScalarExpr(E->getArg(0));
15959 SrcType = Src->getType();
15960 if (SrcType->isPointerTy()) {
15961 IntType = IntegerType::get(
15962 CGF.getLLVMContext(),
15963 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
15964 } else {
15965 assert(SrcType->isIntegerTy());
15966 IntType = cast<llvm::IntegerType>(SrcType);
15967 }
15968 Alignment = CGF.EmitScalarExpr(E->getArg(1));
15969 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
15970 auto *One = llvm::ConstantInt::get(IntType, 1);
15971 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
15972 }
15973};
15974} // namespace
15975
15976/// Generate (x & (y-1)) == 0.
15977RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
15978 BuiltinAlignArgs Args(E, *this);
15979 llvm::Value *SrcAddress = Args.Src;
15980 if (Args.SrcType->isPointerTy())
15981 SrcAddress =
15982 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
15983 return RValue::get(Builder.CreateICmpEQ(
15984 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
15985 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
15986}
15987
15988/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
15989/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
15990/// llvm.ptrmask intrinsic (with a GEP before it in the align_up case).
15991/// TODO: actually use ptrmask once most optimization passes know about it.
15992RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
15993 BuiltinAlignArgs Args(E, *this);
15994 llvm::Value *SrcAddr = Args.Src;
15995 if (Args.Src->getType()->isPointerTy())
15996 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
15997 llvm::Value *SrcForMask = SrcAddr;
15998 if (AlignUp) {
15999 // When aligning up we have to first add the mask to ensure we go over the
16000 // next alignment value and then align down to the next valid multiple.
16001 // By adding the mask, we ensure that align_up on an already aligned
16002 // value will not change the value.
16003 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
16004 }
16005 // Invert the mask to only clear the lower bits.
16006 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
16007 llvm::Value *Result =
16008 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
16009 if (Args.Src->getType()->isPointerTy()) {
16010 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
16011 // Result = Builder.CreateIntrinsic(
16012 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
16013 // {SrcForMask, InvertedMask}, nullptr, "aligned_result");
16014 Result->setName("aligned_intptr");
16015 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
16016 // The result must point to the same underlying allocation. This means we
16017 // can use an inbounds GEP to enable better optimization.
16018 Value *Base = EmitCastToVoidPtr(Args.Src);
16019 if (getLangOpts().isSignedOverflowDefined())
16020 Result = Builder.CreateGEP(Base, Difference, "aligned_result");
16021 else
16022 Result = EmitCheckedInBoundsGEP(Base, Difference,
16023 /*SignedIndices=*/true,
16024 /*isSubtraction=*/!AlignUp,
16025 E->getExprLoc(), "aligned_result");
16026 Result = Builder.CreatePointerCast(Result, Args.SrcType);
16027 // Emit an alignment assumption to ensure that the new alignment is
16028 // propagated to loads/stores, etc.
16029 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
16030 }
16031 assert(Result->getType() == Args.SrcType);
16032 return RValue::get(Result);
16033}
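// [Editor's note, not part of the original CGBuiltin.cpp] The two helpers above
// reduce __builtin_is_aligned / __builtin_align_down / __builtin_align_up to plain
// mask arithmetic. A minimal sketch of the same computation on integers, assuming
// Align is a power of two (the helper names below are hypothetical):
//
//   static bool isAligned(uint64_t X, uint64_t Align) { return (X & (Align - 1)) == 0; }
//   static uint64_t alignDown(uint64_t X, uint64_t Align) { return X & ~(Align - 1); }
//   static uint64_t alignUp(uint64_t X, uint64_t Align) { return (X + Align - 1) & ~(Align - 1); }
//
// For example, alignUp(13, 8) == 16 and alignDown(13, 8) == 8, matching the
// "over_boundary" add followed by the "inverted_mask" AND emitted above.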
16034
16035Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
16036 const CallExpr *E) {
16037 switch (BuiltinID) {
16038 case WebAssembly::BI__builtin_wasm_memory_size: {
16039 llvm::Type *ResultType = ConvertType(E->getType());
16040 Value *I = EmitScalarExpr(E->getArg(0));
16041 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
16042 return Builder.CreateCall(Callee, I);
16043 }
16044 case WebAssembly::BI__builtin_wasm_memory_grow: {
16045 llvm::Type *ResultType = ConvertType(E->getType());
16046 Value *Args[] = {
16047 EmitScalarExpr(E->getArg(0)),
16048 EmitScalarExpr(E->getArg(1))
16049 };
16050 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
16051 return Builder.CreateCall(Callee, Args);
16052 }
16053 case WebAssembly::BI__builtin_wasm_tls_size: {
16054 llvm::Type *ResultType = ConvertType(E->getType());
16055 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
16056 return Builder.CreateCall(Callee);
16057 }
16058 case WebAssembly::BI__builtin_wasm_tls_align: {
16059 llvm::Type *ResultType = ConvertType(E->getType());
16060 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
16061 return Builder.CreateCall(Callee);
16062 }
16063 case WebAssembly::BI__builtin_wasm_tls_base: {
16064 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
16065 return Builder.CreateCall(Callee);
16066 }
16067 case WebAssembly::BI__builtin_wasm_throw: {
16068 Value *Tag = EmitScalarExpr(E->getArg(0));
16069 Value *Obj = EmitScalarExpr(E->getArg(1));
16070 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
16071 return Builder.CreateCall(Callee, {Tag, Obj});
16072 }
16073 case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
16074 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
16075 return Builder.CreateCall(Callee);
16076 }
16077 case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
16078 Value *Addr = EmitScalarExpr(E->getArg(0));
16079 Value *Expected = EmitScalarExpr(E->getArg(1));
16080 Value *Timeout = EmitScalarExpr(E->getArg(2));
16081 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
16082 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16083 }
16084 case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
16085 Value *Addr = EmitScalarExpr(E->getArg(0));
16086 Value *Expected = EmitScalarExpr(E->getArg(1));
16087 Value *Timeout = EmitScalarExpr(E->getArg(2));
16088 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
16089 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16090 }
16091 case WebAssembly::BI__builtin_wasm_atomic_notify: {
16092 Value *Addr = EmitScalarExpr(E->getArg(0));
16093 Value *Count = EmitScalarExpr(E->getArg(1));
16094 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
16095 return Builder.CreateCall(Callee, {Addr, Count});
16096 }
16097 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
16098 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
16099 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
16100 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
16101 Value *Src = EmitScalarExpr(E->getArg(0));
16102 llvm::Type *ResT = ConvertType(E->getType());
16103 Function *Callee =
16104 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
16105 return Builder.CreateCall(Callee, {Src});
16106 }
16107 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
16108 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
16109 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
16110 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
16111 Value *Src = EmitScalarExpr(E->getArg(0));
16112 llvm::Type *ResT = ConvertType(E->getType());
16113 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
16114 {ResT, Src->getType()});
16115 return Builder.CreateCall(Callee, {Src});
16116 }
16117 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
16118 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
16119 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
16120 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
16121 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
16122 Value *Src = EmitScalarExpr(E->getArg(0));
16123 llvm::Type *ResT = ConvertType(E->getType());
16124 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
16125 {ResT, Src->getType()});
16126 return Builder.CreateCall(Callee, {Src});
16127 }
16128 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
16129 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
16130 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
16131 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
16132 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
16133 Value *Src = EmitScalarExpr(E->getArg(0));
16134 llvm::Type *ResT = ConvertType(E->getType());
16135 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
16136 {ResT, Src->getType()});
16137 return Builder.CreateCall(Callee, {Src});
16138 }
16139 case WebAssembly::BI__builtin_wasm_min_f32:
16140 case WebAssembly::BI__builtin_wasm_min_f64:
16141 case WebAssembly::BI__builtin_wasm_min_f32x4:
16142 case WebAssembly::BI__builtin_wasm_min_f64x2: {
16143 Value *LHS = EmitScalarExpr(E->getArg(0));
16144 Value *RHS = EmitScalarExpr(E->getArg(1));
16145 Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
16146 ConvertType(E->getType()));
16147 return Builder.CreateCall(Callee, {LHS, RHS});
16148 }
16149 case WebAssembly::BI__builtin_wasm_max_f32:
16150 case WebAssembly::BI__builtin_wasm_max_f64:
16151 case WebAssembly::BI__builtin_wasm_max_f32x4:
16152 case WebAssembly::BI__builtin_wasm_max_f64x2: {
16153 Value *LHS = EmitScalarExpr(E->getArg(0));
16154 Value *RHS = EmitScalarExpr(E->getArg(1));
16155 Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
16156 ConvertType(E->getType()));
16157 return Builder.CreateCall(Callee, {LHS, RHS});
16158 }
16159 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
16160 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
16161 Value *LHS = EmitScalarExpr(E->getArg(0));
16162 Value *RHS = EmitScalarExpr(E->getArg(1));
16163 Function *Callee =
16164 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
16165 return Builder.CreateCall(Callee, {LHS, RHS});
16166 }
16167 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
16168 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
16169 Value *LHS = EmitScalarExpr(E->getArg(0));
16170 Value *RHS = EmitScalarExpr(E->getArg(1));
16171 Function *Callee =
16172 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
16173 return Builder.CreateCall(Callee, {LHS, RHS});
16174 }
16175 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16176 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16177 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16178 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16179 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16180 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16181 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16182 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
16183 unsigned IntNo;
16184 switch (BuiltinID) {
16185 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16186 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16187 IntNo = Intrinsic::wasm_ceil;
16188 break;
16189 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16190 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16191 IntNo = Intrinsic::wasm_floor;
16192 break;
16193 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16194 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16195 IntNo = Intrinsic::wasm_trunc;
16196 break;
16197 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16198 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
16199 IntNo = Intrinsic::wasm_nearest;
16200 break;
16201 default:
16202 llvm_unreachable("unexpected builtin ID");
16203 }
16204 Value *Value = EmitScalarExpr(E->getArg(0));
16205 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16206 return Builder.CreateCall(Callee, Value);
16207 }
16208 case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
16209 Value *Src = EmitScalarExpr(E->getArg(0));
16210 Value *Indices = EmitScalarExpr(E->getArg(1));
16211 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
16212 return Builder.CreateCall(Callee, {Src, Indices});
16213 }
16214 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16215 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16216 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16217 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16218 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16219 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16220 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16221 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
16222 llvm::APSInt LaneConst =
16223 *E->getArg(1)->getIntegerConstantExpr(getContext());
16224 Value *Vec = EmitScalarExpr(E->getArg(0));
16225 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16226 Value *Extract = Builder.CreateExtractElement(Vec, Lane);
16227 switch (BuiltinID) {
16228 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16229 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16230 return Builder.CreateSExt(Extract, ConvertType(E->getType()));
16231 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16232 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16233 return Builder.CreateZExt(Extract, ConvertType(E->getType()));
16234 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16235 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16236 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16237 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
16238 return Extract;
16239 default:
16240 llvm_unreachable("unexpected builtin ID");
16241 }
16242 }
16243 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16244 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
16245 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16246 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16247 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16248 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
16249 llvm::APSInt LaneConst =
16250 *E->getArg(1)->getIntegerConstantExpr(getContext());
16251 Value *Vec = EmitScalarExpr(E->getArg(0));
16252 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16253 Value *Val = EmitScalarExpr(E->getArg(2));
16254 switch (BuiltinID) {
16255 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16256 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
16257 llvm::Type *ElemType =
16258 cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
16259 Value *Trunc = Builder.CreateTrunc(Val, ElemType);
16260 return Builder.CreateInsertElement(Vec, Trunc, Lane);
16261 }
16262 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16263 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16264 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16265 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
16266 return Builder.CreateInsertElement(Vec, Val, Lane);
16267 default:
16268 llvm_unreachable("unexpected builtin ID");
16269 }
16270 }
16271 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16272 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16273 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16274 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16275 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16276 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16277 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16278 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
16279 unsigned IntNo;
16280 switch (BuiltinID) {
16281 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16282 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16283 IntNo = Intrinsic::sadd_sat;
16284 break;
16285 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16286 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16287 IntNo = Intrinsic::uadd_sat;
16288 break;
16289 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16290 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16291 IntNo = Intrinsic::wasm_sub_saturate_signed;
16292 break;
16293 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16294 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
16295 IntNo = Intrinsic::wasm_sub_saturate_unsigned;
16296 break;
16297 default:
16298 llvm_unreachable("unexpected builtin ID");
16299 }
16300 Value *LHS = EmitScalarExpr(E->getArg(0));
16301 Value *RHS = EmitScalarExpr(E->getArg(1));
16302 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16303 return Builder.CreateCall(Callee, {LHS, RHS});
16304 }
16305 case WebAssembly::BI__builtin_wasm_abs_i8x16:
16306 case WebAssembly::BI__builtin_wasm_abs_i16x8:
16307 case WebAssembly::BI__builtin_wasm_abs_i32x4: {
16308 Value *Vec = EmitScalarExpr(E->getArg(0));
16309 Value *Neg = Builder.CreateNeg(Vec, "neg");
16310 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
16311 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
16312 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
16313 }
16314 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16315 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16316 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16317 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16318 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16319 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16320 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16321 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16322 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16323 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16324 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16325 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
16326 Value *LHS = EmitScalarExpr(E->getArg(0));
16327 Value *RHS = EmitScalarExpr(E->getArg(1));
16328 Value *ICmp;
16329 switch (BuiltinID) {
16330 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16331 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16332 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16333 ICmp = Builder.CreateICmpSLT(LHS, RHS);
16334 break;
16335 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16336 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16337 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16338 ICmp = Builder.CreateICmpULT(LHS, RHS);
16339 break;
16340 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16341 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16342 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16343 ICmp = Builder.CreateICmpSGT(LHS, RHS);
16344 break;
16345 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16346 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16347 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
16348 ICmp = Builder.CreateICmpUGT(LHS, RHS);
16349 break;
16350 default:
16351 llvm_unreachable("unexpected builtin ID");
16352 }
16353 return Builder.CreateSelect(ICmp, LHS, RHS);
16354 }
16355 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
16356 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
16357 Value *LHS = EmitScalarExpr(E->getArg(0));
16358 Value *RHS = EmitScalarExpr(E->getArg(1));
16359 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
16360 ConvertType(E->getType()));
16361 return Builder.CreateCall(Callee, {LHS, RHS});
16362 }
16363 case WebAssembly::BI__builtin_wasm_bitselect: {
16364 Value *V1 = EmitScalarExpr(E->getArg(0));
16365 Value *V2 = EmitScalarExpr(E->getArg(1));
16366 Value *C = EmitScalarExpr(E->getArg(2));
16367 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
16368 ConvertType(E->getType()));
16369 return Builder.CreateCall(Callee, {V1, V2, C});
16370 }
16371 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
16372 Value *LHS = EmitScalarExpr(E->getArg(0));
16373 Value *RHS = EmitScalarExpr(E->getArg(1));
16374 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
16375 return Builder.CreateCall(Callee, {LHS, RHS});
16376 }
16377 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16378 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16379 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16380 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16381 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16382 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16383 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16384 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
16385 unsigned IntNo;
16386 switch (BuiltinID) {
16387 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16388 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16389 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16390 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16391 IntNo = Intrinsic::wasm_anytrue;
16392 break;
16393 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16394 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16395 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16396 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
16397 IntNo = Intrinsic::wasm_alltrue;
16398 break;
16399 default:
16400 llvm_unreachable("unexpected builtin ID");
16401 }
16402 Value *Vec = EmitScalarExpr(E->getArg(0));
16403 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
16404 return Builder.CreateCall(Callee, {Vec});
16405 }
16406 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
16407 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
16408 case WebAssembly::BI__builtin_wasm_bitmask_i32x4: {
16409 Value *Vec = EmitScalarExpr(E->getArg(0));
16410 Function *Callee =
16411 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
16412 return Builder.CreateCall(Callee, {Vec});
16413 }
16414 case WebAssembly::BI__builtin_wasm_abs_f32x4:
16415 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
16416 Value *Vec = EmitScalarExpr(E->getArg(0));
16417 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
16418 return Builder.CreateCall(Callee, {Vec});
16419 }
16420 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
16421 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
16422 Value *Vec = EmitScalarExpr(E->getArg(0));
16423 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
16424 return Builder.CreateCall(Callee, {Vec});
16425 }
16426 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16427 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16428 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16429 case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
16430 Value *A = EmitScalarExpr(E->getArg(0));
16431 Value *B = EmitScalarExpr(E->getArg(1));
16432 Value *C = EmitScalarExpr(E->getArg(2));
16433 unsigned IntNo;
16434 switch (BuiltinID) {
16435 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16436 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16437 IntNo = Intrinsic::wasm_qfma;
16438 break;
16439 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16440 case WebAssembly::BI__builtin_wasm_qfms_f64x2:
16441 IntNo = Intrinsic::wasm_qfms;
16442 break;
16443 default:
16444 llvm_unreachable("unexpected builtin ID");
16445 }
16446 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
16447 return Builder.CreateCall(Callee, {A, B, C});
16448 }
16449 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16450 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16451 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16452 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
16453 Value *Low = EmitScalarExpr(E->getArg(0));
16454 Value *High = EmitScalarExpr(E->getArg(1));
16455 unsigned IntNo;
16456 switch (BuiltinID) {
16457 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16458 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16459 IntNo = Intrinsic::wasm_narrow_signed;
16460 break;
16461 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16462 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
16463 IntNo = Intrinsic::wasm_narrow_unsigned;
16464 break;
16465 default:
16466 llvm_unreachable("unexpected builtin ID");
16467 }
16468 Function *Callee =
16469 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
16470 return Builder.CreateCall(Callee, {Low, High});
16471 }
16472 case WebAssembly::BI__builtin_wasm_load32_zero: {
16473 Value *Ptr = EmitScalarExpr(E->getArg(0));
16474 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
16475 return Builder.CreateCall(Callee, {Ptr});
16476 }
16477 case WebAssembly::BI__builtin_wasm_load64_zero: {
16478 Value *Ptr = EmitScalarExpr(E->getArg(0));
16479 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
16480 return Builder.CreateCall(Callee, {Ptr});
16481 }
16482 case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
16483 Value *Ops[18];
16484 size_t OpIdx = 0;
16485 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
16486 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
16487 while (OpIdx < 18) {
16488 Optional<llvm::APSInt> LaneConst =
16489 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
16490 assert(LaneConst && "Constant arg isn't actually constant?");
16491 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
16492 }
16493 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
16494 return Builder.CreateCall(Callee, Ops);
16495 }
16496 default:
16497 return nullptr;
16498 }
16499}
16500
16501static std::pair<Intrinsic::ID, unsigned>
16502getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
16503 struct Info {
16504 unsigned BuiltinID;
16505 Intrinsic::ID IntrinsicID;
16506 unsigned VecLen;
16507 };
16508 Info Infos[] = {
16509#define CUSTOM_BUILTIN_MAPPING(x,s) \
16510 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
16511 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
16512 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
16513 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
16514 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
16515 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
16516 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
16517 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
16518 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
16519 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
16520 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
16521 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
16522 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
16523 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
16524 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
16525 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
16526 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
16527 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
16528 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
16529 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
16530 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
16531 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
16532 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
16533 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
16534 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
16535 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
16536 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
16537 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
16538 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
16539 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
16540 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
16541#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
16542#undef CUSTOM_BUILTIN_MAPPING
16543 };
16544
16545 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
16546 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
16547 (void)SortOnce;
16548
16549 const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
16550 Info{BuiltinID, 0, 0}, CmpInfo);
16551 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
16552 return {Intrinsic::not_intrinsic, 0};
16553
16554 return {F->IntrinsicID, F->VecLen};
16555}
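// [Editor's note, not part of the original CGBuiltin.cpp] The Infos table above is
// sorted once on first use and then searched with std::lower_bound, so each lookup
// is O(log n). A hypothetical use, based on the mappings listed in the table:
//
//   Intrinsic::ID ID;
//   unsigned VecLen;
//   std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(
//       Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq);
//   // Expected per the table: ID == Intrinsic::hexagon_V6_vmaskedstoreq, VecLen == 64.
//
// A builtin without a custom mapping comes back as {Intrinsic::not_intrinsic, 0}.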
16556
16557Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
16558 const CallExpr *E) {
16559 Intrinsic::ID ID;
16560 unsigned VecLen;
16561 std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
16562
16563 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
16564 // The base pointer is passed by address, so it needs to be loaded.
16565 Address A = EmitPointerWithAlignment(E->getArg(0));
16566 Address BP = Address(
16567 Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
16568 llvm::Value *Base = Builder.CreateLoad(BP);
16569 // The treatment of both loads and stores is the same: the arguments for
16570 // the builtin are the same as the arguments for the intrinsic.
16571 // Load:
16572 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
16573 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
16574 // Store:
16575 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
16576 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
16577 SmallVector<llvm::Value*,5> Ops = { Base };
16578 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
16579 Ops.push_back(EmitScalarExpr(E->getArg(i)));
16580
16581 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
16582 // The load intrinsics generate two results (Value, NewBase), stores
16583 // generate one (NewBase). The new base address needs to be stored.
16584 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
16585 : Result;
16586 llvm::Value *LV = Builder.CreateBitCast(
16587 EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
16588 Address Dest = EmitPointerWithAlignment(E->getArg(0));
16589 llvm::Value *RetVal =
16590 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
16591 if (IsLoad)
16592 RetVal = Builder.CreateExtractValue(Result, 0);
16593 return RetVal;
16594 };
16595
16596 // Handle the conversion of bit-reverse load intrinsics to bit code.
16597 // The intrinsic call emitted by this function only reads from memory; the
16598 // write to memory is handled by the store instruction.
16599 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
16600 // The intrinsic generates one result, which is the new value for the base
16601 // pointer. It needs to be returned. The result of the load instruction is
16602 // passed to the intrinsic by address, so the value needs to be stored.
16603 llvm::Value *BaseAddress =
16604 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
16605
16606 // Expressions like &(*pt++) are incremented on each evaluation;
16607 // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
16608 // only once per call.
16609 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
16610 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
16611 DestAddr.getAlignment());
16612 llvm::Value *DestAddress = DestAddr.getPointer();
16613
16614 // Operands are Base, Dest, Modifier.
16615 // The intrinsic format in LLVM IR is defined as
16616 // { ValueType, i8* } (i8*, i32).
16617 llvm::Value *Result = Builder.CreateCall(
16618 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
16619
16620 // The value needs to be stored as the variable is passed by reference.
16621 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
16622
16623 // The store needs to be truncated to fit the destination type.
16624 // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
16625 // to be handled with stores of the respective destination type.
16626 DestVal = Builder.CreateTrunc(DestVal, DestTy);
16627
16628 llvm::Value *DestForStore =
16629 Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
16630 Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
16631 // The updated value of the base pointer is returned.
16632 return Builder.CreateExtractValue(Result, 1);
16633 };
16634
16635 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
16636 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
16637 : Intrinsic::hexagon_V6_vandvrt;
16638 return Builder.CreateCall(CGM.getIntrinsic(ID),
16639 {Vec, Builder.getInt32(-1)});
16640 };
16641 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
16642 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
16643 : Intrinsic::hexagon_V6_vandqrt;
16644 return Builder.CreateCall(CGM.getIntrinsic(ID),
16645 {Pred, Builder.getInt32(-1)});
16646 };
16647
16648 switch (BuiltinID) {
16649 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
16650 // and the corresponding C/C++ builtins use loads/stores to update
16651 // the predicate.
16652 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
16653 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
16654 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
16655 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
16656 // Get the type from the 0-th argument.
16657 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
16658 Address PredAddr = Builder.CreateBitCast(
16659 EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
16660 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
16661 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
16662 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
16663
16664 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
16665 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
16666 PredAddr.getAlignment());
16667 return Builder.CreateExtractValue(Result, 0);
16668 }
16669
16670 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
16671 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
16672 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
16673 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
16674 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
16675 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
16676 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
16677 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
16678 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
16679 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
16680 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
16681 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
16682 return MakeCircOp(ID, /*IsLoad=*/true);
16683 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
16684 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
16685 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
16686 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
16687 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
16688 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
16689 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
16690 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
16691 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
16692 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
16693 return MakeCircOp(ID, /*IsLoad=*/false);
16694 case Hexagon::BI__builtin_brev_ldub:
16695 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
16696 case Hexagon::BI__builtin_brev_ldb:
16697 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
16698 case Hexagon::BI__builtin_brev_lduh:
16699 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
16700 case Hexagon::BI__builtin_brev_ldh:
16701 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
16702 case Hexagon::BI__builtin_brev_ldw:
16703 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
16704 case Hexagon::BI__builtin_brev_ldd:
16705 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
16706
16707 default: {
16708 if (ID == Intrinsic::not_intrinsic)
16709 return nullptr;
16710
16711 auto IsVectorPredTy = [](llvm::Type *T) {
16712 return T->isVectorTy() &&
16713 cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
16714 };
16715
16716 llvm::Function *IntrFn = CGM.getIntrinsic(ID);
16717 llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
16718 SmallVector<llvm::Value*,4> Ops;
16719 for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
16720 llvm::Type *T = IntrTy->getParamType(i);
16721 const Expr *A = E->getArg(i);
16722 if (IsVectorPredTy(T)) {
16723 // There will be an implicit cast to a boolean vector. Strip it.
16724 if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
16725 if (Cast->getCastKind() == CK_BitCast)
16726 A = Cast->getSubExpr();
16727 }
16728 Ops.push_back(V2Q(EmitScalarExpr(A)));
16729 } else {
16730 Ops.push_back(EmitScalarExpr(A));
16731 }
16732 }
16733
16734 llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
16735 if (IsVectorPredTy(IntrTy->getReturnType()))
16736 Call = Q2V(Call);
16737
16738 return Call;
16739 } // default
16740 } // switch
16741
16742 return nullptr;
16743}