Bug Summary

File: clang/lib/CodeGen/CGBuiltin.cpp
Warning: line 15059, column 9
1st function call argument is an uninitialized value
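This message is produced by the analyzer's core checkers when a value that may never have been written is passed as the first argument of a call. A minimal standalone sketch of the pattern (hypothetical functions use() and demo(), not code from CGBuiltin.cpp):

    void use(int v);

    void demo(bool flag) {
      int x;        // declared, but not written on every path
      if (flag)
        x = 1;      // only the flag == true path writes x
      use(x);       // 1st function call argument is an uninitialized value
    }

On the path where flag is false, x reaches the call without ever being assigned, which is exactly the situation the warning text describes.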

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGBuiltin.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D CLANG_VENDOR="Debian " -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/include -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-10-27-053609-25509-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGObjCRuntime.h"
15#include "CGOpenCLRuntime.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "CodeGenModule.h"
19#include "ConstantEmitter.h"
20#include "PatternInit.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/OSLog.h"
26#include "clang/Basic/TargetBuiltins.h"
27#include "clang/Basic/TargetInfo.h"
28#include "clang/CodeGen/CGFunctionInfo.h"
29#include "llvm/ADT/SmallPtrSet.h"
30#include "llvm/ADT/StringExtras.h"
31#include "llvm/Analysis/ValueTracking.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/InlineAsm.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/IntrinsicsAArch64.h"
36#include "llvm/IR/IntrinsicsAMDGPU.h"
37#include "llvm/IR/IntrinsicsARM.h"
38#include "llvm/IR/IntrinsicsBPF.h"
39#include "llvm/IR/IntrinsicsHexagon.h"
40#include "llvm/IR/IntrinsicsNVPTX.h"
41#include "llvm/IR/IntrinsicsPowerPC.h"
42#include "llvm/IR/IntrinsicsR600.h"
43#include "llvm/IR/IntrinsicsS390.h"
44#include "llvm/IR/IntrinsicsWebAssembly.h"
45#include "llvm/IR/IntrinsicsX86.h"
46#include "llvm/IR/MDBuilder.h"
47#include "llvm/IR/MatrixBuilder.h"
48#include "llvm/Support/ConvertUTF.h"
49#include "llvm/Support/ScopedPrinter.h"
50#include "llvm/Support/X86TargetParser.h"
51#include <sstream>
52
53using namespace clang;
54using namespace CodeGen;
55using namespace llvm;
56
57static
58int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
59 return std::min(High, std::max(Low, Value));
60}
61
62static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
63 Align AlignmentInBytes) {
64 ConstantInt *Byte;
65 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
66 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
67 // Nothing to initialize.
68 return;
69 case LangOptions::TrivialAutoVarInitKind::Zero:
70 Byte = CGF.Builder.getInt8(0x00);
71 break;
72 case LangOptions::TrivialAutoVarInitKind::Pattern: {
73 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
74 Byte = llvm::dyn_cast<llvm::ConstantInt>(
75 initializationPatternFor(CGF.CGM, Int8));
76 break;
77 }
78 }
79 if (CGF.CGM.stopAutoInit())
80 return;
81 CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
82}
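Note on the helper above: Byte has no initializer and is only assigned in the Zero and Pattern cases, so on any path where the analyzer does not see one of the three handled enumerators come back from getTrivialAutoVarInit(), Byte is still uninitialized when it is passed to CreateMemSet. Whether or not this particular helper is behind the report at line 15059, the shape is worth recognizing; a tiny standalone reproduction (hypothetical enum Kind and function pick(), not LLVM code):

    enum class Kind { None, Zero, Pattern };

    int pick(Kind K) {
      int Byte;                         // mirrors 'ConstantInt *Byte' above
      switch (K) {
      case Kind::None:    return 0;
      case Kind::Zero:    Byte = 0x00; break;
      case Kind::Pattern: Byte = 0xAA; break;
      }                                 // no default: nothing assigns Byte if K
                                        // carries an unexpected value
      return Byte;                      // possibly-uninitialized use
    }

The usual remedies are to give the variable a safe initial value or to make the fall-through explicit with a default case (or llvm_unreachable) so that every path reaching the use provably assigns it.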
83
84/// getBuiltinLibFunction - Given a builtin id for a function like
85/// "__builtin_fabsf", return a Function* for "fabsf".
86llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
87 unsigned BuiltinID) {
88 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
89
90 // Get the name, skip over the __builtin_ prefix (if necessary).
91 StringRef Name;
92 GlobalDecl D(FD);
93
94 // If the builtin has been declared explicitly with an assembler label,
95 // use the mangled name. This differs from the plain label on platforms
96 // that prefix labels.
97 if (FD->hasAttr<AsmLabelAttr>())
98 Name = getMangledName(D);
99 else
100 Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
101
102 llvm::FunctionType *Ty =
103 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
104
105 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
106}
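The + 10 in the non-AsmLabel branch works because a library builtin's name always starts with the prefix "__builtin_", which is exactly 10 characters long, so the pointer arithmetic leaves just the plain library name. A standalone illustration of the same arithmetic (hypothetical main(), not the real Builtins table):

    #include <cassert>
    #include <cstring>

    int main() {
      const char *BuiltinName = "__builtin_fabsf";
      assert(std::strlen("__builtin_") == 10);
      const char *LibName = BuiltinName + 10;      // skip the prefix
      assert(std::strcmp(LibName, "fabsf") == 0);  // the name that gets looked up
    }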
107
108/// Emit the conversions required to turn the given value into an
109/// integer of the given size.
110static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
111 QualType T, llvm::IntegerType *IntType) {
112 V = CGF.EmitToMemory(V, T);
113
114 if (V->getType()->isPointerTy())
115 return CGF.Builder.CreatePtrToInt(V, IntType);
116
117 assert(V->getType() == IntType);
118 return V;
119}
120
121static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
122 QualType T, llvm::Type *ResultType) {
123 V = CGF.EmitFromMemory(V, T);
124
125 if (ResultType->isPointerTy())
126 return CGF.Builder.CreateIntToPtr(V, ResultType);
127
128 assert(V->getType() == ResultType);
129 return V;
130}
131
132/// Utility to insert an atomic instruction based on Intrinsic::ID
133/// and the expression node.
134static Value *MakeBinaryAtomicValue(
135 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
136 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
137 QualType T = E->getType();
138 assert(E->getArg(0)->getType()->isPointerType());
139 assert(CGF.getContext().hasSameUnqualifiedType(T,
140 E->getArg(0)->getType()->getPointeeType()));
141 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
142
143 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
144 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
145
146 llvm::IntegerType *IntType =
147 llvm::IntegerType::get(CGF.getLLVMContext(),
148 CGF.getContext().getTypeSize(T));
149 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
150
151 llvm::Value *Args[2];
152 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
153 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
154 llvm::Type *ValueType = Args[1]->getType();
155 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
156
157 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
158 Kind, Args[0], Args[1], Ordering);
159 return EmitFromInt(CGF, Result, T, ValueType);
160}
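The _Interlocked* cases of EmitMSVCBuiltinExpr further down route through this helper, which collapses the whole read-modify-write into a single atomicrmw with the requested ordering and then converts the returned old value back to the source type. The observable semantics, restated with std::atomic (a sketch of the contract, not the code generator's output; hypothetical helper name):

    #include <atomic>

    // fetch_add returns the value held before the addition, matching the
    // result of the atomicrmw this helper emits for an exchange-add builtin.
    long exchangeAddSemantics(std::atomic<long> &Counter, long V) {
      return Counter.fetch_add(V, std::memory_order_seq_cst);
    }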
161
162static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
163 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
164 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
165
166 // Convert the type of the pointer to a pointer to the stored type.
167 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
168 Value *BC = CGF.Builder.CreateBitCast(
169 Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
170 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
171 LV.setNontemporal(true);
172 CGF.EmitStoreOfScalar(Val, LV, false);
173 return nullptr;
174}
175
176static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
177 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
178
179 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
180 LV.setNontemporal(true);
181 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
182}
183
184static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
185 llvm::AtomicRMWInst::BinOp Kind,
186 const CallExpr *E) {
187 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
188}
189
190/// Utility to insert an atomic instruction based on Intrinsic::ID and
191/// the expression node, where the return value is the result of the
192/// operation.
193static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
194 llvm::AtomicRMWInst::BinOp Kind,
195 const CallExpr *E,
196 Instruction::BinaryOps Op,
197 bool Invert = false) {
198 QualType T = E->getType();
199 assert(E->getArg(0)->getType()->isPointerType());
200 assert(CGF.getContext().hasSameUnqualifiedType(T,
201 E->getArg(0)->getType()->getPointeeType()));
202 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
203
204 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
205 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
206
207 llvm::IntegerType *IntType =
208 llvm::IntegerType::get(CGF.getLLVMContext(),
209 CGF.getContext().getTypeSize(T));
210 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
211
212 llvm::Value *Args[2];
213 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
214 llvm::Type *ValueType = Args[1]->getType();
215 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
216 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
217
218 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
219 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
220 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
221 if (Invert)
222 Result =
223 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
224 llvm::ConstantInt::getAllOnesValue(IntType));
225 Result = EmitFromInt(CGF, Result, T, ValueType);
226 return RValue::get(Result);
227}
228
229/// Utility to insert an atomic cmpxchg instruction.
230///
231/// @param CGF The current codegen function.
232/// @param E Builtin call expression to convert to cmpxchg.
233/// arg0 - address to operate on
234/// arg1 - value to compare with
235/// arg2 - new value
236/// @param ReturnBool Specifies whether to return success flag of
237/// cmpxchg result or the old value.
238///
239/// @returns result of cmpxchg, according to ReturnBool
240///
241/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
242/// invoke the function EmitAtomicCmpXchgForMSIntrin.
243static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
244 bool ReturnBool) {
245 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
246 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
247 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
248
249 llvm::IntegerType *IntType = llvm::IntegerType::get(
250 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
251 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
252
253 Value *Args[3];
254 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
255 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
256 llvm::Type *ValueType = Args[1]->getType();
257 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
258 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
259
260 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
261 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
262 llvm::AtomicOrdering::SequentiallyConsistent);
263 if (ReturnBool)
264 // Extract boolean success flag and zext it to int.
265 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
266 CGF.ConvertType(E->getType()));
267 else
268 // Extract old value and emit it using the same type as compare value.
269 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
270 ValueType);
271}
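Both callers of this helper get a single cmpxchg; the ReturnBool flag only decides which element of the {old value, success} pair is handed back. Restated with std::atomic (a semantic sketch under the default seq_cst ordering, not the emitted IR; hypothetical helper names):

    #include <atomic>

    // ReturnBool == true: the success flag (extractvalue ..., 1).
    bool cmpxchgBoolSemantics(std::atomic<int> &Dst, int Expected, int Desired) {
      return Dst.compare_exchange_strong(Expected, Desired,
                                         std::memory_order_seq_cst);
    }

    // ReturnBool == false: the old value (extractvalue ..., 0).
    // compare_exchange_strong writes the observed value back into Expected,
    // so Expected holds the old contents whether or not the swap happened.
    int cmpxchgValSemantics(std::atomic<int> &Dst, int Expected, int Desired) {
      Dst.compare_exchange_strong(Expected, Desired, std::memory_order_seq_cst);
      return Expected;
    }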
272
273/// This function should be invoked to emit atomic cmpxchg for Microsoft's
274/// _InterlockedCompareExchange* intrinsics which have the following signature:
275/// T _InterlockedCompareExchange(T volatile *Destination,
276/// T Exchange,
277/// T Comparand);
278///
279/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
280/// cmpxchg *Destination, Comparand, Exchange.
281/// So we need to swap Comparand and Exchange when invoking
282/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
283/// function MakeAtomicCmpXchgValue since it expects the arguments to be
284/// already swapped.
285
286static
287Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
288 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
289 assert(E->getArg(0)->getType()->isPointerType());
290 assert(CGF.getContext().hasSameUnqualifiedType(
291 E->getType(), E->getArg(0)->getType()->getPointeeType()));
292 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
293 E->getArg(1)->getType()));
294 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
295 E->getArg(2)->getType()));
296
297 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
298 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
299 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
300
301 // For Release ordering, the failure ordering should be Monotonic.
302 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
303 AtomicOrdering::Monotonic :
304 SuccessOrdering;
305
306 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
307 Destination, Comparand, Exchange,
308 SuccessOrdering, FailureOrdering);
309 Result->setVolatile(true);
310 return CGF.Builder.CreateExtractValue(Result, 0);
311}
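The argument shuffle described in the comment block above is easy to get backwards: the Microsoft signature puts the new value second and the comparand third, while cmpxchg wants the comparand before the new value, which is why getArg(2) is emitted as Comparand and getArg(1) as Exchange. A semantic model of the intrinsic (assuming the MSVC-style _InterlockedCompareExchange contract; hypothetical helper name):

    #include <atomic>

    // _InterlockedCompareExchange(Destination, Exchange, Comparand) returns
    // the value Destination held before the call, whether or not it was
    // replaced -- hence the unconditional extractvalue of element 0 above.
    long interlockedCompareExchangeModel(std::atomic<long> &Destination,
                                         long Exchange, long Comparand) {
      long Observed = Comparand;
      Destination.compare_exchange_strong(Observed, Exchange,
                                          std::memory_order_seq_cst);
      return Observed;
    }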
312
313static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
314 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
315 assert(E->getArg(0)->getType()->isPointerType());
316
317 auto *IntTy = CGF.ConvertType(E->getType());
318 auto *Result = CGF.Builder.CreateAtomicRMW(
319 AtomicRMWInst::Add,
320 CGF.EmitScalarExpr(E->getArg(0)),
321 ConstantInt::get(IntTy, 1),
322 Ordering);
323 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
324}
325
326static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
327 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
328 assert(E->getArg(0)->getType()->isPointerType());
329
330 auto *IntTy = CGF.ConvertType(E->getType());
331 auto *Result = CGF.Builder.CreateAtomicRMW(
332 AtomicRMWInst::Sub,
333 CGF.EmitScalarExpr(E->getArg(0)),
334 ConstantInt::get(IntTy, 1),
335 Ordering);
336 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
337}
338
339// Build a plain volatile load.
340static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
341 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
342 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
343 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
344 llvm::Type *ITy =
345 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
346 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
347 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
348 Load->setVolatile(true);
349 return Load;
350}
351
352// Build a plain volatile store.
353static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
354 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
355 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
356 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
357 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
358 llvm::Type *ITy =
359 llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
360 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
361 llvm::StoreInst *Store =
362 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
363 Store->setVolatile(true);
364 return Store;
365}
366
367// Emit a simple mangled intrinsic that has 1 argument and a return type
368// matching the argument type. Depending on mode, this may be a constrained
369// floating-point intrinsic.
370static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
371 const CallExpr *E, unsigned IntrinsicID,
372 unsigned ConstrainedIntrinsicID) {
373 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
374
375 if (CGF.Builder.getIsFPConstrained()) {
376 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
377 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
378 } else {
379 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
380 return CGF.Builder.CreateCall(F, Src0);
381 }
382}
383
384// Emit an intrinsic that has 2 operands of the same type as its result.
385// Depending on mode, this may be a constrained floating-point intrinsic.
386static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
387 const CallExpr *E, unsigned IntrinsicID,
388 unsigned ConstrainedIntrinsicID) {
389 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
390 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
391
392 if (CGF.Builder.getIsFPConstrained()) {
393 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
394 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
395 } else {
396 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
397 return CGF.Builder.CreateCall(F, { Src0, Src1 });
398 }
399}
400
401// Emit an intrinsic that has 3 operands of the same type as its result.
402// Depending on mode, this may be a constrained floating-point intrinsic.
403static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
404 const CallExpr *E, unsigned IntrinsicID,
405 unsigned ConstrainedIntrinsicID) {
406 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
407 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
408 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
409
410 if (CGF.Builder.getIsFPConstrained()) {
411 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
412 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
413 } else {
414 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
415 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
416 }
417}
418
419// Emit an intrinsic where all operands are of the same type as the result.
420// Depending on mode, this may be a constrained floating-point intrinsic.
421static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
422 unsigned IntrinsicID,
423 unsigned ConstrainedIntrinsicID,
424 llvm::Type *Ty,
425 ArrayRef<Value *> Args) {
426 Function *F;
427 if (CGF.Builder.getIsFPConstrained())
428 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
429 else
430 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
431
432 if (CGF.Builder.getIsFPConstrained())
433 return CGF.Builder.CreateConstrainedFPCall(F, Args);
434 else
435 return CGF.Builder.CreateCall(F, Args);
436}
437
438// Emit a simple mangled intrinsic that has 1 argument and a return type
439// matching the argument type.
440static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
441 const CallExpr *E,
442 unsigned IntrinsicID) {
443 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
444
445 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
446 return CGF.Builder.CreateCall(F, Src0);
447}
448
449// Emit an intrinsic that has 2 operands of the same type as its result.
450static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
451 const CallExpr *E,
452 unsigned IntrinsicID) {
453 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
454 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
455
456 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
457 return CGF.Builder.CreateCall(F, { Src0, Src1 });
458}
459
460// Emit an intrinsic that has 3 operands of the same type as its result.
461static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
462 const CallExpr *E,
463 unsigned IntrinsicID) {
464 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
465 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
466 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
467
468 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
469 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
470}
471
472// Emit an intrinsic that has 1 float or double operand, and 1 integer.
473static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
474 const CallExpr *E,
475 unsigned IntrinsicID) {
476 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
477 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
478
479 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
480 return CGF.Builder.CreateCall(F, {Src0, Src1});
481}
482
483// Emit an intrinsic that has overloaded integer result and fp operand.
484static Value *
485emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
486 unsigned IntrinsicID,
487 unsigned ConstrainedIntrinsicID) {
488 llvm::Type *ResultType = CGF.ConvertType(E->getType());
489 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
490
491 if (CGF.Builder.getIsFPConstrained()) {
492 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
493 {ResultType, Src0->getType()});
494 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
495 } else {
496 Function *F =
497 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
498 return CGF.Builder.CreateCall(F, Src0);
499 }
500}
501
502/// EmitFAbs - Emit a call to @llvm.fabs().
503static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
504 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
505 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
506 Call->setDoesNotAccessMemory();
507 return Call;
508}
509
510/// Emit the computation of the sign bit for a floating point value. Returns
511/// the i1 sign bit value.
512static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
513 LLVMContext &C = CGF.CGM.getLLVMContext();
514
515 llvm::Type *Ty = V->getType();
516 int Width = Ty->getPrimitiveSizeInBits();
517 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
518 V = CGF.Builder.CreateBitCast(V, IntTy);
519 if (Ty->isPPC_FP128Ty()) {
520 // We want the sign bit of the higher-order double. The bitcast we just
521 // did works as if the double-double was stored to memory and then
522 // read as an i128. The "store" will put the higher-order double in the
523 // lower address in both little- and big-Endian modes, but the "load"
524 // will treat those bits as a different part of the i128: the low bits in
525 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
526 // we need to shift the high bits down to the low before truncating.
527 Width >>= 1;
528 if (CGF.getTarget().isBigEndian()) {
529 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
530 V = CGF.Builder.CreateLShr(V, ShiftCst);
531 }
532 // We are truncating value in order to extract the higher-order
533 // double, which we will be using to extract the sign from.
534 IntTy = llvm::IntegerType::get(C, Width);
535 V = CGF.Builder.CreateTrunc(V, IntTy);
536 }
537 Value *Zero = llvm::Constant::getNullValue(IntTy);
538 return CGF.Builder.CreateICmpSLT(V, Zero);
539}
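For ordinary float and double values only the straight-line part of this helper runs: the value is bitcast to an integer of the same width and compared against zero, because the IEEE-754 sign bit is the most significant bit, so a negative two's-complement reinterpretation means the sign bit is set. The same trick in portable C++ (a sketch, assuming a 64-bit double; hypothetical helper name):

    #include <cstdint>
    #include <cstring>

    bool signBit(double V) {
      std::int64_t Bits;
      std::memcpy(&Bits, &V, sizeof Bits);   // the CreateBitCast step
      return Bits < 0;                       // the CreateICmpSLT step
    }

This reports true for -0.0 and negative NaNs as well, which is what a sign-bit query, unlike a plain < 0.0 comparison, must deliver.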
540
541static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
542 const CallExpr *E, llvm::Constant *calleeValue) {
543 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
544 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
545}
546
547/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
548/// depending on IntrinsicID.
549///
550/// \arg CGF The current codegen function.
551/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
552/// \arg X The first argument to the llvm.*.with.overflow.*.
553/// \arg Y The second argument to the llvm.*.with.overflow.*.
554/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
555/// \returns The result (i.e. sum/product) returned by the intrinsic.
556static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
557 const llvm::Intrinsic::ID IntrinsicID,
558 llvm::Value *X, llvm::Value *Y,
559 llvm::Value *&Carry) {
560 // Make sure we have integers of the same width.
561 assert(X->getType() == Y->getType() &&
562 "Arguments must be the same type. (Did you forget to make sure both "
563 "arguments have the same integer width?)");
564
565 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
566 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
567 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
568 return CGF.Builder.CreateExtractValue(Tmp, 0);
569}
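The *.with.overflow intrinsics return a {result, overflow} pair, and the two CreateExtractValue calls split that pair, with the overflow bit handed back through Carry. The same contract is visible at the source level through the checked-arithmetic builtins (a compilable sketch, assuming a Clang/GCC-style __builtin_sadd_overflow):

    #include <cstdio>

    int main() {
      int Sum = 0;
      // true when the signed add overflows; Sum gets the wrapped result either
      // way -- the same two values the intrinsic carries as elements 0 and 1.
      bool Overflow = __builtin_sadd_overflow(2000000000, 2000000000, &Sum);
      std::printf("overflow=%d sum=%d\n", Overflow, Sum);
    }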
570
571static Value *emitRangedBuiltin(CodeGenFunction &CGF,
572 unsigned IntrinsicID,
573 int low, int high) {
574 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
575 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
576 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
577 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
578 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
579 return Call;
580}
581
582namespace {
583 struct WidthAndSignedness {
584 unsigned Width;
585 bool Signed;
586 };
587}
588
589static WidthAndSignedness
590getIntegerWidthAndSignedness(const clang::ASTContext &context,
591 const clang::QualType Type) {
592 assert(Type->isIntegerType() && "Given type is not an integer.");
593 unsigned Width = Type->isBooleanType() ? 1
594 : Type->isExtIntType() ? context.getIntWidth(Type)
595 : context.getTypeInfo(Type).Width;
596 bool Signed = Type->isSignedIntegerType();
597 return {Width, Signed};
598}
599
600// Given one or more integer types, this function produces an integer type that
601// encompasses them: any value in one of the given types could be expressed in
602// the encompassing type.
603static struct WidthAndSignedness
604EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
605 assert(Types.size() > 0 && "Empty list of types.");
606
607 // If any of the given types is signed, we must return a signed type.
608 bool Signed = false;
609 for (const auto &Type : Types) {
610 Signed |= Type.Signed;
611 }
612
613 // The encompassing type must have a width greater than or equal to the width
614 // of the specified types. Additionally, if the encompassing type is signed,
615 // its width must be strictly greater than the width of any unsigned types
616 // given.
617 unsigned Width = 0;
618 for (const auto &Type : Types) {
619 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
620 if (Width < MinWidth) {
621 Width = MinWidth;
622 }
623 }
624
625 return {Width, Signed};
626}
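A worked instance of the rule above (restating the code, not a test from the tree): combining an unsigned 32-bit type with a signed 8-bit type forces a signed result, and a signed type needs one extra bit to cover every unsigned 32-bit value, so the encompassing type is a signed 33-bit integer.

    // Types  = { {32, /*Signed=*/false}, {8, /*Signed=*/true} }
    // Signed = true                      (at least one input is signed)
    // Width  = max(32 + 1, 8 + 0) = 33   (+1 only for the unsigned input)
    // Result = {33, /*Signed=*/true}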
627
628Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
629 llvm::Type *DestType = Int8PtrTy;
630 if (ArgValue->getType() != DestType)
631 ArgValue =
632 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
633
634 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
635 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
636}
637
638/// Checks if using the result of __builtin_object_size(p, @p From) in place of
639/// __builtin_object_size(p, @p To) is correct
640static bool areBOSTypesCompatible(int From, int To) {
641 // Note: Our __builtin_object_size implementation currently treats Type=0 and
642 // Type=2 identically. Encoding this implementation detail here may make
643 // improving __builtin_object_size difficult in the future, so it's omitted.
644 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
645}
646
647static llvm::Value *
648getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
649 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
650}
651
652llvm::Value *
653CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
654 llvm::IntegerType *ResType,
655 llvm::Value *EmittedE,
656 bool IsDynamic) {
657 uint64_t ObjectSize;
658 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
659 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
660 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
661}
662
663/// Returns a Value corresponding to the size of the given expression.
664/// This Value may be either of the following:
665/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
666/// it)
667/// - A call to the @llvm.objectsize intrinsic
668///
669/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
670/// and we wouldn't otherwise try to reference a pass_object_size parameter,
671/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
672llvm::Value *
673CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
674 llvm::IntegerType *ResType,
675 llvm::Value *EmittedE, bool IsDynamic) {
676 // We need to reference an argument if the pointer is a parameter with the
677 // pass_object_size attribute.
678 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
679 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
680 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
681 if (Param != nullptr && PS != nullptr &&
682 areBOSTypesCompatible(PS->getType(), Type)) {
683 auto Iter = SizeArguments.find(Param);
684 assert(Iter != SizeArguments.end());
685
686 const ImplicitParamDecl *D = Iter->second;
687 auto DIter = LocalDeclMap.find(D);
688 assert(DIter != LocalDeclMap.end());
689
690 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
691 getContext().getSizeType(), E->getBeginLoc());
692 }
693 }
694
695 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
696 // evaluate E for side-effects. In either case, we shouldn't lower to
697 // @llvm.objectsize.
698 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
699 return getDefaultBuiltinObjectSizeResult(Type, ResType);
700
701 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
702 assert(Ptr->getType()->isPointerTy() &&
703 "Non-pointer passed to __builtin_object_size?");
704
705 Function *F =
706 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
707
708 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
709 Value *Min = Builder.getInt1((Type & 2) != 0);
710 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
711 Value *NullIsUnknown = Builder.getTrue();
712 Value *Dynamic = Builder.getInt1(IsDynamic);
713 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
714}
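Unless the size can be folded up front (see evaluateOrEmitBuiltinObjectSize above) or the pointer carries a pass_object_size parameter, the query becomes a call to llvm.objectsize with four operands: the pointer, the min/max flag derived from bit 1 of Type, the null-is-unknown flag, and the dynamic flag. At the source level the builtin looks like this (a sketch; the value 12 assumes the usual constant fold for a known 16-byte local array):

    #include <cstdio>

    int main() {
      char Buf[16];
      // Type 0 asks for the maximum number of accessible bytes from Buf + 4;
      // for a plain local array the frontend typically folds this to 12
      // without ever emitting llvm.objectsize.
      std::printf("%zu\n", __builtin_object_size(Buf + 4, 0));
    }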
715
716namespace {
717/// A struct to generically describe a bit test intrinsic.
718struct BitTest {
719 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
720 enum InterlockingKind : uint8_t {
721 Unlocked,
722 Sequential,
723 Acquire,
724 Release,
725 NoFence
726 };
727
728 ActionKind Action;
729 InterlockingKind Interlocking;
730 bool Is64Bit;
731
732 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
733};
734} // namespace
735
736BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
737 switch (BuiltinID) {
738 // Main portable variants.
739 case Builtin::BI_bittest:
740 return {TestOnly, Unlocked, false};
741 case Builtin::BI_bittestandcomplement:
742 return {Complement, Unlocked, false};
743 case Builtin::BI_bittestandreset:
744 return {Reset, Unlocked, false};
745 case Builtin::BI_bittestandset:
746 return {Set, Unlocked, false};
747 case Builtin::BI_interlockedbittestandreset:
748 return {Reset, Sequential, false};
749 case Builtin::BI_interlockedbittestandset:
750 return {Set, Sequential, false};
751
752 // X86-specific 64-bit variants.
753 case Builtin::BI_bittest64:
754 return {TestOnly, Unlocked, true};
755 case Builtin::BI_bittestandcomplement64:
756 return {Complement, Unlocked, true};
757 case Builtin::BI_bittestandreset64:
758 return {Reset, Unlocked, true};
759 case Builtin::BI_bittestandset64:
760 return {Set, Unlocked, true};
761 case Builtin::BI_interlockedbittestandreset64:
762 return {Reset, Sequential, true};
763 case Builtin::BI_interlockedbittestandset64:
764 return {Set, Sequential, true};
765
766 // ARM/AArch64-specific ordering variants.
767 case Builtin::BI_interlockedbittestandset_acq:
768 return {Set, Acquire, false};
769 case Builtin::BI_interlockedbittestandset_rel:
770 return {Set, Release, false};
771 case Builtin::BI_interlockedbittestandset_nf:
772 return {Set, NoFence, false};
773 case Builtin::BI_interlockedbittestandreset_acq:
774 return {Reset, Acquire, false};
775 case Builtin::BI_interlockedbittestandreset_rel:
776 return {Reset, Release, false};
777 case Builtin::BI_interlockedbittestandreset_nf:
778 return {Reset, NoFence, false};
779 }
780 llvm_unreachable("expected only bittest intrinsics")::llvm::llvm_unreachable_internal("expected only bittest intrinsics"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 780)
;
781}
782
783static char bitActionToX86BTCode(BitTest::ActionKind A) {
784 switch (A) {
785 case BitTest::TestOnly: return '\0';
786 case BitTest::Complement: return 'c';
787 case BitTest::Reset: return 'r';
788 case BitTest::Set: return 's';
789 }
790 llvm_unreachable("invalid action")::llvm::llvm_unreachable_internal("invalid action", "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 790)
;
791}
792
793static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
794 BitTest BT,
795 const CallExpr *E, Value *BitBase,
796 Value *BitPos) {
797 char Action = bitActionToX86BTCode(BT.Action);
798 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
799
800 // Build the assembly.
801 SmallString<64> Asm;
802 raw_svector_ostream AsmOS(Asm);
803 if (BT.Interlocking != BitTest::Unlocked)
804 AsmOS << "lock ";
805 AsmOS << "bt";
806 if (Action)
807 AsmOS << Action;
808 AsmOS << SizeSuffix << " $2, ($1)";
809
810 // Build the constraints. FIXME: We should support immediates when possible.
811 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
812 std::string MachineClobbers = CGF.getTarget().getClobbers();
813 if (!MachineClobbers.empty()) {
814 Constraints += ',';
815 Constraints += MachineClobbers;
816 }
817 llvm::IntegerType *IntType = llvm::IntegerType::get(
818 CGF.getLLVMContext(),
819 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
820 llvm::Type *IntPtrType = IntType->getPointerTo();
821 llvm::FunctionType *FTy =
822 llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
823
824 llvm::InlineAsm *IA =
825 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
826 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
827}
828
829static llvm::AtomicOrdering
830getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
831 switch (I) {
832 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
833 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
834 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
835 case BitTest::Release: return llvm::AtomicOrdering::Release;
836 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
837 }
838 llvm_unreachable("invalid interlocking")::llvm::llvm_unreachable_internal("invalid interlocking", "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 838)
;
839}
840
841/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
842/// bits and a bit position and read and optionally modify the bit at that
843/// position. The position index can be arbitrarily large, i.e. it can be larger
844/// than 31 or 63, so we need an indexed load in the general case.
845static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
846 unsigned BuiltinID,
847 const CallExpr *E) {
848 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
849 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
850
851 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
852
853 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
854 // indexing operation internally. Use them if possible.
855 if (CGF.getTarget().getTriple().isX86())
856 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
857
858 // Otherwise, use generic code to load one byte and test the bit. Use all but
859 // the bottom three bits as the array index, and the bottom three bits to form
860 // a mask.
861 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
862 Value *ByteIndex = CGF.Builder.CreateAShr(
863 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
864 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
865 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
866 ByteIndex, "bittest.byteaddr"),
867 CharUnits::One());
868 Value *PosLow =
869 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
870 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
871
872 // The updating instructions will need a mask.
873 Value *Mask = nullptr;
874 if (BT.Action != BitTest::TestOnly) {
875 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
876 "bittest.mask");
877 }
878
879 // Check the action and ordering of the interlocked intrinsics.
880 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
881
882 Value *OldByte = nullptr;
883 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
884 // Emit a combined atomicrmw load/store operation for the interlocked
885 // intrinsics.
886 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
887 if (BT.Action == BitTest::Reset) {
888 Mask = CGF.Builder.CreateNot(Mask);
889 RMWOp = llvm::AtomicRMWInst::And;
890 }
891 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
892 Ordering);
893 } else {
894 // Emit a plain load for the non-interlocked intrinsics.
895 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
896 Value *NewByte = nullptr;
897 switch (BT.Action) {
898 case BitTest::TestOnly:
899 // Don't store anything.
900 break;
901 case BitTest::Complement:
902 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
903 break;
904 case BitTest::Reset:
905 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
906 break;
907 case BitTest::Set:
908 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
909 break;
910 }
911 if (NewByte)
912 CGF.Builder.CreateStore(NewByte, ByteAddr);
913 }
914
915 // However we loaded the old byte, either by plain load or atomicrmw, shift
916 // the bit into the low position and mask it to 0 or 1.
917 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
918 return CGF.Builder.CreateAnd(
919 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
920}
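The generic path's index arithmetic is worth spelling out once: the upper bits of the position select a byte, the low three bits select a bit within it. A standalone restatement (hypothetical testBit(), covering only the TestOnly flavor):

    #include <cassert>
    #include <cstdint>

    // Bit = Base[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0
    int testBit(const std::uint8_t *Base, std::int64_t BitPos) {
      std::uint8_t Byte = Base[BitPos >> 3];                  // bittest.byteidx / byteaddr
      std::uint8_t Mask = std::uint8_t(1) << (BitPos & 0x7);  // bittest.mask
      return (Byte & Mask) != 0;                              // lshr + and, as a 0/1 result
    }

    int main() {
      std::uint8_t Bits[8] = {};
      Bits[4] = 0x08;                 // overall bit 35 lives in byte 4, bit 3
      assert(testBit(Bits, 35) == 1);
      assert(testBit(Bits, 34) == 0);
    }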
921
922namespace {
923enum class MSVCSetJmpKind {
924 _setjmpex,
925 _setjmp3,
926 _setjmp
927};
928}
929
930/// MSVC handles setjmp a bit differently on different platforms. On every
931/// architecture except 32-bit x86, the frame address is passed. On x86, extra
932/// parameters can be passed as variadic arguments, but we always pass none.
933static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
934 const CallExpr *E) {
935 llvm::Value *Arg1 = nullptr;
936 llvm::Type *Arg1Ty = nullptr;
937 StringRef Name;
938 bool IsVarArg = false;
939 if (SJKind == MSVCSetJmpKind::_setjmp3) {
940 Name = "_setjmp3";
941 Arg1Ty = CGF.Int32Ty;
942 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
943 IsVarArg = true;
944 } else {
945 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
946 Arg1Ty = CGF.Int8PtrTy;
947 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
948 Arg1 = CGF.Builder.CreateCall(
949 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
950 } else
951 Arg1 = CGF.Builder.CreateCall(
952 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
953 llvm::ConstantInt::get(CGF.Int32Ty, 0));
954 }
955
956 // Mark the call site and declaration with ReturnsTwice.
957 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
958 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
959 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
960 llvm::Attribute::ReturnsTwice);
961 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
962 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
963 ReturnsTwiceAttr, /*Local=*/true);
964
965 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
966 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
967 llvm::Value *Args[] = {Buf, Arg1};
968 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
969 CB->setAttributes(ReturnsTwiceAttr);
970 return RValue::get(CB);
971}
972
973// Many of the MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
974// we handle them here.
975enum class CodeGenFunction::MSVCIntrin {
976 _BitScanForward,
977 _BitScanReverse,
978 _InterlockedAnd,
979 _InterlockedDecrement,
980 _InterlockedExchange,
981 _InterlockedExchangeAdd,
982 _InterlockedExchangeSub,
983 _InterlockedIncrement,
984 _InterlockedOr,
985 _InterlockedXor,
986 _InterlockedExchangeAdd_acq,
987 _InterlockedExchangeAdd_rel,
988 _InterlockedExchangeAdd_nf,
989 _InterlockedExchange_acq,
990 _InterlockedExchange_rel,
991 _InterlockedExchange_nf,
992 _InterlockedCompareExchange_acq,
993 _InterlockedCompareExchange_rel,
994 _InterlockedCompareExchange_nf,
995 _InterlockedOr_acq,
996 _InterlockedOr_rel,
997 _InterlockedOr_nf,
998 _InterlockedXor_acq,
999 _InterlockedXor_rel,
1000 _InterlockedXor_nf,
1001 _InterlockedAnd_acq,
1002 _InterlockedAnd_rel,
1003 _InterlockedAnd_nf,
1004 _InterlockedIncrement_acq,
1005 _InterlockedIncrement_rel,
1006 _InterlockedIncrement_nf,
1007 _InterlockedDecrement_acq,
1008 _InterlockedDecrement_rel,
1009 _InterlockedDecrement_nf,
1010 __fastfail,
1011};
1012
1013Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1014 const CallExpr *E) {
1015 switch (BuiltinID) {
1016 case MSVCIntrin::_BitScanForward:
1017 case MSVCIntrin::_BitScanReverse: {
1018 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1019
1020 llvm::Type *ArgType = ArgValue->getType();
1021 llvm::Type *IndexType =
1022 EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
1023 llvm::Type *ResultType = ConvertType(E->getType());
1024
1025 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1026 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1027 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1028
1029 BasicBlock *Begin = Builder.GetInsertBlock();
1030 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1031 Builder.SetInsertPoint(End);
1032 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1033
1034 Builder.SetInsertPoint(Begin);
1035 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1036 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1037 Builder.CreateCondBr(IsZero, End, NotZero);
1038 Result->addIncoming(ResZero, Begin);
1039
1040 Builder.SetInsertPoint(NotZero);
1041 Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
1042
1043 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1044 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1045 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1046 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1047 Builder.CreateStore(ZeroCount, IndexAddress, false);
1048 } else {
1049 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1050 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1051
1052 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1053 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1054 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1055 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1056 Builder.CreateStore(Index, IndexAddress, false);
1057 }
1058 Builder.CreateBr(End);
1059 Result->addIncoming(ResOne, NotZero);
1060
1061 Builder.SetInsertPoint(End);
1062 return Result;
1063 }
1064 case MSVCIntrin::_InterlockedAnd:
1065 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1066 case MSVCIntrin::_InterlockedExchange:
1067 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1068 case MSVCIntrin::_InterlockedExchangeAdd:
1069 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1070 case MSVCIntrin::_InterlockedExchangeSub:
1071 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1072 case MSVCIntrin::_InterlockedOr:
1073 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1074 case MSVCIntrin::_InterlockedXor:
1075 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1076 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1077 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1078 AtomicOrdering::Acquire);
1079 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1080 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1081 AtomicOrdering::Release);
1082 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1083 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1084 AtomicOrdering::Monotonic);
1085 case MSVCIntrin::_InterlockedExchange_acq:
1086 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1087 AtomicOrdering::Acquire);
1088 case MSVCIntrin::_InterlockedExchange_rel:
1089 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1090 AtomicOrdering::Release);
1091 case MSVCIntrin::_InterlockedExchange_nf:
1092 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1093 AtomicOrdering::Monotonic);
1094 case MSVCIntrin::_InterlockedCompareExchange_acq:
1095 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1096 case MSVCIntrin::_InterlockedCompareExchange_rel:
1097 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1098 case MSVCIntrin::_InterlockedCompareExchange_nf:
1099 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1100 case MSVCIntrin::_InterlockedOr_acq:
1101 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1102 AtomicOrdering::Acquire);
1103 case MSVCIntrin::_InterlockedOr_rel:
1104 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1105 AtomicOrdering::Release);
1106 case MSVCIntrin::_InterlockedOr_nf:
1107 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1108 AtomicOrdering::Monotonic);
1109 case MSVCIntrin::_InterlockedXor_acq:
1110 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1111 AtomicOrdering::Acquire);
1112 case MSVCIntrin::_InterlockedXor_rel:
1113 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1114 AtomicOrdering::Release);
1115 case MSVCIntrin::_InterlockedXor_nf:
1116 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1117 AtomicOrdering::Monotonic);
1118 case MSVCIntrin::_InterlockedAnd_acq:
1119 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1120 AtomicOrdering::Acquire);
1121 case MSVCIntrin::_InterlockedAnd_rel:
1122 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1123 AtomicOrdering::Release);
1124 case MSVCIntrin::_InterlockedAnd_nf:
1125 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1126 AtomicOrdering::Monotonic);
1127 case MSVCIntrin::_InterlockedIncrement_acq:
1128 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1129 case MSVCIntrin::_InterlockedIncrement_rel:
1130 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1131 case MSVCIntrin::_InterlockedIncrement_nf:
1132 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1133 case MSVCIntrin::_InterlockedDecrement_acq:
1134 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1135 case MSVCIntrin::_InterlockedDecrement_rel:
1136 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1137 case MSVCIntrin::_InterlockedDecrement_nf:
1138 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1139
1140 case MSVCIntrin::_InterlockedDecrement:
1141 return EmitAtomicDecrementValue(*this, E);
1142 case MSVCIntrin::_InterlockedIncrement:
1143 return EmitAtomicIncrementValue(*this, E);
1144
1145 case MSVCIntrin::__fastfail: {
1146 // Request immediate process termination from the kernel. The instruction
1147 // sequences to do this are documented on MSDN:
1148 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1149 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1150 StringRef Asm, Constraints;
1151 switch (ISA) {
1152 default:
1153 ErrorUnsupported(E, "__fastfail call for this architecture");
1154 break;
1155 case llvm::Triple::x86:
1156 case llvm::Triple::x86_64:
1157 Asm = "int $$0x29";
1158 Constraints = "{cx}";
1159 break;
1160 case llvm::Triple::thumb:
1161 Asm = "udf #251";
1162 Constraints = "{r0}";
1163 break;
1164 case llvm::Triple::aarch64:
1165 Asm = "brk #0xF003";
1166 Constraints = "{w0}";
1167 }
1168 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1169 llvm::InlineAsm *IA =
1170 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1171 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1172 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1173 llvm::Attribute::NoReturn);
1174 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1175 CI->setAttributes(NoReturnAttr);
1176 return CI;
1177 }
1178 }
1179 llvm_unreachable("Incorrect MSVC intrinsic!");
1180}
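For the __fastfail case, the generated call amounts to a single trapping instruction with the failure code pinned to a fixed register. A hedged sketch of the x86-64 shape, written as ordinary inline asm (the function name is hypothetical, and this is not the code the compiler emits verbatim):

// Illustration only; x86/x86-64 specific.
void fastFailSketch(unsigned Code) {
  asm volatile("int $0x29" : : "c"(Code)); // failure code in ECX, as above
  __builtin_unreachable();                 // the generated call is marked noreturn
}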
1181
1182namespace {
1183// ARC cleanup for __builtin_os_log_format
1184struct CallObjCArcUse final : EHScopeStack::Cleanup {
1185 CallObjCArcUse(llvm::Value *object) : object(object) {}
1186 llvm::Value *object;
1187
1188 void Emit(CodeGenFunction &CGF, Flags flags) override {
1189 CGF.EmitARCIntrinsicUse(object);
1190 }
1191};
1192}
1193
1194Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1195 BuiltinCheckKind Kind) {
1196 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1197 && "Unsupported builtin check kind");
1198
1199 Value *ArgValue = EmitScalarExpr(E);
1200 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1201 return ArgValue;
1202
1203 SanitizerScope SanScope(this);
1204 Value *Cond = Builder.CreateICmpNE(
1205 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1206 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1207 SanitizerHandler::InvalidBuiltin,
1208 {EmitCheckSourceLocation(E->getExprLoc()),
1209 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1210 None);
1211 return ArgValue;
1212}
1213
1214/// Get the argument type for arguments to os_log_helper.
1215static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1216 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1217 return C.getCanonicalType(UnsignedTy);
1218}
1219
1220llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1221 const analyze_os_log::OSLogBufferLayout &Layout,
1222 CharUnits BufferAlignment) {
1223 ASTContext &Ctx = getContext();
1224
1225 llvm::SmallString<64> Name;
1226 {
1227 raw_svector_ostream OS(Name);
1228 OS << "__os_log_helper";
1229 OS << "_" << BufferAlignment.getQuantity();
1230 OS << "_" << int(Layout.getSummaryByte());
1231 OS << "_" << int(Layout.getNumArgsByte());
1232 for (const auto &Item : Layout.Items)
1233 OS << "_" << int(Item.getSizeByte()) << "_"
1234 << int(Item.getDescriptorByte());
1235 }
1236
1237 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1238 return F;
1239
1240 llvm::SmallVector<QualType, 4> ArgTys;
1241 FunctionArgList Args;
1242 Args.push_back(ImplicitParamDecl::Create(
1243 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1244 ImplicitParamDecl::Other));
1245 ArgTys.emplace_back(Ctx.VoidPtrTy);
1246
1247 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1248 char Size = Layout.Items[I].getSizeByte();
1249 if (!Size)
1250 continue;
1251
1252 QualType ArgTy = getOSLogArgType(Ctx, Size);
1253 Args.push_back(ImplicitParamDecl::Create(
1254 Ctx, nullptr, SourceLocation(),
1255 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1256 ImplicitParamDecl::Other));
1257 ArgTys.emplace_back(ArgTy);
1258 }
1259
1260 QualType ReturnTy = Ctx.VoidTy;
1261 QualType FuncionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1262
1263 // The helper function has linkonce_odr linkage to enable the linker to merge
1264 // identical functions. To ensure the merging always happens, 'noinline' is
1265 // attached to the function when compiling with -Oz.
1266 const CGFunctionInfo &FI =
1267 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1268 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1269 llvm::Function *Fn = llvm::Function::Create(
1270 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1271 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1272 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
1273 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1274 Fn->setDoesNotThrow();
1275
1276 // Attach 'noinline' at -Oz.
1277 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1278 Fn->addFnAttr(llvm::Attribute::NoInline);
1279
1280 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1281 IdentifierInfo *II = &Ctx.Idents.get(Name);
1282 FunctionDecl *FD = FunctionDecl::Create(
1283 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
1284 FuncionTy, nullptr, SC_PrivateExtern, false, false);
1285 // Avoid generating debug location info for the function.
1286 FD->setImplicit();
1287
1288 StartFunction(FD, ReturnTy, Fn, FI, Args);
1289
1290 // Create a scope with an artificial location for the body of this function.
1291 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1292
1293 CharUnits Offset;
1294 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1295 BufferAlignment);
1296 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1297 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1298 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1299 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1300
1301 unsigned I = 1;
1302 for (const auto &Item : Layout.Items) {
1303 Builder.CreateStore(
1304 Builder.getInt8(Item.getDescriptorByte()),
1305 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1306 Builder.CreateStore(
1307 Builder.getInt8(Item.getSizeByte()),
1308 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1309
1310 CharUnits Size = Item.size();
1311 if (!Size.getQuantity())
1312 continue;
1313
1314 Address Arg = GetAddrOfLocalVar(Args[I]);
1315 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1316 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1317 "argDataCast");
1318 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1319 Offset += Size;
1320 ++I;
1321 }
1322
1323 FinishFunction();
1324
1325 return Fn;
1326}
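The helper writes a small packed buffer: a summary byte, a count byte, then one descriptor/size pair plus payload per argument. A minimal sketch of that layout, assuming a single 4-byte argument (names are hypothetical):

#include <cstdint>
#include <cstring>

// Illustration only: the byte layout produced by the stores above.
void osLogBufferSketch(char *Buf, uint8_t Summary, uint8_t NumArgs,
                       uint8_t Descriptor, uint32_t Arg0) {
  std::size_t Offset = 0;
  Buf[Offset++] = Summary;                          // summary byte
  Buf[Offset++] = NumArgs;                          // number-of-arguments byte
  Buf[Offset++] = Descriptor;                       // per-item descriptor byte
  Buf[Offset++] = sizeof(Arg0);                     // per-item size byte
  std::memcpy(Buf + Offset, &Arg0, sizeof(Arg0));   // argument payload
}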
1327
1328RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1329 assert(E.getNumArgs() >= 2 &&
1330 "__builtin_os_log_format takes at least 2 arguments");
1331 ASTContext &Ctx = getContext();
1332 analyze_os_log::OSLogBufferLayout Layout;
1333 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1334 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1335 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1336
1337 // Ignore argument 1, the format string. It is not currently used.
1338 CallArgList Args;
1339 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1340
1341 for (const auto &Item : Layout.Items) {
1342 int Size = Item.getSizeByte();
1343 if (!Size)
1344 continue;
1345
1346 llvm::Value *ArgVal;
1347
1348 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1349 uint64_t Val = 0;
1350 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1351 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1352 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1353 } else if (const Expr *TheExpr = Item.getExpr()) {
1354 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1355
1356 // If a temporary object that requires destruction after the full
1357 // expression is passed, push a lifetime-extended cleanup to extend its
1358 // lifetime to the end of the enclosing block scope.
1359 auto LifetimeExtendObject = [&](const Expr *E) {
1360 E = E->IgnoreParenCasts();
1361 // Extend lifetimes of objects returned by function calls and message
1362 // sends.
1363
1364 // FIXME: We should do this in other cases in which temporaries are
1365 // created including arguments of non-ARC types (e.g., C++
1366 // temporaries).
1367 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1368 return true;
1369 return false;
1370 };
1371
1372 if (TheExpr->getType()->isObjCRetainableType() &&
1373 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1374 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1375 "Only scalar can be a ObjC retainable type");
1376 if (!isa<Constant>(ArgVal)) {
1377 CleanupKind Cleanup = getARCCleanupKind();
1378 QualType Ty = TheExpr->getType();
1379 Address Alloca = Address::invalid();
1380 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1381 ArgVal = EmitARCRetain(Ty, ArgVal);
1382 Builder.CreateStore(ArgVal, Addr);
1383 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1384 CodeGenFunction::destroyARCStrongPrecise,
1385 Cleanup & EHCleanup);
1386
1387 // Push a clang.arc.use call to ensure ARC optimizer knows that the
1388 // argument has to be alive.
1389 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1390 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1391 }
1392 }
1393 } else {
1394 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1395 }
1396
1397 unsigned ArgValSize =
1398 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1399 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1400 ArgValSize);
1401 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1402 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1403 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1404 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1405 Args.add(RValue::get(ArgVal), ArgTy);
1406 }
1407
1408 const CGFunctionInfo &FI =
1409 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1410 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1411 Layout, BufAddr.getAlignment());
1412 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1413 return RValue::get(BufAddr.getPointer());
1414}
1415
1416/// Determine if a binop is a checked mixed-sign multiply we can specialize.
1417static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1418 WidthAndSignedness Op1Info,
1419 WidthAndSignedness Op2Info,
1420 WidthAndSignedness ResultInfo) {
1421 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1422 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1423 Op1Info.Signed != Op2Info.Signed;
1424}
1425
1426/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1427/// the generic checked-binop irgen.
1428static RValue
1429EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1430 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1431 WidthAndSignedness Op2Info,
1432 const clang::Expr *ResultArg, QualType ResultQTy,
1433 WidthAndSignedness ResultInfo) {
1434 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1435 Op2Info, ResultInfo) &&
1436 "Not a mixed-sign multipliction we can specialize");
1437
1438 // Emit the signed and unsigned operands.
1439 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1440 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1441 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1442 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1443 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1444 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1445
1446 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1447 if (SignedOpWidth < UnsignedOpWidth)
1448 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1449 if (UnsignedOpWidth < SignedOpWidth)
1450 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1451
1452 llvm::Type *OpTy = Signed->getType();
1453 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1454 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1455 llvm::Type *ResTy = ResultPtr.getElementType();
1456 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1457
1458 // Take the absolute value of the signed operand.
1459 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1460 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1461 llvm::Value *AbsSigned =
1462 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1463
1464 // Perform a checked unsigned multiplication.
1465 llvm::Value *UnsignedOverflow;
1466 llvm::Value *UnsignedResult =
1467 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1468 Unsigned, UnsignedOverflow);
1469
1470 llvm::Value *Overflow, *Result;
1471 if (ResultInfo.Signed) {
1472 // Signed overflow occurs if the result is greater than INT_MAX or less
1473 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1474 auto IntMax =
1475 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1476 llvm::Value *MaxResult =
1477 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1478 CGF.Builder.CreateZExt(IsNegative, OpTy));
1479 llvm::Value *SignedOverflow =
1480 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1481 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1482
1483 // Prepare the signed result (possibly by negating it).
1484 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1485 llvm::Value *SignedResult =
1486 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1487 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1488 } else {
1489 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1490 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1491 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1492 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1493 if (ResultInfo.Width < OpWidth) {
1494 auto IntMax =
1495 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
1496 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
1497 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
1498 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
1499 }
1500
1501 // Negate the product if it would be negative in infinite precision.
1502 Result = CGF.Builder.CreateSelect(
1503 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
1504
1505 Result = CGF.Builder.CreateTrunc(Result, ResTy);
1506 }
1507 assert(Overflow && Result && "Missing overflow or result");
1508
1509 bool isVolatile =
1510 ResultArg->getType()->getPointeeType().isVolatileQualified();
1511 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1512 isVolatile);
1513 return RValue::get(Overflow);
1514}
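Restated for a concrete width, the signed-result path above behaves like the following sketch for int32_t * uint32_t -> int32_t (an illustration under those assumptions, not the emitted IR):

#include <cstdint>

// Illustration only: abs, unsigned checked multiply, then the INT_MAX + IsNegative bound.
bool mixedSignMulOverflowSketch(int32_t S, uint32_t U, int32_t *Res) {
  bool IsNegative = S < 0;
  uint32_t AbsS = IsNegative ? 0u - (uint32_t)S : (uint32_t)S;

  uint32_t Prod;
  bool UnsignedOverflow = __builtin_mul_overflow(AbsS, U, &Prod);

  // |Result| may be at most INT_MAX, or INT_MAX + 1 when it will be negated.
  uint32_t MaxMagnitude = (uint32_t)INT32_MAX + (IsNegative ? 1u : 0u);
  bool Overflow = UnsignedOverflow || Prod > MaxMagnitude;

  *Res = IsNegative ? (int32_t)(0u - Prod) : (int32_t)Prod; // wraps like the IR neg+trunc
  return Overflow;
}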
1515
1516static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
1517 Value *&RecordPtr, CharUnits Align,
1518 llvm::FunctionCallee Func, int Lvl) {
1519 ASTContext &Context = CGF.getContext();
1520 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
1521 std::string Pad = std::string(Lvl * 4, ' ');
1522
1523 Value *GString =
1524 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
1525 Value *Res = CGF.Builder.CreateCall(Func, {GString});
1526
1527 static llvm::DenseMap<QualType, const char *> Types;
1528 if (Types.empty()) {
1529 Types[Context.CharTy] = "%c";
1530 Types[Context.BoolTy] = "%d";
1531 Types[Context.SignedCharTy] = "%hhd";
1532 Types[Context.UnsignedCharTy] = "%hhu";
1533 Types[Context.IntTy] = "%d";
1534 Types[Context.UnsignedIntTy] = "%u";
1535 Types[Context.LongTy] = "%ld";
1536 Types[Context.UnsignedLongTy] = "%lu";
1537 Types[Context.LongLongTy] = "%lld";
1538 Types[Context.UnsignedLongLongTy] = "%llu";
1539 Types[Context.ShortTy] = "%hd";
1540 Types[Context.UnsignedShortTy] = "%hu";
1541 Types[Context.VoidPtrTy] = "%p";
1542 Types[Context.FloatTy] = "%f";
1543 Types[Context.DoubleTy] = "%f";
1544 Types[Context.LongDoubleTy] = "%Lf";
1545 Types[Context.getPointerType(Context.CharTy)] = "%s";
1546 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
1547 }
1548
1549 for (const auto *FD : RD->fields()) {
1550 Value *FieldPtr = RecordPtr;
1551 if (RD->isUnion())
1552 FieldPtr = CGF.Builder.CreatePointerCast(
1553 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
1554 else
1555 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
1556 FD->getFieldIndex());
1557
1558 GString = CGF.Builder.CreateGlobalStringPtr(
1559 llvm::Twine(Pad)
1560 .concat(FD->getType().getAsString())
1561 .concat(llvm::Twine(' '))
1562 .concat(FD->getNameAsString())
1563 .concat(" : ")
1564 .str());
1565 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1566 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1567
1568 QualType CanonicalType =
1569 FD->getType().getUnqualifiedType().getCanonicalType();
1570
1571 // We check whether we are in a recursive type
1572 if (CanonicalType->isRecordType()) {
1573 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
1574 Res = CGF.Builder.CreateAdd(TmpRes, Res);
1575 continue;
1576 }
1577
1578 // We try to determine the best format to print the current field
1579 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
1580 ? Types[Context.VoidPtrTy]
1581 : Types[CanonicalType];
1582
1583 Address FieldAddress = Address(FieldPtr, Align);
1584 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
1585
1586 // FIXME Need to handle bitfield here
1587 GString = CGF.Builder.CreateGlobalStringPtr(
1588 Format.concat(llvm::Twine('\n')).str());
1589 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
1590 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1591 }
1592
1593 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
1594 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
1595 Res = CGF.Builder.CreateAdd(Res, TmpRes);
1596 return Res;
1597}
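A hedged usage sketch of the builtin this helper serves: __builtin_dump_struct walks the record's fields and calls the supplied printf-like function once per line (the struct and the output shown here are illustrative; the exact formatting follows the strings built above).

#include <cstdio>

struct Point { int x; int y; };

// Illustration only: prints roughly
//   struct Point {
//   int x : 1
//   int y : 2
//   }
void dumpStructUsageSketch() {
  Point P{1, 2};
  __builtin_dump_struct(&P, &std::printf);
}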
1598
1599static bool
1600TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
1601 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
1602 if (const auto *Arr = Ctx.getAsArrayType(Ty))
1603 Ty = Ctx.getBaseElementType(Arr);
1604
1605 const auto *Record = Ty->getAsCXXRecordDecl();
1606 if (!Record)
1607 return false;
1608
1609 // We've already checked this type, or are in the process of checking it.
1610 if (!Seen.insert(Record).second)
1611 return false;
1612
1613 assert(Record->hasDefinition() &&
1614 "Incomplete types should already be diagnosed");
1615
1616 if (Record->isDynamicClass())
1617 return true;
1618
1619 for (FieldDecl *F : Record->fields()) {
1620 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
1621 return true;
1622 }
1623 return false;
1624}
1625
1626/// Determine if the specified type requires laundering by checking if it is a
1627/// dynamic class type or contains a subobject which is a dynamic class type.
1628static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
1629 if (!CGM.getCodeGenOpts().StrictVTablePointers)
1630 return false;
1631 llvm::SmallPtrSet<const Decl *, 16> Seen;
1632 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
1633}
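For context, laundering only matters under -fstrict-vtable-pointers, and the types flagged here are dynamic classes (or records containing one) whose storage may be reused. A minimal sketch of the situation __builtin_launder exists for (names hypothetical):

#include <new>

struct Base { virtual ~Base() = default; int v = 0; };

// Illustration only: reuse the storage of *p, then launder the old pointer.
Base *reuseStorageSketch(Base *p) {
  p->~Base();
  new (p) Base();               // placement-new a fresh object into the same storage
  return __builtin_launder(p);  // required before using the old pointer for the new object
}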
1634
1635RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
1636 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
1637 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
1638
1639 // The builtin's shift arg may have a different type than the source arg and
1640 // result, but the LLVM intrinsic uses the same type for all values.
1641 llvm::Type *Ty = Src->getType();
1642 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
1643
1644 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
1645 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
1646 Function *F = CGM.getIntrinsic(IID, Ty);
1647 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
1648}
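Since rotate is emitted as fshl/fshr with a duplicated first operand, the effect for a 32-bit value is the familiar rotate idiom; a small sketch (illustrative only):

#include <cstdint>

// Illustration only: rotl(x, s) == fshl(x, x, s) for 32-bit values.
uint32_t rotlSketch(uint32_t X, uint32_t ShiftAmt) {
  ShiftAmt &= 31;               // the funnel shift takes the amount modulo the bit width
  if (ShiftAmt == 0)
    return X;
  return (X << ShiftAmt) | (X >> (32 - ShiftAmt));
}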
1649
1650RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
1651 const CallExpr *E,
1652 ReturnValueSlot ReturnValue) {
1653 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
1654 // See if we can constant fold this builtin. If so, don't emit it at all.
1655 Expr::EvalResult Result;
1656 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
1657 !Result.hasSideEffects()) {
1658 if (Result.Val.isInt())
1659 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
1660 Result.Val.getInt()));
1661 if (Result.Val.isFloat())
1662 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
1663 Result.Val.getFloat()));
1664 }
1665
1666 // If the builtin has been declared explicitly with an assembler label,
1667 // disable the specialized emitting below. Ideally we should communicate the
1668 // rename in IR, or at least avoid generating the intrinsic calls that are
1669 // likely to get lowered to the renamed library functions.
1670 const unsigned BuiltinIDIfNoAsmLabel =
1671 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
1672
1673 // There are LLVM math intrinsics/instructions corresponding to math library
1674 // functions, except that the LLVM op will never set errno while the math
1675 // library might. Also, math builtins have the same semantics as their math
1676 // library twins. Thus, we can transform math library and builtin calls to
1677 // their LLVM counterparts if the call is marked 'const' (known to never set errno).
1678 if (FD->hasAttr<ConstAttr>()) {
1679 switch (BuiltinIDIfNoAsmLabel) {
1680 case Builtin::BIceil:
1681 case Builtin::BIceilf:
1682 case Builtin::BIceill:
1683 case Builtin::BI__builtin_ceil:
1684 case Builtin::BI__builtin_ceilf:
1685 case Builtin::BI__builtin_ceilf16:
1686 case Builtin::BI__builtin_ceill:
1687 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1688 Intrinsic::ceil,
1689 Intrinsic::experimental_constrained_ceil));
1690
1691 case Builtin::BIcopysign:
1692 case Builtin::BIcopysignf:
1693 case Builtin::BIcopysignl:
1694 case Builtin::BI__builtin_copysign:
1695 case Builtin::BI__builtin_copysignf:
1696 case Builtin::BI__builtin_copysignf16:
1697 case Builtin::BI__builtin_copysignl:
1698 case Builtin::BI__builtin_copysignf128:
1699 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
1700
1701 case Builtin::BIcos:
1702 case Builtin::BIcosf:
1703 case Builtin::BIcosl:
1704 case Builtin::BI__builtin_cos:
1705 case Builtin::BI__builtin_cosf:
1706 case Builtin::BI__builtin_cosf16:
1707 case Builtin::BI__builtin_cosl:
1708 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1709 Intrinsic::cos,
1710 Intrinsic::experimental_constrained_cos));
1711
1712 case Builtin::BIexp:
1713 case Builtin::BIexpf:
1714 case Builtin::BIexpl:
1715 case Builtin::BI__builtin_exp:
1716 case Builtin::BI__builtin_expf:
1717 case Builtin::BI__builtin_expf16:
1718 case Builtin::BI__builtin_expl:
1719 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1720 Intrinsic::exp,
1721 Intrinsic::experimental_constrained_exp));
1722
1723 case Builtin::BIexp2:
1724 case Builtin::BIexp2f:
1725 case Builtin::BIexp2l:
1726 case Builtin::BI__builtin_exp2:
1727 case Builtin::BI__builtin_exp2f:
1728 case Builtin::BI__builtin_exp2f16:
1729 case Builtin::BI__builtin_exp2l:
1730 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1731 Intrinsic::exp2,
1732 Intrinsic::experimental_constrained_exp2));
1733
1734 case Builtin::BIfabs:
1735 case Builtin::BIfabsf:
1736 case Builtin::BIfabsl:
1737 case Builtin::BI__builtin_fabs:
1738 case Builtin::BI__builtin_fabsf:
1739 case Builtin::BI__builtin_fabsf16:
1740 case Builtin::BI__builtin_fabsl:
1741 case Builtin::BI__builtin_fabsf128:
1742 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
1743
1744 case Builtin::BIfloor:
1745 case Builtin::BIfloorf:
1746 case Builtin::BIfloorl:
1747 case Builtin::BI__builtin_floor:
1748 case Builtin::BI__builtin_floorf:
1749 case Builtin::BI__builtin_floorf16:
1750 case Builtin::BI__builtin_floorl:
1751 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1752 Intrinsic::floor,
1753 Intrinsic::experimental_constrained_floor));
1754
1755 case Builtin::BIfma:
1756 case Builtin::BIfmaf:
1757 case Builtin::BIfmal:
1758 case Builtin::BI__builtin_fma:
1759 case Builtin::BI__builtin_fmaf:
1760 case Builtin::BI__builtin_fmaf16:
1761 case Builtin::BI__builtin_fmal:
1762 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
1763 Intrinsic::fma,
1764 Intrinsic::experimental_constrained_fma));
1765
1766 case Builtin::BIfmax:
1767 case Builtin::BIfmaxf:
1768 case Builtin::BIfmaxl:
1769 case Builtin::BI__builtin_fmax:
1770 case Builtin::BI__builtin_fmaxf:
1771 case Builtin::BI__builtin_fmaxf16:
1772 case Builtin::BI__builtin_fmaxl:
1773 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1774 Intrinsic::maxnum,
1775 Intrinsic::experimental_constrained_maxnum));
1776
1777 case Builtin::BIfmin:
1778 case Builtin::BIfminf:
1779 case Builtin::BIfminl:
1780 case Builtin::BI__builtin_fmin:
1781 case Builtin::BI__builtin_fminf:
1782 case Builtin::BI__builtin_fminf16:
1783 case Builtin::BI__builtin_fminl:
1784 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1785 Intrinsic::minnum,
1786 Intrinsic::experimental_constrained_minnum));
1787
1788 // fmod() is a special-case. It maps to the frem instruction rather than an
1789 // LLVM intrinsic.
1790 case Builtin::BIfmod:
1791 case Builtin::BIfmodf:
1792 case Builtin::BIfmodl:
1793 case Builtin::BI__builtin_fmod:
1794 case Builtin::BI__builtin_fmodf:
1795 case Builtin::BI__builtin_fmodf16:
1796 case Builtin::BI__builtin_fmodl: {
1797 Value *Arg1 = EmitScalarExpr(E->getArg(0));
1798 Value *Arg2 = EmitScalarExpr(E->getArg(1));
1799 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
1800 }
1801
1802 case Builtin::BIlog:
1803 case Builtin::BIlogf:
1804 case Builtin::BIlogl:
1805 case Builtin::BI__builtin_log:
1806 case Builtin::BI__builtin_logf:
1807 case Builtin::BI__builtin_logf16:
1808 case Builtin::BI__builtin_logl:
1809 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1810 Intrinsic::log,
1811 Intrinsic::experimental_constrained_log));
1812
1813 case Builtin::BIlog10:
1814 case Builtin::BIlog10f:
1815 case Builtin::BIlog10l:
1816 case Builtin::BI__builtin_log10:
1817 case Builtin::BI__builtin_log10f:
1818 case Builtin::BI__builtin_log10f16:
1819 case Builtin::BI__builtin_log10l:
1820 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1821 Intrinsic::log10,
1822 Intrinsic::experimental_constrained_log10));
1823
1824 case Builtin::BIlog2:
1825 case Builtin::BIlog2f:
1826 case Builtin::BIlog2l:
1827 case Builtin::BI__builtin_log2:
1828 case Builtin::BI__builtin_log2f:
1829 case Builtin::BI__builtin_log2f16:
1830 case Builtin::BI__builtin_log2l:
1831 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1832 Intrinsic::log2,
1833 Intrinsic::experimental_constrained_log2));
1834
1835 case Builtin::BInearbyint:
1836 case Builtin::BInearbyintf:
1837 case Builtin::BInearbyintl:
1838 case Builtin::BI__builtin_nearbyint:
1839 case Builtin::BI__builtin_nearbyintf:
1840 case Builtin::BI__builtin_nearbyintl:
1841 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1842 Intrinsic::nearbyint,
1843 Intrinsic::experimental_constrained_nearbyint));
1844
1845 case Builtin::BIpow:
1846 case Builtin::BIpowf:
1847 case Builtin::BIpowl:
1848 case Builtin::BI__builtin_pow:
1849 case Builtin::BI__builtin_powf:
1850 case Builtin::BI__builtin_powf16:
1851 case Builtin::BI__builtin_powl:
1852 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
1853 Intrinsic::pow,
1854 Intrinsic::experimental_constrained_pow));
1855
1856 case Builtin::BIrint:
1857 case Builtin::BIrintf:
1858 case Builtin::BIrintl:
1859 case Builtin::BI__builtin_rint:
1860 case Builtin::BI__builtin_rintf:
1861 case Builtin::BI__builtin_rintf16:
1862 case Builtin::BI__builtin_rintl:
1863 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1864 Intrinsic::rint,
1865 Intrinsic::experimental_constrained_rint));
1866
1867 case Builtin::BIround:
1868 case Builtin::BIroundf:
1869 case Builtin::BIroundl:
1870 case Builtin::BI__builtin_round:
1871 case Builtin::BI__builtin_roundf:
1872 case Builtin::BI__builtin_roundf16:
1873 case Builtin::BI__builtin_roundl:
1874 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1875 Intrinsic::round,
1876 Intrinsic::experimental_constrained_round));
1877
1878 case Builtin::BIsin:
1879 case Builtin::BIsinf:
1880 case Builtin::BIsinl:
1881 case Builtin::BI__builtin_sin:
1882 case Builtin::BI__builtin_sinf:
1883 case Builtin::BI__builtin_sinf16:
1884 case Builtin::BI__builtin_sinl:
1885 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1886 Intrinsic::sin,
1887 Intrinsic::experimental_constrained_sin));
1888
1889 case Builtin::BIsqrt:
1890 case Builtin::BIsqrtf:
1891 case Builtin::BIsqrtl:
1892 case Builtin::BI__builtin_sqrt:
1893 case Builtin::BI__builtin_sqrtf:
1894 case Builtin::BI__builtin_sqrtf16:
1895 case Builtin::BI__builtin_sqrtl:
1896 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1897 Intrinsic::sqrt,
1898 Intrinsic::experimental_constrained_sqrt));
1899
1900 case Builtin::BItrunc:
1901 case Builtin::BItruncf:
1902 case Builtin::BItruncl:
1903 case Builtin::BI__builtin_trunc:
1904 case Builtin::BI__builtin_truncf:
1905 case Builtin::BI__builtin_truncf16:
1906 case Builtin::BI__builtin_truncl:
1907 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
1908 Intrinsic::trunc,
1909 Intrinsic::experimental_constrained_trunc));
1910
1911 case Builtin::BIlround:
1912 case Builtin::BIlroundf:
1913 case Builtin::BIlroundl:
1914 case Builtin::BI__builtin_lround:
1915 case Builtin::BI__builtin_lroundf:
1916 case Builtin::BI__builtin_lroundl:
1917 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1918 *this, E, Intrinsic::lround,
1919 Intrinsic::experimental_constrained_lround));
1920
1921 case Builtin::BIllround:
1922 case Builtin::BIllroundf:
1923 case Builtin::BIllroundl:
1924 case Builtin::BI__builtin_llround:
1925 case Builtin::BI__builtin_llroundf:
1926 case Builtin::BI__builtin_llroundl:
1927 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1928 *this, E, Intrinsic::llround,
1929 Intrinsic::experimental_constrained_llround));
1930
1931 case Builtin::BIlrint:
1932 case Builtin::BIlrintf:
1933 case Builtin::BIlrintl:
1934 case Builtin::BI__builtin_lrint:
1935 case Builtin::BI__builtin_lrintf:
1936 case Builtin::BI__builtin_lrintl:
1937 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1938 *this, E, Intrinsic::lrint,
1939 Intrinsic::experimental_constrained_lrint));
1940
1941 case Builtin::BIllrint:
1942 case Builtin::BIllrintf:
1943 case Builtin::BIllrintl:
1944 case Builtin::BI__builtin_llrint:
1945 case Builtin::BI__builtin_llrintf:
1946 case Builtin::BI__builtin_llrintl:
1947 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
1948 *this, E, Intrinsic::llrint,
1949 Intrinsic::experimental_constrained_llrint));
1950
1951 default:
1952 break;
1953 }
1954 }
1955
1956 switch (BuiltinIDIfNoAsmLabel) {
1957 default: break;
1958 case Builtin::BI__builtin___CFStringMakeConstantString:
1959 case Builtin::BI__builtin___NSStringMakeConstantString:
1960 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
1961 case Builtin::BI__builtin_stdarg_start:
1962 case Builtin::BI__builtin_va_start:
1963 case Builtin::BI__va_start:
1964 case Builtin::BI__builtin_va_end:
1965 return RValue::get(
1966 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
1967 ? EmitScalarExpr(E->getArg(0))
1968 : EmitVAListRef(E->getArg(0)).getPointer(),
1969 BuiltinID != Builtin::BI__builtin_va_end));
1970 case Builtin::BI__builtin_va_copy: {
1971 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
1972 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
1973
1974 llvm::Type *Type = Int8PtrTy;
1975
1976 DstPtr = Builder.CreateBitCast(DstPtr, Type);
1977 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
1978 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
1979 {DstPtr, SrcPtr}));
1980 }
1981 case Builtin::BI__builtin_abs:
1982 case Builtin::BI__builtin_labs:
1983 case Builtin::BI__builtin_llabs: {
1984 // X < 0 ? -X : X
1985 // The negation has 'nsw' because abs of INT_MIN is undefined.
1986 Value *ArgValue = EmitScalarExpr(E->getArg(0));
1987 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
1988 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
1989 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
1990 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
1991 return RValue::get(Result);
1992 }
1993 case Builtin::BI__builtin_complex: {
1994 Value *Real = EmitScalarExpr(E->getArg(0));
1995 Value *Imag = EmitScalarExpr(E->getArg(1));
1996 return RValue::getComplex({Real, Imag});
1997 }
1998 case Builtin::BI__builtin_conj:
1999 case Builtin::BI__builtin_conjf:
2000 case Builtin::BI__builtin_conjl:
2001 case Builtin::BIconj:
2002 case Builtin::BIconjf:
2003 case Builtin::BIconjl: {
2004 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2005 Value *Real = ComplexVal.first;
2006 Value *Imag = ComplexVal.second;
2007 Imag = Builder.CreateFNeg(Imag, "neg");
2008 return RValue::getComplex(std::make_pair(Real, Imag));
2009 }
2010 case Builtin::BI__builtin_creal:
2011 case Builtin::BI__builtin_crealf:
2012 case Builtin::BI__builtin_creall:
2013 case Builtin::BIcreal:
2014 case Builtin::BIcrealf:
2015 case Builtin::BIcreall: {
2016 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2017 return RValue::get(ComplexVal.first);
2018 }
2019
2020 case Builtin::BI__builtin_dump_struct: {
2021 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2022 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2023 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2024
2025 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2026 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2027
2028 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2029 QualType Arg0Type = Arg0->getType()->getPointeeType();
2030
2031 Value *RecordPtr = EmitScalarExpr(Arg0);
2032 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2033 {LLVMFuncType, Func}, 0);
2034 return RValue::get(Res);
2035 }
2036
2037 case Builtin::BI__builtin_preserve_access_index: {
2038 // Only enable the preserved-access-index region when debuginfo
2039 // is available, since debuginfo is needed to preserve the user-level
2040 // access pattern.
2041 if (!getDebugInfo()) {
2042 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2043 return RValue::get(EmitScalarExpr(E->getArg(0)));
2044 }
2045
2046 // Nested builtin_preserve_access_index() not supported
2047 if (IsInPreservedAIRegion) {
2048 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2049 return RValue::get(EmitScalarExpr(E->getArg(0)));
2050 }
2051
2052 IsInPreservedAIRegion = true;
2053 Value *Res = EmitScalarExpr(E->getArg(0));
2054 IsInPreservedAIRegion = false;
2055 return RValue::get(Res);
2056 }
2057
2058 case Builtin::BI__builtin_cimag:
2059 case Builtin::BI__builtin_cimagf:
2060 case Builtin::BI__builtin_cimagl:
2061 case Builtin::BIcimag:
2062 case Builtin::BIcimagf:
2063 case Builtin::BIcimagl: {
2064 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2065 return RValue::get(ComplexVal.second);
2066 }
2067
2068 case Builtin::BI__builtin_clrsb:
2069 case Builtin::BI__builtin_clrsbl:
2070 case Builtin::BI__builtin_clrsbll: {
2071 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2072 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2073
2074 llvm::Type *ArgType = ArgValue->getType();
2075 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2076
2077 llvm::Type *ResultType = ConvertType(E->getType());
2078 Value *Zero = llvm::Constant::getNullValue(ArgType);
2079 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2080 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2081 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2082 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2083 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2084 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2085 "cast");
2086 return RValue::get(Result);
2087 }
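Restated in plain C++ for 32-bit int, the clrsb lowering above corresponds to this sketch (illustrative only; the zero case is folded into the non-undef ctlz in the IR):

// Illustration only: count of redundant sign bits.
int clrsbSketch(int X) {
  unsigned U = X < 0 ? ~(unsigned)X : (unsigned)X;
  if (U == 0)
    return 31;                  // ctlz(0) == 32 on the IR path, giving 32 - 1
  return __builtin_clz(U) - 1;
}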
2088 case Builtin::BI__builtin_ctzs:
2089 case Builtin::BI__builtin_ctz:
2090 case Builtin::BI__builtin_ctzl:
2091 case Builtin::BI__builtin_ctzll: {
2092 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2093
2094 llvm::Type *ArgType = ArgValue->getType();
2095 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2096
2097 llvm::Type *ResultType = ConvertType(E->getType());
2098 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2099 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2100 if (Result->getType() != ResultType)
2101 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2102 "cast");
2103 return RValue::get(Result);
2104 }
2105 case Builtin::BI__builtin_clzs:
2106 case Builtin::BI__builtin_clz:
2107 case Builtin::BI__builtin_clzl:
2108 case Builtin::BI__builtin_clzll: {
2109 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2110
2111 llvm::Type *ArgType = ArgValue->getType();
2112 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2113
2114 llvm::Type *ResultType = ConvertType(E->getType());
2115 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2116 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2117 if (Result->getType() != ResultType)
2118 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2119 "cast");
2120 return RValue::get(Result);
2121 }
2122 case Builtin::BI__builtin_ffs:
2123 case Builtin::BI__builtin_ffsl:
2124 case Builtin::BI__builtin_ffsll: {
2125 // ffs(x) -> x ? cttz(x) + 1 : 0
2126 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2127
2128 llvm::Type *ArgType = ArgValue->getType();
2129 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2130
2131 llvm::Type *ResultType = ConvertType(E->getType());
2132 Value *Tmp =
2133 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2134 llvm::ConstantInt::get(ArgType, 1));
2135 Value *Zero = llvm::Constant::getNullValue(ArgType);
2136 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2137 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2138 if (Result->getType() != ResultType)
2139 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2140 "cast");
2141 return RValue::get(Result);
2142 }
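The ffs mapping in the comment above, as a plain C++ sketch (illustrative only):

// Illustration only: ffs(x) -> x ? cttz(x) + 1 : 0.
int ffsSketch(int X) {
  if (X == 0)
    return 0;
  return __builtin_ctz((unsigned)X) + 1;  // 1-based index of the lowest set bit
}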
2143 case Builtin::BI__builtin_parity:
2144 case Builtin::BI__builtin_parityl:
2145 case Builtin::BI__builtin_parityll: {
2146 // parity(x) -> ctpop(x) & 1
2147 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2148
2149 llvm::Type *ArgType = ArgValue->getType();
2150 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2151
2152 llvm::Type *ResultType = ConvertType(E->getType());
2153 Value *Tmp = Builder.CreateCall(F, ArgValue);
2154 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2155 if (Result->getType() != ResultType)
2156 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2157 "cast");
2158 return RValue::get(Result);
2159 }
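Likewise for parity, the ctpop-based mapping as a sketch (illustrative only):

// Illustration only: parity(x) -> ctpop(x) & 1.
int paritySketch(unsigned X) {
  return __builtin_popcount(X) & 1;   // 1 if an odd number of bits are set
}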
2160 case Builtin::BI__lzcnt16:
2161 case Builtin::BI__lzcnt:
2162 case Builtin::BI__lzcnt64: {
2163 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2164
2165 llvm::Type *ArgType = ArgValue->getType();
2166 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2167
2168 llvm::Type *ResultType = ConvertType(E->getType());
2169 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2170 if (Result->getType() != ResultType)
2171 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2172 "cast");
2173 return RValue::get(Result);
2174 }
2175 case Builtin::BI__popcnt16:
2176 case Builtin::BI__popcnt:
2177 case Builtin::BI__popcnt64:
2178 case Builtin::BI__builtin_popcount:
2179 case Builtin::BI__builtin_popcountl:
2180 case Builtin::BI__builtin_popcountll: {
2181 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2182
2183 llvm::Type *ArgType = ArgValue->getType();
2184 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2185
2186 llvm::Type *ResultType = ConvertType(E->getType());
2187 Value *Result = Builder.CreateCall(F, ArgValue);
2188 if (Result->getType() != ResultType)
2189 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2190 "cast");
2191 return RValue::get(Result);
2192 }
2193 case Builtin::BI__builtin_unpredictable: {
2194 // Always return the argument of __builtin_unpredictable. LLVM does not
2195 // handle this builtin. Metadata for this builtin should be added directly
2196 // to instructions such as branches or switches that use it.
2197 return RValue::get(EmitScalarExpr(E->getArg(0)));
2198 }
2199 case Builtin::BI__builtin_expect: {
2200 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2201 llvm::Type *ArgType = ArgValue->getType();
2202
2203 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2204 // Don't generate llvm.expect on -O0 as the backend won't use it for
2205 // anything.
2206 // Note, we still IRGen ExpectedValue because it could have side-effects.
2207 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2208 return RValue::get(ArgValue);
2209
2210 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2211 Value *Result =
2212 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2213 return RValue::get(Result);
2214 }
2215 case Builtin::BI__builtin_expect_with_probability: {
2216 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2217 llvm::Type *ArgType = ArgValue->getType();
2218
2219 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2220 llvm::APFloat Probability(0.0);
2221 const Expr *ProbArg = E->getArg(2);
2222 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2223 assert(EvalSucceed && "probability should be able to evaluate as float");
2224 (void)EvalSucceed;
2225 bool LoseInfo = false;
2226 Probability.convert(llvm::APFloat::IEEEdouble(),
2227 llvm::RoundingMode::Dynamic, &LoseInfo);
2228 llvm::Type *Ty = ConvertType(ProbArg->getType());
2229 Constant *Confidence = ConstantFP::get(Ty, Probability);
2230 // Don't generate llvm.expect.with.probability on -O0 as the backend
2231 // won't use it for anything.
2232 // Note, we still IRGen ExpectedValue because it could have side-effects.
2233 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2234 return RValue::get(ArgValue);
2235
2236 Function *FnExpect =
2237 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2238 Value *Result = Builder.CreateCall(
2239 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2240 return RValue::get(Result);
2241 }
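Typical call sites for the two expect builtins handled above look like this sketch (illustrative only; the probability literal is an assumption):

// Illustration only: branch-weight hints; both are dropped at -O0 as noted above.
long expectUsageSketch(long Cond) {
  if (__builtin_expect(Cond, 0))                        // hint: usually false
    return 1;
  if (__builtin_expect_with_probability(Cond, 1, 0.9))  // hint: true ~90% of the time
    return 2;
  return 0;
}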
2242 case Builtin::BI__builtin_assume_aligned: {
2243 const Expr *Ptr = E->getArg(0);
2244 Value *PtrValue = EmitScalarExpr(Ptr);
2245 Value *OffsetValue =
2246 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2247
2248 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2249 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2250 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2251 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2252 llvm::Value::MaximumAlignment);
2253
2254 emitAlignmentAssumption(PtrValue, Ptr,
2255 /*The expr loc is sufficient.*/ SourceLocation(),
2256 AlignmentCI, OffsetValue);
2257 return RValue::get(PtrValue);
2258 }
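A short usage sketch for the alignment assumption emitted above (illustrative only; the 64-byte figure is an assumption):

// Illustration only: the assumption feeds later optimizations such as vectorization.
float sumAlignedSketch(const float *P, int N) {
  const float *Q = (const float *)__builtin_assume_aligned(P, 64);
  float S = 0;
  for (int I = 0; I < N; ++I)
    S += Q[I];
  return S;
}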
2259 case Builtin::BI__assume:
2260 case Builtin::BI__builtin_assume: {
2261 if (E->getArg(0)->HasSideEffects(getContext()))
2262 return RValue::get(nullptr);
2263
2264 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2265 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2266 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2267 }
2268 case Builtin::BI__builtin_bswap16:
2269 case Builtin::BI__builtin_bswap32:
2270 case Builtin::BI__builtin_bswap64: {
2271 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2272 }
2273 case Builtin::BI__builtin_bitreverse8:
2274 case Builtin::BI__builtin_bitreverse16:
2275 case Builtin::BI__builtin_bitreverse32:
2276 case Builtin::BI__builtin_bitreverse64: {
2277 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2278 }
2279 case Builtin::BI__builtin_rotateleft8:
2280 case Builtin::BI__builtin_rotateleft16:
2281 case Builtin::BI__builtin_rotateleft32:
2282 case Builtin::BI__builtin_rotateleft64:
2283 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2284 case Builtin::BI_rotl16:
2285 case Builtin::BI_rotl:
2286 case Builtin::BI_lrotl:
2287 case Builtin::BI_rotl64:
2288 return emitRotate(E, false);
2289
2290 case Builtin::BI__builtin_rotateright8:
2291 case Builtin::BI__builtin_rotateright16:
2292 case Builtin::BI__builtin_rotateright32:
2293 case Builtin::BI__builtin_rotateright64:
2294 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2295 case Builtin::BI_rotr16:
2296 case Builtin::BI_rotr:
2297 case Builtin::BI_lrotr:
2298 case Builtin::BI_rotr64:
2299 return emitRotate(E, true);
2300
2301 case Builtin::BI__builtin_constant_p: {
2302 llvm::Type *ResultType = ConvertType(E->getType());
2303
2304 const Expr *Arg = E->getArg(0);
2305 QualType ArgType = Arg->getType();
2306 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2307 // and likely a mistake.
2308 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2309 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2310 // Per the GCC documentation, only numeric constants are recognized after
2311 // inlining.
2312 return RValue::get(ConstantInt::get(ResultType, 0));
2313
2314 if (Arg->HasSideEffects(getContext()))
2315 // The argument is unevaluated, so be conservative if it might have
2316 // side-effects.
2317 return RValue::get(ConstantInt::get(ResultType, 0));
2318
2319 Value *ArgValue = EmitScalarExpr(Arg);
2320 if (ArgType->isObjCObjectPointerType()) {
2321 // Convert Objective-C objects to id because we cannot distinguish between
2322 // LLVM types for Obj-C classes as they are opaque.
2323 ArgType = CGM.getContext().getObjCIdType();
2324 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2325 }
2326 Function *F =
2327 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2328 Value *Result = Builder.CreateCall(F, ArgValue);
2329 if (Result->getType() != ResultType)
2330 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2331 return RValue::get(Result);
2332 }
2333 case Builtin::BI__builtin_dynamic_object_size:
2334 case Builtin::BI__builtin_object_size: {
2335 unsigned Type =
2336 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2337 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2338
2339 // We pass this builtin onto the optimizer so that it can figure out the
2340 // object size in more complex cases.
2341 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2342 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2343 /*EmittedE=*/nullptr, IsDynamic));
2344 }
2345 case Builtin::BI__builtin_prefetch: {
2346 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
2347 // FIXME: Technically these constants should be of type 'int', yes?
2348 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2349 llvm::ConstantInt::get(Int32Ty, 0);
2350 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2351 llvm::ConstantInt::get(Int32Ty, 3);
2352 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2353 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2354 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2355 }
2356 case Builtin::BI__builtin_readcyclecounter: {
2357 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2358 return RValue::get(Builder.CreateCall(F));
2359 }
2360 case Builtin::BI__builtin___clear_cache: {
2361 Value *Begin = EmitScalarExpr(E->getArg(0));
2362 Value *End = EmitScalarExpr(E->getArg(1));
2363 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
2364 return RValue::get(Builder.CreateCall(F, {Begin, End}));
2365 }
2366 case Builtin::BI__builtin_trap:
2367 return RValue::get(EmitTrapCall(Intrinsic::trap));
2368 case Builtin::BI__debugbreak:
2369 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2370 case Builtin::BI__builtin_unreachable: {
2371 EmitUnreachable(E->getExprLoc());
2372
2373 // We do need to preserve an insertion point.
2374 EmitBlock(createBasicBlock("unreachable.cont"));
2375
2376 return RValue::get(nullptr);
2377 }
2378
2379 case Builtin::BI__builtin_powi:
2380 case Builtin::BI__builtin_powif:
2381 case Builtin::BI__builtin_powil:
2382 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2383 *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
2384
2385 case Builtin::BI__builtin_isgreater:
2386 case Builtin::BI__builtin_isgreaterequal:
2387 case Builtin::BI__builtin_isless:
2388 case Builtin::BI__builtin_islessequal:
2389 case Builtin::BI__builtin_islessgreater:
2390 case Builtin::BI__builtin_isunordered: {
2391 // Ordered comparisons: we know the arguments to these are matching scalar
2392 // floating point values.
2393 Value *LHS = EmitScalarExpr(E->getArg(0));
2394 Value *RHS = EmitScalarExpr(E->getArg(1));
2395
2396 switch (BuiltinID) {
2397 default: llvm_unreachable("Unknown ordered comparison");
2398 case Builtin::BI__builtin_isgreater:
2399 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
2400 break;
2401 case Builtin::BI__builtin_isgreaterequal:
2402 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
2403 break;
2404 case Builtin::BI__builtin_isless:
2405 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
2406 break;
2407 case Builtin::BI__builtin_islessequal:
2408 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
2409 break;
2410 case Builtin::BI__builtin_islessgreater:
2411 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
2412 break;
2413 case Builtin::BI__builtin_isunordered:
2414 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
2415 break;
2416 }
2417 // ZExt bool to int type.
2418 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
2419 }
2420 case Builtin::BI__builtin_isnan: {
2421 Value *V = EmitScalarExpr(E->getArg(0));
2422 V = Builder.CreateFCmpUNO(V, V, "cmp");
2423 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2424 }
2425
2426 case Builtin::BI__builtin_matrix_transpose: {
2427 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2428 Value *MatValue = EmitScalarExpr(E->getArg(0));
2429 MatrixBuilder<CGBuilderTy> MB(Builder);
2430 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
2431 MatrixTy->getNumColumns());
2432 return RValue::get(Result);
2433 }
2434
2435 case Builtin::BI__builtin_matrix_column_major_load: {
2436 MatrixBuilder<CGBuilderTy> MB(Builder);
2437 // Emit everything that isn't dependent on the first parameter type
2438 Value *Stride = EmitScalarExpr(E->getArg(3));
2439 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
2440 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
2441 assert(PtrTy && "arg0 must be of pointer type");
2442 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2443
2444 Address Src = EmitPointerWithAlignment(E->getArg(0));
2445 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
2446 E->getArg(0)->getExprLoc(), FD, 0);
2447 Value *Result = MB.CreateColumnMajorLoad(
2448 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
2449 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
2450 "matrix");
2451 return RValue::get(Result);
2452 }
2453
2454 case Builtin::BI__builtin_matrix_column_major_store: {
2455 MatrixBuilder<CGBuilderTy> MB(Builder);
2456 Value *Matrix = EmitScalarExpr(E->getArg(0));
2457 Address Dst = EmitPointerWithAlignment(E->getArg(1));
2458 Value *Stride = EmitScalarExpr(E->getArg(2));
2459
2460 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2461 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
2462 assert(PtrTy && "arg1 must be of pointer type");
2463 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
2464
2465 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
2466 E->getArg(1)->getExprLoc(), FD, 0);
2467 Value *Result = MB.CreateColumnMajorStore(
2468 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
2469 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
2470 return RValue::get(Result);
2471 }
2472
2473 case Builtin::BIfinite:
2474 case Builtin::BI__finite:
2475 case Builtin::BIfinitef:
2476 case Builtin::BI__finitef:
2477 case Builtin::BIfinitel:
2478 case Builtin::BI__finitel:
2479 case Builtin::BI__builtin_isinf:
2480 case Builtin::BI__builtin_isfinite: {
2481 // isinf(x) --> fabs(x) == infinity
2482 // isfinite(x) --> fabs(x) != infinity
2483 // x != NaN via the ordered compare in either case.
2484 Value *V = EmitScalarExpr(E->getArg(0));
2485 Value *Fabs = EmitFAbs(*this, V);
2486 Constant *Infinity = ConstantFP::getInfinity(V->getType());
2487 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
2488 ? CmpInst::FCMP_OEQ
2489 : CmpInst::FCMP_ONE;
2490 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
2491 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
2492 }
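Editor's illustration of the rewrite described in the comment above: both builtins reduce to an ordered comparison of fabs(x) against +infinity, so a NaN argument makes both isinf(x) and isfinite(x) evaluate to 0.

#include <limits>
int main() {
  double inf = std::numeric_limits<double>::infinity();
  double nan = std::numeric_limits<double>::quiet_NaN();
  return (__builtin_isinf(inf) && !__builtin_isinf(nan) &&
          !__builtin_isfinite(inf) && !__builtin_isfinite(nan)) ? 0 : 1;
}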
2493
2494 case Builtin::BI__builtin_isinf_sign: {
2495 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
2496 Value *Arg = EmitScalarExpr(E->getArg(0));
2497 Value *AbsArg = EmitFAbs(*this, Arg);
2498 Value *IsInf = Builder.CreateFCmpOEQ(
2499 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
2500 Value *IsNeg = EmitSignBit(*this, Arg);
2501
2502 llvm::Type *IntTy = ConvertType(E->getType());
2503 Value *Zero = Constant::getNullValue(IntTy);
2504 Value *One = ConstantInt::get(IntTy, 1);
2505 Value *NegativeOne = ConstantInt::get(IntTy, -1);
2506 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
2507 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
2508 return RValue::get(Result);
2509 }
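Editor's illustration of the selects built above, i.e. fabs(x) == inf ? (signbit(x) ? -1 : 1) : 0:

#include <limits>
int main() {
  double inf = std::numeric_limits<double>::infinity();
  return (__builtin_isinf_sign(inf)  ==  1 &&
          __builtin_isinf_sign(-inf) == -1 &&
          __builtin_isinf_sign(0.0)  ==  0) ? 0 : 1;
}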
2510
2511 case Builtin::BI__builtin_isnormal: {
2512 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
2513 Value *V = EmitScalarExpr(E->getArg(0));
2514 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
2515
2516 Value *Abs = EmitFAbs(*this, V);
2517 Value *IsLessThanInf =
2518 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
2519 APFloat Smallest = APFloat::getSmallestNormalized(
2520 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
2521 Value *IsNormal =
2522 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
2523 "isnormal");
2524 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
2525 V = Builder.CreateAnd(V, IsNormal, "and");
2526 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2527 }
2528
2529 case Builtin::BI__builtin_flt_rounds: {
2530 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
2531
2532 llvm::Type *ResultType = ConvertType(E->getType());
2533 Value *Result = Builder.CreateCall(F);
2534 if (Result->getType() != ResultType)
2535 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2536 "cast");
2537 return RValue::get(Result);
2538 }
2539
2540 case Builtin::BI__builtin_fpclassify: {
2541 Value *V = EmitScalarExpr(E->getArg(5));
2542 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
2543
2544 // Create Result
2545 BasicBlock *Begin = Builder.GetInsertBlock();
2546 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
2547 Builder.SetInsertPoint(End);
2548 PHINode *Result =
2549 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
2550 "fpclassify_result");
2551
2552 // if (V==0) return FP_ZERO
2553 Builder.SetInsertPoint(Begin);
2554 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
2555 "iszero");
2556 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
2557 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
2558 Builder.CreateCondBr(IsZero, End, NotZero);
2559 Result->addIncoming(ZeroLiteral, Begin);
2560
2561 // if (V != V) return FP_NAN
2562 Builder.SetInsertPoint(NotZero);
2563 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
2564 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
2565 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
2566 Builder.CreateCondBr(IsNan, End, NotNan);
2567 Result->addIncoming(NanLiteral, NotZero);
2568
2569 // if (fabs(V) == infinity) return FP_INFINITY
2570 Builder.SetInsertPoint(NotNan);
2571 Value *VAbs = EmitFAbs(*this, V);
2572 Value *IsInf =
2573 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
2574 "isinf");
2575 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
2576 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
2577 Builder.CreateCondBr(IsInf, End, NotInf);
2578 Result->addIncoming(InfLiteral, NotNan);
2579
2580 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
2581 Builder.SetInsertPoint(NotInf);
2582 APFloat Smallest = APFloat::getSmallestNormalized(
2583 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
2584 Value *IsNormal =
2585 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
2586 "isnormal");
2587 Value *NormalResult =
2588 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
2589 EmitScalarExpr(E->getArg(3)));
2590 Builder.CreateBr(End);
2591 Result->addIncoming(NormalResult, NotInf);
2592
2593 // return Result
2594 Builder.SetInsertPoint(End);
2595 return RValue::get(Result);
2596 }
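Editor's illustration of the control flow built above: the five literal arguments are returned, in order, for NaN, infinity, normal, subnormal, and zero inputs, and the value being classified is the sixth argument.

#include <cmath>   // FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO
int classify(double x) {
  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
                              FP_SUBNORMAL, FP_ZERO, x);
}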
2597
2598 case Builtin::BIalloca:
2599 case Builtin::BI_alloca:
2600 case Builtin::BI__builtin_alloca: {
2601 Value *Size = EmitScalarExpr(E->getArg(0));
2602 const TargetInfo &TI = getContext().getTargetInfo();
2603 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
2604 const Align SuitableAlignmentInBytes =
2605 CGM.getContext()
2606 .toCharUnitsFromBits(TI.getSuitableAlign())
2607 .getAsAlign();
2608 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2609 AI->setAlignment(SuitableAlignmentInBytes);
2610 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
2611 return RValue::get(AI);
2612 }
2613
2614 case Builtin::BI__builtin_alloca_with_align: {
2615 Value *Size = EmitScalarExpr(E->getArg(0));
2616 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
2617 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
2618 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
2619 const Align AlignmentInBytes =
2620 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
2621 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
2622 AI->setAlignment(AlignmentInBytes);
2623 initializeAlloca(*this, AI, Size, AlignmentInBytes);
2624 return RValue::get(AI);
2625 }
2626
2627 case Builtin::BIbzero:
2628 case Builtin::BI__builtin_bzero: {
2629 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2630 Value *SizeVal = EmitScalarExpr(E->getArg(1));
2631 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2632 E->getArg(0)->getExprLoc(), FD, 0);
2633 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
2634 return RValue::get(nullptr);
2635 }
2636 case Builtin::BImemcpy:
2637 case Builtin::BI__builtin_memcpy:
2638 case Builtin::BImempcpy:
2639 case Builtin::BI__builtin_mempcpy: {
2640 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2641 Address Src = EmitPointerWithAlignment(E->getArg(1));
2642 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2643 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2644 E->getArg(0)->getExprLoc(), FD, 0);
2645 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2646 E->getArg(1)->getExprLoc(), FD, 1);
2647 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
2648 if (BuiltinID == Builtin::BImempcpy ||
2649 BuiltinID == Builtin::BI__builtin_mempcpy)
2650 return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
2651 else
2652 return RValue::get(Dest.getPointer());
2653 }
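Editor's illustration of the return-value difference handled above: mempcpy yields the end of the destination (dest + n), which makes chained copies convenient, while memcpy returns dest itself.

#include <cstring>
char *copy_two(char *dst, const char *a, std::size_t na,
               const char *b, std::size_t nb) {
  dst = static_cast<char *>(__builtin_mempcpy(dst, a, na)); // returns dst + na
  return static_cast<char *>(__builtin_memcpy(dst, b, nb)); // returns dst
}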
2654
2655 case Builtin::BI__builtin_memcpy_inline: {
2656 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2657 Address Src = EmitPointerWithAlignment(E->getArg(1));
2658 uint64_t Size =
2659 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
2660 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2661 E->getArg(0)->getExprLoc(), FD, 0);
2662 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2663 E->getArg(1)->getExprLoc(), FD, 1);
2664 Builder.CreateMemCpyInline(Dest, Src, Size);
2665 return RValue::get(nullptr);
2666 }
2667
2668 case Builtin::BI__builtin_char_memchr:
2669 BuiltinID = Builtin::BI__builtin_memchr;
2670 break;
2671
2672 case Builtin::BI__builtin___memcpy_chk: {
2673 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
2674 Expr::EvalResult SizeResult, DstSizeResult;
2675 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2676 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2677 break;
2678 llvm::APSInt Size = SizeResult.Val.getInt();
2679 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2680 if (Size.ugt(DstSize))
2681 break;
2682 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2683 Address Src = EmitPointerWithAlignment(E->getArg(1));
2684 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2685 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
2686 return RValue::get(Dest.getPointer());
2687 }
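A hedged sketch of the fold above: when both size arguments are constants and the copy size does not exceed the known destination size, the checked form is emitted as a plain memcpy with no runtime check.

void copy16(void *dst, const void *src) {
  // __builtin_object_size(dst, 0) folds to a constant (SIZE_MAX here, since
  // the object is unknown), so 16 <= dstsize and the call becomes memcpy.
  __builtin___memcpy_chk(dst, src, 16, __builtin_object_size(dst, 0));
}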
2688
2689 case Builtin::BI__builtin_objc_memmove_collectable: {
2690 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
2691 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
2692 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2693 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
2694 DestAddr, SrcAddr, SizeVal);
2695 return RValue::get(DestAddr.getPointer());
2696 }
2697
2698 case Builtin::BI__builtin___memmove_chk: {
2699 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
2700 Expr::EvalResult SizeResult, DstSizeResult;
2701 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2702 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2703 break;
2704 llvm::APSInt Size = SizeResult.Val.getInt();
2705 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2706 if (Size.ugt(DstSize))
2707 break;
2708 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2709 Address Src = EmitPointerWithAlignment(E->getArg(1));
2710 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2711 Builder.CreateMemMove(Dest, Src, SizeVal, false);
2712 return RValue::get(Dest.getPointer());
2713 }
2714
2715 case Builtin::BImemmove:
2716 case Builtin::BI__builtin_memmove: {
2717 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2718 Address Src = EmitPointerWithAlignment(E->getArg(1));
2719 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2720 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2721 E->getArg(0)->getExprLoc(), FD, 0);
2722 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
2723 E->getArg(1)->getExprLoc(), FD, 1);
2724 Builder.CreateMemMove(Dest, Src, SizeVal, false);
2725 return RValue::get(Dest.getPointer());
2726 }
2727 case Builtin::BImemset:
2728 case Builtin::BI__builtin_memset: {
2729 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2730 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2731 Builder.getInt8Ty());
2732 Value *SizeVal = EmitScalarExpr(E->getArg(2));
2733 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
2734 E->getArg(0)->getExprLoc(), FD, 0);
2735 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
2736 return RValue::get(Dest.getPointer());
2737 }
2738 case Builtin::BI__builtin___memset_chk: {
2739 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
2740 Expr::EvalResult SizeResult, DstSizeResult;
2741 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
2742 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
2743 break;
2744 llvm::APSInt Size = SizeResult.Val.getInt();
2745 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
2746 if (Size.ugt(DstSize))
2747 break;
2748 Address Dest = EmitPointerWithAlignment(E->getArg(0));
2749 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
2750 Builder.getInt8Ty());
2751 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
2752 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
2753 return RValue::get(Dest.getPointer());
2754 }
2755 case Builtin::BI__builtin_wmemcmp: {
2756 // The MSVC runtime library does not provide a definition of wmemcmp, so we
2757 // need an inline implementation.
2758 if (!getTarget().getTriple().isOSMSVCRT())
2759 break;
2760
2761 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
2762
2763 Value *Dst = EmitScalarExpr(E->getArg(0));
2764 Value *Src = EmitScalarExpr(E->getArg(1));
2765 Value *Size = EmitScalarExpr(E->getArg(2));
2766
2767 BasicBlock *Entry = Builder.GetInsertBlock();
2768 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
2769 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
2770 BasicBlock *Next = createBasicBlock("wmemcmp.next");
2771 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
2772 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
2773 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
2774
2775 EmitBlock(CmpGT);
2776 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
2777 DstPhi->addIncoming(Dst, Entry);
2778 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
2779 SrcPhi->addIncoming(Src, Entry);
2780 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
2781 SizePhi->addIncoming(Size, Entry);
2782 CharUnits WCharAlign =
2783 getContext().getTypeAlignInChars(getContext().WCharTy);
2784 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
2785 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
2786 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
2787 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
2788
2789 EmitBlock(CmpLT);
2790 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
2791 Builder.CreateCondBr(DstLtSrc, Exit, Next);
2792
2793 EmitBlock(Next);
2794 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
2795 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
2796 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
2797 Value *NextSizeEq0 =
2798 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
2799 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
2800 DstPhi->addIncoming(NextDst, Next);
2801 SrcPhi->addIncoming(NextSrc, Next);
2802 SizePhi->addIncoming(NextSize, Next);
2803
2804 EmitBlock(Exit);
2805 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
2806 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
2807 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
2808 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
2809 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
2810 return RValue::get(Ret);
2811 }
2812 case Builtin::BI__builtin_dwarf_cfa: {
2813 // The offset in bytes from the first argument to the CFA.
2814 //
2815 // Why on earth is this in the frontend? Is there any reason at
2816 // all that the backend can't reasonably determine this while
2817 // lowering llvm.eh.dwarf.cfa()?
2818 //
2819 // TODO: If there's a satisfactory reason, add a target hook for
2820 // this instead of hard-coding 0, which is correct for most targets.
2821 int32_t Offset = 0;
2822
2823 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
2824 return RValue::get(Builder.CreateCall(F,
2825 llvm::ConstantInt::get(Int32Ty, Offset)));
2826 }
2827 case Builtin::BI__builtin_return_address: {
2828 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2829 getContext().UnsignedIntTy);
2830 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
2831 return RValue::get(Builder.CreateCall(F, Depth));
2832 }
2833 case Builtin::BI_ReturnAddress: {
2834 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
2835 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
2836 }
2837 case Builtin::BI__builtin_frame_address: {
2838 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
2839 getContext().UnsignedIntTy);
2840 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
2841 return RValue::get(Builder.CreateCall(F, Depth));
2842 }
2843 case Builtin::BI__builtin_extract_return_addr: {
2844 Value *Address = EmitScalarExpr(E->getArg(0));
2845 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
2846 return RValue::get(Result);
2847 }
2848 case Builtin::BI__builtin_frob_return_addr: {
2849 Value *Address = EmitScalarExpr(E->getArg(0));
2850 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
2851 return RValue::get(Result);
2852 }
2853 case Builtin::BI__builtin_dwarf_sp_column: {
2854 llvm::IntegerType *Ty
2855 = cast<llvm::IntegerType>(ConvertType(E->getType()));
2856 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
2857 if (Column == -1) {
2858 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
2859 return RValue::get(llvm::UndefValue::get(Ty));
2860 }
2861 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
2862 }
2863 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
2864 Value *Address = EmitScalarExpr(E->getArg(0));
2865 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
2866 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
2867 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
2868 }
2869 case Builtin::BI__builtin_eh_return: {
2870 Value *Int = EmitScalarExpr(E->getArg(0));
2871 Value *Ptr = EmitScalarExpr(E->getArg(1));
2872
2873 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
2874 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
2875        "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
2876 Function *F =
2877 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
2878 : Intrinsic::eh_return_i64);
2879 Builder.CreateCall(F, {Int, Ptr});
2880 Builder.CreateUnreachable();
2881
2882 // We do need to preserve an insertion point.
2883 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
2884
2885 return RValue::get(nullptr);
2886 }
2887 case Builtin::BI__builtin_unwind_init: {
2888 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
2889 return RValue::get(Builder.CreateCall(F));
2890 }
2891 case Builtin::BI__builtin_extend_pointer: {
2892 // Extends a pointer to the size of an _Unwind_Word, which is
2893 // uint64_t on all platforms. Generally this gets poked into a
2894 // register and eventually used as an address, so if the
2895 // addressing registers are wider than pointers and the platform
2896 // doesn't implicitly ignore high-order bits when doing
2897 // addressing, we need to make sure we zext / sext based on
2898 // the platform's expectations.
2899 //
2900 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
2901
2902 // Cast the pointer to intptr_t.
2903 Value *Ptr = EmitScalarExpr(E->getArg(0));
2904 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
2905
2906 // If that's 64 bits, we're done.
2907 if (IntPtrTy->getBitWidth() == 64)
2908 return RValue::get(Result);
2909
2910 // Otherwise, ask the codegen data what to do.
2911 if (getTargetHooks().extendPointerWithSExt())
2912 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
2913 else
2914 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
2915 }
2916 case Builtin::BI__builtin_setjmp: {
2917 // Buffer is a void**.
2918 Address Buf = EmitPointerWithAlignment(E->getArg(0));
2919
2920 // Store the frame pointer to the setjmp buffer.
2921 Value *FrameAddr = Builder.CreateCall(
2922 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
2923 ConstantInt::get(Int32Ty, 0));
2924 Builder.CreateStore(FrameAddr, Buf);
2925
2926 // Store the stack pointer to the setjmp buffer.
2927 Value *StackAddr =
2928 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
2929 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
2930 Builder.CreateStore(StackAddr, StackSaveSlot);
2931
2932 // Call LLVM's EH setjmp, which is lightweight.
2933 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
2934 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2935 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
2936 }
2937 case Builtin::BI__builtin_longjmp: {
2938 Value *Buf = EmitScalarExpr(E->getArg(0));
2939 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2940
2941 // Call LLVM's EH longjmp, which is lightweight.
2942 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
2943
2944 // longjmp doesn't return; mark this as unreachable.
2945 Builder.CreateUnreachable();
2946
2947 // We do need to preserve an insertion point.
2948 EmitBlock(createBasicBlock("longjmp.cont"));
2949
2950 return RValue::get(nullptr);
2951 }
2952 case Builtin::BI__builtin_launder: {
2953 const Expr *Arg = E->getArg(0);
2954 QualType ArgTy = Arg->getType()->getPointeeType();
2955 Value *Ptr = EmitScalarExpr(Arg);
2956 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
2957 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
2958
2959 return RValue::get(Ptr);
2960 }
2961 case Builtin::BI__sync_fetch_and_add:
2962 case Builtin::BI__sync_fetch_and_sub:
2963 case Builtin::BI__sync_fetch_and_or:
2964 case Builtin::BI__sync_fetch_and_and:
2965 case Builtin::BI__sync_fetch_and_xor:
2966 case Builtin::BI__sync_fetch_and_nand:
2967 case Builtin::BI__sync_add_and_fetch:
2968 case Builtin::BI__sync_sub_and_fetch:
2969 case Builtin::BI__sync_and_and_fetch:
2970 case Builtin::BI__sync_or_and_fetch:
2971 case Builtin::BI__sync_xor_and_fetch:
2972 case Builtin::BI__sync_nand_and_fetch:
2973 case Builtin::BI__sync_val_compare_and_swap:
2974 case Builtin::BI__sync_bool_compare_and_swap:
2975 case Builtin::BI__sync_lock_test_and_set:
2976 case Builtin::BI__sync_lock_release:
2977 case Builtin::BI__sync_swap:
2978 llvm_unreachable("Shouldn't make it through sema");
2979 case Builtin::BI__sync_fetch_and_add_1:
2980 case Builtin::BI__sync_fetch_and_add_2:
2981 case Builtin::BI__sync_fetch_and_add_4:
2982 case Builtin::BI__sync_fetch_and_add_8:
2983 case Builtin::BI__sync_fetch_and_add_16:
2984 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
2985 case Builtin::BI__sync_fetch_and_sub_1:
2986 case Builtin::BI__sync_fetch_and_sub_2:
2987 case Builtin::BI__sync_fetch_and_sub_4:
2988 case Builtin::BI__sync_fetch_and_sub_8:
2989 case Builtin::BI__sync_fetch_and_sub_16:
2990 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
2991 case Builtin::BI__sync_fetch_and_or_1:
2992 case Builtin::BI__sync_fetch_and_or_2:
2993 case Builtin::BI__sync_fetch_and_or_4:
2994 case Builtin::BI__sync_fetch_and_or_8:
2995 case Builtin::BI__sync_fetch_and_or_16:
2996 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
2997 case Builtin::BI__sync_fetch_and_and_1:
2998 case Builtin::BI__sync_fetch_and_and_2:
2999 case Builtin::BI__sync_fetch_and_and_4:
3000 case Builtin::BI__sync_fetch_and_and_8:
3001 case Builtin::BI__sync_fetch_and_and_16:
3002 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
3003 case Builtin::BI__sync_fetch_and_xor_1:
3004 case Builtin::BI__sync_fetch_and_xor_2:
3005 case Builtin::BI__sync_fetch_and_xor_4:
3006 case Builtin::BI__sync_fetch_and_xor_8:
3007 case Builtin::BI__sync_fetch_and_xor_16:
3008 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
3009 case Builtin::BI__sync_fetch_and_nand_1:
3010 case Builtin::BI__sync_fetch_and_nand_2:
3011 case Builtin::BI__sync_fetch_and_nand_4:
3012 case Builtin::BI__sync_fetch_and_nand_8:
3013 case Builtin::BI__sync_fetch_and_nand_16:
3014 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
3015
3016 // Clang extensions: not overloaded yet.
3017 case Builtin::BI__sync_fetch_and_min:
3018 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3019 case Builtin::BI__sync_fetch_and_max:
3020 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3021 case Builtin::BI__sync_fetch_and_umin:
3022 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3023 case Builtin::BI__sync_fetch_and_umax:
3024 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3025
3026 case Builtin::BI__sync_add_and_fetch_1:
3027 case Builtin::BI__sync_add_and_fetch_2:
3028 case Builtin::BI__sync_add_and_fetch_4:
3029 case Builtin::BI__sync_add_and_fetch_8:
3030 case Builtin::BI__sync_add_and_fetch_16:
3031 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3032 llvm::Instruction::Add);
3033 case Builtin::BI__sync_sub_and_fetch_1:
3034 case Builtin::BI__sync_sub_and_fetch_2:
3035 case Builtin::BI__sync_sub_and_fetch_4:
3036 case Builtin::BI__sync_sub_and_fetch_8:
3037 case Builtin::BI__sync_sub_and_fetch_16:
3038 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3039 llvm::Instruction::Sub);
3040 case Builtin::BI__sync_and_and_fetch_1:
3041 case Builtin::BI__sync_and_and_fetch_2:
3042 case Builtin::BI__sync_and_and_fetch_4:
3043 case Builtin::BI__sync_and_and_fetch_8:
3044 case Builtin::BI__sync_and_and_fetch_16:
3045 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3046 llvm::Instruction::And);
3047 case Builtin::BI__sync_or_and_fetch_1:
3048 case Builtin::BI__sync_or_and_fetch_2:
3049 case Builtin::BI__sync_or_and_fetch_4:
3050 case Builtin::BI__sync_or_and_fetch_8:
3051 case Builtin::BI__sync_or_and_fetch_16:
3052 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3053 llvm::Instruction::Or);
3054 case Builtin::BI__sync_xor_and_fetch_1:
3055 case Builtin::BI__sync_xor_and_fetch_2:
3056 case Builtin::BI__sync_xor_and_fetch_4:
3057 case Builtin::BI__sync_xor_and_fetch_8:
3058 case Builtin::BI__sync_xor_and_fetch_16:
3059 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3060 llvm::Instruction::Xor);
3061 case Builtin::BI__sync_nand_and_fetch_1:
3062 case Builtin::BI__sync_nand_and_fetch_2:
3063 case Builtin::BI__sync_nand_and_fetch_4:
3064 case Builtin::BI__sync_nand_and_fetch_8:
3065 case Builtin::BI__sync_nand_and_fetch_16:
3066 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3067 llvm::Instruction::And, true);
3068
3069 case Builtin::BI__sync_val_compare_and_swap_1:
3070 case Builtin::BI__sync_val_compare_and_swap_2:
3071 case Builtin::BI__sync_val_compare_and_swap_4:
3072 case Builtin::BI__sync_val_compare_and_swap_8:
3073 case Builtin::BI__sync_val_compare_and_swap_16:
3074 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3075
3076 case Builtin::BI__sync_bool_compare_and_swap_1:
3077 case Builtin::BI__sync_bool_compare_and_swap_2:
3078 case Builtin::BI__sync_bool_compare_and_swap_4:
3079 case Builtin::BI__sync_bool_compare_and_swap_8:
3080 case Builtin::BI__sync_bool_compare_and_swap_16:
3081 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3082
3083 case Builtin::BI__sync_swap_1:
3084 case Builtin::BI__sync_swap_2:
3085 case Builtin::BI__sync_swap_4:
3086 case Builtin::BI__sync_swap_8:
3087 case Builtin::BI__sync_swap_16:
3088 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3089
3090 case Builtin::BI__sync_lock_test_and_set_1:
3091 case Builtin::BI__sync_lock_test_and_set_2:
3092 case Builtin::BI__sync_lock_test_and_set_4:
3093 case Builtin::BI__sync_lock_test_and_set_8:
3094 case Builtin::BI__sync_lock_test_and_set_16:
3095 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3096
3097 case Builtin::BI__sync_lock_release_1:
3098 case Builtin::BI__sync_lock_release_2:
3099 case Builtin::BI__sync_lock_release_4:
3100 case Builtin::BI__sync_lock_release_8:
3101 case Builtin::BI__sync_lock_release_16: {
3102 Value *Ptr = EmitScalarExpr(E->getArg(0));
3103 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3104 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3105 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3106 StoreSize.getQuantity() * 8);
3107 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3108 llvm::StoreInst *Store =
3109 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3110 StoreSize);
3111 Store->setAtomic(llvm::AtomicOrdering::Release);
3112 return RValue::get(nullptr);
3113 }
3114
3115 case Builtin::BI__sync_synchronize: {
3116 // We assume this is supposed to correspond to a C++0x-style
3117 // sequentially-consistent fence (i.e. this is only usable for
3118 // synchronization, not device I/O or anything like that). This intrinsic
3119 // is really badly designed in the sense that in theory, there isn't
3120 // any way to safely use it... but in practice, it mostly works
3121 // to use it with non-atomic loads and stores to get acquire/release
3122 // semantics.
3123 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3124 return RValue::get(nullptr);
3125 }
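Editor's illustration of the lowering above: __sync_synchronize() becomes a single sequentially-consistent fence, commonly used as a full barrier between otherwise ordinary loads and stores.

void publish(int *data, volatile int *flag) {
  *data = 42;
  __sync_synchronize(); // fence seq_cst
  *flag = 1;
}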
3126
3127 case Builtin::BI__builtin_nontemporal_load:
3128 return RValue::get(EmitNontemporalLoad(*this, E));
3129 case Builtin::BI__builtin_nontemporal_store:
3130 return RValue::get(EmitNontemporalStore(*this, E));
3131 case Builtin::BI__c11_atomic_is_lock_free:
3132 case Builtin::BI__atomic_is_lock_free: {
3133 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3134 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3135 // _Atomic(T) is always properly-aligned.
3136 const char *LibCallName = "__atomic_is_lock_free";
3137 CallArgList Args;
3138 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3139 getContext().getSizeType());
3140 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3141 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3142 getContext().VoidPtrTy);
3143 else
3144 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3145 getContext().VoidPtrTy);
3146 const CGFunctionInfo &FuncInfo =
3147 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3148 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3149 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3150 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3151 ReturnValueSlot(), Args);
3152 }
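Editor's illustration, assuming the libatomic runtime: when the answer is not a compile-time constant, the code above emits a call to bool __atomic_is_lock_free(size_t size, void *ptr); the __c11 form passes a null pointer because _Atomic(T) is always suitably aligned.

bool lock_free_16(void *p) {
  return __atomic_is_lock_free(16, p); // may become a runtime libcall
}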
3153
3154 case Builtin::BI__atomic_test_and_set: {
3155 // Look at the argument type to determine whether this is a volatile
3156 // operation. The parameter type is always volatile.
3157 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3158 bool Volatile =
3159 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3160
3161 Value *Ptr = EmitScalarExpr(E->getArg(0));
3162 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3163 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3164 Value *NewVal = Builder.getInt8(1);
3165 Value *Order = EmitScalarExpr(E->getArg(1));
3166 if (isa<llvm::ConstantInt>(Order)) {
3167 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3168 AtomicRMWInst *Result = nullptr;
3169 switch (ord) {
3170 case 0: // memory_order_relaxed
3171 default: // invalid order
3172 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3173 llvm::AtomicOrdering::Monotonic);
3174 break;
3175 case 1: // memory_order_consume
3176 case 2: // memory_order_acquire
3177 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3178 llvm::AtomicOrdering::Acquire);
3179 break;
3180 case 3: // memory_order_release
3181 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3182 llvm::AtomicOrdering::Release);
3183 break;
3184 case 4: // memory_order_acq_rel
3185
3186 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3187 llvm::AtomicOrdering::AcquireRelease);
3188 break;
3189 case 5: // memory_order_seq_cst
3190 Result = Builder.CreateAtomicRMW(
3191 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3192 llvm::AtomicOrdering::SequentiallyConsistent);
3193 break;
3194 }
3195 Result->setVolatile(Volatile);
3196 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3197 }
3198
3199 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3200
3201 llvm::BasicBlock *BBs[5] = {
3202 createBasicBlock("monotonic", CurFn),
3203 createBasicBlock("acquire", CurFn),
3204 createBasicBlock("release", CurFn),
3205 createBasicBlock("acqrel", CurFn),
3206 createBasicBlock("seqcst", CurFn)
3207 };
3208 llvm::AtomicOrdering Orders[5] = {
3209 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3210 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3211 llvm::AtomicOrdering::SequentiallyConsistent};
3212
3213 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3214 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3215
3216 Builder.SetInsertPoint(ContBB);
3217 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3218
3219 for (unsigned i = 0; i < 5; ++i) {
3220 Builder.SetInsertPoint(BBs[i]);
3221 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3222 Ptr, NewVal, Orders[i]);
3223 RMW->setVolatile(Volatile);
3224 Result->addIncoming(RMW, BBs[i]);
3225 Builder.CreateBr(ContBB);
3226 }
3227
3228 SI->addCase(Builder.getInt32(0), BBs[0]);
3229 SI->addCase(Builder.getInt32(1), BBs[1]);
3230 SI->addCase(Builder.getInt32(2), BBs[1]);
3231 SI->addCase(Builder.getInt32(3), BBs[2]);
3232 SI->addCase(Builder.getInt32(4), BBs[3]);
3233 SI->addCase(Builder.getInt32(5), BBs[4]);
3234
3235 Builder.SetInsertPoint(ContBB);
3236 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3237 }
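Editor's illustration of the two builtins in this area: __atomic_test_and_set stores the byte 1 with an atomic exchange at the requested ordering and reports whether the flag was already set, while __atomic_clear (handled next) stores 0.

bool try_lock(volatile bool *flag) {
  return !__atomic_test_and_set(flag, __ATOMIC_ACQUIRE);
}
void unlock(volatile bool *flag) {
  __atomic_clear(flag, __ATOMIC_RELEASE);
}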
3238
3239 case Builtin::BI__atomic_clear: {
3240 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3241 bool Volatile =
3242 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3243
3244 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3245 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3246 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3247 Value *NewVal = Builder.getInt8(0);
3248 Value *Order = EmitScalarExpr(E->getArg(1));
3249 if (isa<llvm::ConstantInt>(Order)) {
3250 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3251 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3252 switch (ord) {
3253 case 0: // memory_order_relaxed
3254 default: // invalid order
3255 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3256 break;
3257 case 3: // memory_order_release
3258 Store->setOrdering(llvm::AtomicOrdering::Release);
3259 break;
3260 case 5: // memory_order_seq_cst
3261 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3262 break;
3263 }
3264 return RValue::get(nullptr);
3265 }
3266
3267 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3268
3269 llvm::BasicBlock *BBs[3] = {
3270 createBasicBlock("monotonic", CurFn),
3271 createBasicBlock("release", CurFn),
3272 createBasicBlock("seqcst", CurFn)
3273 };
3274 llvm::AtomicOrdering Orders[3] = {
3275 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3276 llvm::AtomicOrdering::SequentiallyConsistent};
3277
3278 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3279 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3280
3281 for (unsigned i = 0; i < 3; ++i) {
3282 Builder.SetInsertPoint(BBs[i]);
3283 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3284 Store->setOrdering(Orders[i]);
3285 Builder.CreateBr(ContBB);
3286 }
3287
3288 SI->addCase(Builder.getInt32(0), BBs[0]);
3289 SI->addCase(Builder.getInt32(3), BBs[1]);
3290 SI->addCase(Builder.getInt32(5), BBs[2]);
3291
3292 Builder.SetInsertPoint(ContBB);
3293 return RValue::get(nullptr);
3294 }
3295
3296 case Builtin::BI__atomic_thread_fence:
3297 case Builtin::BI__atomic_signal_fence:
3298 case Builtin::BI__c11_atomic_thread_fence:
3299 case Builtin::BI__c11_atomic_signal_fence: {
3300 llvm::SyncScope::ID SSID;
3301 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3302 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3303 SSID = llvm::SyncScope::SingleThread;
3304 else
3305 SSID = llvm::SyncScope::System;
3306 Value *Order = EmitScalarExpr(E->getArg(0));
3307 if (isa<llvm::ConstantInt>(Order)) {
3308 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3309 switch (ord) {
3310 case 0: // memory_order_relaxed
3311 default: // invalid order
3312 break;
3313 case 1: // memory_order_consume
3314 case 2: // memory_order_acquire
3315 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3316 break;
3317 case 3: // memory_order_release
3318 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3319 break;
3320 case 4: // memory_order_acq_rel
3321 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3322 break;
3323 case 5: // memory_order_seq_cst
3324 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3325 break;
3326 }
3327 return RValue::get(nullptr);
3328 }
3329
3330 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3331 AcquireBB = createBasicBlock("acquire", CurFn);
3332 ReleaseBB = createBasicBlock("release", CurFn);
3333 AcqRelBB = createBasicBlock("acqrel", CurFn);
3334 SeqCstBB = createBasicBlock("seqcst", CurFn);
3335 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3336
3337 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3338 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3339
3340 Builder.SetInsertPoint(AcquireBB);
3341 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3342 Builder.CreateBr(ContBB);
3343 SI->addCase(Builder.getInt32(1), AcquireBB);
3344 SI->addCase(Builder.getInt32(2), AcquireBB);
3345
3346 Builder.SetInsertPoint(ReleaseBB);
3347 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3348 Builder.CreateBr(ContBB);
3349 SI->addCase(Builder.getInt32(3), ReleaseBB);
3350
3351 Builder.SetInsertPoint(AcqRelBB);
3352 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3353 Builder.CreateBr(ContBB);
3354 SI->addCase(Builder.getInt32(4), AcqRelBB);
3355
3356 Builder.SetInsertPoint(SeqCstBB);
3357 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3358 Builder.CreateBr(ContBB);
3359 SI->addCase(Builder.getInt32(5), SeqCstBB);
3360
3361 Builder.SetInsertPoint(ContBB);
3362 return RValue::get(nullptr);
3363 }
3364
3365 case Builtin::BI__builtin_signbit:
3366 case Builtin::BI__builtin_signbitf:
3367 case Builtin::BI__builtin_signbitl: {
3368 return RValue::get(
3369 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3370 ConvertType(E->getType())));
3371 }
3372 case Builtin::BI__warn_memset_zero_len:
3373 return RValue::getIgnored();
3374 case Builtin::BI__annotation: {
3375 // Re-encode each wide string to UTF8 and make an MDString.
3376 SmallVector<Metadata *, 1> Strings;
3377 for (const Expr *Arg : E->arguments()) {
3378 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3379 assert(Str->getCharByteWidth() == 2);
3380 StringRef WideBytes = Str->getBytes();
3381 std::string StrUtf8;
3382 if (!convertUTF16ToUTF8String(
3383 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3384 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3385 continue;
3386 }
3387 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3388 }
3389
3390 // Build an MDTuple of MDStrings and emit the intrinsic call.
3391 llvm::Function *F =
3392 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3393 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3394 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3395 return RValue::getIgnored();
3396 }
3397 case Builtin::BI__builtin_annotation: {
3398 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3399 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3400 AnnVal->getType());
3401
3402 // Get the annotation string, go through casts. Sema requires this to be a
3403 // non-wide string literal, potentially casted, so the cast<> is safe.
3404 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3405 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3406 return RValue::get(
3407 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
3408 }
3409 case Builtin::BI__builtin_addcb:
3410 case Builtin::BI__builtin_addcs:
3411 case Builtin::BI__builtin_addc:
3412 case Builtin::BI__builtin_addcl:
3413 case Builtin::BI__builtin_addcll:
3414 case Builtin::BI__builtin_subcb:
3415 case Builtin::BI__builtin_subcs:
3416 case Builtin::BI__builtin_subc:
3417 case Builtin::BI__builtin_subcl:
3418 case Builtin::BI__builtin_subcll: {
3419
3420 // We translate all of these builtins from expressions of the form:
3421 // int x = ..., y = ..., carryin = ..., carryout, result;
3422 // result = __builtin_addc(x, y, carryin, &carryout);
3423 //
3424 // to LLVM IR of the form:
3425 //
3426 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
3427 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
3428 // %carry1 = extractvalue {i32, i1} %tmp1, 1
3429 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
3430 // i32 %carryin)
3431 // %result = extractvalue {i32, i1} %tmp2, 0
3432 // %carry2 = extractvalue {i32, i1} %tmp2, 1
3433 // %tmp3 = or i1 %carry1, %carry2
3434 // %tmp4 = zext i1 %tmp3 to i32
3435 // store i32 %tmp4, i32* %carryout
3436
3437 // Scalarize our inputs.
3438 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3439 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3440 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
3441 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
3442
3443 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
3444 llvm::Intrinsic::ID IntrinsicId;
3445 switch (BuiltinID) {
3446 default: llvm_unreachable("Unknown multiprecision builtin id.");
3447 case Builtin::BI__builtin_addcb:
3448 case Builtin::BI__builtin_addcs:
3449 case Builtin::BI__builtin_addc:
3450 case Builtin::BI__builtin_addcl:
3451 case Builtin::BI__builtin_addcll:
3452 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3453 break;
3454 case Builtin::BI__builtin_subcb:
3455 case Builtin::BI__builtin_subcs:
3456 case Builtin::BI__builtin_subc:
3457 case Builtin::BI__builtin_subcl:
3458 case Builtin::BI__builtin_subcll:
3459 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3460 break;
3461 }
3462
3463 // Construct our resulting LLVM IR expression.
3464 llvm::Value *Carry1;
3465 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
3466 X, Y, Carry1);
3467 llvm::Value *Carry2;
3468 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
3469 Sum1, Carryin, Carry2);
3470 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
3471 X->getType());
3472 Builder.CreateStore(CarryOut, CarryOutPtr);
3473 return RValue::get(Sum2);
3474 }
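Editor's illustration of the translation sketched in the comment above: a two-limb add-with-carry chain whose observable behaviour matches the pair of uadd.with.overflow calls with OR'ed carries.

void add128(unsigned long long a_lo, unsigned long long a_hi,
            unsigned long long b_lo, unsigned long long b_hi,
            unsigned long long *r_lo, unsigned long long *r_hi) {
  unsigned long long carry_lo, carry_hi;
  *r_lo = __builtin_addcll(a_lo, b_lo, 0, &carry_lo);
  *r_hi = __builtin_addcll(a_hi, b_hi, carry_lo, &carry_hi);
  (void)carry_hi; // carry out of the most significant limb, ignored here
}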
3475
3476 case Builtin::BI__builtin_add_overflow:
3477 case Builtin::BI__builtin_sub_overflow:
3478 case Builtin::BI__builtin_mul_overflow: {
3479 const clang::Expr *LeftArg = E->getArg(0);
3480 const clang::Expr *RightArg = E->getArg(1);
3481 const clang::Expr *ResultArg = E->getArg(2);
3482
3483 clang::QualType ResultQTy =
3484 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
3485
3486 WidthAndSignedness LeftInfo =
3487 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
3488 WidthAndSignedness RightInfo =
3489 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
3490 WidthAndSignedness ResultInfo =
3491 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
3492
3493 // Handle mixed-sign multiplication as a special case, because adding
3494 // runtime or backend support for our generic irgen would be too expensive.
3495 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
3496 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
3497 RightInfo, ResultArg, ResultQTy,
3498 ResultInfo);
3499
3500 WidthAndSignedness EncompassingInfo =
3501 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
3502
3503 llvm::Type *EncompassingLLVMTy =
3504 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
3505
3506 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
3507
3508 llvm::Intrinsic::ID IntrinsicId;
3509 switch (BuiltinID) {
3510 default:
3511 llvm_unreachable("Unknown overflow builtin id.");
3512 case Builtin::BI__builtin_add_overflow:
3513 IntrinsicId = EncompassingInfo.Signed
3514 ? llvm::Intrinsic::sadd_with_overflow
3515 : llvm::Intrinsic::uadd_with_overflow;
3516 break;
3517 case Builtin::BI__builtin_sub_overflow:
3518 IntrinsicId = EncompassingInfo.Signed
3519 ? llvm::Intrinsic::ssub_with_overflow
3520 : llvm::Intrinsic::usub_with_overflow;
3521 break;
3522 case Builtin::BI__builtin_mul_overflow:
3523 IntrinsicId = EncompassingInfo.Signed
3524 ? llvm::Intrinsic::smul_with_overflow
3525 : llvm::Intrinsic::umul_with_overflow;
3526 break;
3527 }
3528
3529 llvm::Value *Left = EmitScalarExpr(LeftArg);
3530 llvm::Value *Right = EmitScalarExpr(RightArg);
3531 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
3532
3533 // Extend each operand to the encompassing type.
3534 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
3535 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
3536
3537 // Perform the operation on the extended values.
3538 llvm::Value *Overflow, *Result;
3539 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
3540
3541 if (EncompassingInfo.Width > ResultInfo.Width) {
3542 // The encompassing type is wider than the result type, so we need to
3543 // truncate it.
3544 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
3545
3546 // To see if the truncation caused an overflow, we will extend
3547 // the result and then compare it to the original result.
3548 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
3549 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
3550 llvm::Value *TruncationOverflow =
3551 Builder.CreateICmpNE(Result, ResultTruncExt);
3552
3553 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
3554 Result = ResultTrunc;
3555 }
3556
3557 // Finally, store the result using the pointer.
3558 bool isVolatile =
3559 ResultArg->getType()->getPointeeType().isVolatileQualified();
3560 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
3561
3562 return RValue::get(Overflow);
3563 }
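Editor's illustration of the generic path above: the operands and the result pointer may have different integer types, the arithmetic is done in an encompassing type, and overflow is reported either from the operation itself or from truncating back to the result type.

#include <climits>
bool demo() {
  int r;
  bool ovf1 = __builtin_add_overflow(INT_MAX, 1, &r);  // true: wraps past INT_MAX
  unsigned char c;
  bool ovf2 = __builtin_add_overflow(200u, 100u, &c);  // true: 300 > UCHAR_MAX
  return ovf1 && ovf2;
}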
3564
3565 case Builtin::BI__builtin_uadd_overflow:
3566 case Builtin::BI__builtin_uaddl_overflow:
3567 case Builtin::BI__builtin_uaddll_overflow:
3568 case Builtin::BI__builtin_usub_overflow:
3569 case Builtin::BI__builtin_usubl_overflow:
3570 case Builtin::BI__builtin_usubll_overflow:
3571 case Builtin::BI__builtin_umul_overflow:
3572 case Builtin::BI__builtin_umull_overflow:
3573 case Builtin::BI__builtin_umulll_overflow:
3574 case Builtin::BI__builtin_sadd_overflow:
3575 case Builtin::BI__builtin_saddl_overflow:
3576 case Builtin::BI__builtin_saddll_overflow:
3577 case Builtin::BI__builtin_ssub_overflow:
3578 case Builtin::BI__builtin_ssubl_overflow:
3579 case Builtin::BI__builtin_ssubll_overflow:
3580 case Builtin::BI__builtin_smul_overflow:
3581 case Builtin::BI__builtin_smull_overflow:
3582 case Builtin::BI__builtin_smulll_overflow: {
3583
3584 // We translate all of these builtins directly to the relevant llvm IR node.
3585
3586 // Scalarize our inputs.
3587 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3588 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3589 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
3590
3591 // Decide which of the overflow intrinsics we are lowering to:
3592 llvm::Intrinsic::ID IntrinsicId;
3593 switch (BuiltinID) {
3594 default: llvm_unreachable("Unknown overflow builtin id.");
3595 case Builtin::BI__builtin_uadd_overflow:
3596 case Builtin::BI__builtin_uaddl_overflow:
3597 case Builtin::BI__builtin_uaddll_overflow:
3598 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3599 break;
3600 case Builtin::BI__builtin_usub_overflow:
3601 case Builtin::BI__builtin_usubl_overflow:
3602 case Builtin::BI__builtin_usubll_overflow:
3603 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3604 break;
3605 case Builtin::BI__builtin_umul_overflow:
3606 case Builtin::BI__builtin_umull_overflow:
3607 case Builtin::BI__builtin_umulll_overflow:
3608 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
3609 break;
3610 case Builtin::BI__builtin_sadd_overflow:
3611 case Builtin::BI__builtin_saddl_overflow:
3612 case Builtin::BI__builtin_saddll_overflow:
3613 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
3614 break;
3615 case Builtin::BI__builtin_ssub_overflow:
3616 case Builtin::BI__builtin_ssubl_overflow:
3617 case Builtin::BI__builtin_ssubll_overflow:
3618 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
3619 break;
3620 case Builtin::BI__builtin_smul_overflow:
3621 case Builtin::BI__builtin_smull_overflow:
3622 case Builtin::BI__builtin_smulll_overflow:
3623 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
3624 break;
3625 }
3626
3627
3628 llvm::Value *Carry;
3629 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
3630 Builder.CreateStore(Sum, SumOutPtr);
3631
3632 return RValue::get(Carry);
3633 }
3634 case Builtin::BI__builtin_addressof:
3635 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
3636 case Builtin::BI__builtin_operator_new:
3637 return EmitBuiltinNewDeleteCall(
3638 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
3639 case Builtin::BI__builtin_operator_delete:
3640 return EmitBuiltinNewDeleteCall(
3641 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
3642
3643 case Builtin::BI__builtin_is_aligned:
3644 return EmitBuiltinIsAligned(E);
3645 case Builtin::BI__builtin_align_up:
3646 return EmitBuiltinAlignTo(E, true);
3647 case Builtin::BI__builtin_align_down:
3648 return EmitBuiltinAlignTo(E, false);
3649
3650 case Builtin::BI__noop:
3651 // __noop always evaluates to an integer literal zero.
3652 return RValue::get(ConstantInt::get(IntTy, 0));
3653 case Builtin::BI__builtin_call_with_static_chain: {
3654 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
3655 const Expr *Chain = E->getArg(1);
3656 return EmitCall(Call->getCallee()->getType(),
3657 EmitCallee(Call->getCallee()), Call, ReturnValue,
3658 EmitScalarExpr(Chain));
3659 }
3660 case Builtin::BI_InterlockedExchange8:
3661 case Builtin::BI_InterlockedExchange16:
3662 case Builtin::BI_InterlockedExchange:
3663 case Builtin::BI_InterlockedExchangePointer:
3664 return RValue::get(
3665 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
3666 case Builtin::BI_InterlockedCompareExchangePointer:
3667 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
3668 llvm::Type *RTy;
3669 llvm::IntegerType *IntType =
3670 IntegerType::get(getLLVMContext(),
3671 getContext().getTypeSize(E->getType()));
3672 llvm::Type *IntPtrType = IntType->getPointerTo();
3673
3674 llvm::Value *Destination =
3675 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
3676
3677 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
3678 RTy = Exchange->getType();
3679 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
3680
3681 llvm::Value *Comparand =
3682 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
3683
3684 auto Ordering =
3685 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
3686 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
3687
3688 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
3689 Ordering, Ordering);
3690 Result->setVolatile(true);
3691
3692 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
3693 0),
3694 RTy));
3695 }
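
A sketch of the semantics the pointer compare-exchange case above implements (names are illustrative); the _nf ("no fence") variant differs only in using monotonic instead of sequentially consistent ordering:

    void *Prev = _InterlockedCompareExchangePointer(&Slot, NewPtr, Expected);
    // Emitted as a volatile cmpxchg on a pointer-sized integer; the original value
    // of Slot is extracted from the result pair, converted back to a pointer, and returned.
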
3696 case Builtin::BI_InterlockedCompareExchange8:
3697 case Builtin::BI_InterlockedCompareExchange16:
3698 case Builtin::BI_InterlockedCompareExchange:
3699 case Builtin::BI_InterlockedCompareExchange64:
3700 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
3701 case Builtin::BI_InterlockedIncrement16:
3702 case Builtin::BI_InterlockedIncrement:
3703 return RValue::get(
3704 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
3705 case Builtin::BI_InterlockedDecrement16:
3706 case Builtin::BI_InterlockedDecrement:
3707 return RValue::get(
3708 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
3709 case Builtin::BI_InterlockedAnd8:
3710 case Builtin::BI_InterlockedAnd16:
3711 case Builtin::BI_InterlockedAnd:
3712 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
3713 case Builtin::BI_InterlockedExchangeAdd8:
3714 case Builtin::BI_InterlockedExchangeAdd16:
3715 case Builtin::BI_InterlockedExchangeAdd:
3716 return RValue::get(
3717 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
3718 case Builtin::BI_InterlockedExchangeSub8:
3719 case Builtin::BI_InterlockedExchangeSub16:
3720 case Builtin::BI_InterlockedExchangeSub:
3721 return RValue::get(
3722 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
3723 case Builtin::BI_InterlockedOr8:
3724 case Builtin::BI_InterlockedOr16:
3725 case Builtin::BI_InterlockedOr:
3726 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
3727 case Builtin::BI_InterlockedXor8:
3728 case Builtin::BI_InterlockedXor16:
3729 case Builtin::BI_InterlockedXor:
3730 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
3731
3732 case Builtin::BI_bittest64:
3733 case Builtin::BI_bittest:
3734 case Builtin::BI_bittestandcomplement64:
3735 case Builtin::BI_bittestandcomplement:
3736 case Builtin::BI_bittestandreset64:
3737 case Builtin::BI_bittestandreset:
3738 case Builtin::BI_bittestandset64:
3739 case Builtin::BI_bittestandset:
3740 case Builtin::BI_interlockedbittestandreset:
3741 case Builtin::BI_interlockedbittestandreset64:
3742 case Builtin::BI_interlockedbittestandset64:
3743 case Builtin::BI_interlockedbittestandset:
3744 case Builtin::BI_interlockedbittestandset_acq:
3745 case Builtin::BI_interlockedbittestandset_rel:
3746 case Builtin::BI_interlockedbittestandset_nf:
3747 case Builtin::BI_interlockedbittestandreset_acq:
3748 case Builtin::BI_interlockedbittestandreset_rel:
3749 case Builtin::BI_interlockedbittestandreset_nf:
3750 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
3751
3752 // These builtins exist to emit regular volatile loads and stores not
3753 // affected by the -fms-volatile setting.
3754 case Builtin::BI__iso_volatile_load8:
3755 case Builtin::BI__iso_volatile_load16:
3756 case Builtin::BI__iso_volatile_load32:
3757 case Builtin::BI__iso_volatile_load64:
3758 return RValue::get(EmitISOVolatileLoad(*this, E));
3759 case Builtin::BI__iso_volatile_store8:
3760 case Builtin::BI__iso_volatile_store16:
3761 case Builtin::BI__iso_volatile_store32:
3762 case Builtin::BI__iso_volatile_store64:
3763 return RValue::get(EmitISOVolatileStore(*this, E));
3764
3765 case Builtin::BI__exception_code:
3766 case Builtin::BI_exception_code:
3767 return RValue::get(EmitSEHExceptionCode());
3768 case Builtin::BI__exception_info:
3769 case Builtin::BI_exception_info:
3770 return RValue::get(EmitSEHExceptionInfo());
3771 case Builtin::BI__abnormal_termination:
3772 case Builtin::BI_abnormal_termination:
3773 return RValue::get(EmitSEHAbnormalTermination());
3774 case Builtin::BI_setjmpex:
3775 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
3776 E->getArg(0)->getType()->isPointerType())
3777 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3778 break;
3779 case Builtin::BI_setjmp:
3780 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
3781 E->getArg(0)->getType()->isPointerType()) {
3782 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
3783 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
3784 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
3785 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3786 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
3787 }
3788 break;
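
For orientation, a sketch of the setjmp mapping chosen above on MSVCRT targets (illustrative usage):

    jmp_buf Buf;
    if (_setjmp(Buf) == 0) { /* normal path */ }
    // x86 -> call to _setjmp3, AArch64 -> call to _setjmpex, other MSVCRT targets -> _setjmp;
    // _setjmpex itself always lowers to a call to _setjmpex.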
3789
3790 case Builtin::BI__GetExceptionInfo: {
3791 if (llvm::GlobalVariable *GV =
3792 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
3793 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
3794 break;
3795 }
3796
3797 case Builtin::BI__fastfail:
3798 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
3799
3800 case Builtin::BI__builtin_coro_size: {
3801 auto & Context = getContext();
3802 auto SizeTy = Context.getSizeType();
3803 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3804 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
3805 return RValue::get(Builder.CreateCall(F));
3806 }
3807
3808 case Builtin::BI__builtin_coro_id:
3809 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
3810 case Builtin::BI__builtin_coro_promise:
3811 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
3812 case Builtin::BI__builtin_coro_resume:
3813 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
3814 case Builtin::BI__builtin_coro_frame:
3815 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
3816 case Builtin::BI__builtin_coro_noop:
3817 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
3818 case Builtin::BI__builtin_coro_free:
3819 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
3820 case Builtin::BI__builtin_coro_destroy:
3821 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
3822 case Builtin::BI__builtin_coro_done:
3823 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
3824 case Builtin::BI__builtin_coro_alloc:
3825 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
3826 case Builtin::BI__builtin_coro_begin:
3827 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
3828 case Builtin::BI__builtin_coro_end:
3829 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
3830 case Builtin::BI__builtin_coro_suspend:
3831 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
3832 case Builtin::BI__builtin_coro_param:
3833 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
3834
3835 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
3836 case Builtin::BIread_pipe:
3837 case Builtin::BIwrite_pipe: {
3838 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3839 *Arg1 = EmitScalarExpr(E->getArg(1));
3840 CGOpenCLRuntime OpenCLRT(CGM);
3841 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3842 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3843
3844 // Type of the generic packet parameter.
3845 unsigned GenericAS =
3846 getContext().getTargetAddressSpace(LangAS::opencl_generic);
3847 llvm::Type *I8PTy = llvm::PointerType::get(
3848 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
3849
3850 // Testing which overloaded version we should generate the call for.
3851 if (2U == E->getNumArgs()) {
3852 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
3853 : "__write_pipe_2";
3854 // Creating a generic function type to be able to call with any builtin or
3855 // user defined type.
3856 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
3857 llvm::FunctionType *FTy = llvm::FunctionType::get(
3858 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3859 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
3860 return RValue::get(
3861 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3862 {Arg0, BCast, PacketSize, PacketAlign}));
3863 } else {
3864       assert(4 == E->getNumArgs() &&
3865              "Illegal number of parameters to pipe function");
3866 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
3867 : "__write_pipe_4";
3868
3869 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
3870 Int32Ty, Int32Ty};
3871 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
3872 *Arg3 = EmitScalarExpr(E->getArg(3));
3873 llvm::FunctionType *FTy = llvm::FunctionType::get(
3874 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3875 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
3876 // We know the third argument is an integer type, but we may need to cast
3877 // it to i32.
3878 if (Arg2->getType() != Int32Ty)
3879 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
3880 return RValue::get(Builder.CreateCall(
3881 CGM.CreateRuntimeFunction(FTy, Name),
3882 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
3883 }
3884 }
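
The two read_pipe/write_pipe overloads handled above map onto OpenCL runtime entry points roughly as follows (a sketch; argument names are illustrative):

    // read_pipe(p, &v)            -> __read_pipe_2(p, (generic i8*)&v, size, align)
    // read_pipe(p, rid, idx, &v)  -> __read_pipe_4(p, rid, (i32)idx, (generic i8*)&v, size, align)
    // write_pipe follows the same two shapes via __write_pipe_2 / __write_pipe_4.
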
3885   // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
3886 // functions
3887 case Builtin::BIreserve_read_pipe:
3888 case Builtin::BIreserve_write_pipe:
3889 case Builtin::BIwork_group_reserve_read_pipe:
3890 case Builtin::BIwork_group_reserve_write_pipe:
3891 case Builtin::BIsub_group_reserve_read_pipe:
3892 case Builtin::BIsub_group_reserve_write_pipe: {
3893 // Composing the mangled name for the function.
3894 const char *Name;
3895 if (BuiltinID == Builtin::BIreserve_read_pipe)
3896 Name = "__reserve_read_pipe";
3897 else if (BuiltinID == Builtin::BIreserve_write_pipe)
3898 Name = "__reserve_write_pipe";
3899 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
3900 Name = "__work_group_reserve_read_pipe";
3901 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
3902 Name = "__work_group_reserve_write_pipe";
3903 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
3904 Name = "__sub_group_reserve_read_pipe";
3905 else
3906 Name = "__sub_group_reserve_write_pipe";
3907
3908 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3909 *Arg1 = EmitScalarExpr(E->getArg(1));
3910 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
3911 CGOpenCLRuntime OpenCLRT(CGM);
3912 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3913 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3914
3915 // Building the generic function prototype.
3916 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
3917 llvm::FunctionType *FTy = llvm::FunctionType::get(
3918 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3919 // We know the second argument is an integer type, but we may need to cast
3920 // it to i32.
3921 if (Arg1->getType() != Int32Ty)
3922 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
3923 return RValue::get(
3924 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3925 {Arg0, Arg1, PacketSize, PacketAlign}));
3926 }
3927 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
3928 // functions
3929 case Builtin::BIcommit_read_pipe:
3930 case Builtin::BIcommit_write_pipe:
3931 case Builtin::BIwork_group_commit_read_pipe:
3932 case Builtin::BIwork_group_commit_write_pipe:
3933 case Builtin::BIsub_group_commit_read_pipe:
3934 case Builtin::BIsub_group_commit_write_pipe: {
3935 const char *Name;
3936 if (BuiltinID == Builtin::BIcommit_read_pipe)
3937 Name = "__commit_read_pipe";
3938 else if (BuiltinID == Builtin::BIcommit_write_pipe)
3939 Name = "__commit_write_pipe";
3940 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
3941 Name = "__work_group_commit_read_pipe";
3942 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
3943 Name = "__work_group_commit_write_pipe";
3944 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
3945 Name = "__sub_group_commit_read_pipe";
3946 else
3947 Name = "__sub_group_commit_write_pipe";
3948
3949 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3950 *Arg1 = EmitScalarExpr(E->getArg(1));
3951 CGOpenCLRuntime OpenCLRT(CGM);
3952 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3953 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3954
3955 // Building the generic function prototype.
3956 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
3957 llvm::FunctionType *FTy =
3958 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
3959 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3960
3961 return RValue::get(
3962 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3963 {Arg0, Arg1, PacketSize, PacketAlign}));
3964 }
3965 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
3966 case Builtin::BIget_pipe_num_packets:
3967 case Builtin::BIget_pipe_max_packets: {
3968 const char *BaseName;
3969 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
3970 if (BuiltinID == Builtin::BIget_pipe_num_packets)
3971 BaseName = "__get_pipe_num_packets";
3972 else
3973 BaseName = "__get_pipe_max_packets";
3974 std::string Name = std::string(BaseName) +
3975 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
3976
3977 // Building the generic function prototype.
3978 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3979 CGOpenCLRuntime OpenCLRT(CGM);
3980 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3981 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3982 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
3983 llvm::FunctionType *FTy = llvm::FunctionType::get(
3984 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3985
3986 return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3987 {Arg0, PacketSize, PacketAlign}));
3988 }
3989
3990 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
3991 case Builtin::BIto_global:
3992 case Builtin::BIto_local:
3993 case Builtin::BIto_private: {
3994 auto Arg0 = EmitScalarExpr(E->getArg(0));
3995 auto NewArgT = llvm::PointerType::get(Int8Ty,
3996 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
3997 auto NewRetT = llvm::PointerType::get(Int8Ty,
3998 CGM.getContext().getTargetAddressSpace(
3999 E->getType()->getPointeeType().getAddressSpace()));
4000 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
4001 llvm::Value *NewArg;
4002 if (Arg0->getType()->getPointerAddressSpace() !=
4003 NewArgT->getPointerAddressSpace())
4004 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
4005 else
4006 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
4007 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
4008 auto NewCall =
4009 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
4010 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
4011 ConvertType(E->getType())));
4012 }
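
A sketch of the lowering performed by the address-space-qualifier case above (illustrative OpenCL usage; p is a generic pointer):

    // __global int *g = to_global(p);
    //   -> cast p to an i8* in the generic address space (addrspacecast if needed),
    //      call __to_global on it, and cast the result back to the expected type.
    // to_local and to_private are handled identically via __to_local / __to_private.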
4013
4014 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
4015 // It contains four different overload formats specified in Table 6.13.17.1.
4016 case Builtin::BIenqueue_kernel: {
4017 StringRef Name; // Generated function call name
4018 unsigned NumArgs = E->getNumArgs();
4019
4020 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4021 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4022 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4023
4024 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4025 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4026 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4027 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4028 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4029
4030 if (NumArgs == 4) {
4031 // The most basic form of the call with parameters:
4032 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4033 Name = "__enqueue_kernel_basic";
4034 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4035 GenericVoidPtrTy};
4036 llvm::FunctionType *FTy = llvm::FunctionType::get(
4037 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4038
4039 auto Info =
4040 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4041 llvm::Value *Kernel =
4042 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4043 llvm::Value *Block =
4044 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4045
4046 AttrBuilder B;
4047 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4048 llvm::AttributeList ByValAttrSet =
4049 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4050
4051 auto RTCall =
4052 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4053 {Queue, Flags, Range, Kernel, Block});
4054 RTCall->setAttributes(ByValAttrSet);
4055 return RValue::get(RTCall);
4056 }
4057     assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4058
4059 // Create a temporary array to hold the sizes of local pointer arguments
4060 // for the block. \p First is the position of the first size argument.
4061 auto CreateArrayForSizeVar = [=](unsigned First)
4062 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4063 llvm::APInt ArraySize(32, NumArgs - First);
4064 QualType SizeArrayTy = getContext().getConstantArrayType(
4065 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4066 /*IndexTypeQuals=*/0);
4067 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4068 llvm::Value *TmpPtr = Tmp.getPointer();
4069 llvm::Value *TmpSize = EmitLifetimeStart(
4070 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4071 llvm::Value *ElemPtr;
4072 // Each of the following arguments specifies the size of the corresponding
4073 // argument passed to the enqueued block.
4074 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4075 for (unsigned I = First; I < NumArgs; ++I) {
4076 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4077 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
4078 if (I == First)
4079 ElemPtr = GEP;
4080 auto *V =
4081 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4082 Builder.CreateAlignedStore(
4083 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4084 }
4085 return std::tie(ElemPtr, TmpSize, TmpPtr);
4086 };
4087
4088 // Could have events and/or varargs.
4089 if (E->getArg(3)->getType()->isBlockPointerType()) {
4090 // No events passed, but has variadic arguments.
4091 Name = "__enqueue_kernel_varargs";
4092 auto Info =
4093 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4094 llvm::Value *Kernel =
4095 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4096 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4097 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4098 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4099
4100 // Create a vector of the arguments, as well as a constant value to
4101 // express to the runtime the number of variadic arguments.
4102 llvm::Value *const Args[] = {Queue, Flags,
4103 Range, Kernel,
4104 Block, ConstantInt::get(IntTy, NumArgs - 4),
4105 ElemPtr};
4106 llvm::Type *const ArgTys[] = {
4107 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4108 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4109
4110 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4111 auto Call = RValue::get(
4112 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4113 if (TmpSize)
4114 EmitLifetimeEnd(TmpSize, TmpPtr);
4115 return Call;
4116 }
4117 // Any calls now have event arguments passed.
4118 if (NumArgs >= 7) {
4119 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4120 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4121 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4122
4123 llvm::Value *NumEvents =
4124 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4125
4126 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
4127 // to be a null pointer constant (including `0` literal), we can take it
4128 // into account and emit null pointer directly.
4129 llvm::Value *EventWaitList = nullptr;
4130 if (E->getArg(4)->isNullPointerConstant(
4131 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4132 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4133 } else {
4134 EventWaitList = E->getArg(4)->getType()->isArrayType()
4135 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4136 : EmitScalarExpr(E->getArg(4));
4137 // Convert to generic address space.
4138 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4139 }
4140 llvm::Value *EventRet = nullptr;
4141 if (E->getArg(5)->isNullPointerConstant(
4142 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4143 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4144 } else {
4145 EventRet =
4146 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4147 }
4148
4149 auto Info =
4150 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4151 llvm::Value *Kernel =
4152 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4153 llvm::Value *Block =
4154 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4155
4156 std::vector<llvm::Type *> ArgTys = {
4157 QueueTy, Int32Ty, RangeTy, Int32Ty,
4158 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4159
4160 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4161 NumEvents, EventWaitList, EventRet,
4162 Kernel, Block};
4163
4164 if (NumArgs == 7) {
4165 // Has events but no variadics.
4166 Name = "__enqueue_kernel_basic_events";
4167 llvm::FunctionType *FTy = llvm::FunctionType::get(
4168 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4169 return RValue::get(
4170 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4171 llvm::ArrayRef<llvm::Value *>(Args)));
4172 }
4173 // Has event info and variadics
4174 // Pass the number of variadics to the runtime function too.
4175 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4176 ArgTys.push_back(Int32Ty);
4177 Name = "__enqueue_kernel_events_varargs";
4178
4179 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4180 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4181 Args.push_back(ElemPtr);
4182 ArgTys.push_back(ElemPtr->getType());
4183
4184 llvm::FunctionType *FTy = llvm::FunctionType::get(
4185 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4186 auto Call =
4187 RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
4188 llvm::ArrayRef<llvm::Value *>(Args)));
4189 if (TmpSize)
4190 EmitLifetimeEnd(TmpSize, TmpPtr);
4191 return Call;
4192 }
4193     LLVM_FALLTHROUGH;
4194 }
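
Summary of the four enqueue_kernel shapes handled in the case above and the runtime functions they call:

    // (queue, flags, ndrange, block)                                -> __enqueue_kernel_basic
    // (queue, flags, ndrange, block, local sizes...)                -> __enqueue_kernel_varargs
    // (queue, flags, ndrange, nevents, waitlist, ret_event, block)  -> __enqueue_kernel_basic_events
    // the seven-argument form plus trailing local sizes             -> __enqueue_kernel_events_varargs
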
4195 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
4196 // parameter.
4197 case Builtin::BIget_kernel_work_group_size: {
4198 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4199 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4200 auto Info =
4201 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4202 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4203 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4204 return RValue::get(Builder.CreateCall(
4205 CGM.CreateRuntimeFunction(
4206 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4207 false),
4208 "__get_kernel_work_group_size_impl"),
4209 {Kernel, Arg}));
4210 }
4211 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4212 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4213 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4214 auto Info =
4215 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4216 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4217 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4218 return RValue::get(Builder.CreateCall(
4219 CGM.CreateRuntimeFunction(
4220 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4221 false),
4222 "__get_kernel_preferred_work_group_size_multiple_impl"),
4223 {Kernel, Arg}));
4224 }
4225 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4226 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4227 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4228 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4229 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4230 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4231 auto Info =
4232 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4233 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4234 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4235 const char *Name =
4236 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4237 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4238 : "__get_kernel_sub_group_count_for_ndrange_impl";
4239 return RValue::get(Builder.CreateCall(
4240 CGM.CreateRuntimeFunction(
4241 llvm::FunctionType::get(
4242 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4243 false),
4244 Name),
4245 {NDRange, Kernel, Block}));
4246 }
4247
4248 case Builtin::BI__builtin_store_half:
4249 case Builtin::BI__builtin_store_halff: {
4250 Value *Val = EmitScalarExpr(E->getArg(0));
4251 Address Address = EmitPointerWithAlignment(E->getArg(1));
4252 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4253 return RValue::get(Builder.CreateStore(HalfVal, Address));
4254 }
4255 case Builtin::BI__builtin_load_half: {
4256 Address Address = EmitPointerWithAlignment(E->getArg(0));
4257 Value *HalfVal = Builder.CreateLoad(Address);
4258 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4259 }
4260 case Builtin::BI__builtin_load_halff: {
4261 Address Address = EmitPointerWithAlignment(E->getArg(0));
4262 Value *HalfVal = Builder.CreateLoad(Address);
4263 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4264 }
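
Caller-side sketch of the half-precision helpers above (HalfPtr is an illustrative __fp16 pointer):

    __builtin_store_halff(1.0f, HalfPtr);       // fptrunc to half, then store
    float  F = __builtin_load_halff(HalfPtr);   // load half, fpext to float
    double D = __builtin_load_half(HalfPtr);    // load half, fpext to double
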
4265 case Builtin::BIprintf:
4266 if (getTarget().getTriple().isNVPTX())
4267 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4268 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
4269 getLangOpts().HIP)
4270 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
4271 break;
4272 case Builtin::BI__builtin_canonicalize:
4273 case Builtin::BI__builtin_canonicalizef:
4274 case Builtin::BI__builtin_canonicalizef16:
4275 case Builtin::BI__builtin_canonicalizel:
4276 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4277
4278 case Builtin::BI__builtin_thread_pointer: {
4279 if (!getContext().getTargetInfo().isTLSSupported())
4280 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4281 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4282 break;
4283 }
4284 case Builtin::BI__builtin_os_log_format:
4285 return emitBuiltinOSLogFormat(*E);
4286
4287 case Builtin::BI__xray_customevent: {
4288 if (!ShouldXRayInstrumentFunction())
4289 return RValue::getIgnored();
4290
4291 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4292 XRayInstrKind::Custom))
4293 return RValue::getIgnored();
4294
4295 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4296 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4297 return RValue::getIgnored();
4298
4299 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4300 auto FTy = F->getFunctionType();
4301 auto Arg0 = E->getArg(0);
4302 auto Arg0Val = EmitScalarExpr(Arg0);
4303 auto Arg0Ty = Arg0->getType();
4304 auto PTy0 = FTy->getParamType(0);
4305 if (PTy0 != Arg0Val->getType()) {
4306 if (Arg0Ty->isArrayType())
4307 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4308 else
4309 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4310 }
4311 auto Arg1 = EmitScalarExpr(E->getArg(1));
4312 auto PTy1 = FTy->getParamType(1);
4313 if (PTy1 != Arg1->getType())
4314 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4315 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4316 }
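
Sketch of the usage handled by the case above (Buf and Len are illustrative); when any of the checks above bail out, the call is simply ignored:

    __xray_customevent(Buf, Len);
    // Otherwise emitted as a call to the llvm.xray.customevent intrinsic with the
    // buffer pointer and length cast to the intrinsic's parameter types.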
4317
4318 case Builtin::BI__xray_typedevent: {
4319 // TODO: There should be a way to always emit events even if the current
4320 // function is not instrumented. Losing events in a stream can cripple
4321 // a trace.
4322 if (!ShouldXRayInstrumentFunction())
4323 return RValue::getIgnored();
4324
4325 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4326 XRayInstrKind::Typed))
4327 return RValue::getIgnored();
4328
4329 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4330 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
4331 return RValue::getIgnored();
4332
4333 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
4334 auto FTy = F->getFunctionType();
4335 auto Arg0 = EmitScalarExpr(E->getArg(0));
4336 auto PTy0 = FTy->getParamType(0);
4337 if (PTy0 != Arg0->getType())
4338 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
4339 auto Arg1 = E->getArg(1);
4340 auto Arg1Val = EmitScalarExpr(Arg1);
4341 auto Arg1Ty = Arg1->getType();
4342 auto PTy1 = FTy->getParamType(1);
4343 if (PTy1 != Arg1Val->getType()) {
4344 if (Arg1Ty->isArrayType())
4345 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
4346 else
4347 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
4348 }
4349 auto Arg2 = EmitScalarExpr(E->getArg(2));
4350 auto PTy2 = FTy->getParamType(2);
4351 if (PTy2 != Arg2->getType())
4352 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
4353 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
4354 }
4355
4356 case Builtin::BI__builtin_ms_va_start:
4357 case Builtin::BI__builtin_ms_va_end:
4358 return RValue::get(
4359 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
4360 BuiltinID == Builtin::BI__builtin_ms_va_start));
4361
4362 case Builtin::BI__builtin_ms_va_copy: {
4363 // Lower this manually. We can't reliably determine whether or not any
4364 // given va_copy() is for a Win64 va_list from the calling convention
4365 // alone, because it's legal to do this from a System V ABI function.
4366 // With opaque pointer types, we won't have enough information in LLVM
4367 // IR to determine this from the argument types, either. Best to do it
4368 // now, while we have enough information.
4369 Address DestAddr = EmitMSVAListRef(E->getArg(0));
4370 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
4371
4372 llvm::Type *BPP = Int8PtrPtrTy;
4373
4374 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
4375 DestAddr.getAlignment());
4376 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
4377 SrcAddr.getAlignment());
4378
4379 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
4380 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
4381 }
4382 }
4383
4384 // If this is an alias for a lib function (e.g. __builtin_sin), emit
4385 // the call using the normal call path, but using the unmangled
4386 // version of the function name.
4387 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
4388 return emitLibraryCall(*this, FD, E,
4389 CGM.getBuiltinLibFunction(FD, BuiltinID));
4390
4391 // If this is a predefined lib function (e.g. malloc), emit the call
4392 // using exactly the normal call path.
4393 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
4394 return emitLibraryCall(*this, FD, E,
4395 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
4396
4397 // Check that a call to a target specific builtin has the correct target
4398 // features.
4399   // This is down here to avoid checking non-target-specific builtins; however,
4400   // if generic builtins start to require generic target features, then we
4401   // can move this up to the beginning of the function.
4402 checkTargetFeatures(E, FD);
4403
4404 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
4405 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
4406
4407 // See if we have a target specific intrinsic.
4408 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
4409 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
4410 StringRef Prefix =
4411 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
4412 if (!Prefix.empty()) {
4413 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
4414     // NOTE: we don't need to perform a compatibility flag check here since the
4415     // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
4416     // MS builtins via ALL_MS_LANGUAGES so they are handled earlier.
4417 if (IntrinsicID == Intrinsic::not_intrinsic)
4418 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
4419 }
4420
4421 if (IntrinsicID != Intrinsic::not_intrinsic) {
4422 SmallVector<Value*, 16> Args;
4423
4424 // Find out if any arguments are required to be integer constant
4425 // expressions.
4426 unsigned ICEArguments = 0;
4427 ASTContext::GetBuiltinTypeError Error;
4428 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
4429     assert(Error == ASTContext::GE_None && "Should not codegen an error");
4430
4431 Function *F = CGM.getIntrinsic(IntrinsicID);
4432 llvm::FunctionType *FTy = F->getFunctionType();
4433
4434 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
4435 Value *ArgValue;
4436 // If this is a normal argument, just emit it as a scalar.
4437 if ((ICEArguments & (1 << i)) == 0) {
4438 ArgValue = EmitScalarExpr(E->getArg(i));
4439 } else {
4440 // If this is required to be a constant, constant fold it so that we
4441 // know that the generated intrinsic gets a ConstantInt.
4442 ArgValue = llvm::ConstantInt::get(
4443 getLLVMContext(),
4444 *E->getArg(i)->getIntegerConstantExpr(getContext()));
4445 }
4446
4447 // If the intrinsic arg type is different from the builtin arg type
4448 // we need to do a bit cast.
4449 llvm::Type *PTy = FTy->getParamType(i);
4450 if (PTy != ArgValue->getType()) {
4451 // XXX - vector of pointers?
4452 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
4453 if (PtrTy->getAddressSpace() !=
4454 ArgValue->getType()->getPointerAddressSpace()) {
4455 ArgValue = Builder.CreateAddrSpaceCast(
4456 ArgValue,
4457 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
4458 }
4459 }
4460
4461       assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
4462              "Must be able to losslessly bit cast to param");
4463 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
4464 }
4465
4466 Args.push_back(ArgValue);
4467 }
4468
4469 Value *V = Builder.CreateCall(F, Args);
4470 QualType BuiltinRetType = E->getType();
4471
4472 llvm::Type *RetTy = VoidTy;
4473 if (!BuiltinRetType->isVoidType())
4474 RetTy = ConvertType(BuiltinRetType);
4475
4476 if (RetTy != V->getType()) {
4477 // XXX - vector of pointers?
4478 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
4479 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
4480 V = Builder.CreateAddrSpaceCast(
4481 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
4482 }
4483 }
4484
4485       assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
4486              "Must be able to losslessly bit cast result type");
4487 V = Builder.CreateBitCast(V, RetTy);
4488 }
4489
4490 return RValue::get(V);
4491 }
4492
4493 // Some target-specific builtins can have aggregate return values, e.g.
4494 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
4495 // ReturnValue to be non-null, so that the target-specific emission code can
4496 // always just emit into it.
4497 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
4498 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
4499 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
4500 ReturnValue = ReturnValueSlot(DestPtr, false);
4501 }
4502
4503 // Now see if we can emit a target-specific builtin.
4504 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
4505 switch (EvalKind) {
4506 case TEK_Scalar:
4507 return RValue::get(V);
4508 case TEK_Aggregate:
4509 return RValue::getAggregate(ReturnValue.getValue(),
4510 ReturnValue.isVolatile());
4511 case TEK_Complex:
4512 llvm_unreachable("No current target builtin returns complex")::llvm::llvm_unreachable_internal("No current target builtin returns complex"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 4512)
;
4513 }
4514 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr")::llvm::llvm_unreachable_internal("Bad evaluation kind in EmitBuiltinExpr"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 4514)
;
4515 }
4516
4517 ErrorUnsupported(E, "builtin function");
4518
4519 // Unknown builtin, for now just dump it out and return undef.
4520 return GetUndefRValue(E->getType());
4521}
4522
4523static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
4524 unsigned BuiltinID, const CallExpr *E,
4525 ReturnValueSlot ReturnValue,
4526 llvm::Triple::ArchType Arch) {
4527 switch (Arch) {
4528 case llvm::Triple::arm:
4529 case llvm::Triple::armeb:
4530 case llvm::Triple::thumb:
4531 case llvm::Triple::thumbeb:
4532 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
4533 case llvm::Triple::aarch64:
4534 case llvm::Triple::aarch64_32:
4535 case llvm::Triple::aarch64_be:
4536 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
4537 case llvm::Triple::bpfeb:
4538 case llvm::Triple::bpfel:
4539 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
4540 case llvm::Triple::x86:
4541 case llvm::Triple::x86_64:
4542 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
4543 case llvm::Triple::ppc:
4544 case llvm::Triple::ppc64:
4545 case llvm::Triple::ppc64le:
4546 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
4547 case llvm::Triple::r600:
4548 case llvm::Triple::amdgcn:
4549 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
4550 case llvm::Triple::systemz:
4551 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
4552 case llvm::Triple::nvptx:
4553 case llvm::Triple::nvptx64:
4554 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
4555 case llvm::Triple::wasm32:
4556 case llvm::Triple::wasm64:
4557 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
4558 case llvm::Triple::hexagon:
4559 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
4560 default:
4561 return nullptr;
4562 }
4563}
4564
4565Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
4566 const CallExpr *E,
4567 ReturnValueSlot ReturnValue) {
4568 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
4569     assert(getContext().getAuxTargetInfo() && "Missing aux target info");
4570 return EmitTargetArchBuiltinExpr(
4571 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
4572 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
4573 }
4574
4575 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
4576 getTarget().getTriple().getArch());
4577}
4578
4579static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
4580 NeonTypeFlags TypeFlags,
4581 bool HasLegalHalfType = true,
4582 bool V1Ty = false,
4583 bool AllowBFloatArgsAndRet = true) {
4584 int IsQuad = TypeFlags.isQuad();
4585 switch (TypeFlags.getEltType()) {
4586 case NeonTypeFlags::Int8:
4587 case NeonTypeFlags::Poly8:
4588 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
4589 case NeonTypeFlags::Int16:
4590 case NeonTypeFlags::Poly16:
4591 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4592 case NeonTypeFlags::BFloat16:
4593 if (AllowBFloatArgsAndRet)
4594 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
4595 else
4596 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4597 case NeonTypeFlags::Float16:
4598 if (HasLegalHalfType)
4599 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
4600 else
4601 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
4602 case NeonTypeFlags::Int32:
4603 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
4604 case NeonTypeFlags::Int64:
4605 case NeonTypeFlags::Poly64:
4606 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
4607 case NeonTypeFlags::Poly128:
4608     // FIXME: i128 and f128 don't get full support in Clang and LLVM;
4609     // a lot of the i128 and f128 API is missing,
4610     // so we use v16i8 to represent poly128 and get it pattern matched.
4611 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
4612 case NeonTypeFlags::Float32:
4613 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
4614 case NeonTypeFlags::Float64:
4615 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
4616 }
4617 llvm_unreachable("Unknown vector element type!")::llvm::llvm_unreachable_internal("Unknown vector element type!"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 4617)
;
4618}
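
A few concrete mappings produced by GetNeonType, for orientation:

    // Int8,  D-register (!quad) -> <8 x i8>     Int8,  Q-register (quad) -> <16 x i8>
    // Int32, D-register         -> <2 x i32>    Float32, Q-register      -> <4 x float>
    // Poly128 is always represented as <16 x i8>.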
4619
4620static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
4621 NeonTypeFlags IntTypeFlags) {
4622 int IsQuad = IntTypeFlags.isQuad();
4623 switch (IntTypeFlags.getEltType()) {
4624 case NeonTypeFlags::Int16:
4625 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
4626 case NeonTypeFlags::Int32:
4627 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
4628 case NeonTypeFlags::Int64:
4629 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
4630 default:
4631 llvm_unreachable("Type can't be converted to floating-point!")::llvm::llvm_unreachable_internal("Type can't be converted to floating-point!"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 4631)
;
4632 }
4633}
4634
4635Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
4636 const ElementCount &Count) {
4637 Value *SV = llvm::ConstantVector::getSplat(Count, C);
4638 return Builder.CreateShuffleVector(V, V, SV, "lane");
4639}
4640
4641Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
4642 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
4643 return EmitNeonSplat(V, C, EC);
4644}
4645
4646Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
4647 const char *name,
4648 unsigned shift, bool rightshift) {
4649 unsigned j = 0;
4650 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
4651 ai != ae; ++ai, ++j) {
4652 if (F->isConstrainedFPIntrinsic())
4653 if (ai->getType()->isMetadataTy())
4654 continue;
4655 if (shift > 0 && shift == j)
4656 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
4657 else
4658 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
4659 }
4660
4661 if (F->isConstrainedFPIntrinsic())
4662 return Builder.CreateConstrainedFPCall(F, Ops, name);
4663 else
4664 return Builder.CreateCall(F, Ops, name);
4665}
4666
4667Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
4668 bool neg) {
4669 int SV = cast<ConstantInt>(V)->getSExtValue();
4670 return ConstantInt::get(Ty, neg ? -SV : SV);
4671}
4672
4673// Right-shift a vector by a constant.
4674Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
4675 llvm::Type *Ty, bool usgn,
4676 const char *name) {
4677 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
4678
4679 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
4680 int EltSize = VTy->getScalarSizeInBits();
4681
4682 Vec = Builder.CreateBitCast(Vec, Ty);
4683
4684 // lshr/ashr are undefined when the shift amount is equal to the vector
4685 // element size.
4686 if (ShiftAmt == EltSize) {
4687 if (usgn) {
4688 // Right-shifting an unsigned value by its size yields 0.
4689 return llvm::ConstantAggregateZero::get(VTy);
4690 } else {
4691 // Right-shifting a signed value by its size is equivalent
4692 // to a shift of size-1.
4693 --ShiftAmt;
4694 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
4695 }
4696 }
4697
4698 Shift = EmitNeonShiftVector(Shift, Ty, false);
4699 if (usgn)
4700 return Builder.CreateLShr(Vec, Shift, name);
4701 else
4702 return Builder.CreateAShr(Vec, Shift, name);
4703}
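
Worked example of the boundary case handled above, a right shift by the full element width:

    // unsigned, ShiftAmt == EltSize: e.g. a <4 x i32> shifted right by 32 -> zeroinitializer
    // signed,   ShiftAmt == EltSize: the shift is clamped to EltSize-1, i.e. an ashr by 31,
    //                                which yields 0 or -1 in each lane depending on the sign bit.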
4704
4705enum {
4706 AddRetType = (1 << 0),
4707 Add1ArgType = (1 << 1),
4708 Add2ArgTypes = (1 << 2),
4709
4710 VectorizeRetType = (1 << 3),
4711 VectorizeArgTypes = (1 << 4),
4712
4713 InventFloatType = (1 << 5),
4714 UnsignedAlts = (1 << 6),
4715
4716 Use64BitVectors = (1 << 7),
4717 Use128BitVectors = (1 << 8),
4718
4719 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
4720 VectorRet = AddRetType | VectorizeRetType,
4721 VectorRetGetArgs01 =
4722 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
4723 FpCmpzModifiers =
4724 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
4725};
4726
4727namespace {
4728struct ARMVectorIntrinsicInfo {
4729 const char *NameHint;
4730 unsigned BuiltinID;
4731 unsigned LLVMIntrinsic;
4732 unsigned AltLLVMIntrinsic;
4733 uint64_t TypeModifier;
4734
4735 bool operator<(unsigned RHSBuiltinID) const {
4736 return BuiltinID < RHSBuiltinID;
4737 }
4738 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
4739 return BuiltinID < TE.BuiltinID;
4740 }
4741};
4742} // end anonymous namespace
4743
4744#define NEONMAP0(NameBase) \
4745 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
4746
4747#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
4748 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4749 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
4750
4751#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
4752 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
4753 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
4754 TypeModifier }
4755
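For example, a NEONMAP1 entry in the table below expands to an ARMVectorIntrinsicInfo initializer like:

    // NEONMAP1(vabs_v, arm_neon_vabs, 0)
    //   => { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
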
4756static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
4757 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
4758 NEONMAP0(splat_lane_v),
4759 NEONMAP0(splat_laneq_v),
4760 NEONMAP0(splatq_lane_v),
4761 NEONMAP0(splatq_laneq_v),
4762 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4763 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
4764 NEONMAP1(vabs_v, arm_neon_vabs, 0),
4765 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
4766 NEONMAP0(vaddhn_v),
4767 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
4768 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
4769 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
4770 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
4771 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
4772 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
4773 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
4774 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
4775 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
4776 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
4777 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
4778 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4779 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4780 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
4781 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
4782 NEONMAP1(vcage_v, arm_neon_vacge, 0),
4783 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
4784 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
4785 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
4786 NEONMAP1(vcale_v, arm_neon_vacge, 0),
4787 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
4788 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
4789 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
4790 NEONMAP0(vceqz_v),
4791 NEONMAP0(vceqzq_v),
4792 NEONMAP0(vcgez_v),
4793 NEONMAP0(vcgezq_v),
4794 NEONMAP0(vcgtz_v),
4795 NEONMAP0(vcgtzq_v),
4796 NEONMAP0(vclez_v),
4797 NEONMAP0(vclezq_v),
4798 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
4799 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
4800 NEONMAP0(vcltz_v),
4801 NEONMAP0(vcltzq_v),
4802 NEONMAP1(vclz_v, ctlz, Add1ArgType),
4803 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
4804 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
4805 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
4806 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
4807 NEONMAP0(vcvt_f16_v),
4808 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
4809 NEONMAP0(vcvt_f32_v),
4810 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4811 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4812 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4813 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4814 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4815 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4816 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4817 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4818 NEONMAP0(vcvt_s16_v),
4819 NEONMAP0(vcvt_s32_v),
4820 NEONMAP0(vcvt_s64_v),
4821 NEONMAP0(vcvt_u16_v),
4822 NEONMAP0(vcvt_u32_v),
4823 NEONMAP0(vcvt_u64_v),
4824 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
4825 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
4826 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
4827 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
4828 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
4829 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
4830 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
4831 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
4832 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
4833 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
4834 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
4835 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
4836 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
4837 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
4838 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
4839 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
4840 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
4841 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
4842 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
4843 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
4844 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
4845 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
4846 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
4847 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
4848 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
4849 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
4850 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
4851 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
4852 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
4853 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
4854 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
4855 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
4856 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
4857 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
4858 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
4859 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
4860 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
4861 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
4862 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
4863 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
4864 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
4865 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
4866 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
4867 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
4868 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
4869 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
4870 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
4871 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
4872 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
4873 NEONMAP0(vcvtq_f16_v),
4874 NEONMAP0(vcvtq_f32_v),
4875 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4876 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
4877 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
4878 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
4879 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
4880 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
4881 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
4882 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
4883 NEONMAP0(vcvtq_s16_v),
4884 NEONMAP0(vcvtq_s32_v),
4885 NEONMAP0(vcvtq_s64_v),
4886 NEONMAP0(vcvtq_u16_v),
4887 NEONMAP0(vcvtq_u32_v),
4888 NEONMAP0(vcvtq_u64_v),
4889 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
4890 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
4891 NEONMAP0(vext_v),
4892 NEONMAP0(vextq_v),
4893 NEONMAP0(vfma_v),
4894 NEONMAP0(vfmaq_v),
4895 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4896 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
4897 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4898 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
4899 NEONMAP0(vld1_dup_v),
4900 NEONMAP1(vld1_v, arm_neon_vld1, 0),
4901 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
4902 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
4903 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
4904 NEONMAP0(vld1q_dup_v),
4905 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
4906 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
4907 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
4908 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
4909 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
4910 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
4911 NEONMAP1(vld2_v, arm_neon_vld2, 0),
4912 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
4913 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
4914 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
4915 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
4916 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
4917 NEONMAP1(vld3_v, arm_neon_vld3, 0),
4918 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
4919 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
4920 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
4921 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
4922 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
4923 NEONMAP1(vld4_v, arm_neon_vld4, 0),
4924 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
4925 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
4926 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
4927 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4928 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
4929 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
4930 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
4931 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4932 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
4933 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
4934 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
4935 NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
4936 NEONMAP0(vmovl_v),
4937 NEONMAP0(vmovn_v),
4938 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
4939 NEONMAP0(vmull_v),
4940 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
4941 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4942 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
4943 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
4944 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4945 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
4946 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
4947 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
4948 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
4949 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
4950 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
4951 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4952 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
4953 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
4954 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
4955 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
4956 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
4957 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
4958 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
4959 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
4960 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
4961 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
4962 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
4963 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
4964 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4965 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
4966 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4967 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4968 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
4969 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
4970 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
4971 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
4972 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4973 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
4974 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
4975 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4976 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
4977 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
4978 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
4979 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4980 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
4981 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
4982 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
4983 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
4984 NEONMAP0(vrndi_v),
4985 NEONMAP0(vrndiq_v),
4986 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
4987 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
4988 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
4989 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
4990 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
4991 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
4992 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
4993 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
4994 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
4995 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4996 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
4997 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4998 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
4999 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5000 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5001 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
5002 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
5003 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
5004 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
5005 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
5006 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
5007 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
5008 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
5009 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
5010 NEONMAP0(vshl_n_v),
5011 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5012 NEONMAP0(vshll_n_v),
5013 NEONMAP0(vshlq_n_v),
5014 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5015 NEONMAP0(vshr_n_v),
5016 NEONMAP0(vshrn_n_v),
5017 NEONMAP0(vshrq_n_v),
5018 NEONMAP1(vst1_v, arm_neon_vst1, 0),
5019 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5020 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5021 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5022 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5023 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5024 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5025 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5026 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5027 NEONMAP1(vst2_v, arm_neon_vst2, 0),
5028 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5029 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5030 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5031 NEONMAP1(vst3_v, arm_neon_vst3, 0),
5032 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5033 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5034 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5035 NEONMAP1(vst4_v, arm_neon_vst4, 0),
5036 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5037 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5038 NEONMAP0(vsubhn_v),
5039 NEONMAP0(vtrn_v),
5040 NEONMAP0(vtrnq_v),
5041 NEONMAP0(vtst_v),
5042 NEONMAP0(vtstq_v),
5043 NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5044 NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5045 NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5046 NEONMAP0(vuzp_v),
5047 NEONMAP0(vuzpq_v),
5048 NEONMAP0(vzip_v),
5049 NEONMAP0(vzipq_v)
5050};
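// A minimal sketch of what one row of these tables denotes, assuming the
// conventional NEONMAP* macro shape defined earlier in this file (not shown in
// this excerpt): each entry pairs a Clang NEON builtin with the LLVM intrinsic
// (plus an optional alternate/unsigned intrinsic) and the type-modifier flags
// consumed by LookupNeonLLVMIntrinsic further below. For example,
//
//   NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts)
//
// expands to roughly
//
//   { "vhadd_v", NEON::BI__builtin_neon_vhadd_v, Intrinsic::arm_neon_vhaddu,
//     Intrinsic::arm_neon_vhadds, Add1ArgType | UnsignedAlts }
//
// while NEONMAP0(name) records the builtin with no direct intrinsic mapping,
// leaving it to the hand-written cases in EmitCommonNeonBuiltinExpr.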
5051
5052static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5053 NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5054 NEONMAP0(splat_lane_v),
5055 NEONMAP0(splat_laneq_v),
5056 NEONMAP0(splatq_lane_v),
5057 NEONMAP0(splatq_laneq_v),
5058 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5059 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5060 NEONMAP0(vaddhn_v),
5061 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5062 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5063 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5064 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5065 NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5066 NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5067 NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5068 NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5069 NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5070 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5071 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5072 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5073 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5074 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5075 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5076 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5077 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5078 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5079 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5080 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5081 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5082 NEONMAP0(vceqz_v),
5083 NEONMAP0(vceqzq_v),
5084 NEONMAP0(vcgez_v),
5085 NEONMAP0(vcgezq_v),
5086 NEONMAP0(vcgtz_v),
5087 NEONMAP0(vcgtzq_v),
5088 NEONMAP0(vclez_v),
5089 NEONMAP0(vclezq_v),
5090 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5091 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5092 NEONMAP0(vcltz_v),
5093 NEONMAP0(vcltzq_v),
5094 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5095 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5096 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5097 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5098 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5099 NEONMAP0(vcvt_f16_v),
5100 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5101 NEONMAP0(vcvt_f32_v),
5102 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5103 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5104 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5105 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5106 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5107 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5108 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5109 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5110 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5111 NEONMAP0(vcvtq_f16_v),
5112 NEONMAP0(vcvtq_f32_v),
5113 NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5114 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5115 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5116 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5117 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5118 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5119 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5120 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5121 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5122 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5123 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
5124 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5125 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5126 NEONMAP0(vext_v),
5127 NEONMAP0(vextq_v),
5128 NEONMAP0(vfma_v),
5129 NEONMAP0(vfmaq_v),
5130 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
5131 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
5132 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
5133 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
5134 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
5135 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
5136 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
5137 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
5138 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5139 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5140 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5141 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5142 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
5143 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
5144 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
5145 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
5146 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
5147 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
5148 NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
5149 NEONMAP0(vmovl_v),
5150 NEONMAP0(vmovn_v),
5151 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
5152 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
5153 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
5154 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5155 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5156 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
5157 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
5158 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
5159 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5160 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5161 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
5162 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
5163 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
5164 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5165 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
5166 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
5167 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5168 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
5169 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
5170 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
5171 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
5172 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
5173 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
5174 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5175 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5176 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
5177 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5178 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5179 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
5180 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5181 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5182 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5183 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5184 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5185 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5186 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
5187 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
5188 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5189 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5190 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
5191 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5192 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5193 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
5194 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
5195 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5196 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5197 NEONMAP0(vrndi_v),
5198 NEONMAP0(vrndiq_v),
5199 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5200 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5201 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5202 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5203 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5204 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5205 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
5206 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
5207 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
5208 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
5209 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
5210 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
5211 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
5212 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
5213 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
5214 NEONMAP0(vshl_n_v),
5215 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5216 NEONMAP0(vshll_n_v),
5217 NEONMAP0(vshlq_n_v),
5218 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5219 NEONMAP0(vshr_n_v),
5220 NEONMAP0(vshrn_n_v),
5221 NEONMAP0(vshrq_n_v),
5222 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
5223 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
5224 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
5225 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
5226 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
5227 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
5228 NEONMAP0(vsubhn_v),
5229 NEONMAP0(vtst_v),
5230 NEONMAP0(vtstq_v),
5231 NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
5232 NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
5233 NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
5234};
5235
5236static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
5237 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
5238 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
5239 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
5240 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5241 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5242 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5243 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5244 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5245 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5246 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5247 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5248 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
5249 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5250 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
5251 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5252 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5253 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5254 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5255 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5256 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5257 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5258 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5259 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5260 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5261 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5262 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5263 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5264 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5265 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5266 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5267 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5268 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5269 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5270 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5271 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
5272 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5273 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5274 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5275 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5276 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5277 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5278 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5279 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5280 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5281 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5282 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5283 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5284 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5285 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5286 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5287 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5288 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5289 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5290 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
5291 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5292 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5293 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5294 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5295 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5296 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5297 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5298 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5299 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
5300 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
5301 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5302 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5303 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5304 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5305 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5306 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5307 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5308 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5309 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
5310 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
5311 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
5312 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
5313 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
5314 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5315 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5316 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5317 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
5318 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5319 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
5320 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5321 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
5322 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
5323 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
5324 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5325 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
5326 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
5327 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
5328 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5329 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5330 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
5331 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
5332 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
5333 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
5334 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
5335 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
5336 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
5337 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
5338 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
5339 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
5340 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
5341 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
5342 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5343 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5344 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
5345 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
5346 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
5347 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5348 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
5349 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5350 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
5351 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
5352 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
5353 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
5354 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
5355 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5356 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5357 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
5358 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
5359 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
5360 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
5361 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
5362 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
5363 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
5364 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
5365 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5366 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5367 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
5368 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
5369 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
5370 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5371 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
5372 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5373 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5374 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5375 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5376 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
5377 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
5378 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5379 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5380 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
5381 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
5382 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
5383 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
5384 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
5385 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
5386 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5387 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
5388 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
5389 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
5390 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
5391 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5392 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5393 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
5394 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
5395 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
5396 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5397 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
5398 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5399 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5400 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
5401 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
5402 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
5403 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
5404 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
5405 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
5406 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
5407 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
5408 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
5409 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
5410 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
5411 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
5412 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
5413 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
5414 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
5415 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
5416 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
5417 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
5418 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
5419 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
5420 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
5421 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
5422 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
5423 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
5424 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5425 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
5426 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5427 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
5428 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
5429 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
5430 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5431 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
5432 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5433 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
5434 // FP16 scalar intrinsics go here.
5435 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
5436 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5437 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5438 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5439 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5440 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5441 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5442 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5443 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5444 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5445 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5446 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5447 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5448 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5449 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
5450 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5451 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
5452 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5453 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5454 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5455 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5456 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5457 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5458 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5459 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5460 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5461 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5462 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5463 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5464 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
5465 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
5466 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
5467 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
5468 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
5469};
5470
5471#undef NEONMAP0
5472#undef NEONMAP1
5473#undef NEONMAP2
5474
5475#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5476 { \
5477 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
5478 TypeModifier \
5479 }
5480
5481#define SVEMAP2(NameBase, TypeModifier) \
5482 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
5483static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
5484#define GET_SVE_LLVM_INTRINSIC_MAP
5485#include "clang/Basic/arm_sve_builtin_cg.inc"
5486#undef GET_SVE_LLVM_INTRINSIC_MAP
5487};
5488
5489#undef SVEMAP1
5490#undef SVEMAP2
5491
5492static bool NEONSIMDIntrinsicsProvenSorted = false;
5493
5494static bool AArch64SIMDIntrinsicsProvenSorted = false;
5495static bool AArch64SISDIntrinsicsProvenSorted = false;
5496static bool AArch64SVEIntrinsicsProvenSorted = false;
5497
5498static const ARMVectorIntrinsicInfo *
5499findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
5500 unsigned BuiltinID, bool &MapProvenSorted) {
5501
5502#ifndef NDEBUG
5503 if (!MapProvenSorted) {
5504 assert(llvm::is_sorted(IntrinsicMap));
5505 MapProvenSorted = true;
5506 }
5507#endif
5508
5509 const ARMVectorIntrinsicInfo *Builtin =
5510 llvm::lower_bound(IntrinsicMap, BuiltinID);
5511
5512 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
5513 return Builtin;
5514
5515 return nullptr;
5516}
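// A minimal usage sketch (an approximation, not code visible in this excerpt):
// the per-target emitters look a builtin up in one of the sorted tables above
// and, if found, forward the entry's fields to the common emitters, roughly:
//
//   if (const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
//           AArch64SIMDIntrinsicMap, BuiltinID,
//           AArch64SIMDIntrinsicsProvenSorted))
//     return EmitCommonNeonBuiltinExpr(
//         Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
//         Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
//
// The exact caller shape is assumed; the point is that the table lookup, rather
// than ad-hoc switch cases, supplies the intrinsic IDs and modifier flags.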
5517
5518Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
5519 unsigned Modifier,
5520 llvm::Type *ArgType,
5521 const CallExpr *E) {
5522 int VectorSize = 0;
5523 if (Modifier & Use64BitVectors)
5524 VectorSize = 64;
5525 else if (Modifier & Use128BitVectors)
5526 VectorSize = 128;
5527
5528 // Return type.
5529 SmallVector<llvm::Type *, 3> Tys;
5530 if (Modifier & AddRetType) {
5531 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
5532 if (Modifier & VectorizeRetType)
5533 Ty = llvm::FixedVectorType::get(
5534 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
5535
5536 Tys.push_back(Ty);
5537 }
5538
5539 // Arguments.
5540 if (Modifier & VectorizeArgTypes) {
5541 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
5542 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
5543 }
5544
5545 if (Modifier & (Add1ArgType | Add2ArgTypes))
5546 Tys.push_back(ArgType);
5547
5548 if (Modifier & Add2ArgTypes)
5549 Tys.push_back(ArgType);
5550
5551 if (Modifier & InventFloatType)
5552 Tys.push_back(FloatTy);
5553
5554 return CGM.getIntrinsic(IntrinsicID, Tys);
5555}
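// A minimal sketch of how the TypeModifier bits drive the overload list built
// above, using the vaddlv_s32 entry (AddRetType | Add1ArgType) as an example;
// the concrete IR types are illustrative assumptions:
//
//   // AddRetType  -> Tys[0] = return type of the call (i32)
//   // Add1ArgType -> Tys[1] = type of the first argument (<2 x i32>)
//   // so the lookup is effectively:
//   //   CGM.getIntrinsic(Intrinsic::aarch64_neon_saddlv, {Int32Ty, V2Int32Ty});
//
// For scalar-table entries carrying Vectorize1ArgType | Use64BitVectors, the
// argument type is first widened to a 64-bit vector (VectorSize divided by the
// element width lanes) before being pushed, which is what lets scalar builtins
// reuse the vector intrinsics.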
5556
5557static Value *EmitCommonNeonSISDBuiltinExpr(
5558 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
5559 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
5560 unsigned BuiltinID = SISDInfo.BuiltinID;
5561 unsigned int Int = SISDInfo.LLVMIntrinsic;
5562 unsigned Modifier = SISDInfo.TypeModifier;
5563 const char *s = SISDInfo.NameHint;
5564
5565 switch (BuiltinID) {
5566 case NEON::BI__builtin_neon_vcled_s64:
5567 case NEON::BI__builtin_neon_vcled_u64:
5568 case NEON::BI__builtin_neon_vcles_f32:
5569 case NEON::BI__builtin_neon_vcled_f64:
5570 case NEON::BI__builtin_neon_vcltd_s64:
5571 case NEON::BI__builtin_neon_vcltd_u64:
5572 case NEON::BI__builtin_neon_vclts_f32:
5573 case NEON::BI__builtin_neon_vcltd_f64:
5574 case NEON::BI__builtin_neon_vcales_f32:
5575 case NEON::BI__builtin_neon_vcaled_f64:
5576 case NEON::BI__builtin_neon_vcalts_f32:
5577 case NEON::BI__builtin_neon_vcaltd_f64:
5578 // Only one direction of comparisons actually exists; cmle is a cmge with
5579 // swapped operands. The table gives us the right intrinsic, but we still
5580 // need to do the swap.
5581 std::swap(Ops[0], Ops[1]);
5582 break;
5583 }
5584
5585 assert(Int && "Generic code assumes a valid intrinsic");
5587 // Determine the type(s) of this overloaded AArch64 intrinsic.
5588 const Expr *Arg = E->getArg(0);
5589 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
5590 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
5591
5592 int j = 0;
5593 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
5594 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5595 ai != ae; ++ai, ++j) {
5596 llvm::Type *ArgTy = ai->getType();
5597 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
5598 ArgTy->getPrimitiveSizeInBits())
5599 continue;
5600
5601 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
5602 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
5603 // it before inserting.
5604 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
5605 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
5606 Ops[j] =
5607 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
5608 }
5609
5610 Value *Result = CGF.EmitNeonCall(F, Ops, s);
5611 llvm::Type *ResultType = CGF.ConvertType(E->getType());
5612 if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
5613 Result->getType()->getPrimitiveSizeInBits().getFixedSize())
5614 return CGF.Builder.CreateExtractElement(Result, C0);
5615
5616 return CGF.Builder.CreateBitCast(Result, ResultType, s);
5617}
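// A rough sketch of the IR this produces for a scalar saturating add such as
// vqaddh_s16 (mapped above with Vectorize1ArgType | Use64BitVectors); the
// value names are made up for illustration:
//
//   %v0 = insertelement <4 x i16> undef, i16 %a, i64 0
//   %v1 = insertelement <4 x i16> undef, i16 %b, i64 0
//   %s  = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %v0, <4 x i16> %v1)
//   %r  = extractelement <4 x i16> %s, i64 0
//
// i.e. scalar operands are promoted into lane 0 of a 64-bit vector, the vector
// intrinsic is called, and lane 0 is extracted because the scalar result type
// is narrower than the vector returned by the intrinsic.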
5618
5619Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
5620 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
5621 const char *NameHint, unsigned Modifier, const CallExpr *E,
5622 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
5623 llvm::Triple::ArchType Arch) {
5624 // Get the last argument, which specifies the vector type.
5625 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
5626 Optional<llvm::APSInt> NeonTypeConst =
5627 Arg->getIntegerConstantExpr(getContext());
5628 if (!NeonTypeConst)
5629 return nullptr;
5630
5631 // Determine the type of this overloaded NEON intrinsic.
5632 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
5633 bool Usgn = Type.isUnsigned();
5634 bool Quad = Type.isQuad();
5635 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
5636 const bool AllowBFloatArgsAndRet =
5637 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
5638
5639 llvm::FixedVectorType *VTy =
5640 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
5641 llvm::Type *Ty = VTy;
5642 if (!Ty)
5643 return nullptr;
5644
5645 auto getAlignmentValue32 = [&](Address addr) -> Value* {
5646 return Builder.getInt32(addr.getAlignment().getQuantity());
5647 };
5648
5649 unsigned Int = LLVMIntrinsic;
5650 if ((Modifier & UnsignedAlts) && !Usgn)
5651 Int = AltLLVMIntrinsic;
5652
5653 switch (BuiltinID) {
5654 default: break;
5655 case NEON::BI__builtin_neon_splat_lane_v:
5656 case NEON::BI__builtin_neon_splat_laneq_v:
5657 case NEON::BI__builtin_neon_splatq_lane_v:
5658 case NEON::BI__builtin_neon_splatq_laneq_v: {
5659 auto NumElements = VTy->getElementCount();
5660 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
5661 NumElements = NumElements * 2;
5662 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
5663 NumElements = NumElements.divideCoefficientBy(2);
5664
5665 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
5666 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
5667 }
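// A small illustration of the lane-splat handling above (types assumed):
// splatq_lane_v reads a 64-bit input but produces a 128-bit result, so the
// element count is doubled before splatting, while splat_laneq_v halves it.
// For a <2 x i32> input and lane 1, splatq_lane_v therefore amounts to a
// shufflevector that selects element 1 into all four lanes of a <4 x i32>.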
5668 case NEON::BI__builtin_neon_vpadd_v:
5669 case NEON::BI__builtin_neon_vpaddq_v:
5670 // We don't allow fp/int overloading of intrinsics.
5671 if (VTy->getElementType()->isFloatingPointTy() &&
5672 Int == Intrinsic::aarch64_neon_addp)
5673 Int = Intrinsic::aarch64_neon_faddp;
5674 break;
5675 case NEON::BI__builtin_neon_vabs_v:
5676 case NEON::BI__builtin_neon_vabsq_v:
5677 if (VTy->getElementType()->isFloatingPointTy())
5678 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
5679 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
5680 case NEON::BI__builtin_neon_vaddhn_v: {
5681 llvm::FixedVectorType *SrcTy =
5682 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
5683
5684 // %sum = add <4 x i32> %lhs, %rhs
5685 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5686 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5687 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
5688
5689 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
5690 Constant *ShiftAmt =
5691 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5692 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
5693
5694 // %res = trunc <4 x i32> %high to <4 x i16>
5695 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
5696 }
5697 case NEON::BI__builtin_neon_vcale_v:
5698 case NEON::BI__builtin_neon_vcaleq_v:
5699 case NEON::BI__builtin_neon_vcalt_v:
5700 case NEON::BI__builtin_neon_vcaltq_v:
5701 std::swap(Ops[0], Ops[1]);
5702 LLVM_FALLTHROUGH;
5703 case NEON::BI__builtin_neon_vcage_v:
5704 case NEON::BI__builtin_neon_vcageq_v:
5705 case NEON::BI__builtin_neon_vcagt_v:
5706 case NEON::BI__builtin_neon_vcagtq_v: {
5707 llvm::Type *Ty;
5708 switch (VTy->getScalarSizeInBits()) {
5709 default: llvm_unreachable("unexpected type");
5710 case 32:
5711 Ty = FloatTy;
5712 break;
5713 case 64:
5714 Ty = DoubleTy;
5715 break;
5716 case 16:
5717 Ty = HalfTy;
5718 break;
5719 }
5720 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
5721 llvm::Type *Tys[] = { VTy, VecFlt };
5722 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5723 return EmitNeonCall(F, Ops, NameHint);
5724 }
5725 case NEON::BI__builtin_neon_vceqz_v:
5726 case NEON::BI__builtin_neon_vceqzq_v:
5727 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
5728 ICmpInst::ICMP_EQ, "vceqz");
5729 case NEON::BI__builtin_neon_vcgez_v:
5730 case NEON::BI__builtin_neon_vcgezq_v:
5731 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
5732 ICmpInst::ICMP_SGE, "vcgez");
5733 case NEON::BI__builtin_neon_vclez_v:
5734 case NEON::BI__builtin_neon_vclezq_v:
5735 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
5736 ICmpInst::ICMP_SLE, "vclez");
5737 case NEON::BI__builtin_neon_vcgtz_v:
5738 case NEON::BI__builtin_neon_vcgtzq_v:
5739 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
5740 ICmpInst::ICMP_SGT, "vcgtz");
5741 case NEON::BI__builtin_neon_vcltz_v:
5742 case NEON::BI__builtin_neon_vcltzq_v:
5743 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
5744 ICmpInst::ICMP_SLT, "vcltz");
5745 case NEON::BI__builtin_neon_vclz_v:
5746 case NEON::BI__builtin_neon_vclzq_v:
5747 // We generate a target-independent intrinsic, which needs a second argument
5748 // for whether or not clz of zero is undefined; on ARM it isn't.
5749 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
5750 break;
5751 case NEON::BI__builtin_neon_vcvt_f32_v:
5752 case NEON::BI__builtin_neon_vcvtq_f32_v:
5753 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5754 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
5755 HasLegalHalfType);
5756 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5757 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5758 case NEON::BI__builtin_neon_vcvt_f16_v:
5759 case NEON::BI__builtin_neon_vcvtq_f16_v:
5760 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5761 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
5762 HasLegalHalfType);
5763 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5764 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5765 case NEON::BI__builtin_neon_vcvt_n_f16_v:
5766 case NEON::BI__builtin_neon_vcvt_n_f32_v:
5767 case NEON::BI__builtin_neon_vcvt_n_f64_v:
5768 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
5769 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
5770 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
5771 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
5772 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5773 Function *F = CGM.getIntrinsic(Int, Tys);
5774 return EmitNeonCall(F, Ops, "vcvt_n");
5775 }
5776 case NEON::BI__builtin_neon_vcvt_n_s16_v:
5777 case NEON::BI__builtin_neon_vcvt_n_s32_v:
5778 case NEON::BI__builtin_neon_vcvt_n_u16_v:
5779 case NEON::BI__builtin_neon_vcvt_n_u32_v:
5780 case NEON::BI__builtin_neon_vcvt_n_s64_v:
5781 case NEON::BI__builtin_neon_vcvt_n_u64_v:
5782 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
5783 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
5784 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
5785 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
5786 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
5787 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
5788 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5789 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5790 return EmitNeonCall(F, Ops, "vcvt_n");
5791 }
5792 case NEON::BI__builtin_neon_vcvt_s32_v:
5793 case NEON::BI__builtin_neon_vcvt_u32_v:
5794 case NEON::BI__builtin_neon_vcvt_s64_v:
5795 case NEON::BI__builtin_neon_vcvt_u64_v:
5796 case NEON::BI__builtin_neon_vcvt_s16_v:
5797 case NEON::BI__builtin_neon_vcvt_u16_v:
5798 case NEON::BI__builtin_neon_vcvtq_s32_v:
5799 case NEON::BI__builtin_neon_vcvtq_u32_v:
5800 case NEON::BI__builtin_neon_vcvtq_s64_v:
5801 case NEON::BI__builtin_neon_vcvtq_u64_v:
5802 case NEON::BI__builtin_neon_vcvtq_s16_v:
5803 case NEON::BI__builtin_neon_vcvtq_u16_v: {
5804 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
5805 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
5806 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
5807 }
5808 case NEON::BI__builtin_neon_vcvta_s16_v:
5809 case NEON::BI__builtin_neon_vcvta_s32_v:
5810 case NEON::BI__builtin_neon_vcvta_s64_v:
5811 case NEON::BI__builtin_neon_vcvta_u16_v:
5812 case NEON::BI__builtin_neon_vcvta_u32_v:
5813 case NEON::BI__builtin_neon_vcvta_u64_v:
5814 case NEON::BI__builtin_neon_vcvtaq_s16_v:
5815 case NEON::BI__builtin_neon_vcvtaq_s32_v:
5816 case NEON::BI__builtin_neon_vcvtaq_s64_v:
5817 case NEON::BI__builtin_neon_vcvtaq_u16_v:
5818 case NEON::BI__builtin_neon_vcvtaq_u32_v:
5819 case NEON::BI__builtin_neon_vcvtaq_u64_v:
5820 case NEON::BI__builtin_neon_vcvtn_s16_v:
5821 case NEON::BI__builtin_neon_vcvtn_s32_v:
5822 case NEON::BI__builtin_neon_vcvtn_s64_v:
5823 case NEON::BI__builtin_neon_vcvtn_u16_v:
5824 case NEON::BI__builtin_neon_vcvtn_u32_v:
5825 case NEON::BI__builtin_neon_vcvtn_u64_v:
5826 case NEON::BI__builtin_neon_vcvtnq_s16_v:
5827 case NEON::BI__builtin_neon_vcvtnq_s32_v:
5828 case NEON::BI__builtin_neon_vcvtnq_s64_v:
5829 case NEON::BI__builtin_neon_vcvtnq_u16_v:
5830 case NEON::BI__builtin_neon_vcvtnq_u32_v:
5831 case NEON::BI__builtin_neon_vcvtnq_u64_v:
5832 case NEON::BI__builtin_neon_vcvtp_s16_v:
5833 case NEON::BI__builtin_neon_vcvtp_s32_v:
5834 case NEON::BI__builtin_neon_vcvtp_s64_v:
5835 case NEON::BI__builtin_neon_vcvtp_u16_v:
5836 case NEON::BI__builtin_neon_vcvtp_u32_v:
5837 case NEON::BI__builtin_neon_vcvtp_u64_v:
5838 case NEON::BI__builtin_neon_vcvtpq_s16_v:
5839 case NEON::BI__builtin_neon_vcvtpq_s32_v:
5840 case NEON::BI__builtin_neon_vcvtpq_s64_v:
5841 case NEON::BI__builtin_neon_vcvtpq_u16_v:
5842 case NEON::BI__builtin_neon_vcvtpq_u32_v:
5843 case NEON::BI__builtin_neon_vcvtpq_u64_v:
5844 case NEON::BI__builtin_neon_vcvtm_s16_v:
5845 case NEON::BI__builtin_neon_vcvtm_s32_v:
5846 case NEON::BI__builtin_neon_vcvtm_s64_v:
5847 case NEON::BI__builtin_neon_vcvtm_u16_v:
5848 case NEON::BI__builtin_neon_vcvtm_u32_v:
5849 case NEON::BI__builtin_neon_vcvtm_u64_v:
5850 case NEON::BI__builtin_neon_vcvtmq_s16_v:
5851 case NEON::BI__builtin_neon_vcvtmq_s32_v:
5852 case NEON::BI__builtin_neon_vcvtmq_s64_v:
5853 case NEON::BI__builtin_neon_vcvtmq_u16_v:
5854 case NEON::BI__builtin_neon_vcvtmq_u32_v:
5855 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
5856 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5857 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5858 }
5859 case NEON::BI__builtin_neon_vcvtx_f32_v: {
5860 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
5861 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5862
5863 }
5864 case NEON::BI__builtin_neon_vext_v:
5865 case NEON::BI__builtin_neon_vextq_v: {
5866 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
5867 SmallVector<int, 16> Indices;
5868 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5869 Indices.push_back(i+CV);
5870
5871 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5872 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5873 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
5874 }
5875 case NEON::BI__builtin_neon_vfma_v:
5876 case NEON::BI__builtin_neon_vfmaq_v: {
5877 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5878 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5879 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5880
5881 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
5882 return emitCallMaybeConstrainedFPBuiltin(
5883 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
5884 {Ops[1], Ops[2], Ops[0]});
5885 }
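// Worked example of the operand reordering above (intrinsic semantics from the
// ACLE, emitted call assumed): vfmaq_f32(acc, a, b) computes acc + a * b per
// lane, while llvm.fma(x, y, z) computes x * y + z, so the emitted call is
// llvm.fma.v4f32(a, b, acc), matching the {Ops[1], Ops[2], Ops[0]} order used
// above (or its experimental.constrained.fma counterpart under strict FP).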
5886 case NEON::BI__builtin_neon_vld1_v:
5887 case NEON::BI__builtin_neon_vld1q_v: {
5888 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5889 Ops.push_back(getAlignmentValue32(PtrOp0));
5890 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
5891 }
5892 case NEON::BI__builtin_neon_vld1_x2_v:
5893 case NEON::BI__builtin_neon_vld1q_x2_v:
5894 case NEON::BI__builtin_neon_vld1_x3_v:
5895 case NEON::BI__builtin_neon_vld1q_x3_v:
5896 case NEON::BI__builtin_neon_vld1_x4_v:
5897 case NEON::BI__builtin_neon_vld1q_x4_v: {
5898 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
5899 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5900 llvm::Type *Tys[2] = { VTy, PTy };
5901 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5902 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5903 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5904 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5905 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5906 }
5907 case NEON::BI__builtin_neon_vld2_v:
5908 case NEON::BI__builtin_neon_vld2q_v:
5909 case NEON::BI__builtin_neon_vld3_v:
5910 case NEON::BI__builtin_neon_vld3q_v:
5911 case NEON::BI__builtin_neon_vld4_v:
5912 case NEON::BI__builtin_neon_vld4q_v:
5913 case NEON::BI__builtin_neon_vld2_dup_v:
5914 case NEON::BI__builtin_neon_vld2q_dup_v:
5915 case NEON::BI__builtin_neon_vld3_dup_v:
5916 case NEON::BI__builtin_neon_vld3q_dup_v:
5917 case NEON::BI__builtin_neon_vld4_dup_v:
5918 case NEON::BI__builtin_neon_vld4q_dup_v: {
5919 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5920 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5921 Value *Align = getAlignmentValue32(PtrOp1);
5922 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
5923 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5924 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5925 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5926 }
5927 case NEON::BI__builtin_neon_vld1_dup_v:
5928 case NEON::BI__builtin_neon_vld1q_dup_v: {
5929 Value *V = UndefValue::get(Ty);
5930 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5931 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
5932 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
5933 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
5934 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
5935 return EmitNeonSplat(Ops[0], CI);
5936 }
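// Sketch of the IR produced by the vld1_dup path above (names and element
// type are illustrative):
//
//   %elt = load i32, i32* %ptr
//   %v   = insertelement <2 x i32> undef, i32 %elt, i64 0
//   %dup = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
//
// i.e. a single scalar load, an insert into lane 0, and then a splat of that
// lane across the vector (EmitNeonSplat emits the zero-mask shufflevector).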
5937 case NEON::BI__builtin_neon_vld2_lane_v:
5938 case NEON::BI__builtin_neon_vld2q_lane_v:
5939 case NEON::BI__builtin_neon_vld3_lane_v:
5940 case NEON::BI__builtin_neon_vld3q_lane_v:
5941 case NEON::BI__builtin_neon_vld4_lane_v:
5942 case NEON::BI__builtin_neon_vld4q_lane_v: {
5943 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5944 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5945 for (unsigned I = 2; I < Ops.size() - 1; ++I)
5946 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
5947 Ops.push_back(getAlignmentValue32(PtrOp1));
5948 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
5949 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5950 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5951 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5952 }
5953 case NEON::BI__builtin_neon_vmovl_v: {
5954 llvm::FixedVectorType *DTy =
5955 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
5956 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
5957 if (Usgn)
5958 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
5959 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
5960 }
5961 case NEON::BI__builtin_neon_vmovn_v: {
5962 llvm::FixedVectorType *QTy =
5963 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
5964 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
5965 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
5966 }
5967 case NEON::BI__builtin_neon_vmull_v:
5968 // FIXME: the integer vmull operations could be emitted in terms of pure
5969 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
5970 // hoisting the exts outside loops. Until global ISel comes along that can
5971 // see through such movement this leads to bad CodeGen. So we need an
5972 // intrinsic for now.
5973 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
5974 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
5975 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
5976 case NEON::BI__builtin_neon_vpadal_v:
5977 case NEON::BI__builtin_neon_vpadalq_v: {
5978 // The source operand type has twice as many elements of half the size.
5979 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5980 llvm::Type *EltTy =
5981 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5982 auto *NarrowTy =
5983 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
5984 llvm::Type *Tys[2] = { Ty, NarrowTy };
5985 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5986 }
5987 case NEON::BI__builtin_neon_vpaddl_v:
5988 case NEON::BI__builtin_neon_vpaddlq_v: {
5989 // The source operand type has twice as many elements of half the size.
5990 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5991 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5992 auto *NarrowTy =
5993 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
5994 llvm::Type *Tys[2] = { Ty, NarrowTy };
5995 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
5996 }
5997 case NEON::BI__builtin_neon_vqdmlal_v:
5998 case NEON::BI__builtin_neon_vqdmlsl_v: {
5999 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
6000 Ops[1] =
6001 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
6002 Ops.resize(2);
6003 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
6004 }
6005 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
6006 case NEON::BI__builtin_neon_vqdmulh_lane_v:
6007 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
6008 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
6009 auto *RTy = cast<llvm::FixedVectorType>(Ty);
6010 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
6011 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
6012 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
6013 RTy->getNumElements() * 2);
6014 llvm::Type *Tys[2] = {
6015 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6016 /*isQuad*/ false))};
6017 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6018 }
6019 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6020 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6021 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6022 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6023 llvm::Type *Tys[2] = {
6024 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6025 /*isQuad*/ true))};
6026 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6027 }
6028 case NEON::BI__builtin_neon_vqshl_n_v:
6029 case NEON::BI__builtin_neon_vqshlq_n_v:
6030 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6031 1, false);
6032 case NEON::BI__builtin_neon_vqshlu_n_v:
6033 case NEON::BI__builtin_neon_vqshluq_n_v:
6034 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6035 1, false);
6036 case NEON::BI__builtin_neon_vrecpe_v:
6037 case NEON::BI__builtin_neon_vrecpeq_v:
6038 case NEON::BI__builtin_neon_vrsqrte_v:
6039 case NEON::BI__builtin_neon_vrsqrteq_v:
6040 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6041 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6042 case NEON::BI__builtin_neon_vrndi_v:
6043 case NEON::BI__builtin_neon_vrndiq_v:
6044 Int = Builder.getIsFPConstrained()
6045 ? Intrinsic::experimental_constrained_nearbyint
6046 : Intrinsic::nearbyint;
6047 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6048 case NEON::BI__builtin_neon_vrshr_n_v:
6049 case NEON::BI__builtin_neon_vrshrq_n_v:
6050 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6051 1, true);
6052 case NEON::BI__builtin_neon_vshl_n_v:
6053 case NEON::BI__builtin_neon_vshlq_n_v:
6054 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6055 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6056 "vshl_n");
6057 case NEON::BI__builtin_neon_vshll_n_v: {
6058 llvm::FixedVectorType *SrcTy =
6059 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6060 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6061 if (Usgn)
6062 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6063 else
6064 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6065 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6066 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6067 }
6068 case NEON::BI__builtin_neon_vshrn_n_v: {
6069 llvm::FixedVectorType *SrcTy =
6070 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6071 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6072 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6073 if (Usgn)
6074 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6075 else
6076 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6077 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6078 }
6079 case NEON::BI__builtin_neon_vshr_n_v:
6080 case NEON::BI__builtin_neon_vshrq_n_v:
6081 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6082 case NEON::BI__builtin_neon_vst1_v:
6083 case NEON::BI__builtin_neon_vst1q_v:
6084 case NEON::BI__builtin_neon_vst2_v:
6085 case NEON::BI__builtin_neon_vst2q_v:
6086 case NEON::BI__builtin_neon_vst3_v:
6087 case NEON::BI__builtin_neon_vst3q_v:
6088 case NEON::BI__builtin_neon_vst4_v:
6089 case NEON::BI__builtin_neon_vst4q_v:
6090 case NEON::BI__builtin_neon_vst2_lane_v:
6091 case NEON::BI__builtin_neon_vst2q_lane_v:
6092 case NEON::BI__builtin_neon_vst3_lane_v:
6093 case NEON::BI__builtin_neon_vst3q_lane_v:
6094 case NEON::BI__builtin_neon_vst4_lane_v:
6095 case NEON::BI__builtin_neon_vst4q_lane_v: {
6096 llvm::Type *Tys[] = {Int8PtrTy, Ty};
6097 Ops.push_back(getAlignmentValue32(PtrOp0));
6098 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
6099 }
6100 case NEON::BI__builtin_neon_vst1_x2_v:
6101 case NEON::BI__builtin_neon_vst1q_x2_v:
6102 case NEON::BI__builtin_neon_vst1_x3_v:
6103 case NEON::BI__builtin_neon_vst1q_x3_v:
6104 case NEON::BI__builtin_neon_vst1_x4_v:
6105 case NEON::BI__builtin_neon_vst1q_x4_v: {
6106 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6107 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6108 // in AArch64 it comes last. We may want to standardize on one or the other.
6109 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
6110 Arch == llvm::Triple::aarch64_32) {
6111 llvm::Type *Tys[2] = { VTy, PTy };
6112 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
6113 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6114 }
6115 llvm::Type *Tys[2] = { PTy, VTy };
6116 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6117 }
6118 case NEON::BI__builtin_neon_vsubhn_v: {
6119 llvm::FixedVectorType *SrcTy =
6120 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6121
6122 // %sum = add <4 x i32> %lhs, %rhs
6123 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6124 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6125 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
6126
6127 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6128 Constant *ShiftAmt =
6129 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6130 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
6131
6132 // %res = trunc <4 x i32> %high to <4 x i16>
6133 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
6134 }
6135 case NEON::BI__builtin_neon_vtrn_v:
6136 case NEON::BI__builtin_neon_vtrnq_v: {
6137 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6138 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6139 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6140 Value *SV = nullptr;
6141
6142 for (unsigned vi = 0; vi != 2; ++vi) {
6143 SmallVector<int, 16> Indices;
6144 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6145 Indices.push_back(i+vi);
6146 Indices.push_back(i+e+vi);
6147 }
6148 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6149 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
6150 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6151 }
6152 return SV;
6153 }
6154 case NEON::BI__builtin_neon_vtst_v:
6155 case NEON::BI__builtin_neon_vtstq_v: {
6156 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6157 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6158 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
6159 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
6160 ConstantAggregateZero::get(Ty));
6161 return Builder.CreateSExt(Ops[0], Ty, "vtst");
6162 }
6163 case NEON::BI__builtin_neon_vuzp_v:
6164 case NEON::BI__builtin_neon_vuzpq_v: {
6165 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6166 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6167 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6168 Value *SV = nullptr;
6169
6170 for (unsigned vi = 0; vi != 2; ++vi) {
6171 SmallVector<int, 16> Indices;
6172 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6173 Indices.push_back(2*i+vi);
6174
6175 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6176 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
6177 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6178 }
6179 return SV;
6180 }
6181 case NEON::BI__builtin_neon_vzip_v:
6182 case NEON::BI__builtin_neon_vzipq_v: {
6183 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6184 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6185 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6186 Value *SV = nullptr;
6187
6188 for (unsigned vi = 0; vi != 2; ++vi) {
6189 SmallVector<int, 16> Indices;
6190 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6191 Indices.push_back((i + vi*e) >> 1);
6192 Indices.push_back(((i + vi*e) >> 1)+e);
6193 }
6194 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6195 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6196 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6197 }
6198 return SV;
6199 }
6200 case NEON::BI__builtin_neon_vdot_v:
6201 case NEON::BI__builtin_neon_vdotq_v: {
6202 auto *InputTy =
6203 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6204 llvm::Type *Tys[2] = { Ty, InputTy };
6205 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6206 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6207 }
6208 case NEON::BI__builtin_neon_vfmlal_low_v:
6209 case NEON::BI__builtin_neon_vfmlalq_low_v: {
6210 auto *InputTy =
6211 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6212 llvm::Type *Tys[2] = { Ty, InputTy };
6213 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6214 }
6215 case NEON::BI__builtin_neon_vfmlsl_low_v:
6216 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6217 auto *InputTy =
6218 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6219 llvm::Type *Tys[2] = { Ty, InputTy };
6220 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6221 }
6222 case NEON::BI__builtin_neon_vfmlal_high_v:
6223 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6224 auto *InputTy =
6225 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6226 llvm::Type *Tys[2] = { Ty, InputTy };
6227 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6228 }
6229 case NEON::BI__builtin_neon_vfmlsl_high_v:
6230 case NEON::BI__builtin_neon_vfmlslq_high_v: {
6231 auto *InputTy =
6232 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6233 llvm::Type *Tys[2] = { Ty, InputTy };
6234 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
6235 }
6236 case NEON::BI__builtin_neon_vmmlaq_v: {
6237 auto *InputTy =
6238 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6239 llvm::Type *Tys[2] = { Ty, InputTy };
6240 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6241 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
6242 }
6243 case NEON::BI__builtin_neon_vusmmlaq_v: {
6244 auto *InputTy =
6245 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6246 llvm::Type *Tys[2] = { Ty, InputTy };
6247 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
6248 }
6249 case NEON::BI__builtin_neon_vusdot_v:
6250 case NEON::BI__builtin_neon_vusdotq_v: {
6251 auto *InputTy =
6252 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6253 llvm::Type *Tys[2] = { Ty, InputTy };
6254 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
6255 }
6256 case NEON::BI__builtin_neon_vbfdot_v:
6257 case NEON::BI__builtin_neon_vbfdotq_v: {
6258 llvm::Type *InputTy =
6259 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
6260 llvm::Type *Tys[2] = { Ty, InputTy };
6261 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
6262 }
6263 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
6264 llvm::Type *Tys[1] = { Ty };
6265 Function *F = CGM.getIntrinsic(Int, Tys);
6266 return EmitNeonCall(F, Ops, "vcvtfp2bf");
6267 }
6268
6269 }
6270
6271 assert(Int && "Expected valid intrinsic number");
6272
6273 // Determine the type(s) of this overloaded AArch64 intrinsic.
6274 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
6275
6276 Value *Result = EmitNeonCall(F, Ops, NameHint);
6277 llvm::Type *ResultType = ConvertType(E->getType());
6278 // AArch64 intrinsic one-element vector type cast to
6279 // scalar type expected by the builtin
6280 return Builder.CreateBitCast(Result, ResultType, NameHint);
6281}
6282
6283Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
6284 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
6285 const CmpInst::Predicate Ip, const Twine &Name) {
6286 llvm::Type *OTy = Op->getType();
6287
6288 // FIXME: this is utterly horrific. We should not be looking at previous
6289 // codegen context to find out what needs doing. Unfortunately TableGen
6290 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
6291 // (etc).
6292 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
6293 OTy = BI->getOperand(0)->getType();
6294
6295 Op = Builder.CreateBitCast(Op, OTy);
6296 if (OTy->getScalarType()->isFloatingPointTy()) {
6297 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
6298 } else {
6299 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
6300 }
6301 return Builder.CreateSExt(Op, Ty, Name);
6302}
6303
6304static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
6305 Value *ExtOp, Value *IndexOp,
6306 llvm::Type *ResTy, unsigned IntID,
6307 const char *Name) {
6308 SmallVector<Value *, 2> TblOps;
6309 if (ExtOp)
6310 TblOps.push_back(ExtOp);
6311
6312 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
6313 SmallVector<int, 16> Indices;
6314 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
6315 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6316 Indices.push_back(2*i);
6317 Indices.push_back(2*i+1);
6318 }
6319
6320 int PairPos = 0, End = Ops.size() - 1;
6321 while (PairPos < End) {
6322 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6323 Ops[PairPos+1], Indices,
6324 Name));
6325 PairPos += 2;
6326 }
6327
6328 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
6329 // of the 128-bit lookup table with zero.
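// For example (illustrative): vtbl3 supplies three 64-bit table registers, so
// the third one is paired with a zero vector here to form the last 128-bit
// table operand.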
6330 if (PairPos == End) {
6331 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6332 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6333 ZeroTbl, Indices, Name));
6334 }
6335
6336 Function *TblF;
6337 TblOps.push_back(IndexOp);
6338 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6339
6340 return CGF.EmitNeonCall(TblF, TblOps, Name);
6341}
6342
6343Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6344 unsigned Value;
6345 switch (BuiltinID) {
6346 default:
6347 return nullptr;
6348 case ARM::BI__builtin_arm_nop:
6349 Value = 0;
6350 break;
6351 case ARM::BI__builtin_arm_yield:
6352 case ARM::BI__yield:
6353 Value = 1;
6354 break;
6355 case ARM::BI__builtin_arm_wfe:
6356 case ARM::BI__wfe:
6357 Value = 2;
6358 break;
6359 case ARM::BI__builtin_arm_wfi:
6360 case ARM::BI__wfi:
6361 Value = 3;
6362 break;
6363 case ARM::BI__builtin_arm_sev:
6364 case ARM::BI__sev:
6365 Value = 4;
6366 break;
6367 case ARM::BI__builtin_arm_sevl:
6368 case ARM::BI__sevl:
6369 Value = 5;
6370 break;
6371 }
6372
6373 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6374 llvm::ConstantInt::get(Int32Ty, Value));
6375}
6376
6377enum SpecialRegisterAccessKind {
6378 NormalRead,
6379 VolatileRead,
6380 Write,
6381};
6382
6383 // Generates the IR for the read/write special register builtins.
6384 // ValueType is the type of the value that is to be written or read;
6385 // RegisterType is the type of the register being written to or read from.
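// A minimal sketch of the mapping (illustrative; register name chosen arbitrarily):
//   __builtin_arm_rsr("cpsr")    -> llvm.read_register / llvm.read_volatile_register
//                                   on i32, with !{!"cpsr"} as the register metadata
//   __builtin_arm_wsr("cpsr", v) -> llvm.write_register on i32 with the same metadata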
6386static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6387 const CallExpr *E,
6388 llvm::Type *RegisterType,
6389 llvm::Type *ValueType,
6390 SpecialRegisterAccessKind AccessKind,
6391 StringRef SysReg = "") {
6392 // The read/write register intrinsics only support 32- and 64-bit operations.
6393 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6394        && "Unsupported size for register.");
6395
6396 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6397 CodeGen::CodeGenModule &CGM = CGF.CGM;
6398 LLVMContext &Context = CGM.getLLVMContext();
6399
6400 if (SysReg.empty()) {
6401 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6402 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6403 }
6404
6405 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6406 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6407 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
6408
6409 llvm::Type *Types[] = { RegisterType };
6410
6411 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
6412 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
6413        && "Can't fit 64-bit value in 32-bit register");
6414
6415 if (AccessKind != Write) {
6416 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
6417 llvm::Function *F = CGM.getIntrinsic(
6418 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
6419 : llvm::Intrinsic::read_register,
6420 Types);
6421 llvm::Value *Call = Builder.CreateCall(F, Metadata);
6422
6423 if (MixedTypes)
6424 // Read into 64 bit register and then truncate result to 32 bit.
6425 return Builder.CreateTrunc(Call, ValueType);
6426
6427 if (ValueType->isPointerTy())
6428 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
6429 return Builder.CreateIntToPtr(Call, ValueType);
6430
6431 return Call;
6432 }
6433
6434 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
6435 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
6436 if (MixedTypes) {
6437 // Extend 32 bit write value to 64 bit to pass to write.
6438 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
6439 return Builder.CreateCall(F, { Metadata, ArgValue });
6440 }
6441
6442 if (ValueType->isPointerTy()) {
6443 // Have VoidPtrTy ArgValue but want to return an i32/i64.
6444 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
6445 return Builder.CreateCall(F, { Metadata, ArgValue });
6446 }
6447
6448 return Builder.CreateCall(F, { Metadata, ArgValue });
6449}
6450
6451/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
6452/// argument that specifies the vector type.
6453static bool HasExtraNeonArgument(unsigned BuiltinID) {
6454 switch (BuiltinID) {
6455 default: break;
6456 case NEON::BI__builtin_neon_vget_lane_i8:
6457 case NEON::BI__builtin_neon_vget_lane_i16:
6458 case NEON::BI__builtin_neon_vget_lane_bf16:
6459 case NEON::BI__builtin_neon_vget_lane_i32:
6460 case NEON::BI__builtin_neon_vget_lane_i64:
6461 case NEON::BI__builtin_neon_vget_lane_f32:
6462 case NEON::BI__builtin_neon_vgetq_lane_i8:
6463 case NEON::BI__builtin_neon_vgetq_lane_i16:
6464 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6465 case NEON::BI__builtin_neon_vgetq_lane_i32:
6466 case NEON::BI__builtin_neon_vgetq_lane_i64:
6467 case NEON::BI__builtin_neon_vgetq_lane_f32:
6468 case NEON::BI__builtin_neon_vduph_lane_bf16:
6469 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6470 case NEON::BI__builtin_neon_vset_lane_i8:
6471 case NEON::BI__builtin_neon_vset_lane_i16:
6472 case NEON::BI__builtin_neon_vset_lane_bf16:
6473 case NEON::BI__builtin_neon_vset_lane_i32:
6474 case NEON::BI__builtin_neon_vset_lane_i64:
6475 case NEON::BI__builtin_neon_vset_lane_f32:
6476 case NEON::BI__builtin_neon_vsetq_lane_i8:
6477 case NEON::BI__builtin_neon_vsetq_lane_i16:
6478 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6479 case NEON::BI__builtin_neon_vsetq_lane_i32:
6480 case NEON::BI__builtin_neon_vsetq_lane_i64:
6481 case NEON::BI__builtin_neon_vsetq_lane_f32:
6482 case NEON::BI__builtin_neon_vsha1h_u32:
6483 case NEON::BI__builtin_neon_vsha1cq_u32:
6484 case NEON::BI__builtin_neon_vsha1pq_u32:
6485 case NEON::BI__builtin_neon_vsha1mq_u32:
6486 case NEON::BI__builtin_neon_vcvth_bf16_f32:
6487 case clang::ARM::BI_MoveToCoprocessor:
6488 case clang::ARM::BI_MoveToCoprocessor2:
6489 return false;
6490 }
6491 return true;
6492}
6493
6494Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6495 const CallExpr *E,
6496 ReturnValueSlot ReturnValue,
6497 llvm::Triple::ArchType Arch) {
6498 if (auto Hint = GetValueForARMHint(BuiltinID))
6499 return Hint;
6500
6501 if (BuiltinID == ARM::BI__emit) {
6502 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6503 llvm::FunctionType *FTy =
6504 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6505
6506 Expr::EvalResult Result;
6507 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6508 llvm_unreachable("Sema will ensure that the parameter is constant");
6509
6510 llvm::APSInt Value = Result.Val.getInt();
6511 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
6512
6513 llvm::InlineAsm *Emit =
6514 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6515 /*hasSideEffects=*/true)
6516 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6517 /*hasSideEffects=*/true);
6518
6519 return Builder.CreateCall(Emit);
6520 }
6521
6522 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6523 Value *Option = EmitScalarExpr(E->getArg(0));
6524 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6525 }
6526
6527 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6528 Value *Address = EmitScalarExpr(E->getArg(0));
6529 Value *RW = EmitScalarExpr(E->getArg(1));
6530 Value *IsData = EmitScalarExpr(E->getArg(2));
6531
6532 // Locality is not supported on the ARM target.
6533 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6534
6535 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
6536 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6537 }
6538
6539 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6540 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6541 return Builder.CreateCall(
6542 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6543 }
6544
6545 if (BuiltinID == ARM::BI__builtin_arm_cls) {
6546 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6547 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
6548 }
6549 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
6550 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6551 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
6552 "cls");
6553 }
6554
6555 if (BuiltinID == ARM::BI__clear_cache) {
6556 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
6557 const FunctionDecl *FD = E->getDirectCallee();
6558 Value *Ops[2];
6559 for (unsigned i = 0; i < 2; i++)
6560 Ops[i] = EmitScalarExpr(E->getArg(i));
6561 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
6562 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
6563 StringRef Name = FD->getName();
6564 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
6565 }
6566
6567 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
6568 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
6569 Function *F;
6570
6571 switch (BuiltinID) {
6572 default: llvm_unreachable("unexpected builtin");
6573 case ARM::BI__builtin_arm_mcrr:
6574 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
6575 break;
6576 case ARM::BI__builtin_arm_mcrr2:
6577 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
6578 break;
6579 }
6580
6581 // The MCRR{2} instruction has 5 operands, but
6582 // the intrinsic has only 4, because Rt and Rt2
6583 // are represented as a single unsigned 64-bit
6584 // integer in the intrinsic definition, even
6585 // though internally they are represented as
6586 // two 32-bit integers.
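// Illustrative: for RtAndRt2 = 0x1111222233334444, the code below produces
// Rt = 0x33334444 (low half via trunc) and Rt2 = 0x11112222 (high half via
// lshr 32 + trunc).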
6587
6588 Value *Coproc = EmitScalarExpr(E->getArg(0));
6589 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6590 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
6591 Value *CRm = EmitScalarExpr(E->getArg(3));
6592
6593 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6594 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
6595 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
6596 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
6597
6598 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
6599 }
6600
6601 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
6602 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
6603 Function *F;
6604
6605 switch (BuiltinID) {
6606 default: llvm_unreachable("unexpected builtin");
6607 case ARM::BI__builtin_arm_mrrc:
6608 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
6609 break;
6610 case ARM::BI__builtin_arm_mrrc2:
6611 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
6612 break;
6613 }
6614
6615 Value *Coproc = EmitScalarExpr(E->getArg(0));
6616 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6617 Value *CRm = EmitScalarExpr(E->getArg(2));
6618 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
6619
6620 // Returns an unsigned 64-bit integer, represented
6621 // as two 32-bit integers.
6622
6623 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
6624 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
6625 Rt = Builder.CreateZExt(Rt, Int64Ty);
6626 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
6627
6628 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
6629 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
6630 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
6631
6632 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
6633 }
6634
6635 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
6636 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
6637 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
6638 getContext().getTypeSize(E->getType()) == 64) ||
6639 BuiltinID == ARM::BI__ldrexd) {
6640 Function *F;
6641
6642 switch (BuiltinID) {
6643 default: llvm_unreachable("unexpected builtin");
6644 case ARM::BI__builtin_arm_ldaex:
6645 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
6646 break;
6647 case ARM::BI__builtin_arm_ldrexd:
6648 case ARM::BI__builtin_arm_ldrex:
6649 case ARM::BI__ldrexd:
6650 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
6651 break;
6652 }
6653
6654 Value *LdPtr = EmitScalarExpr(E->getArg(0));
6655 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
6656 "ldrexd");
6657
6658 Value *Val0 = Builder.CreateExtractValue(Val, 1);
6659 Value *Val1 = Builder.CreateExtractValue(Val, 0);
6660 Val0 = Builder.CreateZExt(Val0, Int64Ty);
6661 Val1 = Builder.CreateZExt(Val1, Int64Ty);
6662
6663 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
6664 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
6665 Val = Builder.CreateOr(Val, Val1);
6666 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
6667 }
6668
6669 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
6670 BuiltinID == ARM::BI__builtin_arm_ldaex) {
6671 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
6672
6673 QualType Ty = E->getType();
6674 llvm::Type *RealResTy = ConvertType(Ty);
6675 llvm::Type *PtrTy = llvm::IntegerType::get(
6676 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
6677 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
6678
6679 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
6680 ? Intrinsic::arm_ldaex
6681 : Intrinsic::arm_ldrex,
6682 PtrTy);
6683 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
6684
6685 if (RealResTy->isPointerTy())
6686 return Builder.CreateIntToPtr(Val, RealResTy);
6687 else {
6688 llvm::Type *IntResTy = llvm::IntegerType::get(
6689 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
6690 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
6691 return Builder.CreateBitCast(Val, RealResTy);
6692 }
6693 }
6694
6695 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
6696 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
6697 BuiltinID == ARM::BI__builtin_arm_strex) &&
6698 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
6699 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6700 ? Intrinsic::arm_stlexd
6701 : Intrinsic::arm_strexd);
6702 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
6703
6704 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
6705 Value *Val = EmitScalarExpr(E->getArg(0));
6706 Builder.CreateStore(Val, Tmp);
6707
6708 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
6709 Val = Builder.CreateLoad(LdPtr);
6710
6711 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
6712 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
6713 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
6714 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
6715 }
6716
6717 if (BuiltinID == ARM::BI__builtin_arm_strex ||
6718 BuiltinID == ARM::BI__builtin_arm_stlex) {
6719 Value *StoreVal = EmitScalarExpr(E->getArg(0));
6720 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
6721
6722 QualType Ty = E->getArg(0)->getType();
6723 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
6724 getContext().getTypeSize(Ty));
6725 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
6726
6727 if (StoreVal->getType()->isPointerTy())
6728 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
6729 else {
6730 llvm::Type *IntTy = llvm::IntegerType::get(
6731 getLLVMContext(),
6732 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
6733 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
6734 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
6735 }
6736
6737 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6738 ? Intrinsic::arm_stlex
6739 : Intrinsic::arm_strex,
6740 StoreAddr->getType());
6741 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
6742 }
6743
6744 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
6745 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
6746 return Builder.CreateCall(F);
6747 }
6748
6749 // CRC32
6750 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
6751 switch (BuiltinID) {
6752 case ARM::BI__builtin_arm_crc32b:
6753 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
6754 case ARM::BI__builtin_arm_crc32cb:
6755 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
6756 case ARM::BI__builtin_arm_crc32h:
6757 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
6758 case ARM::BI__builtin_arm_crc32ch:
6759 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
6760 case ARM::BI__builtin_arm_crc32w:
6761 case ARM::BI__builtin_arm_crc32d:
6762 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
6763 case ARM::BI__builtin_arm_crc32cw:
6764 case ARM::BI__builtin_arm_crc32cd:
6765 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
6766 }
6767
6768 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
6769 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6770 Value *Arg1 = EmitScalarExpr(E->getArg(1));
6771
6772 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
6773 // intrinsics, hence we need different codegen for these cases.
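// Illustratively, __builtin_arm_crc32d(a, b) is lowered as
//   crc32w(crc32w(a, trunc(b)), trunc(b >> 32))
// i.e. the low word of b is folded in first, then the high word.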
6774 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
6775 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
6776 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6777 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
6778 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
6779 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
6780
6781 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6782 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
6783 return Builder.CreateCall(F, {Res, Arg1b});
6784 } else {
6785 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
6786
6787 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6788 return Builder.CreateCall(F, {Arg0, Arg1});
6789 }
6790 }
6791
6792 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6793 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6794 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6795 BuiltinID == ARM::BI__builtin_arm_wsr ||
6796 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6797 BuiltinID == ARM::BI__builtin_arm_wsrp) {
6798
6799 SpecialRegisterAccessKind AccessKind = Write;
6800 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6801 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6802 BuiltinID == ARM::BI__builtin_arm_rsrp)
6803 AccessKind = VolatileRead;
6804
6805 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
6806 BuiltinID == ARM::BI__builtin_arm_wsrp;
6807
6808 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6809 BuiltinID == ARM::BI__builtin_arm_wsr64;
6810
6811 llvm::Type *ValueType;
6812 llvm::Type *RegisterType;
6813 if (IsPointerBuiltin) {
6814 ValueType = VoidPtrTy;
6815 RegisterType = Int32Ty;
6816 } else if (Is64Bit) {
6817 ValueType = RegisterType = Int64Ty;
6818 } else {
6819 ValueType = RegisterType = Int32Ty;
6820 }
6821
6822 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
6823 AccessKind);
6824 }
6825
6826 // Deal with MVE builtins
6827 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6828 return Result;
6829 // Handle CDE builtins
6830 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
6831 return Result;
6832
6833 // Find out if any arguments are required to be integer constant
6834 // expressions.
6835 unsigned ICEArguments = 0;
6836 ASTContext::GetBuiltinTypeError Error;
6837 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6838 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6839
6840 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6841 return Builder.getInt32(addr.getAlignment().getQuantity());
6842 };
6843
6844 Address PtrOp0 = Address::invalid();
6845 Address PtrOp1 = Address::invalid();
6846 SmallVector<Value*, 4> Ops;
6847 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
6848 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
6849 for (unsigned i = 0, e = NumArgs; i != e; i++) {
6850 if (i == 0) {
6851 switch (BuiltinID) {
6852 case NEON::BI__builtin_neon_vld1_v:
6853 case NEON::BI__builtin_neon_vld1q_v:
6854 case NEON::BI__builtin_neon_vld1q_lane_v:
6855 case NEON::BI__builtin_neon_vld1_lane_v:
6856 case NEON::BI__builtin_neon_vld1_dup_v:
6857 case NEON::BI__builtin_neon_vld1q_dup_v:
6858 case NEON::BI__builtin_neon_vst1_v:
6859 case NEON::BI__builtin_neon_vst1q_v:
6860 case NEON::BI__builtin_neon_vst1q_lane_v:
6861 case NEON::BI__builtin_neon_vst1_lane_v:
6862 case NEON::BI__builtin_neon_vst2_v:
6863 case NEON::BI__builtin_neon_vst2q_v:
6864 case NEON::BI__builtin_neon_vst2_lane_v:
6865 case NEON::BI__builtin_neon_vst2q_lane_v:
6866 case NEON::BI__builtin_neon_vst3_v:
6867 case NEON::BI__builtin_neon_vst3q_v:
6868 case NEON::BI__builtin_neon_vst3_lane_v:
6869 case NEON::BI__builtin_neon_vst3q_lane_v:
6870 case NEON::BI__builtin_neon_vst4_v:
6871 case NEON::BI__builtin_neon_vst4q_v:
6872 case NEON::BI__builtin_neon_vst4_lane_v:
6873 case NEON::BI__builtin_neon_vst4q_lane_v:
6874 // Get the alignment for the argument in addition to the value;
6875 // we'll use it later.
6876 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
6877 Ops.push_back(PtrOp0.getPointer());
6878 continue;
6879 }
6880 }
6881 if (i == 1) {
6882 switch (BuiltinID) {
6883 case NEON::BI__builtin_neon_vld2_v:
6884 case NEON::BI__builtin_neon_vld2q_v:
6885 case NEON::BI__builtin_neon_vld3_v:
6886 case NEON::BI__builtin_neon_vld3q_v:
6887 case NEON::BI__builtin_neon_vld4_v:
6888 case NEON::BI__builtin_neon_vld4q_v:
6889 case NEON::BI__builtin_neon_vld2_lane_v:
6890 case NEON::BI__builtin_neon_vld2q_lane_v:
6891 case NEON::BI__builtin_neon_vld3_lane_v:
6892 case NEON::BI__builtin_neon_vld3q_lane_v:
6893 case NEON::BI__builtin_neon_vld4_lane_v:
6894 case NEON::BI__builtin_neon_vld4q_lane_v:
6895 case NEON::BI__builtin_neon_vld2_dup_v:
6896 case NEON::BI__builtin_neon_vld2q_dup_v:
6897 case NEON::BI__builtin_neon_vld3_dup_v:
6898 case NEON::BI__builtin_neon_vld3q_dup_v:
6899 case NEON::BI__builtin_neon_vld4_dup_v:
6900 case NEON::BI__builtin_neon_vld4q_dup_v:
6901 // Get the alignment for the argument in addition to the value;
6902 // we'll use it later.
6903 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
6904 Ops.push_back(PtrOp1.getPointer());
6905 continue;
6906 }
6907 }
6908
6909 if ((ICEArguments & (1 << i)) == 0) {
6910 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6911 } else {
6912 // If this is required to be a constant, constant fold it so that we know
6913 // that the generated intrinsic gets a ConstantInt.
6914 Ops.push_back(llvm::ConstantInt::get(
6915 getLLVMContext(),
6916 *E->getArg(i)->getIntegerConstantExpr(getContext())));
6917 }
6918 }
6919
6920 switch (BuiltinID) {
6921 default: break;
6922
6923 case NEON::BI__builtin_neon_vget_lane_i8:
6924 case NEON::BI__builtin_neon_vget_lane_i16:
6925 case NEON::BI__builtin_neon_vget_lane_i32:
6926 case NEON::BI__builtin_neon_vget_lane_i64:
6927 case NEON::BI__builtin_neon_vget_lane_bf16:
6928 case NEON::BI__builtin_neon_vget_lane_f32:
6929 case NEON::BI__builtin_neon_vgetq_lane_i8:
6930 case NEON::BI__builtin_neon_vgetq_lane_i16:
6931 case NEON::BI__builtin_neon_vgetq_lane_i32:
6932 case NEON::BI__builtin_neon_vgetq_lane_i64:
6933 case NEON::BI__builtin_neon_vgetq_lane_bf16:
6934 case NEON::BI__builtin_neon_vgetq_lane_f32:
6935 case NEON::BI__builtin_neon_vduph_lane_bf16:
6936 case NEON::BI__builtin_neon_vduph_laneq_bf16:
6937 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
6938
6939 case NEON::BI__builtin_neon_vrndns_f32: {
6940 Value *Arg = EmitScalarExpr(E->getArg(0));
6941 llvm::Type *Tys[] = {Arg->getType()};
6942 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
6943 return Builder.CreateCall(F, {Arg}, "vrndn"); }
6944
6945 case NEON::BI__builtin_neon_vset_lane_i8:
6946 case NEON::BI__builtin_neon_vset_lane_i16:
6947 case NEON::BI__builtin_neon_vset_lane_i32:
6948 case NEON::BI__builtin_neon_vset_lane_i64:
6949 case NEON::BI__builtin_neon_vset_lane_bf16:
6950 case NEON::BI__builtin_neon_vset_lane_f32:
6951 case NEON::BI__builtin_neon_vsetq_lane_i8:
6952 case NEON::BI__builtin_neon_vsetq_lane_i16:
6953 case NEON::BI__builtin_neon_vsetq_lane_i32:
6954 case NEON::BI__builtin_neon_vsetq_lane_i64:
6955 case NEON::BI__builtin_neon_vsetq_lane_bf16:
6956 case NEON::BI__builtin_neon_vsetq_lane_f32:
6957 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
6958
6959 case NEON::BI__builtin_neon_vsha1h_u32:
6960 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
6961 "vsha1h");
6962 case NEON::BI__builtin_neon_vsha1cq_u32:
6963 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
6964 "vsha1h");
6965 case NEON::BI__builtin_neon_vsha1pq_u32:
6966 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
6967 "vsha1h");
6968 case NEON::BI__builtin_neon_vsha1mq_u32:
6969 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
6970 "vsha1h");
6971
6972 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
6973 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
6974 "vcvtbfp2bf");
6975 }
6976
6977 // The ARM _MoveToCoprocessor builtins put the input register value as
6978 // the first argument, but the LLVM intrinsic expects it as the third one.
6979 case ARM::BI_MoveToCoprocessor:
6980 case ARM::BI_MoveToCoprocessor2: {
6981 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
6982 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
6983 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
6984 Ops[3], Ops[4], Ops[5]});
6985 }
6986 case ARM::BI_BitScanForward:
6987 case ARM::BI_BitScanForward64:
6988 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
6989 case ARM::BI_BitScanReverse:
6990 case ARM::BI_BitScanReverse64:
6991 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
6992
6993 case ARM::BI_InterlockedAnd64:
6994 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
6995 case ARM::BI_InterlockedExchange64:
6996 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
6997 case ARM::BI_InterlockedExchangeAdd64:
6998 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
6999 case ARM::BI_InterlockedExchangeSub64:
7000 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
7001 case ARM::BI_InterlockedOr64:
7002 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
7003 case ARM::BI_InterlockedXor64:
7004 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
7005 case ARM::BI_InterlockedDecrement64:
7006 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
7007 case ARM::BI_InterlockedIncrement64:
7008 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
7009 case ARM::BI_InterlockedExchangeAdd8_acq:
7010 case ARM::BI_InterlockedExchangeAdd16_acq:
7011 case ARM::BI_InterlockedExchangeAdd_acq:
7012 case ARM::BI_InterlockedExchangeAdd64_acq:
7013 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
7014 case ARM::BI_InterlockedExchangeAdd8_rel:
7015 case ARM::BI_InterlockedExchangeAdd16_rel:
7016 case ARM::BI_InterlockedExchangeAdd_rel:
7017 case ARM::BI_InterlockedExchangeAdd64_rel:
7018 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
7019 case ARM::BI_InterlockedExchangeAdd8_nf:
7020 case ARM::BI_InterlockedExchangeAdd16_nf:
7021 case ARM::BI_InterlockedExchangeAdd_nf:
7022 case ARM::BI_InterlockedExchangeAdd64_nf:
7023 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
7024 case ARM::BI_InterlockedExchange8_acq:
7025 case ARM::BI_InterlockedExchange16_acq:
7026 case ARM::BI_InterlockedExchange_acq:
7027 case ARM::BI_InterlockedExchange64_acq:
7028 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
7029 case ARM::BI_InterlockedExchange8_rel:
7030 case ARM::BI_InterlockedExchange16_rel:
7031 case ARM::BI_InterlockedExchange_rel:
7032 case ARM::BI_InterlockedExchange64_rel:
7033 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
7034 case ARM::BI_InterlockedExchange8_nf:
7035 case ARM::BI_InterlockedExchange16_nf:
7036 case ARM::BI_InterlockedExchange_nf:
7037 case ARM::BI_InterlockedExchange64_nf:
7038 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
7039 case ARM::BI_InterlockedCompareExchange8_acq:
7040 case ARM::BI_InterlockedCompareExchange16_acq:
7041 case ARM::BI_InterlockedCompareExchange_acq:
7042 case ARM::BI_InterlockedCompareExchange64_acq:
7043 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
7044 case ARM::BI_InterlockedCompareExchange8_rel:
7045 case ARM::BI_InterlockedCompareExchange16_rel:
7046 case ARM::BI_InterlockedCompareExchange_rel:
7047 case ARM::BI_InterlockedCompareExchange64_rel:
7048 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
7049 case ARM::BI_InterlockedCompareExchange8_nf:
7050 case ARM::BI_InterlockedCompareExchange16_nf:
7051 case ARM::BI_InterlockedCompareExchange_nf:
7052 case ARM::BI_InterlockedCompareExchange64_nf:
7053 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
7054 case ARM::BI_InterlockedOr8_acq:
7055 case ARM::BI_InterlockedOr16_acq:
7056 case ARM::BI_InterlockedOr_acq:
7057 case ARM::BI_InterlockedOr64_acq:
7058 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
7059 case ARM::BI_InterlockedOr8_rel:
7060 case ARM::BI_InterlockedOr16_rel:
7061 case ARM::BI_InterlockedOr_rel:
7062 case ARM::BI_InterlockedOr64_rel:
7063 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
7064 case ARM::BI_InterlockedOr8_nf:
7065 case ARM::BI_InterlockedOr16_nf:
7066 case ARM::BI_InterlockedOr_nf:
7067 case ARM::BI_InterlockedOr64_nf:
7068 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
7069 case ARM::BI_InterlockedXor8_acq:
7070 case ARM::BI_InterlockedXor16_acq:
7071 case ARM::BI_InterlockedXor_acq:
7072 case ARM::BI_InterlockedXor64_acq:
7073 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
7074 case ARM::BI_InterlockedXor8_rel:
7075 case ARM::BI_InterlockedXor16_rel:
7076 case ARM::BI_InterlockedXor_rel:
7077 case ARM::BI_InterlockedXor64_rel:
7078 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
7079 case ARM::BI_InterlockedXor8_nf:
7080 case ARM::BI_InterlockedXor16_nf:
7081 case ARM::BI_InterlockedXor_nf:
7082 case ARM::BI_InterlockedXor64_nf:
7083 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
7084 case ARM::BI_InterlockedAnd8_acq:
7085 case ARM::BI_InterlockedAnd16_acq:
7086 case ARM::BI_InterlockedAnd_acq:
7087 case ARM::BI_InterlockedAnd64_acq:
7088 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
7089 case ARM::BI_InterlockedAnd8_rel:
7090 case ARM::BI_InterlockedAnd16_rel:
7091 case ARM::BI_InterlockedAnd_rel:
7092 case ARM::BI_InterlockedAnd64_rel:
7093 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
7094 case ARM::BI_InterlockedAnd8_nf:
7095 case ARM::BI_InterlockedAnd16_nf:
7096 case ARM::BI_InterlockedAnd_nf:
7097 case ARM::BI_InterlockedAnd64_nf:
7098 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
7099 case ARM::BI_InterlockedIncrement16_acq:
7100 case ARM::BI_InterlockedIncrement_acq:
7101 case ARM::BI_InterlockedIncrement64_acq:
7102 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
7103 case ARM::BI_InterlockedIncrement16_rel:
7104 case ARM::BI_InterlockedIncrement_rel:
7105 case ARM::BI_InterlockedIncrement64_rel:
7106 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
7107 case ARM::BI_InterlockedIncrement16_nf:
7108 case ARM::BI_InterlockedIncrement_nf:
7109 case ARM::BI_InterlockedIncrement64_nf:
7110 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
7111 case ARM::BI_InterlockedDecrement16_acq:
7112 case ARM::BI_InterlockedDecrement_acq:
7113 case ARM::BI_InterlockedDecrement64_acq:
7114 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
7115 case ARM::BI_InterlockedDecrement16_rel:
7116 case ARM::BI_InterlockedDecrement_rel:
7117 case ARM::BI_InterlockedDecrement64_rel:
7118 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
7119 case ARM::BI_InterlockedDecrement16_nf:
7120 case ARM::BI_InterlockedDecrement_nf:
7121 case ARM::BI_InterlockedDecrement64_nf:
7122 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
7123 }
7124
7125 // Get the last argument, which specifies the vector type.
7126 assert(HasExtraArg);
7127 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7128 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7129 if (!Result)
7130 return nullptr;
7131
7132 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7133 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7134 // Determine the overloaded type of this builtin.
7135 llvm::Type *Ty;
7136 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7137 Ty = FloatTy;
7138 else
7139 Ty = DoubleTy;
7140
7141 // Determine whether this is an unsigned conversion or not.
7142 bool usgn = Result->getZExtValue() == 1;
7143 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7144
7145 // Call the appropriate intrinsic.
7146 Function *F = CGM.getIntrinsic(Int, Ty);
7147 return Builder.CreateCall(F, Ops, "vcvtr");
7148 }
7149
7150 // Determine the type of this overloaded NEON intrinsic.
7151 NeonTypeFlags Type = Result->getZExtValue();
7152 bool usgn = Type.isUnsigned();
7153 bool rightShift = false;
7154
7155 llvm::FixedVectorType *VTy =
7156 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7157 getTarget().hasBFloat16Type());
7158 llvm::Type *Ty = VTy;
7159 if (!Ty)
7160 return nullptr;
7161
7162 // Many NEON builtins have identical semantics and uses in ARM and
7163 // AArch64. Emit these in a single function.
7164 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7165 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7166 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7167 if (Builtin)
7168 return EmitCommonNeonBuiltinExpr(
7169 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7170 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7171
7172 unsigned Int;
7173 switch (BuiltinID) {
7174 default: return nullptr;
7175 case NEON::BI__builtin_neon_vld1q_lane_v:
7176 // Handle 64-bit integer elements as a special case. Use shuffles of
7177 // one-element vectors to avoid poor code for i64 in the backend.
7178 if (VTy->getElementType()->isIntegerTy(64)) {
7179 // Extract the other lane.
7180 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7181 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7182 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7183 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7184 // Load the value as a one-element vector.
7185 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7186 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7187 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7188 Value *Align = getAlignmentValue32(PtrOp0);
7189 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7190 // Combine them.
7191 int Indices[] = {1 - Lane, Lane};
7192 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7193 }
6194 LLVM_FALLTHROUGH;
7195 case NEON::BI__builtin_neon_vld1_lane_v: {
7196 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7197 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7198 Value *Ld = Builder.CreateLoad(PtrOp0);
7199 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
7200 }
7201 case NEON::BI__builtin_neon_vqrshrn_n_v:
7202 Int =
7203 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
7204 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
7205 1, true);
7206 case NEON::BI__builtin_neon_vqrshrun_n_v:
7207 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
7208 Ops, "vqrshrun_n", 1, true);
7209 case NEON::BI__builtin_neon_vqshrn_n_v:
7210 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
7211 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
7212 1, true);
7213 case NEON::BI__builtin_neon_vqshrun_n_v:
7214 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
7215 Ops, "vqshrun_n", 1, true);
7216 case NEON::BI__builtin_neon_vrecpe_v:
7217 case NEON::BI__builtin_neon_vrecpeq_v:
7218 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
7219 Ops, "vrecpe");
7220 case NEON::BI__builtin_neon_vrshrn_n_v:
7221 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
7222 Ops, "vrshrn_n", 1, true);
7223 case NEON::BI__builtin_neon_vrsra_n_v:
7224 case NEON::BI__builtin_neon_vrsraq_n_v:
7225 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7226 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7227 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
7228 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
7229 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
7230 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
7231 case NEON::BI__builtin_neon_vsri_n_v:
7232 case NEON::BI__builtin_neon_vsriq_n_v:
7233 rightShift = true;
7234 LLVM_FALLTHROUGH;
7235 case NEON::BI__builtin_neon_vsli_n_v:
7236 case NEON::BI__builtin_neon_vsliq_n_v:
7237 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
7238 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
7239 Ops, "vsli_n");
7240 case NEON::BI__builtin_neon_vsra_n_v:
7241 case NEON::BI__builtin_neon_vsraq_n_v:
7242 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7243 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
7244 return Builder.CreateAdd(Ops[0], Ops[1]);
7245 case NEON::BI__builtin_neon_vst1q_lane_v:
7246 // Handle 64-bit integer elements as a special case. Use a shuffle to get
7247 // a one-element vector and avoid poor code for i64 in the backend.
7248 if (VTy->getElementType()->isIntegerTy(64)) {
7249 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7250 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
7251 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7252 Ops[2] = getAlignmentValue32(PtrOp0);
7253 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
7254 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
7255 Tys), Ops);
7256 }
7257 LLVM_FALLTHROUGH;
7258 case NEON::BI__builtin_neon_vst1_lane_v: {
7259 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7260 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
7261 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7262 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
7263 return St;
7264 }
7265 case NEON::BI__builtin_neon_vtbl1_v:
7266 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7267 Ops, "vtbl1");
7268 case NEON::BI__builtin_neon_vtbl2_v:
7269 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7270 Ops, "vtbl2");
7271 case NEON::BI__builtin_neon_vtbl3_v:
7272 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7273 Ops, "vtbl3");
7274 case NEON::BI__builtin_neon_vtbl4_v:
7275 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7276 Ops, "vtbl4");
7277 case NEON::BI__builtin_neon_vtbx1_v:
7278 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7279 Ops, "vtbx1");
7280 case NEON::BI__builtin_neon_vtbx2_v:
7281 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7282 Ops, "vtbx2");
7283 case NEON::BI__builtin_neon_vtbx3_v:
7284 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7285 Ops, "vtbx3");
7286 case NEON::BI__builtin_neon_vtbx4_v:
7287 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7288 Ops, "vtbx4");
7289 }
7290}
7291
7292template<typename Integer>
7293static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
7294 return E->getIntegerConstantExpr(Context)->getExtValue();
7295}
7296
7297static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
7298 llvm::Type *T, bool Unsigned) {
7299 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
7300 // which finds it convenient to specify signed/unsigned as a boolean flag.
7301 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
7302}
7303
7304static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
7305 uint32_t Shift, bool Unsigned) {
7306 // MVE helper function for integer shift right. This must handle signed vs
7307 // unsigned, and also deal specially with the case where the shift count is
7308 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
7309 // undefined behavior, but in MVE it's legal, so we must convert it to code
7310 // that is not undefined in IR.
7311 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
7312 ->getElementType()
7313 ->getPrimitiveSizeInBits();
7314 if (Shift == LaneBits) {
7315 // An unsigned shift of the full lane size always generates zero, so we can
7316 // simply emit a zero vector. A signed shift of the full lane size does the
7317 // same thing as shifting by one bit fewer.
7318 if (Unsigned)
7319 return llvm::Constant::getNullValue(V->getType());
7320 else
7321 --Shift;
7322 }
7323 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
7324}
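
A minimal scalar sketch of the lane-size rule handled above, assuming 32-bit lanes; this is illustrative only and not part of CGBuiltin.cpp:

#include <cstdint>

// Hypothetical helpers mirroring the MVE shift semantics described above.
static uint32_t mveUShr32(uint32_t V, uint32_t Shift) {
  if (Shift == 32)   // full-width unsigned shift is defined to give zero
    return 0;
  return V >> Shift;
}

static int32_t mveSShr32(int32_t V, uint32_t Shift) {
  if (Shift == 32)   // full-width signed shift behaves like a shift by 31
    Shift = 31;
  return V >> Shift; // arithmetic shift on the signed operand
}
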
7325
7326static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
7327 // MVE-specific helper function for a vector splat, which infers the element
7328 // count of the output vector by knowing that MVE vectors are all 128 bits
7329 // wide.
7330 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
7331 return Builder.CreateVectorSplat(Elements, V);
7332}
7333
7334static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
7335 CodeGenFunction *CGF,
7336 llvm::Value *V,
7337 llvm::Type *DestType) {
7338 // Convert one MVE vector type into another by reinterpreting its in-register
7339 // format.
7340 //
7341 // On a little-endian target, this is identical to a bitcast (which reinterprets
7342 // the memory format). On a big-endian target they're not necessarily the same, because
7343 // the register and memory formats map to each other differently depending on
7344 // the lane size.
7345 //
7346 // We generate a bitcast whenever we can (if we're little-endian, or if the
7347 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
7348 // that performs the different kind of reinterpretation.
7349 if (CGF->getTarget().isBigEndian() &&
7350 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
7351 return Builder.CreateCall(
7352 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
7353 {DestType, V->getType()}),
7354 V);
7355 } else {
7356 return Builder.CreateBitCast(V, DestType);
7357 }
7358}
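
As a hedged restatement of the branch above (not part of the original file), the choice between a plain bitcast and the arm.mve.vreinterpretq intrinsic depends only on endianness and whether the lane sizes differ:

// Hypothetical predicate mirroring the decision in ARMMVEVectorReinterpret.
static bool needsVReinterpretIntrinsic(bool IsBigEndian, unsigned SrcLaneBits,
                                       unsigned DstLaneBits) {
  // Little-endian, or equal lane sizes: a bitcast is enough.
  return IsBigEndian && SrcLaneBits != DstLaneBits;
}
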
7359
7360static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
7361 // Make a shufflevector that extracts every other element of a vector (evens
7362 // or odds, as desired).
7363 SmallVector<int, 16> Indices;
7364 unsigned InputElements =
7365 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
7366 for (unsigned i = 0; i < InputElements; i += 2)
7367 Indices.push_back(i + Odd);
7368 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7369 Indices);
7370}
7371
7372static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
7373 llvm::Value *V1) {
7374 // Make a shufflevector that interleaves two vectors element by element.
7375 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
7376 SmallVector<int, 16> Indices;
7377 unsigned InputElements =
7378 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
7379 for (unsigned i = 0; i < InputElements; i++) {
7380 Indices.push_back(i);
7381 Indices.push_back(i + InputElements);
7382 }
7383 return Builder.CreateShuffleVector(V0, V1, Indices);
7384}
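
For reference, a small self-contained sketch (not from this file) of the shuffle masks the two helpers above build: for an eight-lane input the unzip masks come out as {0,2,4,6} (evens) and {1,3,5,7} (odds), and zipping two four-lane vectors uses {0,4,1,5,2,6,3,7}.

#include <vector>

// Hypothetical mask builders matching VectorUnzip / VectorZip above.
static std::vector<int> unzipMask(unsigned InputElements, bool Odd) {
  std::vector<int> Indices;
  for (unsigned i = 0; i < InputElements; i += 2)
    Indices.push_back(i + Odd);           // every other lane, evens or odds
  return Indices;
}

static std::vector<int> zipMask(unsigned InputElements) {
  std::vector<int> Indices;
  for (unsigned i = 0; i < InputElements; i++) {
    Indices.push_back(i);                 // lane i of the first vector
    Indices.push_back(i + InputElements); // lane i of the second vector
  }
  return Indices;
}
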
7385
7386template<unsigned HighBit, unsigned OtherBits>
7387static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
7388 // MVE-specific helper function to make a vector splat of a constant such as
7389 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
7390 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
7391 unsigned LaneBits = T->getPrimitiveSizeInBits();
7392 uint32_t Value = HighBit << (LaneBits - 1);
7393 if (OtherBits)
7394 Value |= (1UL << (LaneBits - 1)) - 1;
7395 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
7396 return ARMMVEVectorSplat(Builder, Lane);
7397}
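
A small sketch, not part of the file, of the per-lane constant that ARMMVEConstantSplat<HighBit, OtherBits> produces; for 16-bit lanes, <1,0> yields 0x8000 (INT16_MIN), <0,1> yields 0x7fff (INT16_MAX) and <1,1> yields 0xffff (UINT16_MAX):

#include <cstdint>

// Hypothetical scalar version of the splat constant computed above.
static uint32_t splatLaneConstant(bool HighBit, bool OtherBits,
                                  unsigned LaneBits) {
  uint32_t Value = uint32_t(HighBit) << (LaneBits - 1); // the sign/top bit
  if (OtherBits)
    Value |= (1UL << (LaneBits - 1)) - 1;               // all lower bits
  return Value;
}
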
7398
7399static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
7400 llvm::Value *V,
7401 unsigned ReverseWidth) {
7402 // MVE-specific helper function which reverses the elements of a
7403 // vector within every (ReverseWidth)-bit collection of lanes.
7404 SmallVector<int, 16> Indices;
7405 unsigned LaneSize = V->getType()->getScalarSizeInBits();
7406 unsigned Elements = 128 / LaneSize;
7407 unsigned Mask = ReverseWidth / LaneSize - 1;
7408 for (unsigned i = 0; i < Elements; i++)
7409 Indices.push_back(i ^ Mask);
7410 return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
7411 Indices);
7412}
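
A hedged sketch (not from the original source) of the XOR-mask trick used above: reversing 16-bit lanes within every 32-bit group uses Mask = 32/16 - 1 = 1, giving the indices {1,0,3,2,5,4,7,6} for a 128-bit vector.

#include <vector>

// Hypothetical index builder matching ARMMVEVectorElementReverse.
static std::vector<int> reverseMask(unsigned LaneSize, unsigned ReverseWidth) {
  std::vector<int> Indices;
  unsigned Elements = 128 / LaneSize;          // MVE vectors are 128 bits wide
  unsigned Mask = ReverseWidth / LaneSize - 1; // lanes per group, minus one
  for (unsigned i = 0; i < Elements; i++)
    Indices.push_back(i ^ Mask);               // reverse within each group
  return Indices;
}
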
7413
7414Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
7415 const CallExpr *E,
7416 ReturnValueSlot ReturnValue,
7417 llvm::Triple::ArchType Arch) {
7418 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
7419 Intrinsic::ID IRIntr;
7420 unsigned NumVectors;
7421
7422 // Code autogenerated by Tablegen will handle all the simple builtins.
7423 switch (BuiltinID) {
7424 #include "clang/Basic/arm_mve_builtin_cg.inc"
7425
7426 // If we didn't match an MVE builtin id at all, go back to the
7427 // main EmitARMBuiltinExpr.
7428 default:
7429 return nullptr;
7430 }
7431
7432 // Anything that breaks from that switch is an MVE builtin that
7433 // needs handwritten code to generate.
7434
7435 switch (CustomCodeGenType) {
7436
7437 case CustomCodeGen::VLD24: {
7438 llvm::SmallVector<Value *, 4> Ops;
7439 llvm::SmallVector<llvm::Type *, 4> Tys;
7440
7441 auto MvecCType = E->getType();
7442 auto MvecLType = ConvertType(MvecCType);
7443 assert(MvecLType->isStructTy() &&
7444 "Return type for vld[24]q should be a struct");
7445 assert(MvecLType->getStructNumElements() == 1 &&
7446 "Return-type struct for vld[24]q should have one element");
7447 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7448 assert(MvecLTypeInner->isArrayTy() &&
7449 "Return-type struct for vld[24]q should contain an array");
7450 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7451 "Array member of return-type struct vld[24]q has wrong length");
7452 auto VecLType = MvecLTypeInner->getArrayElementType();
7453
7454 Tys.push_back(VecLType);
7455
7456 auto Addr = E->getArg(0);
7457 Ops.push_back(EmitScalarExpr(Addr));
7458 Tys.push_back(ConvertType(Addr->getType()));
7459
7460 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7461 Value *LoadResult = Builder.CreateCall(F, Ops);
7462 Value *MvecOut = UndefValue::get(MvecLType);
7463 for (unsigned i = 0; i < NumVectors; ++i) {
7464 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
7465 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
7466 }
7467
7468 if (ReturnValue.isNull())
7469 return MvecOut;
7470 else
7471 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
7472 }
7473
7474 case CustomCodeGen::VST24: {
7475 llvm::SmallVector<Value *, 4> Ops;
7476 llvm::SmallVector<llvm::Type *, 4> Tys;
7477
7478 auto Addr = E->getArg(0);
7479 Ops.push_back(EmitScalarExpr(Addr));
7480 Tys.push_back(ConvertType(Addr->getType()));
7481
7482 auto MvecCType = E->getArg(1)->getType();
7483 auto MvecLType = ConvertType(MvecCType);
7484 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
7485 assert(MvecLType->getStructNumElements() == 1 &&
7486 "Data-type struct for vst2q should have one element");
7487 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7488 assert(MvecLTypeInner->isArrayTy() &&
7489 "Data-type struct for vst2q should contain an array");
7490 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7491 "Array member of return-type struct vld[24]q has wrong length");
7492 auto VecLType = MvecLTypeInner->getArrayElementType();
7493
7494 Tys.push_back(VecLType);
7495
7496 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
7497 EmitAggExpr(E->getArg(1), MvecSlot);
7498 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
7499 for (unsigned i = 0; i < NumVectors; i++)
7500 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
7501
7502 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7503 Value *ToReturn = nullptr;
7504 for (unsigned i = 0; i < NumVectors; i++) {
7505 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
7506 ToReturn = Builder.CreateCall(F, Ops);
7507 Ops.pop_back();
7508 }
7509 return ToReturn;
7510 }
7511 }
7512 llvm_unreachable("unknown custom codegen type.");
7513}
7514
7515Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
7516 const CallExpr *E,
7517 ReturnValueSlot ReturnValue,
7518 llvm::Triple::ArchType Arch) {
7519 switch (BuiltinID) {
7520 default:
7521 return nullptr;
7522#include "clang/Basic/arm_cde_builtin_cg.inc"
7523 }
7524}
7525
7526static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
7527 const CallExpr *E,
7528 SmallVectorImpl<Value *> &Ops,
7529 llvm::Triple::ArchType Arch) {
7530 unsigned int Int = 0;
7531 const char *s = nullptr;
7532
7533 switch (BuiltinID) {
7534 default:
7535 return nullptr;
7536 case NEON::BI__builtin_neon_vtbl1_v:
7537 case NEON::BI__builtin_neon_vqtbl1_v:
7538 case NEON::BI__builtin_neon_vqtbl1q_v:
7539 case NEON::BI__builtin_neon_vtbl2_v:
7540 case NEON::BI__builtin_neon_vqtbl2_v:
7541 case NEON::BI__builtin_neon_vqtbl2q_v:
7542 case NEON::BI__builtin_neon_vtbl3_v:
7543 case NEON::BI__builtin_neon_vqtbl3_v:
7544 case NEON::BI__builtin_neon_vqtbl3q_v:
7545 case NEON::BI__builtin_neon_vtbl4_v:
7546 case NEON::BI__builtin_neon_vqtbl4_v:
7547 case NEON::BI__builtin_neon_vqtbl4q_v:
7548 break;
7549 case NEON::BI__builtin_neon_vtbx1_v:
7550 case NEON::BI__builtin_neon_vqtbx1_v:
7551 case NEON::BI__builtin_neon_vqtbx1q_v:
7552 case NEON::BI__builtin_neon_vtbx2_v:
7553 case NEON::BI__builtin_neon_vqtbx2_v:
7554 case NEON::BI__builtin_neon_vqtbx2q_v:
7555 case NEON::BI__builtin_neon_vtbx3_v:
7556 case NEON::BI__builtin_neon_vqtbx3_v:
7557 case NEON::BI__builtin_neon_vqtbx3q_v:
7558 case NEON::BI__builtin_neon_vtbx4_v:
7559 case NEON::BI__builtin_neon_vqtbx4_v:
7560 case NEON::BI__builtin_neon_vqtbx4q_v:
7561 break;
7562 }
7563
7564 assert(E->getNumArgs() >= 3);
7565
7566 // Get the last argument, which specifies the vector type.
7567 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7568 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
7569 if (!Result)
7570 return nullptr;
7571
7572 // Determine the type of this overloaded NEON intrinsic.
7573 NeonTypeFlags Type = Result->getZExtValue();
7574 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
7575 if (!Ty)
7576 return nullptr;
7577
7578 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7579
7580 // AArch64 scalar builtins are not overloaded; they do not have an extra
7581 // argument that specifies the vector type, so we need to handle each case.
7582 switch (BuiltinID) {
7583 case NEON::BI__builtin_neon_vtbl1_v: {
7584 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
7585 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
7586 "vtbl1");
7587 }
7588 case NEON::BI__builtin_neon_vtbl2_v: {
7589 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
7590 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
7591 "vtbl1");
7592 }
7593 case NEON::BI__builtin_neon_vtbl3_v: {
7594 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
7595 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
7596 "vtbl2");
7597 }
7598 case NEON::BI__builtin_neon_vtbl4_v: {
7599 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
7600 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
7601 "vtbl2");
7602 }
7603 case NEON::BI__builtin_neon_vtbx1_v: {
7604 Value *TblRes =
7605 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
7606 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
7607
7608 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
7609 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
7610 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7611
7612 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7613 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7614 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7615 }
7616 case NEON::BI__builtin_neon_vtbx2_v: {
7617 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
7618 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
7619 "vtbx1");
7620 }
7621 case NEON::BI__builtin_neon_vtbx3_v: {
7622 Value *TblRes =
7623 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
7624 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
7625
7626 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
7627 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
7628 TwentyFourV);
7629 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7630
7631 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7632 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7633 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7634 }
7635 case NEON::BI__builtin_neon_vtbx4_v: {
7636 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
7637 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
7638 "vtbx2");
7639 }
7640 case NEON::BI__builtin_neon_vqtbl1_v:
7641 case NEON::BI__builtin_neon_vqtbl1q_v:
7642 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
7643 case NEON::BI__builtin_neon_vqtbl2_v:
7644 case NEON::BI__builtin_neon_vqtbl2q_v: {
7645 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
7646 case NEON::BI__builtin_neon_vqtbl3_v:
7647 case NEON::BI__builtin_neon_vqtbl3q_v:
7648 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
7649 case NEON::BI__builtin_neon_vqtbl4_v:
7650 case NEON::BI__builtin_neon_vqtbl4q_v:
7651 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
7652 case NEON::BI__builtin_neon_vqtbx1_v:
7653 case NEON::BI__builtin_neon_vqtbx1q_v:
7654 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
7655 case NEON::BI__builtin_neon_vqtbx2_v:
7656 case NEON::BI__builtin_neon_vqtbx2q_v:
7657 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
7658 case NEON::BI__builtin_neon_vqtbx3_v:
7659 case NEON::BI__builtin_neon_vqtbx3q_v:
7660 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
7661 case NEON::BI__builtin_neon_vqtbx4_v:
7662 case NEON::BI__builtin_neon_vqtbx4q_v:
7663 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
7664 }
7665 }
7666
7667 if (!Int)
7668 return nullptr;
7669
7670 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
7671 return CGF.EmitNeonCall(F, Ops, s);
7672}
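
The vtbx1 and vtbx3 cases above emulate the missing AArch64 instruction with a table lookup plus a compare-and-select on the index vector. A per-lane scalar sketch of that behaviour, assuming an 8-entry table (illustrative only, not from this file):

#include <cstdint>

// Hypothetical scalar model of the vtbx1 lowering: out-of-range indices keep
// the corresponding lane of the fallback operand, in-range indices read the
// table, which is what the cmp/sext/and/or sequence above computes per lane.
static uint8_t vtbx1Lane(uint8_t Fallback, const uint8_t Table[8],
                         uint8_t Idx) {
  return Idx >= 8 ? Fallback : Table[Idx];
}
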
7673
7674Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
7675 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
7676 Op = Builder.CreateBitCast(Op, Int16Ty);
7677 Value *V = UndefValue::get(VTy);
7678 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7679 Op = Builder.CreateInsertElement(V, Op, CI);
7680 return Op;
7681}
7682
7683/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
7684/// access builtin. Only required if it can't be inferred from the base pointer
7685/// operand.
7686llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
7687 switch (TypeFlags.getMemEltType()) {
7688 case SVETypeFlags::MemEltTyDefault:
7689 return getEltType(TypeFlags);
7690 case SVETypeFlags::MemEltTyInt8:
7691 return Builder.getInt8Ty();
7692 case SVETypeFlags::MemEltTyInt16:
7693 return Builder.getInt16Ty();
7694 case SVETypeFlags::MemEltTyInt32:
7695 return Builder.getInt32Ty();
7696 case SVETypeFlags::MemEltTyInt64:
7697 return Builder.getInt64Ty();
7698 }
7699 llvm_unreachable("Unknown MemEltType");
7700}
7701
7702llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
7703 switch (TypeFlags.getEltType()) {
7704 default:
7705 llvm_unreachable("Invalid SVETypeFlag!");
7706
7707 case SVETypeFlags::EltTyInt8:
7708 return Builder.getInt8Ty();
7709 case SVETypeFlags::EltTyInt16:
7710 return Builder.getInt16Ty();
7711 case SVETypeFlags::EltTyInt32:
7712 return Builder.getInt32Ty();
7713 case SVETypeFlags::EltTyInt64:
7714 return Builder.getInt64Ty();
7715
7716 case SVETypeFlags::EltTyFloat16:
7717 return Builder.getHalfTy();
7718 case SVETypeFlags::EltTyFloat32:
7719 return Builder.getFloatTy();
7720 case SVETypeFlags::EltTyFloat64:
7721 return Builder.getDoubleTy();
7722
7723 case SVETypeFlags::EltTyBFloat16:
7724 return Builder.getBFloatTy();
7725
7726 case SVETypeFlags::EltTyBool8:
7727 case SVETypeFlags::EltTyBool16:
7728 case SVETypeFlags::EltTyBool32:
7729 case SVETypeFlags::EltTyBool64:
7730 return Builder.getInt1Ty();
7731 }
7732}
7733
7734// Return the llvm predicate vector type corresponding to the specified element
7735// TypeFlags.
7736llvm::ScalableVectorType *
7737CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
7738 switch (TypeFlags.getEltType()) {
7739 default: llvm_unreachable("Unhandled SVETypeFlag!");
7740
7741 case SVETypeFlags::EltTyInt8:
7742 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7743 case SVETypeFlags::EltTyInt16:
7744 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7745 case SVETypeFlags::EltTyInt32:
7746 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7747 case SVETypeFlags::EltTyInt64:
7748 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7749
7750 case SVETypeFlags::EltTyBFloat16:
7751 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7752 case SVETypeFlags::EltTyFloat16:
7753 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7754 case SVETypeFlags::EltTyFloat32:
7755 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7756 case SVETypeFlags::EltTyFloat64:
7757 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7758
7759 case SVETypeFlags::EltTyBool8:
7760 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7761 case SVETypeFlags::EltTyBool16:
7762 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7763 case SVETypeFlags::EltTyBool32:
7764 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7765 case SVETypeFlags::EltTyBool64:
7766 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7767 }
7768}
7769
7770// Return the llvm vector type corresponding to the specified element TypeFlags.
7771llvm::ScalableVectorType *
7772CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
7773 switch (TypeFlags.getEltType()) {
7774 default:
7775 llvm_unreachable("Invalid SVETypeFlag!");
7776
7777 case SVETypeFlags::EltTyInt8:
7778 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
7779 case SVETypeFlags::EltTyInt16:
7780 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
7781 case SVETypeFlags::EltTyInt32:
7782 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
7783 case SVETypeFlags::EltTyInt64:
7784 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
7785
7786 case SVETypeFlags::EltTyFloat16:
7787 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
7788 case SVETypeFlags::EltTyBFloat16:
7789 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
7790 case SVETypeFlags::EltTyFloat32:
7791 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
7792 case SVETypeFlags::EltTyFloat64:
7793 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
7794
7795 case SVETypeFlags::EltTyBool8:
7796 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
7797 case SVETypeFlags::EltTyBool16:
7798 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
7799 case SVETypeFlags::EltTyBool32:
7800 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
7801 case SVETypeFlags::EltTyBool64:
7802 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
7803 }
7804}
7805
7806llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
7807 Function *Ptrue =
7808 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
7809 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
7810}
7811
7812constexpr unsigned SVEBitsPerBlock = 128;
7813
7814static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
7815 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
7816 return llvm::ScalableVectorType::get(EltTy, NumElts);
7817}
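
As a quick worked example (not part of the file), SVEBitsPerBlock = 128 means an i8 element maps to <vscale x 16 x i8>, i32 to <vscale x 4 x i32>, and double to <vscale x 2 x double>; the sketch below simply restates the arithmetic:

// Hypothetical helper: minimum element count for a given element width.
static unsigned sveMinElementCount(unsigned EltSizeInBits) {
  return 128 /* SVEBitsPerBlock */ / EltSizeInBits; // e.g. 32 bits -> 4 lanes
}
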
7818
7819// Reinterpret the input predicate so that it can be used to correctly isolate
7820// the elements of the specified datatype.
7821Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
7822 llvm::ScalableVectorType *VTy) {
7823 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
7824 if (Pred->getType() == RTy)
7825 return Pred;
7826
7827 unsigned IntID;
7828 llvm::Type *IntrinsicTy;
7829 switch (VTy->getMinNumElements()) {
7830 default:
7831 llvm_unreachable("unsupported element count!");
7832 case 2:
7833 case 4:
7834 case 8:
7835 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
7836 IntrinsicTy = RTy;
7837 break;
7838 case 16:
7839 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
7840 IntrinsicTy = Pred->getType();
7841 break;
7842 }
7843
7844 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
7845 Value *C = Builder.CreateCall(F, Pred);
7846 assert(C->getType() == RTy && "Unexpected return type!");
7847 return C;
7848}
7849
7850Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
7851 SmallVectorImpl<Value *> &Ops,
7852 unsigned IntID) {
7853 auto *ResultTy = getSVEType(TypeFlags);
7854 auto *OverloadedTy =
7855 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
7856
7857 // At the ACLE level there's only one predicate type, svbool_t, which is
7858 // mapped to <n x 16 x i1>. However, this might be incompatible with the
7859 // actual type being loaded. For example, when loading doubles (i64) the
7860 // predicate should be <n x 2 x i1> instead. At the IR level the type of
7861 // the predicate and the data being loaded must match. Cast accordingly.
7862 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
7863
7864 Function *F = nullptr;
7865 if (Ops[1]->getType()->isVectorTy())
7866 // This is the "vector base, scalar offset" case. In order to uniquely
7867 // map this built-in to an LLVM IR intrinsic, we need both the return type
7868 // and the type of the vector base.
7869 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
7870 else
7871 // This is the "scalar base, vector offset case". The type of the offset
7872 // is encoded in the name of the intrinsic. We only need to specify the
7873 // return type in order to uniquely map this built-in to an LLVM IR
7874 // intrinsic.
7875 F = CGM.getIntrinsic(IntID, OverloadedTy);
7876
7877 // Pass 0 when the offset is missing. This can only be applied when using
7878 // the "vector base" addressing mode for which ACLE allows no offset. The
7879 // corresponding LLVM IR always requires an offset.
7880 if (Ops.size() == 2) {
7881 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
7882 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7883 }
7884
7885 // For "vector base, scalar index" scale the index so that it becomes a
7886 // scalar offset.
7887 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
7888 unsigned BytesPerElt =
7889 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
7890 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7891 Ops[2] = Builder.CreateMul(Ops[2], Scale);
7892 }
7893
7894 Value *Call = Builder.CreateCall(F, Ops);
7895
7896 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
7897 // other cases it's folded into a nop.
7898 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
7899 : Builder.CreateSExt(Call, ResultTy);
7900}
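
For the "vector base, scalar index" form handled above, the index is rescaled into a byte offset before the intrinsic call. A minimal sketch of that scaling (not from the original file):

#include <cstdint>

// Hypothetical scalar version of the index-to-offset scaling above: with
// 64-bit elements, an index of 3 becomes a byte offset of 24.
static uint64_t scaleIndexToByteOffset(uint64_t Index, unsigned EltSizeInBits) {
  return Index * (EltSizeInBits / 8);
}
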
7901
7902Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
7903 SmallVectorImpl<Value *> &Ops,
7904 unsigned IntID) {
7905 auto *SrcDataTy = getSVEType(TypeFlags);
7906 auto *OverloadedTy =
7907 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
7908
7909 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
7910 // it's the first argument. Move it accordingly.
7911 Ops.insert(Ops.begin(), Ops.pop_back_val());
7912
7913 Function *F = nullptr;
7914 if (Ops[2]->getType()->isVectorTy())
7915 // This is the "vector base, scalar offset" case. In order to uniquely
7916 // map this built-in to an LLVM IR intrinsic, we need both the return type
7917 // and the type of the vector base.
7918 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
7919 else
7920 // This is the "scalar base, vector offset case". The type of the offset
7921 // is encoded in the name of the intrinsic. We only need to specify the
7922 // return type in order to uniquely map this built-in to an LLVM IR
7923 // intrinsic.
7924 F = CGM.getIntrinsic(IntID, OverloadedTy);
7925
7926 // Pass 0 when the offset is missing. This can only be applied when using
7927 // the "vector base" addressing mode for which ACLE allows no offset. The
7928 // corresponding LLVM IR always requires an offset.
7929 if (Ops.size() == 3) {
7930 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
7931 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7932 }
7933
7934 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
7935 // folded into a nop.
7936 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
7937
7938 // At the ACLE level there's only one predicate type, svbool_t, which is
7939 // mapped to <n x 16 x i1>. However, this might be incompatible with the
7940 // actual type being stored. For example, when storing doubles (i64) the
7941 // predicate should be <n x 2 x i1> instead. At the IR level the type of
7942 // the predicate and the data being stored must match. Cast accordingly.
7943 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
7944
7945 // For "vector base, scalar index" scale the index so that it becomes a
7946 // scalar offset.
7947 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
7948 unsigned BytesPerElt =
7949 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
7950 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7951 Ops[3] = Builder.CreateMul(Ops[3], Scale);
7952 }
7953
7954 return Builder.CreateCall(F, Ops);
7955}
7956
7957Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
7958 SmallVectorImpl<Value *> &Ops,
7959 unsigned IntID) {
7960 // The gather prefetches are overloaded on the vector input - this can either
7961 // be the vector of base addresses or vector of offsets.
7962 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
7963 if (!OverloadedTy)
7964 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
7965
7966 // Cast the predicate from svbool_t to the right number of elements.
7967 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
7968
7969 // vector + imm addressing modes
7970 if (Ops[1]->getType()->isVectorTy()) {
7971 if (Ops.size() == 3) {
7972 // Pass 0 for 'vector+imm' when the index is omitted.
7973 Ops.push_back(ConstantInt::get(Int64Ty, 0));
7974
7975 // The sv_prfop is the last operand in the builtin and IR intrinsic.
7976 std::swap(Ops[2], Ops[3]);
7977 } else {
7978 // Index needs to be passed as scaled offset.
7979 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
7980 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
7981 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
7982 Ops[2] = Builder.CreateMul(Ops[2], Scale);
7983 }
7984 }
7985
7986 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
7987 return Builder.CreateCall(F, Ops);
7988}
7989
7990Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
7991 SmallVectorImpl<Value*> &Ops,
7992 unsigned IntID) {
7993 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
7994 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
7995 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
7996
7997 unsigned N;
7998 switch (IntID) {
7999 case Intrinsic::aarch64_sve_ld2:
8000 N = 2;
8001 break;
8002 case Intrinsic::aarch64_sve_ld3:
8003 N = 3;
8004 break;
8005 case Intrinsic::aarch64_sve_ld4:
8006 N = 4;
8007 break;
8008 default:
8009 llvm_unreachable("unknown intrinsic!");
8010 }
8011 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
8012 VTy->getElementCount() * N);
8013
8014 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8015 Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy);
8016 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8017 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8018 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8019
8020 Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8021 return Builder.CreateCall(F, { Predicate, BasePtr });
8022}
8023
8024Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
8025 SmallVectorImpl<Value*> &Ops,
8026 unsigned IntID) {
8027 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8028 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8029 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8030
8031 unsigned N;
8032 switch (IntID) {
8033 case Intrinsic::aarch64_sve_st2:
8034 N = 2;
8035 break;
8036 case Intrinsic::aarch64_sve_st3:
8037 N = 3;
8038 break;
8039 case Intrinsic::aarch64_sve_st4:
8040 N = 4;
8041 break;
8042 default:
8043 llvm_unreachable("unknown intrinsic!");
8044 }
8045 auto TupleTy =
8046 llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8047
8048 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8049 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8050 Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8051 Value *Val = Ops.back();
8052 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8053 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8054
8055 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8056 // need to break up the tuple vector.
8057 SmallVector<llvm::Value*, 5> Operands;
8058 Function *FExtr =
8059 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8060 for (unsigned I = 0; I < N; ++I)
8061 Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8062 Operands.append({Predicate, BasePtr});
8063
8064 Function *F = CGM.getIntrinsic(IntID, { VTy });
8065 return Builder.CreateCall(F, Operands);
8066}
8067
8068// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8069// svpmullt_pair intrinsics, with the exception that their results are bitcast
8070// to a wider type.
8071Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
8072 SmallVectorImpl<Value *> &Ops,
8073 unsigned BuiltinID) {
8074 // Splat scalar operand to vector (intrinsics with _n infix)
8075 if (TypeFlags.hasSplatOperand()) {
8076 unsigned OpNo = TypeFlags.getSplatOperand();
8077 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8078 }
8079
8080 // The pair-wise function has a narrower overloaded type.
8081 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8082 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8083
8084 // Now bitcast to the wider result type.
8085 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8086 return EmitSVEReinterpret(Call, Ty);
8087}
8088
8089Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
8090 ArrayRef<Value *> Ops, unsigned BuiltinID) {
8091 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8092 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8093 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8094}
8095
8096Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
8097 SmallVectorImpl<Value *> &Ops,
8098 unsigned BuiltinID) {
8099 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8100 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8101 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8102
8103 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8104 Value *BasePtr = Ops[1];
8105
8106 // Implement the index operand if not omitted.
8107 if (Ops.size() > 3) {
8108 BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8109 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8110 }
8111
8112 // Prefetch intrinsics always expect an i8*
8113 BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8114 Value *PrfOp = Ops.back();
8115
8116 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8117 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8118}
8119
8120Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8121 llvm::Type *ReturnTy,
8122 SmallVectorImpl<Value *> &Ops,
8123 unsigned BuiltinID,
8124 bool IsZExtReturn) {
8125 QualType LangPTy = E->getArg(1)->getType();
8126 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8127 LangPTy->getAs<PointerType>()->getPointeeType());
8128
8129 // The vector type that is returned may be different from the
8130 // eventual type loaded from memory.
8131 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8132 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8133
8134 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8135 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8136 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8137 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8138
8139 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8140 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8141 Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8142
8143 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
8144 : Builder.CreateSExt(Load, VectorTy);
8145}
8146
8147Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8148 SmallVectorImpl<Value *> &Ops,
8149 unsigned BuiltinID) {
8150 QualType LangPTy = E->getArg(1)->getType();
8151 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8152 LangPTy->getAs<PointerType>()->getPointeeType());
8153
8154 // The vector type that is stored may be different from the
8155 // eventual type stored to memory.
8156 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8157 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8158
8159 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8160 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8161 Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8162 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8163
8164 // Last value is always the data
8165 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8166
8167 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8168 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8169 return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8170}
8171
8172// Limit the usage of scalable llvm IR generated by the ACLE by using the
8173// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
8174Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8175 auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8176 return Builder.CreateCall(F, Scalar);
8177}
8178
8179Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
8180 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8181}
8182
8183Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8184 // FIXME: For big endian this needs an additional REV, or needs a separate
8185 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
8186 // instruction is defined as 'bitwise' equivalent from memory point of
8187 // view (when storing/reloading), whereas the svreinterpret builtin
8188 // implements bitwise equivalent cast from register point of view.
8189 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8190 return Builder.CreateBitCast(Val, Ty);
8191}
8192
8193static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8194 SmallVectorImpl<Value *> &Ops) {
8195 auto *SplatZero = Constant::getNullValue(Ty);
8196 Ops.insert(Ops.begin(), SplatZero);
8197}
8198
8199static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8200 SmallVectorImpl<Value *> &Ops) {
8201 auto *SplatUndef = UndefValue::get(Ty);
8202 Ops.insert(Ops.begin(), SplatUndef);
8203}
8204
8205SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
8206 SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
8207 if (TypeFlags.isOverloadNone())
8208 return {};
8209
8210 llvm::Type *DefaultType = getSVEType(TypeFlags);
8211
8212 if (TypeFlags.isOverloadWhile())
8213 return {DefaultType, Ops[1]->getType()};
8214
8215 if (TypeFlags.isOverloadWhileRW())
8216 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
8217
8218 if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
8219 return {Ops[0]->getType(), Ops.back()->getType()};
8220
8221 if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
8222 return {ResultType, Ops[0]->getType()};
8223
8224 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
8225 return {DefaultType};
8226}
8227
8228Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
8229 const CallExpr *E) {
8230 // Find out if any arguments are required to be integer constant expressions.
8231 unsigned ICEArguments = 0;
8232 ASTContext::GetBuiltinTypeError Error;
8233 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8234 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8235
8236 llvm::Type *Ty = ConvertType(E->getType());
8237 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
8238 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
8239 Value *Val = EmitScalarExpr(E->getArg(0));
8240 return EmitSVEReinterpret(Val, Ty);
8241 }
8242
8243 llvm::SmallVector<Value *, 4> Ops;
8244 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
8245 if ((ICEArguments & (1 << i)) == 0)
8246 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8247 else {
8248 // If this is required to be a constant, constant fold it so that we know
8249 // that the generated intrinsic gets a ConstantInt.
8250 Optional<llvm::APSInt> Result =
8251 E->getArg(i)->getIntegerConstantExpr(getContext());
8252 assert(Result && "Expected argument to be a constant");
8253
8254 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
8255 // truncate because the immediate has been range checked and no valid
8256 // immediate requires more than a handful of bits.
8257 *Result = Result->extOrTrunc(32);
8258 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
8259 }
8260 }
8261
8262 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
8263 AArch64SVEIntrinsicsProvenSorted);
8264 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8265 if (TypeFlags.isLoad())
8266 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
8267 TypeFlags.isZExtReturn());
8268 else if (TypeFlags.isStore())
8269 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
8270 else if (TypeFlags.isGatherLoad())
8271 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8272 else if (TypeFlags.isScatterStore())
8273 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8274 else if (TypeFlags.isPrefetch())
8275 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8276 else if (TypeFlags.isGatherPrefetch())
8277 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8278 else if (TypeFlags.isStructLoad())
8279 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8280 else if (TypeFlags.isStructStore())
8281 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8282 else if (TypeFlags.isUndef())
8283 return UndefValue::get(Ty);
8284 else if (Builtin->LLVMIntrinsic != 0) {
8285 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
8286 InsertExplicitZeroOperand(Builder, Ty, Ops);
8287
8288 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
8289 InsertExplicitUndefOperand(Builder, Ty, Ops);
8290
8291 // Some ACLE builtins leave out the argument to specify the predicate
8292 // pattern, which is expected to be expanded to an SV_ALL pattern.
8293 if (TypeFlags.isAppendSVALL())
8294 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
8295 if (TypeFlags.isInsertOp1SVALL())
8296 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
8297
8298 // Predicates must match the main datatype.
8299 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
8300 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
8301 if (PredTy->getElementType()->isIntegerTy(1))
8302 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
8303
8304 // Splat scalar operand to vector (intrinsics with _n infix)
8305 if (TypeFlags.hasSplatOperand()) {
8306 unsigned OpNo = TypeFlags.getSplatOperand();
8307 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8308 }
8309
8310 if (TypeFlags.isReverseCompare())
8311 std::swap(Ops[1], Ops[2]);
8312
8313 if (TypeFlags.isReverseUSDOT())
8314 std::swap(Ops[1], Ops[2]);
8315
8316 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
8317 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
8318 llvm::Type *OpndTy = Ops[1]->getType();
8319 auto *SplatZero = Constant::getNullValue(OpndTy);
8320 Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
8321 Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
8322 }
8323
8324 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
8325 getSVEOverloadTypes(TypeFlags, Ty, Ops));
8326 Value *Call = Builder.CreateCall(F, Ops);
8327
8328 // Predicate results must be converted to svbool_t.
8329 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
8330 if (PredTy->getScalarType()->isIntegerTy(1))
8331 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8332
8333 return Call;
8334 }
8335
8336 switch (BuiltinID) {
8337 default:
8338 return nullptr;
8339
8340 case SVE::BI__builtin_sve_svmov_b_z: {
8341 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
8342 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8343 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8344 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
8345 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
8346 }
8347
8348 case SVE::BI__builtin_sve_svnot_b_z: {
8349 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
8350 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8351 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8352 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
8353 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
8354 }
8355
8356 case SVE::BI__builtin_sve_svmovlb_u16:
8357 case SVE::BI__builtin_sve_svmovlb_u32:
8358 case SVE::BI__builtin_sve_svmovlb_u64:
8359 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
8360
8361 case SVE::BI__builtin_sve_svmovlb_s16:
8362 case SVE::BI__builtin_sve_svmovlb_s32:
8363 case SVE::BI__builtin_sve_svmovlb_s64:
8364 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
8365
8366 case SVE::BI__builtin_sve_svmovlt_u16:
8367 case SVE::BI__builtin_sve_svmovlt_u32:
8368 case SVE::BI__builtin_sve_svmovlt_u64:
8369 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
8370
8371 case SVE::BI__builtin_sve_svmovlt_s16:
8372 case SVE::BI__builtin_sve_svmovlt_s32:
8373 case SVE::BI__builtin_sve_svmovlt_s64:
8374 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
8375
8376 case SVE::BI__builtin_sve_svpmullt_u16:
8377 case SVE::BI__builtin_sve_svpmullt_u64:
8378 case SVE::BI__builtin_sve_svpmullt_n_u16:
8379 case SVE::BI__builtin_sve_svpmullt_n_u64:
8380 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
8381
8382 case SVE::BI__builtin_sve_svpmullb_u16:
8383 case SVE::BI__builtin_sve_svpmullb_u64:
8384 case SVE::BI__builtin_sve_svpmullb_n_u16:
8385 case SVE::BI__builtin_sve_svpmullb_n_u64:
8386 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
8387
8388 case SVE::BI__builtin_sve_svdup_n_b8:
8389 case SVE::BI__builtin_sve_svdup_n_b16:
8390 case SVE::BI__builtin_sve_svdup_n_b32:
8391 case SVE::BI__builtin_sve_svdup_n_b64: {
8392 Value *CmpNE =
8393 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
8394 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
8395 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
8396 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
8397 }
8398
8399 case SVE::BI__builtin_sve_svdupq_n_b8:
8400 case SVE::BI__builtin_sve_svdupq_n_b16:
8401 case SVE::BI__builtin_sve_svdupq_n_b32:
8402 case SVE::BI__builtin_sve_svdupq_n_b64:
8403 case SVE::BI__builtin_sve_svdupq_n_u8:
8404 case SVE::BI__builtin_sve_svdupq_n_s8:
8405 case SVE::BI__builtin_sve_svdupq_n_u64:
8406 case SVE::BI__builtin_sve_svdupq_n_f64:
8407 case SVE::BI__builtin_sve_svdupq_n_s64:
8408 case SVE::BI__builtin_sve_svdupq_n_u16:
8409 case SVE::BI__builtin_sve_svdupq_n_f16:
8410 case SVE::BI__builtin_sve_svdupq_n_bf16:
8411 case SVE::BI__builtin_sve_svdupq_n_s16:
8412 case SVE::BI__builtin_sve_svdupq_n_u32:
8413 case SVE::BI__builtin_sve_svdupq_n_f32:
8414 case SVE::BI__builtin_sve_svdupq_n_s32: {
8415 // These builtins are implemented by storing each element to an array and using
8416 // ld1rq to materialize a vector.
8417 unsigned NumOpnds = Ops.size();
8418
8419 bool IsBoolTy =
8420 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
8421
8422 // For svdupq_n_b* the element type is an integer of width 128/numelts,
8423 // so that the compare can use the width that is natural for the expected
8424 // number of predicate lanes.
8425 llvm::Type *EltTy = Ops[0]->getType();
8426 if (IsBoolTy)
8427 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
8428
8429 Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
8430 CharUnits::fromQuantity(16));
8431 for (unsigned I = 0; I < NumOpnds; ++I)
8432 Builder.CreateDefaultAlignedStore(
8433 IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
8434 Builder.CreateGEP(Alloca.getPointer(),
8435 {Builder.getInt64(0), Builder.getInt64(I)}));
8436
8437 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8438 Value *Pred = EmitSVEAllTruePred(TypeFlags);
8439
8440 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
8441 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
8442 Value *Alloca0 = Builder.CreateGEP(
8443 Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
8444 Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
8445
8446 if (!IsBoolTy)
8447 return LD1RQ;
8448
8449 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
8450 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
8451 : Intrinsic::aarch64_sve_cmpne_wide,
8452 OverloadedTy);
8453 Value *Call =
8454 Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
8455 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8456 }
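// Editor's note (illustrative sketch, not part of the original source): for a
// call such as svdupq_n_b16(1, 0, 1, 0, 1, 0, 1, 0) the path above conceptually
// produces IR along these lines (eight operands, so the element type is i16):
//   %arr = alloca [8 x i16], align 16
//   ; ...eight stores of the zero-extended i1 arguments into %arr...
//   %ld  = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<all-true>, i16* %arr)
//   %pg  = call <vscale x 8 x i1>  @llvm.aarch64.sve.cmpne.wide.nxv8i16(<all-true>, %ld, <0 splat>)
// i.e. the 128-bit quadword is built in memory, replicated with ld1rq, and (for
// the _b forms) converted back to a predicate by comparing against zero.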
8457
8458 case SVE::BI__builtin_sve_svpfalse_b:
8459 return ConstantInt::getFalse(Ty);
8460
8461 case SVE::BI__builtin_sve_svlen_bf16:
8462 case SVE::BI__builtin_sve_svlen_f16:
8463 case SVE::BI__builtin_sve_svlen_f32:
8464 case SVE::BI__builtin_sve_svlen_f64:
8465 case SVE::BI__builtin_sve_svlen_s8:
8466 case SVE::BI__builtin_sve_svlen_s16:
8467 case SVE::BI__builtin_sve_svlen_s32:
8468 case SVE::BI__builtin_sve_svlen_s64:
8469 case SVE::BI__builtin_sve_svlen_u8:
8470 case SVE::BI__builtin_sve_svlen_u16:
8471 case SVE::BI__builtin_sve_svlen_u32:
8472 case SVE::BI__builtin_sve_svlen_u64: {
8473 SVETypeFlags TF(Builtin->TypeModifier);
8474 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8475 auto *NumEls =
8476 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
8477
8478 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
8479 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
8480 }
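// Editor's note (illustrative): svlen returns min-element-count * vscale. For
// example, svlen_u32 lowers to
//   mul i64 4, (call i64 @llvm.vscale.i64())
// which evaluates to 8 on a 256-bit SVE implementation (vscale == 2).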
8481
8482 case SVE::BI__builtin_sve_svtbl2_u8:
8483 case SVE::BI__builtin_sve_svtbl2_s8:
8484 case SVE::BI__builtin_sve_svtbl2_u16:
8485 case SVE::BI__builtin_sve_svtbl2_s16:
8486 case SVE::BI__builtin_sve_svtbl2_u32:
8487 case SVE::BI__builtin_sve_svtbl2_s32:
8488 case SVE::BI__builtin_sve_svtbl2_u64:
8489 case SVE::BI__builtin_sve_svtbl2_s64:
8490 case SVE::BI__builtin_sve_svtbl2_f16:
8491 case SVE::BI__builtin_sve_svtbl2_bf16:
8492 case SVE::BI__builtin_sve_svtbl2_f32:
8493 case SVE::BI__builtin_sve_svtbl2_f64: {
8494 SVETypeFlags TF(Builtin->TypeModifier);
8495 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
8496 auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
8497 Function *FExtr =
8498 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8499 Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
8500 Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
8501 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
8502 return Builder.CreateCall(F, {V0, V1, Ops[1]});
8503 }
8504 }
8505
8506 /// Should not happen
8507 return nullptr;
8508}
8509
8510Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
8511 const CallExpr *E,
8512 llvm::Triple::ArchType Arch) {
8513 if (BuiltinID >= AArch64::FirstSVEBuiltin &&
8514 BuiltinID <= AArch64::LastSVEBuiltin)
8515 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
8516
8517 unsigned HintID = static_cast<unsigned>(-1);
8518 switch (BuiltinID) {
8519 default: break;
8520 case AArch64::BI__builtin_arm_nop:
8521 HintID = 0;
8522 break;
8523 case AArch64::BI__builtin_arm_yield:
8524 case AArch64::BI__yield:
8525 HintID = 1;
8526 break;
8527 case AArch64::BI__builtin_arm_wfe:
8528 case AArch64::BI__wfe:
8529 HintID = 2;
8530 break;
8531 case AArch64::BI__builtin_arm_wfi:
8532 case AArch64::BI__wfi:
8533 HintID = 3;
8534 break;
8535 case AArch64::BI__builtin_arm_sev:
8536 case AArch64::BI__sev:
8537 HintID = 4;
8538 break;
8539 case AArch64::BI__builtin_arm_sevl:
8540 case AArch64::BI__sevl:
8541 HintID = 5;
8542 break;
8543 }
8544
8545 if (HintID != static_cast<unsigned>(-1)) {
8546 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
8547 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
8548 }
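// Note: the HintID values follow the AArch64 HINT instruction encoding
// (0 = NOP, 1 = YIELD, 2 = WFE, 3 = WFI, 4 = SEV, 5 = SEVL), so e.g. __wfe()
// becomes call void @llvm.aarch64.hint(i32 2).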
8549
8550 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
8551 Value *Address = EmitScalarExpr(E->getArg(0));
8552 Value *RW = EmitScalarExpr(E->getArg(1));
8553 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
8554 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
8555 Value *IsData = EmitScalarExpr(E->getArg(4));
8556
8557 Value *Locality = nullptr;
8558 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
8559 // Temporal fetch: convert the cache level to an LLVM prefetch locality.
8560 Locality = llvm::ConstantInt::get(Int32Ty,
8561 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
8562 } else {
8563 // Streaming fetch.
8564 Locality = llvm::ConstantInt::get(Int32Ty, 0);
8565 }
8566
8567 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
8568 // PLDL3STRM or PLDL2STRM.
8569 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
8570 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
8571 }
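// Note: for a temporal fetch the llvm.prefetch locality is (3 - CacheLevel),
// so cache level 0 maps to locality 3 and level 2 to locality 1; streaming
// fetches always use locality 0. For example (argument meanings assumed from
// the ACLE), __builtin_arm_prefetch(p, /*rw*/0, /*level*/0, /*retention*/0,
// /*data*/1) becomes call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1).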
8572
8573 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
8574 assert((getContext().getTypeSize(E->getType()) == 32) &&
8575 "rbit of unusual size!");
8576 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8577 return Builder.CreateCall(
8578 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8579 }
8580 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
8581 assert((getContext().getTypeSize(E->getType()) == 64) &&
8582 "rbit of unusual size!");
8583 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8584 return Builder.CreateCall(
8585 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8586 }
8587
8588 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
8589 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8590 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
8591 "cls");
8592 }
8593 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
8594 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8595 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
8596 "cls");
8597 }
8598
8599 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
8600 assert((getContext().getTypeSize(E->getType()) == 32) &&
8601 "__jcvt of unusual size!");
8602 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8603 return Builder.CreateCall(
8604 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
8605 }
8606
8607 if (BuiltinID == AArch64::BI__clear_cache) {
8608 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
8609 const FunctionDecl *FD = E->getDirectCallee();
8610 Value *Ops[2];
8611 for (unsigned i = 0; i < 2; i++)
8612 Ops[i] = EmitScalarExpr(E->getArg(i));
8613 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
8614 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
8615 StringRef Name = FD->getName();
8616 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8617 }
8618
8619 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8620 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
8621 getContext().getTypeSize(E->getType()) == 128) {
8622 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8623 ? Intrinsic::aarch64_ldaxp
8624 : Intrinsic::aarch64_ldxp);
8625
8626 Value *LdPtr = EmitScalarExpr(E->getArg(0));
8627 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
8628 "ldxp");
8629
8630 Value *Val0 = Builder.CreateExtractValue(Val, 1);
8631 Value *Val1 = Builder.CreateExtractValue(Val, 0);
8632 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
8633 Val0 = Builder.CreateZExt(Val0, Int128Ty);
8634 Val1 = Builder.CreateZExt(Val1, Int128Ty);
8635
8636 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
8637 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
8638 Val = Builder.CreateOr(Val, Val1);
8639 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
8640 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
8641 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
8642 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
8643
8644 QualType Ty = E->getType();
8645 llvm::Type *RealResTy = ConvertType(Ty);
8646 llvm::Type *PtrTy = llvm::IntegerType::get(
8647 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
8648 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
8649
8650 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
8651 ? Intrinsic::aarch64_ldaxr
8652 : Intrinsic::aarch64_ldxr,
8653 PtrTy);
8654 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
8655
8656 if (RealResTy->isPointerTy())
8657 return Builder.CreateIntToPtr(Val, RealResTy);
8658
8659 llvm::Type *IntResTy = llvm::IntegerType::get(
8660 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
8661 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
8662 return Builder.CreateBitCast(Val, RealResTy);
8663 }
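// Note on the 128-bit path above: ldxp/ldaxp return an {i64, i64} pair; both
// halves are zero-extended to i128, the high half is shifted left by 64 and
// OR-ed with the low half, and the result is bitcast to the user's 128-bit type.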
8664
8665 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
8666 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
8667 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
8668 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8669 ? Intrinsic::aarch64_stlxp
8670 : Intrinsic::aarch64_stxp);
8671 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
8672
8673 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
8674 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
8675
8676 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
8677 llvm::Value *Val = Builder.CreateLoad(Tmp);
8678
8679 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
8680 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
8681 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
8682 Int8PtrTy);
8683 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
8684 }
8685
8686 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
8687 BuiltinID == AArch64::BI__builtin_arm_stlex) {
8688 Value *StoreVal = EmitScalarExpr(E->getArg(0));
8689 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
8690
8691 QualType Ty = E->getArg(0)->getType();
8692 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
8693 getContext().getTypeSize(Ty));
8694 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
8695
8696 if (StoreVal->getType()->isPointerTy())
8697 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
8698 else {
8699 llvm::Type *IntTy = llvm::IntegerType::get(
8700 getLLVMContext(),
8701 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
8702 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
8703 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
8704 }
8705
8706 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
8707 ? Intrinsic::aarch64_stlxr
8708 : Intrinsic::aarch64_stxr,
8709 StoreAddr->getType());
8710 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
8711 }
8712
8713 if (BuiltinID == AArch64::BI__getReg) {
8714 Expr::EvalResult Result;
8715 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
8716 llvm_unreachable("Sema will ensure that the parameter is constant");
8717
8718 llvm::APSInt Value = Result.Val.getInt();
8719 LLVMContext &Context = CGM.getLLVMContext();
8720 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
8721
8722 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
8723 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8724 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
8725
8726 llvm::Function *F =
8727 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
8728 return Builder.CreateCall(F, Metadata);
8729 }
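// Editor's note (illustrative): __getReg(1) therefore becomes
//   call i64 @llvm.read_register.i64(metadata !{!"x1"})
// while __getReg(31) reads the stack pointer ("sp").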
8730
8731 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
8732 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
8733 return Builder.CreateCall(F);
8734 }
8735
8736 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
8737 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
8738 llvm::SyncScope::SingleThread);
8739
8740 // CRC32
8741 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
8742 switch (BuiltinID) {
8743 case AArch64::BI__builtin_arm_crc32b:
8744 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
8745 case AArch64::BI__builtin_arm_crc32cb:
8746 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
8747 case AArch64::BI__builtin_arm_crc32h:
8748 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
8749 case AArch64::BI__builtin_arm_crc32ch:
8750 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
8751 case AArch64::BI__builtin_arm_crc32w:
8752 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
8753 case AArch64::BI__builtin_arm_crc32cw:
8754 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
8755 case AArch64::BI__builtin_arm_crc32d:
8756 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
8757 case AArch64::BI__builtin_arm_crc32cd:
8758 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
8759 }
8760
8761 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
8762 Value *Arg0 = EmitScalarExpr(E->getArg(0));
8763 Value *Arg1 = EmitScalarExpr(E->getArg(1));
8764 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8765
8766 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
8767 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
8768
8769 return Builder.CreateCall(F, {Arg0, Arg1});
8770 }
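// Note: the second operand is widened to the intrinsic's parameter type, so
// e.g. __builtin_arm_crc32h(acc, data) zero-extends its 16-bit data argument
// to i32 before calling @llvm.aarch64.crc32h(i32 %acc, i32 %data).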
8771
8772 // Memory Tagging Extensions (MTE) Intrinsics
8773 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
8774 switch (BuiltinID) {
8775 case AArch64::BI__builtin_arm_irg:
8776 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
8777 case AArch64::BI__builtin_arm_addg:
8778 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
8779 case AArch64::BI__builtin_arm_gmi:
8780 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
8781 case AArch64::BI__builtin_arm_ldg:
8782 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
8783 case AArch64::BI__builtin_arm_stg:
8784 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
8785 case AArch64::BI__builtin_arm_subp:
8786 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
8787 }
8788
8789 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
8790 llvm::Type *T = ConvertType(E->getType());
8791
8792 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
8793 Value *Pointer = EmitScalarExpr(E->getArg(0));
8794 Value *Mask = EmitScalarExpr(E->getArg(1));
8795
8796 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8797 Mask = Builder.CreateZExt(Mask, Int64Ty);
8798 Value *RV = Builder.CreateCall(
8799 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
8800 return Builder.CreatePointerCast(RV, T);
8801 }
8802 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
8803 Value *Pointer = EmitScalarExpr(E->getArg(0));
8804 Value *TagOffset = EmitScalarExpr(E->getArg(1));
8805
8806 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8807 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
8808 Value *RV = Builder.CreateCall(
8809 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
8810 return Builder.CreatePointerCast(RV, T);
8811 }
8812 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
8813 Value *Pointer = EmitScalarExpr(E->getArg(0));
8814 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
8815
8816 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
8817 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
8818 return Builder.CreateCall(
8819 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
8820 }
8821 // Although it is possible to supply a different return
8822 // address (first arg) to this intrinsic, for now we set
8823 // the return address to be the same as the input address.
8824 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
8825 Value *TagAddress = EmitScalarExpr(E->getArg(0));
8826 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
8827 Value *RV = Builder.CreateCall(
8828 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
8829 return Builder.CreatePointerCast(RV, T);
8830 }
8831 // Although it is possible to supply a different tag (to set)
8832 // to this intrinsic (as first arg), for now we supply
8833 // the tag that is in the input address argument (the common use case).
8834 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
8835 Value *TagAddress = EmitScalarExpr(E->getArg(0));
8836 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
8837 return Builder.CreateCall(
8838 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
8839 }
8840 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
8841 Value *PointerA = EmitScalarExpr(E->getArg(0));
8842 Value *PointerB = EmitScalarExpr(E->getArg(1));
8843 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
8844 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
8845 return Builder.CreateCall(
8846 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
8847 }
8848 }
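// Editor's note (illustrative, ACLE-level view; the mapping of the ACLE helper
// to this builtin is assumed): something like
//   void *q = __arm_mte_create_random_tag(p, 0);   // __builtin_arm_irg
// casts p to i8*, calls @llvm.aarch64.irg(i8* %p, i64 0), and casts the tagged
// pointer back to the original pointer type.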
8849
8850 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
8851 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
8852 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
8853 BuiltinID == AArch64::BI__builtin_arm_wsr ||
8854 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
8855 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
8856
8857 SpecialRegisterAccessKind AccessKind = Write;
8858 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
8859 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
8860 BuiltinID == AArch64::BI__builtin_arm_rsrp)
8861 AccessKind = VolatileRead;
8862
8863 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
8864 BuiltinID == AArch64::BI__builtin_arm_wsrp;
8865
8866 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
8867 BuiltinID != AArch64::BI__builtin_arm_wsr;
8868
8869 llvm::Type *ValueType;
8870 llvm::Type *RegisterType = Int64Ty;
8871 if (IsPointerBuiltin) {
8872 ValueType = VoidPtrTy;
8873 } else if (Is64Bit) {
8874 ValueType = Int64Ty;
8875 } else {
8876 ValueType = Int32Ty;
8877 }
8878
8879 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
8880 AccessKind);
8881 }
8882
8883 if (BuiltinID == AArch64::BI_ReadStatusReg ||
8884 BuiltinID == AArch64::BI_WriteStatusReg) {
8885 LLVMContext &Context = CGM.getLLVMContext();
8886
8887 unsigned SysReg =
8888 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
8889
8890 std::string SysRegStr;
8891 llvm::raw_string_ostream(SysRegStr) <<
8892 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
8893 ((SysReg >> 11) & 7) << ":" <<
8894 ((SysReg >> 7) & 15) << ":" <<
8895 ((SysReg >> 3) & 15) << ":" <<
8896 ( SysReg & 7);
8897
8898 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
8899 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8900 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
8901
8902 llvm::Type *RegisterType = Int64Ty;
8903 llvm::Type *Types[] = { RegisterType };
8904
8905 if (BuiltinID == AArch64::BI_ReadStatusReg) {
8906 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
8907
8908 return Builder.CreateCall(F, Metadata);
8909 }
8910
8911 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
8912 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
8913
8914 return Builder.CreateCall(F, { Metadata, ArgValue });
8915 }
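// Editor's note (worked example with a made-up encoding): the immediate packs
// op0:op1:CRn:CRm:op2 into bits [14], [13:11], [10:7], [6:3], [2:0]. For the
// hypothetical value 0x5A10 the fields are op0 = 2|1 = 3, op1 = 3, CRn = 4,
// CRm = 2, op2 = 0, so the metadata string is "3:3:4:2:0" and the access is
// emitted through @llvm.read_register / @llvm.write_register on i64.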
8916
8917 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
8918 llvm::Function *F =
8919 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
8920 return Builder.CreateCall(F);
8921 }
8922
8923 if (BuiltinID == AArch64::BI__builtin_sponentry) {
8924 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
8925 return Builder.CreateCall(F);
8926 }
8927
8928 // Find out if any arguments are required to be integer constant
8929 // expressions.
8930 unsigned ICEArguments = 0;
8931 ASTContext::GetBuiltinTypeError Error;
8932 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8933 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8934
8935 llvm::SmallVector<Value*, 4> Ops;
8936 Address PtrOp0 = Address::invalid();
8937 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
8938 if (i == 0) {
8939 switch (BuiltinID) {
8940 case NEON::BI__builtin_neon_vld1_v:
8941 case NEON::BI__builtin_neon_vld1q_v:
8942 case NEON::BI__builtin_neon_vld1_dup_v:
8943 case NEON::BI__builtin_neon_vld1q_dup_v:
8944 case NEON::BI__builtin_neon_vld1_lane_v:
8945 case NEON::BI__builtin_neon_vld1q_lane_v:
8946 case NEON::BI__builtin_neon_vst1_v:
8947 case NEON::BI__builtin_neon_vst1q_v:
8948 case NEON::BI__builtin_neon_vst1_lane_v:
8949 case NEON::BI__builtin_neon_vst1q_lane_v:
8950 // Get the alignment for the argument in addition to the value;
8951 // we'll use it later.
8952 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
8953 Ops.push_back(PtrOp0.getPointer());
8954 continue;
8955 }
8956 }
8957 if ((ICEArguments & (1 << i)) == 0) {
8958 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8959 } else {
8960 // If this is required to be a constant, constant fold it so that we know
8961 // that the generated intrinsic gets a ConstantInt.
8962 Ops.push_back(llvm::ConstantInt::get(
8963 getLLVMContext(),
8964 *E->getArg(i)->getIntegerConstantExpr(getContext())));
8965 }
8966 }
8967
8968 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
8969 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
8970 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
8971
8972 if (Builtin) {
8973 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
8974 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
8975 assert(Result && "SISD intrinsic should have been handled");
8976 return Result;
8977 }
8978
8979 const Expr *Arg = E->getArg(E->getNumArgs()-1);
8980 NeonTypeFlags Type(0);
8981 if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
8982 // Determine the type of this overloaded NEON intrinsic.
8983 Type = NeonTypeFlags(Result->getZExtValue());
8984
8985 bool usgn = Type.isUnsigned();
8986 bool quad = Type.isQuad();
8987
8988 // Handle non-overloaded intrinsics first.
8989 switch (BuiltinID) {
8990 default: break;
8991 case NEON::BI__builtin_neon_vabsh_f16:
8992 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8993 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
8994 case NEON::BI__builtin_neon_vldrq_p128: {
8995 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
8996 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
8997 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
8998 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
8999 CharUnits::fromQuantity(16));
9000 }
9001 case NEON::BI__builtin_neon_vstrq_p128: {
9002 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
9003 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9004 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9005 }
9006 case NEON::BI__builtin_neon_vcvts_f32_u32:
9007 case NEON::BI__builtin_neon_vcvtd_f64_u64:
9008 usgn = true;
9009 LLVM_FALLTHROUGH;
9010 case NEON::BI__builtin_neon_vcvts_f32_s32:
9011 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9012 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9013 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9014 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9015 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9016 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9017 if (usgn)
9018 return Builder.CreateUIToFP(Ops[0], FTy);
9019 return Builder.CreateSIToFP(Ops[0], FTy);
9020 }
9021 case NEON::BI__builtin_neon_vcvth_f16_u16:
9022 case NEON::BI__builtin_neon_vcvth_f16_u32:
9023 case NEON::BI__builtin_neon_vcvth_f16_u64:
9024 usgn = true;
9025 LLVM_FALLTHROUGH;
9026 case NEON::BI__builtin_neon_vcvth_f16_s16:
9027 case NEON::BI__builtin_neon_vcvth_f16_s32:
9028 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9029 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9030 llvm::Type *FTy = HalfTy;
9031 llvm::Type *InTy;
9032 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9033 InTy = Int64Ty;
9034 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9035 InTy = Int32Ty;
9036 else
9037 InTy = Int16Ty;
9038 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9039 if (usgn)
9040 return Builder.CreateUIToFP(Ops[0], FTy);
9041 return Builder.CreateSIToFP(Ops[0], FTy);
9042 }
9043 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9044 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9045 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9046 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9047 case NEON::BI__builtin_neon_vcvth_u16_f16:
9048 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9049 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9050 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9051 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9052 case NEON::BI__builtin_neon_vcvth_s16_f16: {
9053 unsigned Int;
9054 llvm::Type* InTy = Int32Ty;
9055 llvm::Type* FTy = HalfTy;
9056 llvm::Type *Tys[2] = {InTy, FTy};
9057 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9058 switch (BuiltinID) {
9059 default: llvm_unreachable("missing builtin ID in switch!");
9060 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9061 Int = Intrinsic::aarch64_neon_fcvtau; break;
9062 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9063 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9064 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9065 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9066 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9067 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9068 case NEON::BI__builtin_neon_vcvth_u16_f16:
9069 Int = Intrinsic::aarch64_neon_fcvtzu; break;
9070 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9071 Int = Intrinsic::aarch64_neon_fcvtas; break;
9072 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9073 Int = Intrinsic::aarch64_neon_fcvtms; break;
9074 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9075 Int = Intrinsic::aarch64_neon_fcvtns; break;
9076 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9077 Int = Intrinsic::aarch64_neon_fcvtps; break;
9078 case NEON::BI__builtin_neon_vcvth_s16_f16:
9079 Int = Intrinsic::aarch64_neon_fcvtzs; break;
9080 }
9081 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
9082 return Builder.CreateTrunc(Ops[0], Int16Ty);
9083 }
9084 case NEON::BI__builtin_neon_vcaleh_f16:
9085 case NEON::BI__builtin_neon_vcalth_f16:
9086 case NEON::BI__builtin_neon_vcageh_f16:
9087 case NEON::BI__builtin_neon_vcagth_f16: {
9088 unsigned Int;
9089 llvm::Type* InTy = Int32Ty;
9090 llvm::Type* FTy = HalfTy;
9091 llvm::Type *Tys[2] = {InTy, FTy};
9092 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9093 switch (BuiltinID) {
9094 default: llvm_unreachable("missing builtin ID in switch!");
9095 case NEON::BI__builtin_neon_vcageh_f16:
9096 Int = Intrinsic::aarch64_neon_facge; break;
9097 case NEON::BI__builtin_neon_vcagth_f16:
9098 Int = Intrinsic::aarch64_neon_facgt; break;
9099 case NEON::BI__builtin_neon_vcaleh_f16:
9100 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
9101 case NEON::BI__builtin_neon_vcalth_f16:
9102 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
9103 }
9104 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
9105 return Builder.CreateTrunc(Ops[0], Int16Ty);
9106 }
9107 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9108 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
9109 unsigned Int;
9110 llvm::Type* InTy = Int32Ty;
9111 llvm::Type* FTy = HalfTy;
9112 llvm::Type *Tys[2] = {InTy, FTy};
9113 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9114 switch (BuiltinID) {
9115 default: llvm_unreachable("missing builtin ID in switch!");
9116 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9117 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
9118 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
9119 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
9120 }
9121 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9122 return Builder.CreateTrunc(Ops[0], Int16Ty);
9123 }
9124 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9125 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
9126 unsigned Int;
9127 llvm::Type* FTy = HalfTy;
9128 llvm::Type* InTy = Int32Ty;
9129 llvm::Type *Tys[2] = {FTy, InTy};
9130 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9131 switch (BuiltinID) {
9132 default: llvm_unreachable("missing builtin ID in switch!");
9133 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9134 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
9135 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
9136 break;
9137 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
9138 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
9139 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
9140 break;
9141 }
9142 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9143 }
9144 case NEON::BI__builtin_neon_vpaddd_s64: {
9145 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
9146 Value *Vec = EmitScalarExpr(E->getArg(0));
9147 // The vector is v2i64, so make sure it's bitcast to that.
9148 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
9149 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9150 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9151 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9152 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9153 // Pairwise addition of a v2i64 into a scalar i64.
9154 return Builder.CreateAdd(Op0, Op1, "vpaddd");
9155 }
9156 case NEON::BI__builtin_neon_vpaddd_f64: {
9157 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
9158 Value *Vec = EmitScalarExpr(E->getArg(0));
9159 // The vector is v2f64, so make sure it's bitcast to that.
9160 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
9161 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9162 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9163 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9164 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9165 // Pairwise addition of a v2f64 into a scalar f64.
9166 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9167 }
9168 case NEON::BI__builtin_neon_vpadds_f32: {
9169 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
9170 Value *Vec = EmitScalarExpr(E->getArg(0));
9171 // The vector is v2f32, so make sure it's bitcast to that.
9172 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
9173 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9174 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9175 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9176 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9177 // Pairwise addition of a v2f32 into a scalar f32.
9178 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9179 }
9180 case NEON::BI__builtin_neon_vceqzd_s64:
9181 case NEON::BI__builtin_neon_vceqzd_f64:
9182 case NEON::BI__builtin_neon_vceqzs_f32:
9183 case NEON::BI__builtin_neon_vceqzh_f16:
9184 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9185 return EmitAArch64CompareBuiltinExpr(
9186 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9187 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
9188 case NEON::BI__builtin_neon_vcgezd_s64:
9189 case NEON::BI__builtin_neon_vcgezd_f64:
9190 case NEON::BI__builtin_neon_vcgezs_f32:
9191 case NEON::BI__builtin_neon_vcgezh_f16:
9192 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9193 return EmitAArch64CompareBuiltinExpr(
9194 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9195 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
9196 case NEON::BI__builtin_neon_vclezd_s64:
9197 case NEON::BI__builtin_neon_vclezd_f64:
9198 case NEON::BI__builtin_neon_vclezs_f32:
9199 case NEON::BI__builtin_neon_vclezh_f16:
9200 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9201 return EmitAArch64CompareBuiltinExpr(
9202 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9203 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
9204 case NEON::BI__builtin_neon_vcgtzd_s64:
9205 case NEON::BI__builtin_neon_vcgtzd_f64:
9206 case NEON::BI__builtin_neon_vcgtzs_f32:
9207 case NEON::BI__builtin_neon_vcgtzh_f16:
9208 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9209 return EmitAArch64CompareBuiltinExpr(
9210 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9211 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
9212 case NEON::BI__builtin_neon_vcltzd_s64:
9213 case NEON::BI__builtin_neon_vcltzd_f64:
9214 case NEON::BI__builtin_neon_vcltzs_f32:
9215 case NEON::BI__builtin_neon_vcltzh_f16:
9216 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9217 return EmitAArch64CompareBuiltinExpr(
9218 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9219 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
9220
9221 case NEON::BI__builtin_neon_vceqzd_u64: {
9222 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9223 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9224 Ops[0] =
9225 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
9226 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
9227 }
9228 case NEON::BI__builtin_neon_vceqd_f64:
9229 case NEON::BI__builtin_neon_vcled_f64:
9230 case NEON::BI__builtin_neon_vcltd_f64:
9231 case NEON::BI__builtin_neon_vcged_f64:
9232 case NEON::BI__builtin_neon_vcgtd_f64: {
9233 llvm::CmpInst::Predicate P;
9234 switch (BuiltinID) {
9235 default: llvm_unreachable("missing builtin ID in switch!");
9236 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
9237 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
9238 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
9239 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
9240 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
9241 }
9242 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9243 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9244 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9245 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9246 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
9247 }
9248 case NEON::BI__builtin_neon_vceqs_f32:
9249 case NEON::BI__builtin_neon_vcles_f32:
9250 case NEON::BI__builtin_neon_vclts_f32:
9251 case NEON::BI__builtin_neon_vcges_f32:
9252 case NEON::BI__builtin_neon_vcgts_f32: {
9253 llvm::CmpInst::Predicate P;
9254 switch (BuiltinID) {
9255 default: llvm_unreachable("missing builtin ID in switch!");
9256 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
9257 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
9258 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
9259 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
9260 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
9261 }
9262 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9263 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
9264 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
9265 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9266 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
9267 }
9268 case NEON::BI__builtin_neon_vceqh_f16:
9269 case NEON::BI__builtin_neon_vcleh_f16:
9270 case NEON::BI__builtin_neon_vclth_f16:
9271 case NEON::BI__builtin_neon_vcgeh_f16:
9272 case NEON::BI__builtin_neon_vcgth_f16: {
9273 llvm::CmpInst::Predicate P;
9274 switch (BuiltinID) {
9275 default: llvm_unreachable("missing builtin ID in switch!");
9276 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
9277 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
9278 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
9279 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
9280 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
9281 }
9282 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9283 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9284 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
9285 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9286 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
9287 }
9288 case NEON::BI__builtin_neon_vceqd_s64:
9289 case NEON::BI__builtin_neon_vceqd_u64:
9290 case NEON::BI__builtin_neon_vcgtd_s64:
9291 case NEON::BI__builtin_neon_vcgtd_u64:
9292 case NEON::BI__builtin_neon_vcltd_s64:
9293 case NEON::BI__builtin_neon_vcltd_u64:
9294 case NEON::BI__builtin_neon_vcged_u64:
9295 case NEON::BI__builtin_neon_vcged_s64:
9296 case NEON::BI__builtin_neon_vcled_u64:
9297 case NEON::BI__builtin_neon_vcled_s64: {
9298 llvm::CmpInst::Predicate P;
9299 switch (BuiltinID) {
9300 default: llvm_unreachable("missing builtin ID in switch!");
9301 case NEON::BI__builtin_neon_vceqd_s64:
9302 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
9303 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
9304 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
9305 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
9306 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
9307 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
9308 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
9309 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
9310 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
9311 }
9312 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9313 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9314 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9315 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
9316 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
9317 }
9318 case NEON::BI__builtin_neon_vtstd_s64:
9319 case NEON::BI__builtin_neon_vtstd_u64: {
9320 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9321 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9322 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9323 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
9324 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
9325 llvm::Constant::getNullValue(Int64Ty));
9326 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
9327 }
9328 case NEON::BI__builtin_neon_vset_lane_i8:
9329 case NEON::BI__builtin_neon_vset_lane_i16:
9330 case NEON::BI__builtin_neon_vset_lane_i32:
9331 case NEON::BI__builtin_neon_vset_lane_i64:
9332 case NEON::BI__builtin_neon_vset_lane_bf16:
9333 case NEON::BI__builtin_neon_vset_lane_f32:
9334 case NEON::BI__builtin_neon_vsetq_lane_i8:
9335 case NEON::BI__builtin_neon_vsetq_lane_i16:
9336 case NEON::BI__builtin_neon_vsetq_lane_i32:
9337 case NEON::BI__builtin_neon_vsetq_lane_i64:
9338 case NEON::BI__builtin_neon_vsetq_lane_bf16:
9339 case NEON::BI__builtin_neon_vsetq_lane_f32:
9340 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9341 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9342 case NEON::BI__builtin_neon_vset_lane_f64:
9343 // The vector type needs a cast for the v1f64 variant.
9344 Ops[1] =
9345 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
9346 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9347 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9348 case NEON::BI__builtin_neon_vsetq_lane_f64:
9349 // The vector type needs a cast for the v2f64 variant.
9350 Ops[1] =
9351 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
9352 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9353 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9354
9355 case NEON::BI__builtin_neon_vget_lane_i8:
9356 case NEON::BI__builtin_neon_vdupb_lane_i8:
9357 Ops[0] =
9358 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
9359 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9360 "vget_lane");
9361 case NEON::BI__builtin_neon_vgetq_lane_i8:
9362 case NEON::BI__builtin_neon_vdupb_laneq_i8:
9363 Ops[0] =
9364 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
9365 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9366 "vgetq_lane");
9367 case NEON::BI__builtin_neon_vget_lane_i16:
9368 case NEON::BI__builtin_neon_vduph_lane_i16:
9369 Ops[0] =
9370 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
9371 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9372 "vget_lane");
9373 case NEON::BI__builtin_neon_vgetq_lane_i16:
9374 case NEON::BI__builtin_neon_vduph_laneq_i16:
9375 Ops[0] =
9376 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
9377 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9378 "vgetq_lane");
9379 case NEON::BI__builtin_neon_vget_lane_i32:
9380 case NEON::BI__builtin_neon_vdups_lane_i32:
9381 Ops[0] =
9382 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
9383 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9384 "vget_lane");
9385 case NEON::BI__builtin_neon_vdups_lane_f32:
9386 Ops[0] =
9387 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9388 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9389 "vdups_lane");
9390 case NEON::BI__builtin_neon_vgetq_lane_i32:
9391 case NEON::BI__builtin_neon_vdups_laneq_i32:
9392 Ops[0] =
9393 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
9394 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9395 "vgetq_lane");
9396 case NEON::BI__builtin_neon_vget_lane_i64:
9397 case NEON::BI__builtin_neon_vdupd_lane_i64:
9398 Ops[0] =
9399 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
9400 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9401 "vget_lane");
9402 case NEON::BI__builtin_neon_vdupd_lane_f64:
9403 Ops[0] =
9404 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9405 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9406 "vdupd_lane");
9407 case NEON::BI__builtin_neon_vgetq_lane_i64:
9408 case NEON::BI__builtin_neon_vdupd_laneq_i64:
9409 Ops[0] =
9410 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
9411 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9412 "vgetq_lane");
9413 case NEON::BI__builtin_neon_vget_lane_f32:
9414 Ops[0] =
9415 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9416 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9417 "vget_lane");
9418 case NEON::BI__builtin_neon_vget_lane_f64:
9419 Ops[0] =
9420 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9421 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9422 "vget_lane");
9423 case NEON::BI__builtin_neon_vgetq_lane_f32:
9424 case NEON::BI__builtin_neon_vdups_laneq_f32:
9425 Ops[0] =
9426 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
9427 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9428 "vgetq_lane");
9429 case NEON::BI__builtin_neon_vgetq_lane_f64:
9430 case NEON::BI__builtin_neon_vdupd_laneq_f64:
9431 Ops[0] =
9432 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
9433 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9434 "vgetq_lane");
9435 case NEON::BI__builtin_neon_vaddh_f16:
9436 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9437 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
9438 case NEON::BI__builtin_neon_vsubh_f16:
9439 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9440 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
9441 case NEON::BI__builtin_neon_vmulh_f16:
9442 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9443 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
9444 case NEON::BI__builtin_neon_vdivh_f16:
9445 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9446 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
9447 case NEON::BI__builtin_neon_vfmah_f16:
9448 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9449 return emitCallMaybeConstrainedFPBuiltin(
9450 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9451 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
9452 case NEON::BI__builtin_neon_vfmsh_f16: {
9453 // FIXME: This should be an fneg instruction:
9454 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
9455 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
9456
9457 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9458 return emitCallMaybeConstrainedFPBuiltin(
9459 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9460 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
9461 }
9462 case NEON::BI__builtin_neon_vaddd_s64:
9463 case NEON::BI__builtin_neon_vaddd_u64:
9464 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
9465 case NEON::BI__builtin_neon_vsubd_s64:
9466 case NEON::BI__builtin_neon_vsubd_u64:
9467 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
9468 case NEON::BI__builtin_neon_vqdmlalh_s16:
9469 case NEON::BI__builtin_neon_vqdmlslh_s16: {
9470 SmallVector<Value *, 2> ProductOps;
9471 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9472 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
9473 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9474 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9475 ProductOps, "vqdmlXl");
9476 Constant *CI = ConstantInt::get(SizeTy, 0);
9477 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9478
9479 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
9480 ? Intrinsic::aarch64_neon_sqadd
9481 : Intrinsic::aarch64_neon_sqsub;
9482 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
9483 }
9484 case NEON::BI__builtin_neon_vqshlud_n_s64: {
9485 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9486 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9487 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
9488 Ops, "vqshlu_n");
9489 }
9490 case NEON::BI__builtin_neon_vqshld_n_u64:
9491 case NEON::BI__builtin_neon_vqshld_n_s64: {
9492 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
9493 ? Intrinsic::aarch64_neon_uqshl
9494 : Intrinsic::aarch64_neon_sqshl;
9495 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9496 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9497 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
9498 }
9499 case NEON::BI__builtin_neon_vrshrd_n_u64:
9500 case NEON::BI__builtin_neon_vrshrd_n_s64: {
9501 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
9502 ? Intrinsic::aarch64_neon_urshl
9503 : Intrinsic::aarch64_neon_srshl;
9504 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9505 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
9506 Ops[1] = ConstantInt::get(Int64Ty, -SV);
9507 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
9508 }
9509 case NEON::BI__builtin_neon_vrsrad_n_u64:
9510 case NEON::BI__builtin_neon_vrsrad_n_s64: {
9511 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
9512 ? Intrinsic::aarch64_neon_urshl
9513 : Intrinsic::aarch64_neon_srshl;
9514 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9515 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
9516 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
9517 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
9518 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
9519 }
9520 case NEON::BI__builtin_neon_vshld_n_s64:
9521 case NEON::BI__builtin_neon_vshld_n_u64: {
9522 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9523 return Builder.CreateShl(
9524 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
9525 }
9526 case NEON::BI__builtin_neon_vshrd_n_s64: {
9527 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9528 return Builder.CreateAShr(
9529 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9530 Amt->getZExtValue())),
9531 "shrd_n");
9532 }
9533 case NEON::BI__builtin_neon_vshrd_n_u64: {
9534 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
9535 uint64_t ShiftAmt = Amt->getZExtValue();
9536 // Right-shifting an unsigned value by its size yields 0.
9537 if (ShiftAmt == 64)
9538 return ConstantInt::get(Int64Ty, 0);
9539 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
9540 "shrd_n");
9541 }
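// Note: the explicit ShiftAmt == 64 check above is needed because an LLVM
// 'lshr' whose shift amount equals the bit width yields poison, so the
// intrinsic's defined result (zero) must be materialized as a constant instead.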
9542 case NEON::BI__builtin_neon_vsrad_n_s64: {
9543 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9544 Ops[1] = Builder.CreateAShr(
9545 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
9546 Amt->getZExtValue())),
9547 "shrd_n");
9548 return Builder.CreateAdd(Ops[0], Ops[1]);
9549 }
9550 case NEON::BI__builtin_neon_vsrad_n_u64: {
9551 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
9552 uint64_t ShiftAmt = Amt->getZExtValue();
9553 // Right-shifting an unsigned value by its size yields 0.
9554 // As Op + 0 = Op, return Ops[0] directly.
9555 if (ShiftAmt == 64)
9556 return Ops[0];
9557 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
9558 "shrd_n");
9559 return Builder.CreateAdd(Ops[0], Ops[1]);
9560 }
9561 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
9562 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
9563 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
9564 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
9565 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9566 "lane");
9567 SmallVector<Value *, 2> ProductOps;
9568 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9569 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
9570 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9571 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9572 ProductOps, "vqdmlXl");
9573 Constant *CI = ConstantInt::get(SizeTy, 0);
9574 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9575 Ops.pop_back();
9576
9577 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
9578 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
9579 ? Intrinsic::aarch64_neon_sqadd
9580 : Intrinsic::aarch64_neon_sqsub;
9581 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
9582 }
9583 case NEON::BI__builtin_neon_vqdmlals_s32:
9584 case NEON::BI__builtin_neon_vqdmlsls_s32: {
9585 SmallVector<Value *, 2> ProductOps;
9586 ProductOps.push_back(Ops[1]);
9587 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
9588 Ops[1] =
9589 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9590 ProductOps, "vqdmlXl");
9591
9592 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
9593 ? Intrinsic::aarch64_neon_sqadd
9594 : Intrinsic::aarch64_neon_sqsub;
9595 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
9596 }
9597 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
9598 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
9599 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
9600 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
9601 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
9602 "lane");
9603 SmallVector<Value *, 2> ProductOps;
9604 ProductOps.push_back(Ops[1]);
9605 ProductOps.push_back(Ops[2]);
9606 Ops[1] =
9607 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
9608 ProductOps, "vqdmlXl");
9609 Ops.pop_back();
9610
9611 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
9612 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
9613 ? Intrinsic::aarch64_neon_sqadd
9614 : Intrinsic::aarch64_neon_sqsub;
9615 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
9616 }
9617 case NEON::BI__builtin_neon_vget_lane_bf16:
9618 case NEON::BI__builtin_neon_vduph_lane_bf16:
9619 case NEON::BI__builtin_neon_vduph_lane_f16: {
9620 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9621 "vget_lane");
9622 }
9623 case NEON::BI__builtin_neon_vgetq_lane_bf16:
9624 case NEON::BI__builtin_neon_vduph_laneq_bf16:
9625 case NEON::BI__builtin_neon_vduph_laneq_f16: {
9626 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9627 "vgetq_lane");
9628 }
9629 case AArch64::BI_BitScanForward:
9630 case AArch64::BI_BitScanForward64:
9631 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
9632 case AArch64::BI_BitScanReverse:
9633 case AArch64::BI_BitScanReverse64:
9634 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
9635 case AArch64::BI_InterlockedAnd64:
9636 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
9637 case AArch64::BI_InterlockedExchange64:
9638 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
9639 case AArch64::BI_InterlockedExchangeAdd64:
9640 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
9641 case AArch64::BI_InterlockedExchangeSub64:
9642 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
9643 case AArch64::BI_InterlockedOr64:
9644 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
9645 case AArch64::BI_InterlockedXor64:
9646 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
9647 case AArch64::BI_InterlockedDecrement64:
9648 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
9649 case AArch64::BI_InterlockedIncrement64:
9650 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
9651 case AArch64::BI_InterlockedExchangeAdd8_acq:
9652 case AArch64::BI_InterlockedExchangeAdd16_acq:
9653 case AArch64::BI_InterlockedExchangeAdd_acq:
9654 case AArch64::BI_InterlockedExchangeAdd64_acq:
9655 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
9656 case AArch64::BI_InterlockedExchangeAdd8_rel:
9657 case AArch64::BI_InterlockedExchangeAdd16_rel:
9658 case AArch64::BI_InterlockedExchangeAdd_rel:
9659 case AArch64::BI_InterlockedExchangeAdd64_rel:
9660 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
9661 case AArch64::BI_InterlockedExchangeAdd8_nf:
9662 case AArch64::BI_InterlockedExchangeAdd16_nf:
9663 case AArch64::BI_InterlockedExchangeAdd_nf:
9664 case AArch64::BI_InterlockedExchangeAdd64_nf:
9665 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
9666 case AArch64::BI_InterlockedExchange8_acq:
9667 case AArch64::BI_InterlockedExchange16_acq:
9668 case AArch64::BI_InterlockedExchange_acq:
9669 case AArch64::BI_InterlockedExchange64_acq:
9670 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
9671 case AArch64::BI_InterlockedExchange8_rel:
9672 case AArch64::BI_InterlockedExchange16_rel:
9673 case AArch64::BI_InterlockedExchange_rel:
9674 case AArch64::BI_InterlockedExchange64_rel:
9675 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
9676 case AArch64::BI_InterlockedExchange8_nf:
9677 case AArch64::BI_InterlockedExchange16_nf:
9678 case AArch64::BI_InterlockedExchange_nf:
9679 case AArch64::BI_InterlockedExchange64_nf:
9680 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
9681 case AArch64::BI_InterlockedCompareExchange8_acq:
9682 case AArch64::BI_InterlockedCompareExchange16_acq:
9683 case AArch64::BI_InterlockedCompareExchange_acq:
9684 case AArch64::BI_InterlockedCompareExchange64_acq:
9685 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
9686 case AArch64::BI_InterlockedCompareExchange8_rel:
9687 case AArch64::BI_InterlockedCompareExchange16_rel:
9688 case AArch64::BI_InterlockedCompareExchange_rel:
9689 case AArch64::BI_InterlockedCompareExchange64_rel:
9690 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
9691 case AArch64::BI_InterlockedCompareExchange8_nf:
9692 case AArch64::BI_InterlockedCompareExchange16_nf:
9693 case AArch64::BI_InterlockedCompareExchange_nf:
9694 case AArch64::BI_InterlockedCompareExchange64_nf:
9695 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
9696 case AArch64::BI_InterlockedOr8_acq:
9697 case AArch64::BI_InterlockedOr16_acq:
9698 case AArch64::BI_InterlockedOr_acq:
9699 case AArch64::BI_InterlockedOr64_acq:
9700 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
9701 case AArch64::BI_InterlockedOr8_rel:
9702 case AArch64::BI_InterlockedOr16_rel:
9703 case AArch64::BI_InterlockedOr_rel:
9704 case AArch64::BI_InterlockedOr64_rel:
9705 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
9706 case AArch64::BI_InterlockedOr8_nf:
9707 case AArch64::BI_InterlockedOr16_nf:
9708 case AArch64::BI_InterlockedOr_nf:
9709 case AArch64::BI_InterlockedOr64_nf:
9710 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
9711 case AArch64::BI_InterlockedXor8_acq:
9712 case AArch64::BI_InterlockedXor16_acq:
9713 case AArch64::BI_InterlockedXor_acq:
9714 case AArch64::BI_InterlockedXor64_acq:
9715 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
9716 case AArch64::BI_InterlockedXor8_rel:
9717 case AArch64::BI_InterlockedXor16_rel:
9718 case AArch64::BI_InterlockedXor_rel:
9719 case AArch64::BI_InterlockedXor64_rel:
9720 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
9721 case AArch64::BI_InterlockedXor8_nf:
9722 case AArch64::BI_InterlockedXor16_nf:
9723 case AArch64::BI_InterlockedXor_nf:
9724 case AArch64::BI_InterlockedXor64_nf:
9725 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
9726 case AArch64::BI_InterlockedAnd8_acq:
9727 case AArch64::BI_InterlockedAnd16_acq:
9728 case AArch64::BI_InterlockedAnd_acq:
9729 case AArch64::BI_InterlockedAnd64_acq:
9730 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
9731 case AArch64::BI_InterlockedAnd8_rel:
9732 case AArch64::BI_InterlockedAnd16_rel:
9733 case AArch64::BI_InterlockedAnd_rel:
9734 case AArch64::BI_InterlockedAnd64_rel:
9735 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
9736 case AArch64::BI_InterlockedAnd8_nf:
9737 case AArch64::BI_InterlockedAnd16_nf:
9738 case AArch64::BI_InterlockedAnd_nf:
9739 case AArch64::BI_InterlockedAnd64_nf:
9740 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
9741 case AArch64::BI_InterlockedIncrement16_acq:
9742 case AArch64::BI_InterlockedIncrement_acq:
9743 case AArch64::BI_InterlockedIncrement64_acq:
9744 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
9745 case AArch64::BI_InterlockedIncrement16_rel:
9746 case AArch64::BI_InterlockedIncrement_rel:
9747 case AArch64::BI_InterlockedIncrement64_rel:
9748 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
9749 case AArch64::BI_InterlockedIncrement16_nf:
9750 case AArch64::BI_InterlockedIncrement_nf:
9751 case AArch64::BI_InterlockedIncrement64_nf:
9752 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
9753 case AArch64::BI_InterlockedDecrement16_acq:
9754 case AArch64::BI_InterlockedDecrement_acq:
9755 case AArch64::BI_InterlockedDecrement64_acq:
9756 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
9757 case AArch64::BI_InterlockedDecrement16_rel:
9758 case AArch64::BI_InterlockedDecrement_rel:
9759 case AArch64::BI_InterlockedDecrement64_rel:
9760 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
9761 case AArch64::BI_InterlockedDecrement16_nf:
9762 case AArch64::BI_InterlockedDecrement_nf:
9763 case AArch64::BI_InterlockedDecrement64_nf:
9764 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
9765
9766 case AArch64::BI_InterlockedAdd: {
9767 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9768 Value *Arg1 = EmitScalarExpr(E->getArg(1));
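      // Editorial note (not in the original source): atomicrmw add yields the
      // pre-addition value, while _InterlockedAdd is defined to return the
      // post-addition value, hence the extra CreateAdd on the result below.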
9769 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
9770 AtomicRMWInst::Add, Arg0, Arg1,
9771 llvm::AtomicOrdering::SequentiallyConsistent);
9772 return Builder.CreateAdd(RMWI, Arg1);
9773 }
9774 }
9775
9776 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
9777 llvm::Type *Ty = VTy;
9778 if (!Ty)
9779 return nullptr;
9780
9781 // Not all intrinsics handled by the common case work for AArch64 yet, so only
9782 // defer to common code if it's been added to our special map.
9783 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
9784 AArch64SIMDIntrinsicsProvenSorted);
9785
9786 if (Builtin)
9787 return EmitCommonNeonBuiltinExpr(
9788 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
9789 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
9790 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
9791
9792 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
9793 return V;
9794
9795 unsigned Int;
9796 switch (BuiltinID) {
9797 default: return nullptr;
9798 case NEON::BI__builtin_neon_vbsl_v:
9799 case NEON::BI__builtin_neon_vbslq_v: {
9800 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
9801 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
9802 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
9803 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
9804
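      // Editorial note (not in the original source): this computes the bitwise
      // select (mask & a) | (~mask & b) in the integer domain, with Ops[0] as the mask.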
9805 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
9806 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
9807 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
9808 return Builder.CreateBitCast(Ops[0], Ty);
9809 }
9810 case NEON::BI__builtin_neon_vfma_lane_v:
9811 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
9812 // The ARM builtins (and instructions) have the addend as the first
9813 // operand, but the 'fma' intrinsics have it last. Swap it around here.
9814 Value *Addend = Ops[0];
9815 Value *Multiplicand = Ops[1];
9816 Value *LaneSource = Ops[2];
9817 Ops[0] = Multiplicand;
9818 Ops[1] = LaneSource;
9819 Ops[2] = Addend;
9820
9821 // Now adjust things to handle the lane access.
9822 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
9823 ? llvm::FixedVectorType::get(VTy->getElementType(),
9824 VTy->getNumElements() / 2)
9825 : VTy;
9826 llvm::Constant *cst = cast<Constant>(Ops[3]);
9827 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
9828 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
9829 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
9830
9831 Ops.pop_back();
9832 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
9833 : Intrinsic::fma;
9834 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
9835 }
9836 case NEON::BI__builtin_neon_vfma_laneq_v: {
9837 auto *VTy = cast<llvm::FixedVectorType>(Ty);
9838 // v1f64 fma should be mapped to Neon scalar f64 fma
9839 if (VTy && VTy->getElementType() == DoubleTy) {
9840 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9841 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9842 llvm::FixedVectorType *VTy =
9843 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
9844 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
9845 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
9846 Value *Result;
9847 Result = emitCallMaybeConstrainedFPBuiltin(
9848 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
9849 DoubleTy, {Ops[1], Ops[2], Ops[0]});
9850 return Builder.CreateBitCast(Result, Ty);
9851 }
9852 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9853 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9854
9855 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
9856 VTy->getNumElements() * 2);
9857 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
9858 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
9859 cast<ConstantInt>(Ops[3]));
9860 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
9861
9862 return emitCallMaybeConstrainedFPBuiltin(
9863 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9864 {Ops[2], Ops[1], Ops[0]});
9865 }
9866 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
9867 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9868 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9869
9870 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9871 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
9872 return emitCallMaybeConstrainedFPBuiltin(
9873 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9874 {Ops[2], Ops[1], Ops[0]});
9875 }
9876 case NEON::BI__builtin_neon_vfmah_lane_f16:
9877 case NEON::BI__builtin_neon_vfmas_lane_f32:
9878 case NEON::BI__builtin_neon_vfmah_laneq_f16:
9879 case NEON::BI__builtin_neon_vfmas_laneq_f32:
9880 case NEON::BI__builtin_neon_vfmad_lane_f64:
9881 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
9882 Ops.push_back(EmitScalarExpr(E->getArg(3)));
9883 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
9884 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
9885 return emitCallMaybeConstrainedFPBuiltin(
9886 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
9887 {Ops[1], Ops[2], Ops[0]});
9888 }
9889 case NEON::BI__builtin_neon_vmull_v:
9890 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9891 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
9892 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
9893 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
9894 case NEON::BI__builtin_neon_vmax_v:
9895 case NEON::BI__builtin_neon_vmaxq_v:
9896 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9897 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
9898 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
9899 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
9900 case NEON::BI__builtin_neon_vmaxh_f16: {
9901 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9902 Int = Intrinsic::aarch64_neon_fmax;
9903 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
9904 }
9905 case NEON::BI__builtin_neon_vmin_v:
9906 case NEON::BI__builtin_neon_vminq_v:
9907 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9908 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
9909 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
9910 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
9911 case NEON::BI__builtin_neon_vminh_f16: {
9912 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9913 Int = Intrinsic::aarch64_neon_fmin;
9914 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
9915 }
9916 case NEON::BI__builtin_neon_vabd_v:
9917 case NEON::BI__builtin_neon_vabdq_v:
9918 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9919 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
9920 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
9921 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
9922 case NEON::BI__builtin_neon_vpadal_v:
9923 case NEON::BI__builtin_neon_vpadalq_v: {
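      // Editorial note (not in the original source): vpadal is lowered as a pairwise
      // widening add ((s|u)addlp) of the second operand, followed by a plain vector
      // add of the accumulator.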
9924 unsigned ArgElts = VTy->getNumElements();
9925 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
9926 unsigned BitWidth = EltTy->getBitWidth();
9927 auto *ArgTy = llvm::FixedVectorType::get(
9928 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
9929 llvm::Type* Tys[2] = { VTy, ArgTy };
9930 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
9931 SmallVector<llvm::Value*, 1> TmpOps;
9932 TmpOps.push_back(Ops[1]);
9933 Function *F = CGM.getIntrinsic(Int, Tys);
9934 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
9935 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
9936 return Builder.CreateAdd(tmp, addend);
9937 }
9938 case NEON::BI__builtin_neon_vpmin_v:
9939 case NEON::BI__builtin_neon_vpminq_v:
9940 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9941 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
9942 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
9943 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
9944 case NEON::BI__builtin_neon_vpmax_v:
9945 case NEON::BI__builtin_neon_vpmaxq_v:
9946 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
9947 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
9948 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
9949 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
9950 case NEON::BI__builtin_neon_vminnm_v:
9951 case NEON::BI__builtin_neon_vminnmq_v:
9952 Int = Intrinsic::aarch64_neon_fminnm;
9953 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
9954 case NEON::BI__builtin_neon_vminnmh_f16:
9955 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9956 Int = Intrinsic::aarch64_neon_fminnm;
9957 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
9958 case NEON::BI__builtin_neon_vmaxnm_v:
9959 case NEON::BI__builtin_neon_vmaxnmq_v:
9960 Int = Intrinsic::aarch64_neon_fmaxnm;
9961 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
9962 case NEON::BI__builtin_neon_vmaxnmh_f16:
9963 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9964 Int = Intrinsic::aarch64_neon_fmaxnm;
9965 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
9966 case NEON::BI__builtin_neon_vrecpss_f32: {
9967 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9968 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
9969 Ops, "vrecps");
9970 }
9971 case NEON::BI__builtin_neon_vrecpsd_f64:
9972 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9973 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
9974 Ops, "vrecps");
9975 case NEON::BI__builtin_neon_vrecpsh_f16:
9976 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9977 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
9978 Ops, "vrecps");
9979 case NEON::BI__builtin_neon_vqshrun_n_v:
9980 Int = Intrinsic::aarch64_neon_sqshrun;
9981 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
9982 case NEON::BI__builtin_neon_vqrshrun_n_v:
9983 Int = Intrinsic::aarch64_neon_sqrshrun;
9984 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
9985 case NEON::BI__builtin_neon_vqshrn_n_v:
9986 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
9987 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
9988 case NEON::BI__builtin_neon_vrshrn_n_v:
9989 Int = Intrinsic::aarch64_neon_rshrn;
9990 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
9991 case NEON::BI__builtin_neon_vqrshrn_n_v:
9992 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
9993 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
9994 case NEON::BI__builtin_neon_vrndah_f16: {
9995 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9996 Int = Builder.getIsFPConstrained()
9997 ? Intrinsic::experimental_constrained_round
9998 : Intrinsic::round;
9999 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10000 }
10001 case NEON::BI__builtin_neon_vrnda_v:
10002 case NEON::BI__builtin_neon_vrndaq_v: {
10003 Int = Builder.getIsFPConstrained()
10004 ? Intrinsic::experimental_constrained_round
10005 : Intrinsic::round;
10006 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10007 }
10008 case NEON::BI__builtin_neon_vrndih_f16: {
10009 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10010 Int = Builder.getIsFPConstrained()
10011 ? Intrinsic::experimental_constrained_nearbyint
10012 : Intrinsic::nearbyint;
10013 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10014 }
10015 case NEON::BI__builtin_neon_vrndmh_f16: {
10016 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10017 Int = Builder.getIsFPConstrained()
10018 ? Intrinsic::experimental_constrained_floor
10019 : Intrinsic::floor;
10020 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10021 }
10022 case NEON::BI__builtin_neon_vrndm_v:
10023 case NEON::BI__builtin_neon_vrndmq_v: {
10024 Int = Builder.getIsFPConstrained()
10025 ? Intrinsic::experimental_constrained_floor
10026 : Intrinsic::floor;
10027 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10028 }
10029 case NEON::BI__builtin_neon_vrndnh_f16: {
10030 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10031 Int = Intrinsic::aarch64_neon_frintn;
10032 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10033 }
10034 case NEON::BI__builtin_neon_vrndn_v:
10035 case NEON::BI__builtin_neon_vrndnq_v: {
10036 Int = Intrinsic::aarch64_neon_frintn;
10037 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10038 }
10039 case NEON::BI__builtin_neon_vrndns_f32: {
10040 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10041 Int = Intrinsic::aarch64_neon_frintn;
10042 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10043 }
10044 case NEON::BI__builtin_neon_vrndph_f16: {
10045 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10046 Int = Builder.getIsFPConstrained()
10047 ? Intrinsic::experimental_constrained_ceil
10048 : Intrinsic::ceil;
10049 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10050 }
10051 case NEON::BI__builtin_neon_vrndp_v:
10052 case NEON::BI__builtin_neon_vrndpq_v: {
10053 Int = Builder.getIsFPConstrained()
10054 ? Intrinsic::experimental_constrained_ceil
10055 : Intrinsic::ceil;
10056 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10057 }
10058 case NEON::BI__builtin_neon_vrndxh_f16: {
10059 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10060 Int = Builder.getIsFPConstrained()
10061 ? Intrinsic::experimental_constrained_rint
10062 : Intrinsic::rint;
10063 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10064 }
10065 case NEON::BI__builtin_neon_vrndx_v:
10066 case NEON::BI__builtin_neon_vrndxq_v: {
10067 Int = Builder.getIsFPConstrained()
10068 ? Intrinsic::experimental_constrained_rint
10069 : Intrinsic::rint;
10070 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10071 }
10072 case NEON::BI__builtin_neon_vrndh_f16: {
10073 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10074 Int = Builder.getIsFPConstrained()
10075 ? Intrinsic::experimental_constrained_trunc
10076 : Intrinsic::trunc;
10077 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10078 }
10079 case NEON::BI__builtin_neon_vrnd_v:
10080 case NEON::BI__builtin_neon_vrndq_v: {
10081 Int = Builder.getIsFPConstrained()
10082 ? Intrinsic::experimental_constrained_trunc
10083 : Intrinsic::trunc;
10084 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10085 }
10086 case NEON::BI__builtin_neon_vcvt_f64_v:
10087 case NEON::BI__builtin_neon_vcvtq_f64_v:
10088 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10089 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10090 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10091 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10092 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10093 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10094        "unexpected vcvt_f64_f32 builtin");
10095 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10096 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10097
10098 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10099 }
10100 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10101 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10102        "unexpected vcvt_f32_f64 builtin");
10103 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10104 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10105
10106 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10107 }
10108 case NEON::BI__builtin_neon_vcvt_s32_v:
10109 case NEON::BI__builtin_neon_vcvt_u32_v:
10110 case NEON::BI__builtin_neon_vcvt_s64_v:
10111 case NEON::BI__builtin_neon_vcvt_u64_v:
10112 case NEON::BI__builtin_neon_vcvt_s16_v:
10113 case NEON::BI__builtin_neon_vcvt_u16_v:
10114 case NEON::BI__builtin_neon_vcvtq_s32_v:
10115 case NEON::BI__builtin_neon_vcvtq_u32_v:
10116 case NEON::BI__builtin_neon_vcvtq_s64_v:
10117 case NEON::BI__builtin_neon_vcvtq_u64_v:
10118 case NEON::BI__builtin_neon_vcvtq_s16_v:
10119 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10120 Int =
10121 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10122 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10123 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10124 }
10125 case NEON::BI__builtin_neon_vcvta_s16_v:
10126 case NEON::BI__builtin_neon_vcvta_u16_v:
10127 case NEON::BI__builtin_neon_vcvta_s32_v:
10128 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10129 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10130 case NEON::BI__builtin_neon_vcvta_u32_v:
10131 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10132 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10133 case NEON::BI__builtin_neon_vcvta_s64_v:
10134 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10135 case NEON::BI__builtin_neon_vcvta_u64_v:
10136 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10137 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10138 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10139 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10140 }
10141 case NEON::BI__builtin_neon_vcvtm_s16_v:
10142 case NEON::BI__builtin_neon_vcvtm_s32_v:
10143 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10144 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10145 case NEON::BI__builtin_neon_vcvtm_u16_v:
10146 case NEON::BI__builtin_neon_vcvtm_u32_v:
10147 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10148 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10149 case NEON::BI__builtin_neon_vcvtm_s64_v:
10150 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10151 case NEON::BI__builtin_neon_vcvtm_u64_v:
10152 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10153 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10154 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10155 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10156 }
10157 case NEON::BI__builtin_neon_vcvtn_s16_v:
10158 case NEON::BI__builtin_neon_vcvtn_s32_v:
10159 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10160 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10161 case NEON::BI__builtin_neon_vcvtn_u16_v:
10162 case NEON::BI__builtin_neon_vcvtn_u32_v:
10163 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10164 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10165 case NEON::BI__builtin_neon_vcvtn_s64_v:
10166 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10167 case NEON::BI__builtin_neon_vcvtn_u64_v:
10168 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10169 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10170 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10171 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10172 }
10173 case NEON::BI__builtin_neon_vcvtp_s16_v:
10174 case NEON::BI__builtin_neon_vcvtp_s32_v:
10175 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10176 case NEON::BI__builtin_neon_vcvtpq_s32_v:
10177 case NEON::BI__builtin_neon_vcvtp_u16_v:
10178 case NEON::BI__builtin_neon_vcvtp_u32_v:
10179 case NEON::BI__builtin_neon_vcvtpq_u16_v:
10180 case NEON::BI__builtin_neon_vcvtpq_u32_v:
10181 case NEON::BI__builtin_neon_vcvtp_s64_v:
10182 case NEON::BI__builtin_neon_vcvtpq_s64_v:
10183 case NEON::BI__builtin_neon_vcvtp_u64_v:
10184 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
10185 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
10186 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10187 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
10188 }
10189 case NEON::BI__builtin_neon_vmulx_v:
10190 case NEON::BI__builtin_neon_vmulxq_v: {
10191 Int = Intrinsic::aarch64_neon_fmulx;
10192 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
10193 }
10194 case NEON::BI__builtin_neon_vmulxh_lane_f16:
10195 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
10196 // vmulx_lane should be mapped to Neon scalar mulx after
10197 // extracting the scalar element
10198 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10199 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10200 Ops.pop_back();
10201 Int = Intrinsic::aarch64_neon_fmulx;
10202 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
10203 }
10204 case NEON::BI__builtin_neon_vmul_lane_v:
10205 case NEON::BI__builtin_neon_vmul_laneq_v: {
10206 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
10207 bool Quad = false;
10208 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
10209 Quad = true;
10210 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10211 llvm::FixedVectorType *VTy =
10212 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
10213 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10214 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10215 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
10216 return Builder.CreateBitCast(Result, Ty);
10217 }
10218 case NEON::BI__builtin_neon_vnegd_s64:
10219 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
10220 case NEON::BI__builtin_neon_vnegh_f16:
10221 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
10222 case NEON::BI__builtin_neon_vpmaxnm_v:
10223 case NEON::BI__builtin_neon_vpmaxnmq_v: {
10224 Int = Intrinsic::aarch64_neon_fmaxnmp;
10225 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
10226 }
10227 case NEON::BI__builtin_neon_vpminnm_v:
10228 case NEON::BI__builtin_neon_vpminnmq_v: {
10229 Int = Intrinsic::aarch64_neon_fminnmp;
10230 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
10231 }
10232 case NEON::BI__builtin_neon_vsqrth_f16: {
10233 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10234 Int = Builder.getIsFPConstrained()
10235 ? Intrinsic::experimental_constrained_sqrt
10236 : Intrinsic::sqrt;
10237 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
10238 }
10239 case NEON::BI__builtin_neon_vsqrt_v:
10240 case NEON::BI__builtin_neon_vsqrtq_v: {
10241 Int = Builder.getIsFPConstrained()
10242 ? Intrinsic::experimental_constrained_sqrt
10243 : Intrinsic::sqrt;
10244 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10245 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
10246 }
10247 case NEON::BI__builtin_neon_vrbit_v:
10248 case NEON::BI__builtin_neon_vrbitq_v: {
10249 Int = Intrinsic::aarch64_neon_rbit;
10250 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
10251 }
10252 case NEON::BI__builtin_neon_vaddv_u8:
10253 // FIXME: These are handled by the AArch64 scalar code.
10254 usgn = true;
10255 LLVM_FALLTHROUGH;
10256 case NEON::BI__builtin_neon_vaddv_s8: {
10257 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10258 Ty = Int32Ty;
10259 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10260 llvm::Type *Tys[2] = { Ty, VTy };
10261 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10262 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10263 return Builder.CreateTrunc(Ops[0], Int8Ty);
10264 }
10265 case NEON::BI__builtin_neon_vaddv_u16:
10266 usgn = true;
10267 LLVM_FALLTHROUGH;
10268 case NEON::BI__builtin_neon_vaddv_s16: {
10269 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10270 Ty = Int32Ty;
10271 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10272 llvm::Type *Tys[2] = { Ty, VTy };
10273 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10274 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10275 return Builder.CreateTrunc(Ops[0], Int16Ty);
10276 }
10277 case NEON::BI__builtin_neon_vaddvq_u8:
10278 usgn = true;
10279 LLVM_FALLTHROUGH;
10280 case NEON::BI__builtin_neon_vaddvq_s8: {
10281 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10282 Ty = Int32Ty;
10283 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10284 llvm::Type *Tys[2] = { Ty, VTy };
10285 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10286 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10287 return Builder.CreateTrunc(Ops[0], Int8Ty);
10288 }
10289 case NEON::BI__builtin_neon_vaddvq_u16:
10290 usgn = true;
10291 LLVM_FALLTHROUGH;
10292 case NEON::BI__builtin_neon_vaddvq_s16: {
10293 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10294 Ty = Int32Ty;
10295 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10296 llvm::Type *Tys[2] = { Ty, VTy };
10297 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10298 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10299 return Builder.CreateTrunc(Ops[0], Int16Ty);
10300 }
10301 case NEON::BI__builtin_neon_vmaxv_u8: {
10302 Int = Intrinsic::aarch64_neon_umaxv;
10303 Ty = Int32Ty;
10304 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10305 llvm::Type *Tys[2] = { Ty, VTy };
10306 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10307 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10308 return Builder.CreateTrunc(Ops[0], Int8Ty);
10309 }
10310 case NEON::BI__builtin_neon_vmaxv_u16: {
10311 Int = Intrinsic::aarch64_neon_umaxv;
10312 Ty = Int32Ty;
10313 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10314 llvm::Type *Tys[2] = { Ty, VTy };
10315 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10316 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10317 return Builder.CreateTrunc(Ops[0], Int16Ty);
10318 }
10319 case NEON::BI__builtin_neon_vmaxvq_u8: {
10320 Int = Intrinsic::aarch64_neon_umaxv;
10321 Ty = Int32Ty;
10322 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10323 llvm::Type *Tys[2] = { Ty, VTy };
10324 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10325 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10326 return Builder.CreateTrunc(Ops[0], Int8Ty);
10327 }
10328 case NEON::BI__builtin_neon_vmaxvq_u16: {
10329 Int = Intrinsic::aarch64_neon_umaxv;
10330 Ty = Int32Ty;
10331 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10332 llvm::Type *Tys[2] = { Ty, VTy };
10333 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10334 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10335 return Builder.CreateTrunc(Ops[0], Int16Ty);
10336 }
10337 case NEON::BI__builtin_neon_vmaxv_s8: {
10338 Int = Intrinsic::aarch64_neon_smaxv;
10339 Ty = Int32Ty;
10340 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10341 llvm::Type *Tys[2] = { Ty, VTy };
10342 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10343 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10344 return Builder.CreateTrunc(Ops[0], Int8Ty);
10345 }
10346 case NEON::BI__builtin_neon_vmaxv_s16: {
10347 Int = Intrinsic::aarch64_neon_smaxv;
10348 Ty = Int32Ty;
10349 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10350 llvm::Type *Tys[2] = { Ty, VTy };
10351 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10352 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10353 return Builder.CreateTrunc(Ops[0], Int16Ty);
10354 }
10355 case NEON::BI__builtin_neon_vmaxvq_s8: {
10356 Int = Intrinsic::aarch64_neon_smaxv;
10357 Ty = Int32Ty;
10358 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10359 llvm::Type *Tys[2] = { Ty, VTy };
10360 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10361 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10362 return Builder.CreateTrunc(Ops[0], Int8Ty);
10363 }
10364 case NEON::BI__builtin_neon_vmaxvq_s16: {
10365 Int = Intrinsic::aarch64_neon_smaxv;
10366 Ty = Int32Ty;
10367 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10368 llvm::Type *Tys[2] = { Ty, VTy };
10369 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10370 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10371 return Builder.CreateTrunc(Ops[0], Int16Ty);
10372 }
10373 case NEON::BI__builtin_neon_vmaxv_f16: {
10374 Int = Intrinsic::aarch64_neon_fmaxv;
10375 Ty = HalfTy;
10376 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10377 llvm::Type *Tys[2] = { Ty, VTy };
10378 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10379 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10380 return Builder.CreateTrunc(Ops[0], HalfTy);
10381 }
10382 case NEON::BI__builtin_neon_vmaxvq_f16: {
10383 Int = Intrinsic::aarch64_neon_fmaxv;
10384 Ty = HalfTy;
10385 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10386 llvm::Type *Tys[2] = { Ty, VTy };
10387 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10388 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10389 return Builder.CreateTrunc(Ops[0], HalfTy);
10390 }
10391 case NEON::BI__builtin_neon_vminv_u8: {
10392 Int = Intrinsic::aarch64_neon_uminv;
10393 Ty = Int32Ty;
10394 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10395 llvm::Type *Tys[2] = { Ty, VTy };
10396 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10397 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10398 return Builder.CreateTrunc(Ops[0], Int8Ty);
10399 }
10400 case NEON::BI__builtin_neon_vminv_u16: {
10401 Int = Intrinsic::aarch64_neon_uminv;
10402 Ty = Int32Ty;
10403 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10404 llvm::Type *Tys[2] = { Ty, VTy };
10405 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10406 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10407 return Builder.CreateTrunc(Ops[0], Int16Ty);
10408 }
10409 case NEON::BI__builtin_neon_vminvq_u8: {
10410 Int = Intrinsic::aarch64_neon_uminv;
10411 Ty = Int32Ty;
10412 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10413 llvm::Type *Tys[2] = { Ty, VTy };
10414 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10415 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10416 return Builder.CreateTrunc(Ops[0], Int8Ty);
10417 }
10418 case NEON::BI__builtin_neon_vminvq_u16: {
10419 Int = Intrinsic::aarch64_neon_uminv;
10420 Ty = Int32Ty;
10421 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10422 llvm::Type *Tys[2] = { Ty, VTy };
10423 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10424 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10425 return Builder.CreateTrunc(Ops[0], Int16Ty);
10426 }
10427 case NEON::BI__builtin_neon_vminv_s8: {
10428 Int = Intrinsic::aarch64_neon_sminv;
10429 Ty = Int32Ty;
10430 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10431 llvm::Type *Tys[2] = { Ty, VTy };
10432 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10433 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10434 return Builder.CreateTrunc(Ops[0], Int8Ty);
10435 }
10436 case NEON::BI__builtin_neon_vminv_s16: {
10437 Int = Intrinsic::aarch64_neon_sminv;
10438 Ty = Int32Ty;
10439 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10440 llvm::Type *Tys[2] = { Ty, VTy };
10441 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10442 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10443 return Builder.CreateTrunc(Ops[0], Int16Ty);
10444 }
10445 case NEON::BI__builtin_neon_vminvq_s8: {
10446 Int = Intrinsic::aarch64_neon_sminv;
10447 Ty = Int32Ty;
10448 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10449 llvm::Type *Tys[2] = { Ty, VTy };
10450 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10451 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10452 return Builder.CreateTrunc(Ops[0], Int8Ty);
10453 }
10454 case NEON::BI__builtin_neon_vminvq_s16: {
10455 Int = Intrinsic::aarch64_neon_sminv;
10456 Ty = Int32Ty;
10457 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10458 llvm::Type *Tys[2] = { Ty, VTy };
10459 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10460 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10461 return Builder.CreateTrunc(Ops[0], Int16Ty);
10462 }
10463 case NEON::BI__builtin_neon_vminv_f16: {
10464 Int = Intrinsic::aarch64_neon_fminv;
10465 Ty = HalfTy;
10466 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10467 llvm::Type *Tys[2] = { Ty, VTy };
10468 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10469 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10470 return Builder.CreateTrunc(Ops[0], HalfTy);
10471 }
10472 case NEON::BI__builtin_neon_vminvq_f16: {
10473 Int = Intrinsic::aarch64_neon_fminv;
10474 Ty = HalfTy;
10475 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10476 llvm::Type *Tys[2] = { Ty, VTy };
10477 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10478 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10479 return Builder.CreateTrunc(Ops[0], HalfTy);
10480 }
10481 case NEON::BI__builtin_neon_vmaxnmv_f16: {
10482 Int = Intrinsic::aarch64_neon_fmaxnmv;
10483 Ty = HalfTy;
10484 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10485 llvm::Type *Tys[2] = { Ty, VTy };
10486 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10487 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10488 return Builder.CreateTrunc(Ops[0], HalfTy);
10489 }
10490 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
10491 Int = Intrinsic::aarch64_neon_fmaxnmv;
10492 Ty = HalfTy;
10493 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10494 llvm::Type *Tys[2] = { Ty, VTy };
10495 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10496 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10497 return Builder.CreateTrunc(Ops[0], HalfTy);
10498 }
10499 case NEON::BI__builtin_neon_vminnmv_f16: {
10500 Int = Intrinsic::aarch64_neon_fminnmv;
10501 Ty = HalfTy;
10502 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10503 llvm::Type *Tys[2] = { Ty, VTy };
10504 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10505 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10506 return Builder.CreateTrunc(Ops[0], HalfTy);
10507 }
10508 case NEON::BI__builtin_neon_vminnmvq_f16: {
10509 Int = Intrinsic::aarch64_neon_fminnmv;
10510 Ty = HalfTy;
10511 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10512 llvm::Type *Tys[2] = { Ty, VTy };
10513 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10514 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10515 return Builder.CreateTrunc(Ops[0], HalfTy);
10516 }
10517 case NEON::BI__builtin_neon_vmul_n_f64: {
10518 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10519 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
10520 return Builder.CreateFMul(Ops[0], RHS);
10521 }
10522 case NEON::BI__builtin_neon_vaddlv_u8: {
10523 Int = Intrinsic::aarch64_neon_uaddlv;
10524 Ty = Int32Ty;
10525 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10526 llvm::Type *Tys[2] = { Ty, VTy };
10527 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10528 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10529 return Builder.CreateTrunc(Ops[0], Int16Ty);
10530 }
10531 case NEON::BI__builtin_neon_vaddlv_u16: {
10532 Int = Intrinsic::aarch64_neon_uaddlv;
10533 Ty = Int32Ty;
10534 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10535 llvm::Type *Tys[2] = { Ty, VTy };
10536 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10537 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10538 }
10539 case NEON::BI__builtin_neon_vaddlvq_u8: {
10540 Int = Intrinsic::aarch64_neon_uaddlv;
10541 Ty = Int32Ty;
10542 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10543 llvm::Type *Tys[2] = { Ty, VTy };
10544 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10545 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10546 return Builder.CreateTrunc(Ops[0], Int16Ty);
10547 }
10548 case NEON::BI__builtin_neon_vaddlvq_u16: {
10549 Int = Intrinsic::aarch64_neon_uaddlv;
10550 Ty = Int32Ty;
10551 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10552 llvm::Type *Tys[2] = { Ty, VTy };
10553 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10554 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10555 }
10556 case NEON::BI__builtin_neon_vaddlv_s8: {
10557 Int = Intrinsic::aarch64_neon_saddlv;
10558 Ty = Int32Ty;
10559 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10560 llvm::Type *Tys[2] = { Ty, VTy };
10561 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10562 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10563 return Builder.CreateTrunc(Ops[0], Int16Ty);
10564 }
10565 case NEON::BI__builtin_neon_vaddlv_s16: {
10566 Int = Intrinsic::aarch64_neon_saddlv;
10567 Ty = Int32Ty;
10568 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10569 llvm::Type *Tys[2] = { Ty, VTy };
10570 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10571 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10572 }
10573 case NEON::BI__builtin_neon_vaddlvq_s8: {
10574 Int = Intrinsic::aarch64_neon_saddlv;
10575 Ty = Int32Ty;
10576 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10577 llvm::Type *Tys[2] = { Ty, VTy };
10578 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10579 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10580 return Builder.CreateTrunc(Ops[0], Int16Ty);
10581 }
10582 case NEON::BI__builtin_neon_vaddlvq_s16: {
10583 Int = Intrinsic::aarch64_neon_saddlv;
10584 Ty = Int32Ty;
10585 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10586 llvm::Type *Tys[2] = { Ty, VTy };
10587 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10588 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10589 }
10590 case NEON::BI__builtin_neon_vsri_n_v:
10591 case NEON::BI__builtin_neon_vsriq_n_v: {
10592 Int = Intrinsic::aarch64_neon_vsri;
10593 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10594 return EmitNeonCall(Intrin, Ops, "vsri_n");
10595 }
10596 case NEON::BI__builtin_neon_vsli_n_v:
10597 case NEON::BI__builtin_neon_vsliq_n_v: {
10598 Int = Intrinsic::aarch64_neon_vsli;
10599 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10600 return EmitNeonCall(Intrin, Ops, "vsli_n");
10601 }
10602 case NEON::BI__builtin_neon_vsra_n_v:
10603 case NEON::BI__builtin_neon_vsraq_n_v:
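      // Editorial note (not in the original source): vsra_n is an immediate right
      // shift of the second operand accumulated into the first.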
10604 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10605 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
10606 return Builder.CreateAdd(Ops[0], Ops[1]);
10607 case NEON::BI__builtin_neon_vrsra_n_v:
10608 case NEON::BI__builtin_neon_vrsraq_n_v: {
10609 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
10610 SmallVector<llvm::Value*,2> TmpOps;
10611 TmpOps.push_back(Ops[1]);
10612 TmpOps.push_back(Ops[2]);
10613 Function* F = CGM.getIntrinsic(Int, Ty);
10614 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
10615 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
10616 return Builder.CreateAdd(Ops[0], tmp);
10617 }
10618 case NEON::BI__builtin_neon_vld1_v:
10619 case NEON::BI__builtin_neon_vld1q_v: {
10620 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10621 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
10622 }
10623 case NEON::BI__builtin_neon_vst1_v:
10624 case NEON::BI__builtin_neon_vst1q_v:
10625 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10626 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10627 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
10628 case NEON::BI__builtin_neon_vld1_lane_v:
10629 case NEON::BI__builtin_neon_vld1q_lane_v: {
10630 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10631 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10632 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10633 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10634 PtrOp0.getAlignment());
10635 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
10636 }
10637 case NEON::BI__builtin_neon_vld1_dup_v:
10638 case NEON::BI__builtin_neon_vld1q_dup_v: {
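      // Editorial note (not in the original source): a single element is loaded,
      // inserted into lane 0 of an undef vector, and then splatted across all lanes.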
10639 Value *V = UndefValue::get(Ty);
10640 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10641 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10642 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10643 PtrOp0.getAlignment());
10644 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
10645 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
10646 return EmitNeonSplat(Ops[0], CI);
10647 }
10648 case NEON::BI__builtin_neon_vst1_lane_v:
10649 case NEON::BI__builtin_neon_vst1q_lane_v:
10650 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10651 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
10652 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10653 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
10654 PtrOp0.getAlignment());
10655 case NEON::BI__builtin_neon_vld2_v:
10656 case NEON::BI__builtin_neon_vld2q_v: {
10657 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10658 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10659 llvm::Type *Tys[2] = { VTy, PTy };
10660 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
10661 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10662 Ops[0] = Builder.CreateBitCast(Ops[0],
10663 llvm::PointerType::getUnqual(Ops[1]->getType()));
10664 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10665 }
10666 case NEON::BI__builtin_neon_vld3_v:
10667 case NEON::BI__builtin_neon_vld3q_v: {
10668 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10669 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10670 llvm::Type *Tys[2] = { VTy, PTy };
10671 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
10672 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10673 Ops[0] = Builder.CreateBitCast(Ops[0],
10674 llvm::PointerType::getUnqual(Ops[1]->getType()));
10675 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10676 }
10677 case NEON::BI__builtin_neon_vld4_v:
10678 case NEON::BI__builtin_neon_vld4q_v: {
10679 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
10680 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10681 llvm::Type *Tys[2] = { VTy, PTy };
10682 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
10683 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10684 Ops[0] = Builder.CreateBitCast(Ops[0],
10685 llvm::PointerType::getUnqual(Ops[1]->getType()));
10686 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10687 }
10688 case NEON::BI__builtin_neon_vld2_dup_v:
10689 case NEON::BI__builtin_neon_vld2q_dup_v: {
10690 llvm::Type *PTy =
10691 llvm::PointerType::getUnqual(VTy->getElementType());
10692 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10693 llvm::Type *Tys[2] = { VTy, PTy };
10694 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
10695 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
10696 Ops[0] = Builder.CreateBitCast(Ops[0],
10697 llvm::PointerType::getUnqual(Ops[1]->getType()));
10698 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10699 }
10700 case NEON::BI__builtin_neon_vld3_dup_v:
10701 case NEON::BI__builtin_neon_vld3q_dup_v: {
10702 llvm::Type *PTy =
10703 llvm::PointerType::getUnqual(VTy->getElementType());
10704 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10705 llvm::Type *Tys[2] = { VTy, PTy };
10706 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
10707 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
10708 Ops[0] = Builder.CreateBitCast(Ops[0],
10709 llvm::PointerType::getUnqual(Ops[1]->getType()));
10710 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10711 }
10712 case NEON::BI__builtin_neon_vld4_dup_v:
10713 case NEON::BI__builtin_neon_vld4q_dup_v: {
10714 llvm::Type *PTy =
10715 llvm::PointerType::getUnqual(VTy->getElementType());
10716 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
10717 llvm::Type *Tys[2] = { VTy, PTy };
10718 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
10719 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
10720 Ops[0] = Builder.CreateBitCast(Ops[0],
10721 llvm::PointerType::getUnqual(Ops[1]->getType()));
10722 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10723 }
10724 case NEON::BI__builtin_neon_vld2_lane_v:
10725 case NEON::BI__builtin_neon_vld2q_lane_v: {
10726 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10727 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
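      // Editorial note (not in the original source): the ld2lane intrinsic takes the
      // address as its last operand, so the pointer is rotated to the end of Ops
      // before the call on makeArrayRef(Ops).slice(1).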
10728 Ops.push_back(Ops[1]);
10729 Ops.erase(Ops.begin()+1);
10730 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10731 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10732 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10733 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
10734 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10735 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10736 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10737 }
10738 case NEON::BI__builtin_neon_vld3_lane_v:
10739 case NEON::BI__builtin_neon_vld3q_lane_v: {
10740 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10741 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
10742 Ops.push_back(Ops[1]);
10743 Ops.erase(Ops.begin()+1);
10744 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10745 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10746 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10747 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10748 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
10749 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10750 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10751 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10752 }
10753 case NEON::BI__builtin_neon_vld4_lane_v:
10754 case NEON::BI__builtin_neon_vld4q_lane_v: {
10755 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
10756 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
10757 Ops.push_back(Ops[1]);
10758 Ops.erase(Ops.begin()+1);
10759 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10760 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10761 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
10762 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
10763 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
10764 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
10765 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
10766 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10767 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10768 }
10769 case NEON::BI__builtin_neon_vst2_v:
10770 case NEON::BI__builtin_neon_vst2q_v: {
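      // Editorial note (not in the original source): the stN intrinsics likewise take
      // the destination pointer last, so it is rotated from the front of Ops to the back.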
10771 Ops.push_back(Ops[0]);
10772 Ops.erase(Ops.begin());
10773 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
10774 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
10775 Ops, "");
10776 }
10777 case NEON::BI__builtin_neon_vst2_lane_v:
10778 case NEON::BI__builtin_neon_vst2q_lane_v: {
10779 Ops.push_back(Ops[0]);
10780 Ops.erase(Ops.begin());
10781 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
10782 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10783 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
10784 Ops, "");
10785 }
10786 case NEON::BI__builtin_neon_vst3_v:
10787 case NEON::BI__builtin_neon_vst3q_v: {
10788 Ops.push_back(Ops[0]);
10789 Ops.erase(Ops.begin());
10790 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
10791 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
10792 Ops, "");
10793 }
10794 case NEON::BI__builtin_neon_vst3_lane_v:
10795 case NEON::BI__builtin_neon_vst3q_lane_v: {
10796 Ops.push_back(Ops[0]);
10797 Ops.erase(Ops.begin());
10798 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
10799 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10800 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
10801 Ops, "");
10802 }
10803 case NEON::BI__builtin_neon_vst4_v:
10804 case NEON::BI__builtin_neon_vst4q_v: {
10805 Ops.push_back(Ops[0]);
10806 Ops.erase(Ops.begin());
10807 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
10808 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
10809 Ops, "");
10810 }
10811 case NEON::BI__builtin_neon_vst4_lane_v:
10812 case NEON::BI__builtin_neon_vst4q_lane_v: {
10813 Ops.push_back(Ops[0]);
10814 Ops.erase(Ops.begin());
10815 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
10816 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
10817 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
10818 Ops, "");
10819 }
10820 case NEON::BI__builtin_neon_vtrn_v:
10821 case NEON::BI__builtin_neon_vtrnq_v: {
10822 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10823 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10824 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10825 Value *SV = nullptr;
10826
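      // Editorial note (not in the original source): the two iterations build the
      // TRN1/TRN2 shuffle masks, pairing lane i+vi of each input, and store each
      // result through element vi of the destination pointer in Ops[0].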
10827 for (unsigned vi = 0; vi != 2; ++vi) {
10828 SmallVector<int, 16> Indices;
10829 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
10830 Indices.push_back(i+vi);
10831 Indices.push_back(i+e+vi);
10832 }
10833 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10834 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
10835 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10836 }
10837 return SV;
10838 }
10839 case NEON::BI__builtin_neon_vuzp_v:
10840 case NEON::BI__builtin_neon_vuzpq_v: {
10841 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10842 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10843 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10844 Value *SV = nullptr;
10845
10846 for (unsigned vi = 0; vi != 2; ++vi) {
10847 SmallVector<int, 16> Indices;
10848 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
10849 Indices.push_back(2*i+vi);
10850
10851 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10852 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
10853 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10854 }
10855 return SV;
10856 }
10857 case NEON::BI__builtin_neon_vzip_v:
10858 case NEON::BI__builtin_neon_vzipq_v: {
10859 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
10860 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10861 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10862 Value *SV = nullptr;
10863
10864 for (unsigned vi = 0; vi != 2; ++vi) {
10865 SmallVector<int, 16> Indices;
10866 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
10867 Indices.push_back((i + vi*e) >> 1);
10868 Indices.push_back(((i + vi*e) >> 1)+e);
10869 }
10870 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
10871 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
10872 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
10873 }
10874 return SV;
10875 }
10876 case NEON::BI__builtin_neon_vqtbl1q_v: {
10877 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
10878 Ops, "vtbl1");
10879 }
10880 case NEON::BI__builtin_neon_vqtbl2q_v: {
10881 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
10882 Ops, "vtbl2");
10883 }
10884 case NEON::BI__builtin_neon_vqtbl3q_v: {
10885 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
10886 Ops, "vtbl3");
10887 }
10888 case NEON::BI__builtin_neon_vqtbl4q_v: {
10889 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
10890 Ops, "vtbl4");
10891 }
10892 case NEON::BI__builtin_neon_vqtbx1q_v: {
10893 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
10894 Ops, "vtbx1");
10895 }
10896 case NEON::BI__builtin_neon_vqtbx2q_v: {
10897 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
10898 Ops, "vtbx2");
10899 }
10900 case NEON::BI__builtin_neon_vqtbx3q_v: {
10901 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
10902 Ops, "vtbx3");
10903 }
10904 case NEON::BI__builtin_neon_vqtbx4q_v: {
10905 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
10906 Ops, "vtbx4");
10907 }
10908 case NEON::BI__builtin_neon_vsqadd_v:
10909 case NEON::BI__builtin_neon_vsqaddq_v: {
10910 Int = Intrinsic::aarch64_neon_usqadd;
10911 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
10912 }
10913 case NEON::BI__builtin_neon_vuqadd_v:
10914 case NEON::BI__builtin_neon_vuqaddq_v: {
10915 Int = Intrinsic::aarch64_neon_suqadd;
10916 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
10917 }
10918 }
10919}
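
The vtrn/vuzp/vzip cases above build shuffle index vectors in small loops. The following standalone sketch (not part of CGBuiltin.cpp) reproduces those index patterns for a hypothetical 4-element vector, treating the two inputs as one concatenated 2*e-element vector the way LLVM's shufflevector does.

// Illustrative sketch only; mirrors the index loops in the vtrn/vuzp/vzip
// cases above, with e = 4 chosen purely for readability.
#include <cstdio>
#include <vector>

int main() {
  const unsigned e = 4; // elements per input vector (assumption)
  for (unsigned vi = 0; vi != 2; ++vi) {
    std::vector<int> Trn, Uzp, Zip;
    for (unsigned i = 0; i != e; i += 2) {      // vtrn: transpose pairs
      Trn.push_back(i + vi);
      Trn.push_back(i + e + vi);
    }
    for (unsigned i = 0; i != e; ++i)           // vuzp: even/odd de-interleave
      Uzp.push_back(2 * i + vi);
    for (unsigned i = 0; i != e; i += 2) {      // vzip: low/high interleave
      Zip.push_back((i + vi * e) >> 1);
      Zip.push_back(((i + vi * e) >> 1) + e);
    }
    printf("vi=%u  vtrn:", vi);
    for (int x : Trn) printf(" %d", x);
    printf("  vuzp:");
    for (int x : Uzp) printf(" %d", x);
    printf("  vzip:");
    for (int x : Zip) printf(" %d", x);
    printf("\n");
  }
  return 0;
}

For e = 4 this prints [0,4,2,6]/[1,5,3,7] for vtrn, [0,2,4,6]/[1,3,5,7] for vuzp and [0,4,1,5]/[2,6,3,7] for vzip, i.e. the two result registers each builtin stores through Ops[0].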
10920
10921Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
10922 const CallExpr *E) {
10923 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
10924 BuiltinID == BPF::BI__builtin_btf_type_id ||
10925 BuiltinID == BPF::BI__builtin_preserve_type_info ||
10926 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
10927 "unexpected BPF builtin");
10928
10929 // A sequence number, injected into IR builtin functions, to
10930 // prevent CSE, since the only difference between the functions
10931 // may just be the debuginfo metadata.
10932 static uint32_t BuiltinSeqNum;
10933
10934 switch (BuiltinID) {
10935 default:
10936 llvm_unreachable("Unexpected BPF builtin")::llvm::llvm_unreachable_internal("Unexpected BPF builtin", "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 10936)
;
10937 case BPF::BI__builtin_preserve_field_info: {
10938 const Expr *Arg = E->getArg(0);
10939 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
10940
10941 if (!getDebugInfo()) {
10942 CGM.Error(E->getExprLoc(),
10943 "using __builtin_preserve_field_info() without -g");
10944 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
10945 : EmitLValue(Arg).getPointer(*this);
10946 }
10947
10948 // Enable underlying preserve_*_access_index() generation.
10949 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
10950 IsInPreservedAIRegion = true;
10951 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
10952 : EmitLValue(Arg).getPointer(*this);
10953 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
10954
10955 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10956 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
10957
10958 // Build the IR for the preserve_field_info intrinsic.
10959 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
10960 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
10961 {FieldAddr->getType()});
10962 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
10963 }
10964 case BPF::BI__builtin_btf_type_id:
10965 case BPF::BI__builtin_preserve_type_info: {
10966 if (!getDebugInfo()) {
10967 CGM.Error(E->getExprLoc(), "using builtin function without -g");
10968 return nullptr;
10969 }
10970
10971 const Expr *Arg0 = E->getArg(0);
10972 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
10973 Arg0->getType(), Arg0->getExprLoc());
10974
10975 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10976 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
10977 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
10978
10979 llvm::Function *FnDecl;
10980 if (BuiltinID == BPF::BI__builtin_btf_type_id)
10981 FnDecl = llvm::Intrinsic::getDeclaration(
10982 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
10983 else
10984 FnDecl = llvm::Intrinsic::getDeclaration(
10985 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
10986 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
10987 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
10988 return Fn;
10989 }
10990 case BPF::BI__builtin_preserve_enum_value: {
10991 if (!getDebugInfo()) {
10992 CGM.Error(E->getExprLoc(), "using builtin function without -g");
10993 return nullptr;
10994 }
10995
10996 const Expr *Arg0 = E->getArg(0);
10997 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
10998 Arg0->getType(), Arg0->getExprLoc());
10999
11000 // Find enumerator
11001 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
11002 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
11003 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
11004 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
11005
11006 auto &InitVal = Enumerator->getInitVal();
11007 std::string InitValStr;
11008 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
11009 InitValStr = std::to_string(InitVal.getSExtValue());
11010 else
11011 InitValStr = std::to_string(InitVal.getZExtValue());
11012 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
11013 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11014
11015 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11016 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11017 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11018
11019 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11020 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11021 CallInst *Fn =
11022 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11023 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11024 return Fn;
11025 }
11026 }
11027}
11028
11029llvm::Value *CodeGenFunction::
11030BuildVector(ArrayRef<llvm::Value*> Ops) {
11031 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11032 "Not a power-of-two sized vector!");
11033 bool AllConstants = true;
11034 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11035 AllConstants &= isa<Constant>(Ops[i]);
11036
11037 // If this is a constant vector, create a ConstantVector.
11038 if (AllConstants) {
11039 SmallVector<llvm::Constant*, 16> CstOps;
11040 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11041 CstOps.push_back(cast<Constant>(Ops[i]));
11042 return llvm::ConstantVector::get(CstOps);
11043 }
11044
11045 // Otherwise, insertelement the values to build the vector.
11046 Value *Result = llvm::UndefValue::get(
11047 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11048
11049 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11050 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11051
11052 return Result;
11053}
11054
11055// Convert the mask from an integer type to a vector of i1.
11056static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11057 unsigned NumElts) {
11058
11059 auto *MaskTy = llvm::FixedVectorType::get(
11060 CGF.Builder.getInt1Ty(),
11061 cast<IntegerType>(Mask->getType())->getBitWidth());
11062 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11063
11064 // If we have less than 8 elements, then the starting mask was an i8 and
11065 // we need to extract down to the right number of elements.
11066 if (NumElts < 8) {
11067 int Indices[4];
11068 for (unsigned i = 0; i != NumElts; ++i)
11069 Indices[i] = i;
11070 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11071 makeArrayRef(Indices, NumElts),
11072 "extract");
11073 }
11074 return MaskVec;
11075}
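
A standalone sketch (not part of CGBuiltin.cpp) of what the bitcast in getMaskVecValue means at the bit level: bit i of the integer mask becomes lane i of the i1 vector, and when NumElts < 8 only the low NumElts lanes survive the extracting shuffle. The mask value and element count below are assumptions chosen for illustration.

// Illustrative sketch of the mask-bit to vector-lane mapping used above.
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t Mask = 0b10110101;  // hypothetical __mmask8 value
  unsigned NumElts = 4;       // e.g. a 128-bit vector of i32 lanes
  for (unsigned i = 0; i != NumElts; ++i)
    printf("lane %u -> %u\n", i, (Mask >> i) & 1u);  // low bits map to low lanes
  return 0;
}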
11076
11077static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11078 Align Alignment) {
11079 // Cast the pointer to right type.
11080 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11081 llvm::PointerType::getUnqual(Ops[1]->getType()));
11082
11083 Value *MaskVec = getMaskVecValue(
11084 CGF, Ops[2],
11085 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11086
11087 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11088}
11089
11090static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11091 Align Alignment) {
11092 // Cast the pointer to right type.
11093 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11094 llvm::PointerType::getUnqual(Ops[1]->getType()));
11095
11096 Value *MaskVec = getMaskVecValue(
11097 CGF, Ops[2],
11098 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11099
11100 return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
11101}
11102
11103static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11104 ArrayRef<Value *> Ops) {
11105 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11106 llvm::Type *PtrTy = ResultTy->getElementType();
11107
11108 // Cast the pointer to element type.
11109 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11110 llvm::PointerType::getUnqual(PtrTy));
11111
11112 Value *MaskVec = getMaskVecValue(
11113 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11114
11115 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11116 ResultTy);
11117 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11118}
11119
11120static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11121 ArrayRef<Value *> Ops,
11122 bool IsCompress) {
11123 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11124
11125 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11126
11127 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11128 : Intrinsic::x86_avx512_mask_expand;
11129 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11130 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11131}
11132
11133static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11134 ArrayRef<Value *> Ops) {
11135 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11136 llvm::Type *PtrTy = ResultTy->getElementType();
11137
11138 // Cast the pointer to element type.
11139 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11140 llvm::PointerType::getUnqual(PtrTy));
11141
11142 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11143
11144 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11145 ResultTy);
11146 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11147}
11148
11149static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11150 ArrayRef<Value *> Ops,
11151 bool InvertLHS = false) {
11152 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11153 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11154 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11155
11156 if (InvertLHS)
11157 LHS = CGF.Builder.CreateNot(LHS);
11158
11159 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11160 Ops[0]->getType());
11161}
11162
11163static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11164 Value *Amt, bool IsRight) {
11165 llvm::Type *Ty = Op0->getType();
11166
11167 // Amount may be scalar immediate, in which case create a splat vector.
11168 // Funnel shift amounts are treated as modulo and types are all power-of-2, so
11169 // we only care about the lowest log2 bits anyway.
11170 if (Amt->getType() != Ty) {
11171 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11172 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11173 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11174 }
11175
11176 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11177 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11178 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11179}
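
A standalone scalar sketch (not part of CGBuiltin.cpp) of the fshl/fshr semantics the intrinsic call above relies on: conceptually concatenate the two operands, shift the double-width value by the amount modulo the element width, and keep one half. Eight-bit lanes are used here purely for readability.

// Illustrative sketch of funnel-shift semantics on an 8-bit lane.
#include <cstdint>
#include <cstdio>

static uint8_t fshl8(uint8_t Hi, uint8_t Lo, unsigned Amt) {
  Amt &= 7;                 // shift amount is modulo the element width
  if (Amt == 0) return Hi;
  return (uint8_t)((Hi << Amt) | (Lo >> (8 - Amt)));  // high half of (Hi:Lo) << Amt
}

static uint8_t fshr8(uint8_t Hi, uint8_t Lo, unsigned Amt) {
  Amt &= 7;
  if (Amt == 0) return Lo;
  return (uint8_t)((Lo >> Amt) | (Hi << (8 - Amt)));  // low half of (Hi:Lo) >> Amt
}

int main() {
  printf("fshl(0xAB, 0xCD, 4) = 0x%02X\n", fshl8(0xAB, 0xCD, 4)); // 0xBC
  printf("fshr(0xAB, 0xCD, 4) = 0x%02X\n", fshr8(0xAB, 0xCD, 4)); // 0xBC
  return 0;
}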
11180
11181static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11182 bool IsSigned) {
11183 Value *Op0 = Ops[0];
11184 Value *Op1 = Ops[1];
11185 llvm::Type *Ty = Op0->getType();
11186 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11187
11188 CmpInst::Predicate Pred;
11189 switch (Imm) {
11190 case 0x0:
11191 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11192 break;
11193 case 0x1:
11194 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
11195 break;
11196 case 0x2:
11197 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11198 break;
11199 case 0x3:
11200 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11201 break;
11202 case 0x4:
11203 Pred = ICmpInst::ICMP_EQ;
11204 break;
11205 case 0x5:
11206 Pred = ICmpInst::ICMP_NE;
11207 break;
11208 case 0x6:
11209 return llvm::Constant::getNullValue(Ty); // FALSE
11210 case 0x7:
11211 return llvm::Constant::getAllOnesValue(Ty); // TRUE
11212 default:
11213 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate")::llvm::llvm_unreachable_internal("Unexpected XOP vpcom/vpcomu predicate"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 11213)
;
11214 }
11215
11216 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
11217 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
11218 return Res;
11219}
11220
11221static Value *EmitX86Select(CodeGenFunction &CGF,
11222 Value *Mask, Value *Op0, Value *Op1) {
11223
11224 // If the mask is all ones just return first argument.
11225 if (const auto *C = dyn_cast<Constant>(Mask))
11226 if (C->isAllOnesValue())
11227 return Op0;
11228
11229 Mask = getMaskVecValue(
11230 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
11231
11232 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11233}
11234
11235static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
11236 Value *Mask, Value *Op0, Value *Op1) {
11237 // If the mask is all ones just return first argument.
11238 if (const auto *C = dyn_cast<Constant>(Mask))
11239 if (C->isAllOnesValue())
11240 return Op0;
11241
11242 auto *MaskTy = llvm::FixedVectorType::get(
11243 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
11244 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
11245 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
11246 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11247}
11248
11249static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
11250 unsigned NumElts, Value *MaskIn) {
11251 if (MaskIn) {
11252 const auto *C = dyn_cast<Constant>(MaskIn);
11253 if (!C || !C->isAllOnesValue())
11254 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
11255 }
11256
11257 if (NumElts < 8) {
11258 int Indices[8];
11259 for (unsigned i = 0; i != NumElts; ++i)
11260 Indices[i] = i;
11261 for (unsigned i = NumElts; i != 8; ++i)
11262 Indices[i] = i % NumElts + NumElts;
11263 Cmp = CGF.Builder.CreateShuffleVector(
11264 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
11265 }
11266
11267 return CGF.Builder.CreateBitCast(Cmp,
11268 IntegerType::get(CGF.getLLVMContext(),
11269 std::max(NumElts, 8U)));
11270}
11271
11272static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
11273 bool Signed, ArrayRef<Value *> Ops) {
11274 assert((Ops.size() == 2 || Ops.size() == 4) &&
11275 "Unexpected number of arguments");
11276 unsigned NumElts =
11277 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11278 Value *Cmp;
11279
11280 if (CC == 3) {
11281 Cmp = Constant::getNullValue(
11282 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11283 } else if (CC == 7) {
11284 Cmp = Constant::getAllOnesValue(
11285 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11286 } else {
11287 ICmpInst::Predicate Pred;
11288 switch (CC) {
11289 default: llvm_unreachable("Unknown condition code");
11290 case 0: Pred = ICmpInst::ICMP_EQ; break;
11291 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
11292 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
11293 case 4: Pred = ICmpInst::ICMP_NE; break;
11294 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
11295 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
11296 }
11297 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11298 }
11299
11300 Value *MaskIn = nullptr;
11301 if (Ops.size() == 4)
11302 MaskIn = Ops[3];
11303
11304 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
11305}
11306
11307static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
11308 Value *Zero = Constant::getNullValue(In->getType());
11309 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
11310}
11311
11312static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
11313 ArrayRef<Value *> Ops, bool IsSigned) {
11314 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
11315 llvm::Type *Ty = Ops[1]->getType();
11316
11317 Value *Res;
11318 if (Rnd != 4) {
11319 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
11320 : Intrinsic::x86_avx512_uitofp_round;
11321 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
11322 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
11323 } else {
11324 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
11325 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
11326 }
11327
11328 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11329}
11330
11331// Lowers X86 FMA intrinsics to IR.
11332static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11333 unsigned BuiltinID, bool IsAddSub) {
11334
11335 bool Subtract = false;
11336 Intrinsic::ID IID = Intrinsic::not_intrinsic;
11337 switch (BuiltinID) {
11338 default: break;
11339 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11340 Subtract = true;
11341 LLVM_FALLTHROUGH;
11342 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11343 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11344 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11345 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
11346 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11347 Subtract = true;
11348 LLVM_FALLTHROUGH;
11349 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11350 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11351 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11352 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
11353 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11354 Subtract = true;
11355 LLVM_FALLTHROUGH;
11356 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11357 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11358 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11359 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
11360 break;
11361 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11362 Subtract = true;
11363 LLVM_FALLTHROUGH;
11364 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11365 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11366 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11367 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
11368 break;
11369 }
11370
11371 Value *A = Ops[0];
11372 Value *B = Ops[1];
11373 Value *C = Ops[2];
11374
11375 if (Subtract)
11376 C = CGF.Builder.CreateFNeg(C);
11377
11378 Value *Res;
11379
11380 // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
11381 if (IID != Intrinsic::not_intrinsic &&
11382 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
11383 IsAddSub)) {
11384 Function *Intr = CGF.CGM.getIntrinsic(IID);
11385 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
11386 } else {
11387 llvm::Type *Ty = A->getType();
11388 Function *FMA;
11389 if (CGF.Builder.getIsFPConstrained()) {
11390 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
11391 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
11392 } else {
11393 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
11394 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
11395 }
11396 }
11397
11398 // Handle any required masking.
11399 Value *MaskFalseVal = nullptr;
11400 switch (BuiltinID) {
11401 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11402 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11403 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11404 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11405 MaskFalseVal = Ops[0];
11406 break;
11407 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11408 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11409 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11410 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11411 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
11412 break;
11413 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11414 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11415 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11416 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11417 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11418 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11419 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11420 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11421 MaskFalseVal = Ops[2];
11422 break;
11423 }
11424
11425 if (MaskFalseVal)
11426 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
11427
11428 return Res;
11429}
11430
11431static Value *
11432EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
11433 Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
11434 bool NegAcc = false) {
11435 unsigned Rnd = 4;
11436 if (Ops.size() > 4)
11437 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11438
11439 if (NegAcc)
11440 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
11441
11442 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11443 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11444 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11445 Value *Res;
11446 if (Rnd != 4) {
11447 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
11448 Intrinsic::x86_avx512_vfmadd_f32 :
11449 Intrinsic::x86_avx512_vfmadd_f64;
11450 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11451 {Ops[0], Ops[1], Ops[2], Ops[4]});
11452 } else if (CGF.Builder.getIsFPConstrained()) {
11453 Function *FMA = CGF.CGM.getIntrinsic(
11454 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
11455 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
11456 } else {
11457 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
11458 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
11459 }
11460 // If we have more than 3 arguments, we need to do masking.
11461 if (Ops.size() > 3) {
11462 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
11463 : Ops[PTIdx];
11464
11465 // If we negated the accumulator and it's the PassThru value, we need to
11466 // bypass the negate. Conveniently Upper should be the same thing in this
11467 // case.
11468 if (NegAcc && PTIdx == 2)
11469 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
11470
11471 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
11472 }
11473 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
11474}
11475
11476static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
11477 ArrayRef<Value *> Ops) {
11478 llvm::Type *Ty = Ops[0]->getType();
11479 // Arguments have a vXi32 type so cast to vXi64.
11480 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
11481 Ty->getPrimitiveSizeInBits() / 64);
11482 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
11483 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
11484
11485 if (IsSigned) {
11486 // Shift left then arithmetic shift right.
11487 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
11488 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
11489 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
11490 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
11491 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
11492 } else {
11493 // Clear the upper bits.
11494 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
11495 LHS = CGF.Builder.CreateAnd(LHS, Mask);
11496 RHS = CGF.Builder.CreateAnd(RHS, Mask);
11497 }
11498
11499 return CGF.Builder.CreateMul(LHS, RHS);
11500}
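
A standalone sketch (not part of CGBuiltin.cpp) of the per-lane arithmetic EmitX86Muldq lowers to: only the low 32 bits of each 64-bit lane participate, sign- or zero-extended to 64 bits before the multiply. The 0xdead... input is an arbitrary value chosen to show the high half being discarded.

// Illustrative sketch of pmuldq/pmuludq-style lane arithmetic.
#include <cstdint>
#include <cstdio>

static uint64_t muldq(uint64_t LHS, uint64_t RHS, bool IsSigned) {
  if (IsSigned) {
    // Shift left then arithmetic shift right: sign-extend the low 32 bits,
    // mirroring the CreateShl/CreateAShr pair above.
    int64_t L = (int64_t)(LHS << 32) >> 32;
    int64_t R = (int64_t)(RHS << 32) >> 32;
    return (uint64_t)(L * R);
  }
  // Clear the upper bits: zero-extend the low 32 bits, mirroring the AND above.
  return (LHS & 0xffffffffu) * (RHS & 0xffffffffu);
}

int main() {
  uint64_t A = 0xdeadbeefffffffffULL;  // high half is ignored either way
  printf("signed:   %lld\n", (long long)muldq(A, 3, true));            // -3
  printf("unsigned: %llu\n", (unsigned long long)muldq(A, 3, false));  // 3 * 0xffffffff
  return 0;
}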
11501
11502// Emit a masked pternlog intrinsic. This only exists because the header has to
11503// use a macro and we aren't able to pass the input argument to a pternlog
11504// builtin and a select builtin without evaluating it twice.
11505static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
11506 ArrayRef<Value *> Ops) {
11507 llvm::Type *Ty = Ops[0]->getType();
11508
11509 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
11510 unsigned EltWidth = Ty->getScalarSizeInBits();
11511 Intrinsic::ID IID;
11512 if (VecWidth == 128 && EltWidth == 32)
11513 IID = Intrinsic::x86_avx512_pternlog_d_128;
11514 else if (VecWidth == 256 && EltWidth == 32)
11515 IID = Intrinsic::x86_avx512_pternlog_d_256;
11516 else if (VecWidth == 512 && EltWidth == 32)
11517 IID = Intrinsic::x86_avx512_pternlog_d_512;
11518 else if (VecWidth == 128 && EltWidth == 64)
11519 IID = Intrinsic::x86_avx512_pternlog_q_128;
11520 else if (VecWidth == 256 && EltWidth == 64)
11521 IID = Intrinsic::x86_avx512_pternlog_q_256;
11522 else if (VecWidth == 512 && EltWidth == 64)
11523 IID = Intrinsic::x86_avx512_pternlog_q_512;
11524 else
11525 llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 11525)
;
11526
11527 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11528 Ops.drop_back());
11529 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
11530 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
11531}
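
A standalone sketch (not part of CGBuiltin.cpp) of the bitwise semantics behind the pternlog intrinsics selected above: for every bit position, the corresponding bits of the three operands form a 3-bit index into the 8-bit immediate truth table. The operand values and the 0xE8 (majority) immediate below are illustrative choices.

// Illustrative sketch of the pternlog truth-table evaluation.
#include <cstdint>
#include <cstdio>

static uint32_t ternlog(uint32_t A, uint32_t B, uint32_t C, uint8_t Imm) {
  uint32_t Res = 0;
  for (unsigned Bit = 0; Bit != 32; ++Bit) {
    // Bits of A, B, C at this position select one bit of the immediate.
    unsigned Idx = (((A >> Bit) & 1) << 2) | (((B >> Bit) & 1) << 1) | ((C >> Bit) & 1);
    Res |= (uint32_t)((Imm >> Idx) & 1) << Bit;
  }
  return Res;
}

int main() {
  // Imm 0xE8 encodes the majority function (A&B)|(A&C)|(B&C).
  uint32_t A = 0xF0F0F0F0, B = 0xCCCCCCCC, C = 0xAAAAAAAA;
  printf("0x%08X\n", ternlog(A, B, C, 0xE8));       // 0xE8E8E8E8
  printf("0x%08X\n", (A & B) | (A & C) | (B & C));  // same value
  return 0;
}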
11532
11533static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
11534 llvm::Type *DstTy) {
11535 unsigned NumberOfElements =
11536 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11537 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
11538 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
11539}
11540
11541// Emit binary intrinsic with the same type used in result/args.
11542static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
11543 ArrayRef<Value *> Ops, Intrinsic::ID IID) {
11544 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
11545 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
11546}
11547
11548Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
11549 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
11550 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
11551 return EmitX86CpuIs(CPUStr);
11552}
11553
11554 // Convert F16 halves to floats.
11555static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
11556 ArrayRef<Value *> Ops,
11557 llvm::Type *DstTy) {
11558 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
11559 "Unknown cvtph2ps intrinsic");
11560
11561 // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
11562 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
11563 Function *F =
11564 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
11565 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
11566 }
11567
11568 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11569 Value *Src = Ops[0];
11570
11571 // Extract the subvector.
11572 if (NumDstElts !=
11573 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
11574 assert(NumDstElts == 4 && "Unexpected vector size");
11575 Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
11576 ArrayRef<int>{0, 1, 2, 3});
11577 }
11578
11579 // Bitcast from vXi16 to vXf16.
11580 auto *HalfTy = llvm::FixedVectorType::get(
11581 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
11582 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
11583
11584 // Perform the fp-extension.
11585 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
11586
11587 if (Ops.size() >= 3)
11588 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11589 return Res;
11590}
11591
11592// Convert a BF16 to a float.
11593static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
11594 const CallExpr *E,
11595 ArrayRef<Value *> Ops) {
11596 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
11597 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
11598 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
11599 llvm::Type *ResultType = CGF.ConvertType(E->getType());
11600 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
11601 return BitCast;
11602}
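
A standalone sketch (not part of CGBuiltin.cpp) of the zext + shl + bitcast sequence above: a bfloat16 value is exactly the upper 16 bits of an IEEE-754 single, so converting it to float is a shift into the high half followed by a reinterpretation of the bits.

// Illustrative sketch of the BF16-to-float widening emitted above.
#include <cstdint>
#include <cstring>
#include <cstdio>

static float bf16_to_float(uint16_t BF16) {
  uint32_t Bits = (uint32_t)BF16 << 16;  // the zext and shl
  float F;
  std::memcpy(&F, &Bits, sizeof(F));     // the bitcast
  return F;
}

int main() {
  printf("%f\n", bf16_to_float(0x3F80));  // 1.0
  printf("%f\n", bf16_to_float(0x4049));  // 3.140625 (pi rounded to bf16)
  return 0;
}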
11603
11604Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
11605
11606 llvm::Type *Int32Ty = Builder.getInt32Ty();
11607
11608 // Matching the struct layout from the compiler-rt/libgcc structure that is
11609 // filled in:
11610 // unsigned int __cpu_vendor;
11611 // unsigned int __cpu_type;
11612 // unsigned int __cpu_subtype;
11613 // unsigned int __cpu_features[1];
11614 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11615 llvm::ArrayType::get(Int32Ty, 1));
11616
11617 // Grab the global __cpu_model.
11618 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11619 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11620
11621 // Calculate the index needed to access the correct field based on the
11622 // range. Also adjust the expected value.
11623 unsigned Index;
11624 unsigned Value;
11625 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
11626#define X86_VENDOR(ENUM, STRING) \
11627 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
11628#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
11629 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11630#define X86_CPU_TYPE(ENUM, STR) \
11631 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11632#define X86_CPU_SUBTYPE(ENUM, STR) \
11633 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
11634#include "llvm/Support/X86TargetParser.def"
11635 .Default({0, 0});
11636 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
11637
11638 // Grab the appropriate field from __cpu_model.
11639 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
11640 ConstantInt::get(Int32Ty, Index)};
11641 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
11642 CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
11643
11644 // Check the value of the field against the requested value.
11645 return Builder.CreateICmpEQ(CpuValue,
11646 llvm::ConstantInt::get(Int32Ty, Value));
11647}
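
A standalone sketch (not part of CGBuiltin.cpp) of the check this function emits: the GEP indexes one of the first three fields of the compiler-rt/libgcc __cpu_model struct (vendor, type, subtype) and compares the loaded word against the value derived from the CPU string. The struct values below are made up for illustration.

// Illustrative sketch of the __cpu_model field selection and comparison.
#include <cstdio>

struct CpuModel {               // mirrors the layout described in the comment above
  unsigned int __cpu_vendor;
  unsigned int __cpu_type;
  unsigned int __cpu_subtype;
  unsigned int __cpu_features[1];
};

static bool cpuIs(const CpuModel &M, unsigned Index, unsigned Value) {
  // Equivalent to the GEP + aligned load + icmp eq emitted above.
  unsigned Field = Index == 0 ? M.__cpu_vendor
                 : Index == 1 ? M.__cpu_type
                              : M.__cpu_subtype;
  return Field == Value;
}

int main() {
  CpuModel M = {1, 7, 2, {0}};     // hypothetical runtime-populated values
  printf("%d\n", cpuIs(M, 1, 7));  // Index 1 checks __cpu_type
  return 0;
}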
11648
11649Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
11650 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
11651 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
11652 return EmitX86CpuSupports(FeatureStr);
11653}
11654
11655uint64_t
11656CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
11657 // Processor features and mapping to processor feature value.
11658 uint64_t FeaturesMask = 0;
11659 for (const StringRef &FeatureStr : FeatureStrs) {
11660 unsigned Feature =
11661 StringSwitch<unsigned>(FeatureStr)
11662#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
11663#include "llvm/Support/X86TargetParser.def"
11664 ;
11665 FeaturesMask |= (1ULL << Feature);
11666 }
11667 return FeaturesMask;
11668}
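
A standalone sketch (not part of CGBuiltin.cpp) of how the 64-bit mask built here is consumed by EmitX86CpuSupports below: features 0-31 are tested against __cpu_features[0] and features 32-63 against __cpu_features2, with every requested bit required to be set. The feature numbers and runtime words below are assumptions; the real numbering comes from X86TargetParser.def.

// Illustrative sketch of splitting and testing the feature mask.
#include <cstdint>
#include <cstdio>

int main() {
  // Pretend two requested features have numbers 10 and 40 (assumptions).
  uint64_t FeaturesMask = (1ULL << 10) | (1ULL << 40);

  uint32_t Features1 = (uint32_t)FeaturesMask;          // Lo_32 -> __cpu_features[0]
  uint32_t Features2 = (uint32_t)(FeaturesMask >> 32);  // Hi_32 -> __cpu_features2

  // Pretend the runtime-populated words look like this.
  uint32_t CpuFeatures = 0xFFFFFFFF, CpuFeatures2 = 0x00000100;

  bool Ok = true;
  if (Features1) Ok = Ok && (CpuFeatures & Features1) == Features1;
  if (Features2) Ok = Ok && (CpuFeatures2 & Features2) == Features2;
  printf("supported: %d\n", Ok);  // 1 only if every requested bit is set
  return 0;
}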
11669
11670Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
11671 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
11672}
11673
11674llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
11675 uint32_t Features1 = Lo_32(FeaturesMask);
11676 uint32_t Features2 = Hi_32(FeaturesMask);
11677
11678 Value *Result = Builder.getTrue();
11679
11680 if (Features1 != 0) {
11681 // Matching the struct layout from the compiler-rt/libgcc structure that is
11682 // filled in:
11683 // unsigned int __cpu_vendor;
11684 // unsigned int __cpu_type;
11685 // unsigned int __cpu_subtype;
11686 // unsigned int __cpu_features[1];
11687 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11688 llvm::ArrayType::get(Int32Ty, 1));
11689
11690 // Grab the global __cpu_model.
11691 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11692 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11693
11694 // Grab the first (0th) element from the field __cpu_features off of the
11695 // global in the struct STy.
11696 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
11697 Builder.getInt32(0)};
11698 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
11699 Value *Features =
11700 Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
11701
11702 // Check the value of the bit corresponding to the feature requested.
11703 Value *Mask = Builder.getInt32(Features1);
11704 Value *Bitset = Builder.CreateAnd(Features, Mask);
11705 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11706 Result = Builder.CreateAnd(Result, Cmp);
11707 }
11708
11709 if (Features2 != 0) {
11710 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
11711 "__cpu_features2");
11712 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
11713
11714 Value *Features =
11715 Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
11716
11717 // Check the value of the bit corresponding to the feature requested.
11718 Value *Mask = Builder.getInt32(Features2);
11719 Value *Bitset = Builder.CreateAnd(Features, Mask);
11720 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
11721 Result = Builder.CreateAnd(Result, Cmp);
11722 }
11723
11724 return Result;
11725}
11726
11727Value *CodeGenFunction::EmitX86CpuInit() {
11728 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
11729 /*Variadic*/ false);
11730 llvm::FunctionCallee Func =
11731 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
11732 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
11733 cast<llvm::GlobalValue>(Func.getCallee())
11734 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
11735 return Builder.CreateCall(Func);
11736}
11737
11738Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
11739 const CallExpr *E) {
11740 if (BuiltinID == X86::BI__builtin_cpu_is)
11741 return EmitX86CpuIs(E);
11742 if (BuiltinID == X86::BI__builtin_cpu_supports)
11743 return EmitX86CpuSupports(E);
11744 if (BuiltinID == X86::BI__builtin_cpu_init)
11745 return EmitX86CpuInit();
11746
11747 SmallVector<Value*, 4> Ops;
11748 bool IsMaskFCmp = false;
11749
11750 // Find out if any arguments are required to be integer constant expressions.
11751 unsigned ICEArguments = 0;
11752 ASTContext::GetBuiltinTypeError Error;
11753 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
11754 assert(Error == ASTContext::GE_None && "Should not codegen an error");
11755
11756 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
11757 // If this is a normal argument, just emit it as a scalar.
11758 if ((ICEArguments & (1 << i)) == 0) {
11759 Ops.push_back(EmitScalarExpr(E->getArg(i)));
11760 continue;
11761 }
11762
11763 // If this is required to be a constant, constant fold it so that we know
11764 // that the generated intrinsic gets a ConstantInt.
11765 Ops.push_back(llvm::ConstantInt::get(
11766 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
11767 }
11768
11769 // These exist so that the builtin that takes an immediate can be bounds
11770 // checked by clang to avoid passing bad immediates to the backend. Since
11771 // AVX has a larger immediate than SSE we would need separate builtins to
11772 // do the different bounds checking. Rather than create a clang specific
11773 // SSE-only builtin, this implements eight separate builtins to match the
11774 // gcc implementation.
11775 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
11776 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
11777 llvm::Function *F = CGM.getIntrinsic(ID);
11778 return Builder.CreateCall(F, Ops);
11779 };
11780
11781 // For the vector forms of FP comparisons, translate the builtins directly to
11782 // IR.
11783 // TODO: The builtins could be removed if the SSE header files used vector
11784 // extension comparisons directly (vector ordered/unordered may need
11785 // additional support via __builtin_isnan()).
11786 auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
11787 bool IsSignaling) {
11788 Value *Cmp;
11789 if (IsSignaling)
11790 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
11791 else
11792 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
11793 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
11794 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
11795 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
11796 return Builder.CreateBitCast(Sext, FPVecTy);
11797 };
11798
11799 switch (BuiltinID) {
11800 default: return nullptr;
11801 case X86::BI_mm_prefetch: {
11802 Value *Address = Ops[0];
11803 ConstantInt *C = cast<ConstantInt>(Ops[1]);
11804 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
11805 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
11806 Value *Data = ConstantInt::get(Int32Ty, 1);
11807 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
11808 return Builder.CreateCall(F, {Address, RW, Locality, Data});
11809 }
11810 case X86::BI_mm_clflush: {
11811 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
11812 Ops[0]);
11813 }
11814 case X86::BI_mm_lfence: {
11815 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
11816 }
11817 case X86::BI_mm_mfence: {
11818 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
11819 }
11820 case X86::BI_mm_sfence: {
11821 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
11822 }
11823 case X86::BI_mm_pause: {
11824 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
11825 }
11826 case X86::BI__rdtsc: {
11827 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
11828 }
11829 case X86::BI__builtin_ia32_rdtscp: {
11830 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
11831 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
11832 Ops[0]);
11833 return Builder.CreateExtractValue(Call, 0);
11834 }
11835 case X86::BI__builtin_ia32_lzcnt_u16:
11836 case X86::BI__builtin_ia32_lzcnt_u32:
11837 case X86::BI__builtin_ia32_lzcnt_u64: {
11838 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
11839 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11840 }
11841 case X86::BI__builtin_ia32_tzcnt_u16:
11842 case X86::BI__builtin_ia32_tzcnt_u32:
11843 case X86::BI__builtin_ia32_tzcnt_u64: {
11844 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
11845 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
11846 }
11847 case X86::BI__builtin_ia32_undef128:
11848 case X86::BI__builtin_ia32_undef256:
11849 case X86::BI__builtin_ia32_undef512:
11850 // The x86 definition of "undef" is not the same as the LLVM definition
11851 // (PR32176). We leave optimizing away an unnecessary zero constant to the
11852 // IR optimizer and backend.
11853 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
11854 // value, we should use that here instead of a zero.
11855 return llvm::Constant::getNullValue(ConvertType(E->getType()));
11856 case X86::BI__builtin_ia32_vec_init_v8qi:
11857 case X86::BI__builtin_ia32_vec_init_v4hi:
11858 case X86::BI__builtin_ia32_vec_init_v2si:
11859 return Builder.CreateBitCast(BuildVector(Ops),
11860 llvm::Type::getX86_MMXTy(getLLVMContext()));
11861 case X86::BI__builtin_ia32_vec_ext_v2si:
11862 case X86::BI__builtin_ia32_vec_ext_v16qi:
11863 case X86::BI__builtin_ia32_vec_ext_v8hi:
11864 case X86::BI__builtin_ia32_vec_ext_v4si:
11865 case X86::BI__builtin_ia32_vec_ext_v4sf:
11866 case X86::BI__builtin_ia32_vec_ext_v2di:
11867 case X86::BI__builtin_ia32_vec_ext_v32qi:
11868 case X86::BI__builtin_ia32_vec_ext_v16hi:
11869 case X86::BI__builtin_ia32_vec_ext_v8si:
11870 case X86::BI__builtin_ia32_vec_ext_v4di: {
11871 unsigned NumElts =
11872 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11873 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
11874 Index &= NumElts - 1;
11875 // These builtins exist so we can ensure the index is an ICE and in range.
11876 // Otherwise we could just do this in the header file.
11877 return Builder.CreateExtractElement(Ops[0], Index);
11878 }
11879 case X86::BI__builtin_ia32_vec_set_v16qi:
11880 case X86::BI__builtin_ia32_vec_set_v8hi:
11881 case X86::BI__builtin_ia32_vec_set_v4si:
11882 case X86::BI__builtin_ia32_vec_set_v2di:
11883 case X86::BI__builtin_ia32_vec_set_v32qi:
11884 case X86::BI__builtin_ia32_vec_set_v16hi:
11885 case X86::BI__builtin_ia32_vec_set_v8si:
11886 case X86::BI__builtin_ia32_vec_set_v4di: {
11887 unsigned NumElts =
11888 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11889 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
11890 Index &= NumElts - 1;
11891 // These builtins exist so we can ensure the index is an ICE and in range.
11892 // Otherwise we could just do this in the header file.
11893 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
11894 }
11895 case X86::BI_mm_setcsr:
11896 case X86::BI__builtin_ia32_ldmxcsr: {
11897 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
11898 Builder.CreateStore(Ops[0], Tmp);
11899 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
11900 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
11901 }
11902 case X86::BI_mm_getcsr:
11903 case X86::BI__builtin_ia32_stmxcsr: {
11904 Address Tmp = CreateMemTemp(E->getType());
11905 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
11906 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
11907 return Builder.CreateLoad(Tmp, "stmxcsr");
11908 }
11909 case X86::BI__builtin_ia32_xsave:
11910 case X86::BI__builtin_ia32_xsave64:
11911 case X86::BI__builtin_ia32_xrstor:
11912 case X86::BI__builtin_ia32_xrstor64:
11913 case X86::BI__builtin_ia32_xsaveopt:
11914 case X86::BI__builtin_ia32_xsaveopt64:
11915 case X86::BI__builtin_ia32_xrstors:
11916 case X86::BI__builtin_ia32_xrstors64:
11917 case X86::BI__builtin_ia32_xsavec:
11918 case X86::BI__builtin_ia32_xsavec64:
11919 case X86::BI__builtin_ia32_xsaves:
11920 case X86::BI__builtin_ia32_xsaves64:
11921 case X86::BI__builtin_ia32_xsetbv:
11922 case X86::BI_xsetbv: {
11923 Intrinsic::ID ID;
11924#define INTRINSIC_X86_XSAVE_ID(NAME) \
11925 case X86::BI__builtin_ia32_##NAME: \
11926 ID = Intrinsic::x86_##NAME; \
11927 break
11928 switch (BuiltinID) {
11929 default: llvm_unreachable("Unsupported intrinsic!");
11930 INTRINSIC_X86_XSAVE_ID(xsave);
11931 INTRINSIC_X86_XSAVE_ID(xsave64);
11932 INTRINSIC_X86_XSAVE_ID(xrstor);
11933 INTRINSIC_X86_XSAVE_ID(xrstor64);
11934 INTRINSIC_X86_XSAVE_ID(xsaveopt);
11935 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
11936 INTRINSIC_X86_XSAVE_ID(xrstors);
11937 INTRINSIC_X86_XSAVE_ID(xrstors64);
11938 INTRINSIC_X86_XSAVE_ID(xsavec);
11939 INTRINSIC_X86_XSAVE_ID(xsavec64);
11940 INTRINSIC_X86_XSAVE_ID(xsaves);
11941 INTRINSIC_X86_XSAVE_ID(xsaves64);
11942 INTRINSIC_X86_XSAVE_ID(xsetbv);
11943 case X86::BI_xsetbv:
11944 ID = Intrinsic::x86_xsetbv;
11945 break;
11946 }
11947#undef INTRINSIC_X86_XSAVE_ID
11948 Value *Mhi = Builder.CreateTrunc(
11949 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
11950 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
11951 Ops[1] = Mhi;
11952 Ops.push_back(Mlo);
11953 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
11954 }
11955 case X86::BI__builtin_ia32_xgetbv:
11956 case X86::BI_xgetbv:
11957 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
11958 case X86::BI__builtin_ia32_storedqudi128_mask:
11959 case X86::BI__builtin_ia32_storedqusi128_mask:
11960 case X86::BI__builtin_ia32_storedquhi128_mask:
11961 case X86::BI__builtin_ia32_storedquqi128_mask:
11962 case X86::BI__builtin_ia32_storeupd128_mask:
11963 case X86::BI__builtin_ia32_storeups128_mask:
11964 case X86::BI__builtin_ia32_storedqudi256_mask:
11965 case X86::BI__builtin_ia32_storedqusi256_mask:
11966 case X86::BI__builtin_ia32_storedquhi256_mask:
11967 case X86::BI__builtin_ia32_storedquqi256_mask:
11968 case X86::BI__builtin_ia32_storeupd256_mask:
11969 case X86::BI__builtin_ia32_storeups256_mask:
11970 case X86::BI__builtin_ia32_storedqudi512_mask:
11971 case X86::BI__builtin_ia32_storedqusi512_mask:
11972 case X86::BI__builtin_ia32_storedquhi512_mask:
11973 case X86::BI__builtin_ia32_storedquqi512_mask:
11974 case X86::BI__builtin_ia32_storeupd512_mask:
11975 case X86::BI__builtin_ia32_storeups512_mask:
11976 return EmitX86MaskedStore(*this, Ops, Align(1));
11977
11978 case X86::BI__builtin_ia32_storess128_mask:
11979 case X86::BI__builtin_ia32_storesd128_mask:
11980 return EmitX86MaskedStore(*this, Ops, Align(1));
11981
11982 case X86::BI__builtin_ia32_vpopcntb_128:
11983 case X86::BI__builtin_ia32_vpopcntd_128:
11984 case X86::BI__builtin_ia32_vpopcntq_128:
11985 case X86::BI__builtin_ia32_vpopcntw_128:
11986 case X86::BI__builtin_ia32_vpopcntb_256:
11987 case X86::BI__builtin_ia32_vpopcntd_256:
11988 case X86::BI__builtin_ia32_vpopcntq_256:
11989 case X86::BI__builtin_ia32_vpopcntw_256:
11990 case X86::BI__builtin_ia32_vpopcntb_512:
11991 case X86::BI__builtin_ia32_vpopcntd_512:
11992 case X86::BI__builtin_ia32_vpopcntq_512:
11993 case X86::BI__builtin_ia32_vpopcntw_512: {
11994 llvm::Type *ResultType = ConvertType(E->getType());
11995 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
11996 return Builder.CreateCall(F, Ops);
11997 }
11998 case X86::BI__builtin_ia32_cvtmask2b128:
11999 case X86::BI__builtin_ia32_cvtmask2b256:
12000 case X86::BI__builtin_ia32_cvtmask2b512:
12001 case X86::BI__builtin_ia32_cvtmask2w128:
12002 case X86::BI__builtin_ia32_cvtmask2w256:
12003 case X86::BI__builtin_ia32_cvtmask2w512:
12004 case X86::BI__builtin_ia32_cvtmask2d128:
12005 case X86::BI__builtin_ia32_cvtmask2d256:
12006 case X86::BI__builtin_ia32_cvtmask2d512:
12007 case X86::BI__builtin_ia32_cvtmask2q128:
12008 case X86::BI__builtin_ia32_cvtmask2q256:
12009 case X86::BI__builtin_ia32_cvtmask2q512:
12010 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12011
12012 case X86::BI__builtin_ia32_cvtb2mask128:
12013 case X86::BI__builtin_ia32_cvtb2mask256:
12014 case X86::BI__builtin_ia32_cvtb2mask512:
12015 case X86::BI__builtin_ia32_cvtw2mask128:
12016 case X86::BI__builtin_ia32_cvtw2mask256:
12017 case X86::BI__builtin_ia32_cvtw2mask512:
12018 case X86::BI__builtin_ia32_cvtd2mask128:
12019 case X86::BI__builtin_ia32_cvtd2mask256:
12020 case X86::BI__builtin_ia32_cvtd2mask512:
12021 case X86::BI__builtin_ia32_cvtq2mask128:
12022 case X86::BI__builtin_ia32_cvtq2mask256:
12023 case X86::BI__builtin_ia32_cvtq2mask512:
12024 return EmitX86ConvertToMask(*this, Ops[0]);
12025
12026 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12027 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12028 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12029 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
12030 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12031 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12032 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12033 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
12034
12035 case X86::BI__builtin_ia32_vfmaddss3:
12036 case X86::BI__builtin_ia32_vfmaddsd3:
12037 case X86::BI__builtin_ia32_vfmaddss3_mask:
12038 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12039 return EmitScalarFMAExpr(*this, Ops, Ops[0]);
12040 case X86::BI__builtin_ia32_vfmaddss:
12041 case X86::BI__builtin_ia32_vfmaddsd:
12042 return EmitScalarFMAExpr(*this, Ops,
12043 Constant::getNullValue(Ops[0]->getType()));
12044 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12045 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12046 return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
12047 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12048 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12049 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
12050 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12051 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12052 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
12053 /*NegAcc*/true);
12054 case X86::BI__builtin_ia32_vfmaddps:
12055 case X86::BI__builtin_ia32_vfmaddpd:
12056 case X86::BI__builtin_ia32_vfmaddps256:
12057 case X86::BI__builtin_ia32_vfmaddpd256:
12058 case X86::BI__builtin_ia32_vfmaddps512_mask:
12059 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12060 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12061 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12062 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12063 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12064 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12065 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12066 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
12067 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12068 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12069 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12070 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12071 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12072 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12073 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12074 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12075 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
12076
12077 case X86::BI__builtin_ia32_movdqa32store128_mask:
12078 case X86::BI__builtin_ia32_movdqa64store128_mask:
12079 case X86::BI__builtin_ia32_storeaps128_mask:
12080 case X86::BI__builtin_ia32_storeapd128_mask:
12081 case X86::BI__builtin_ia32_movdqa32store256_mask:
12082 case X86::BI__builtin_ia32_movdqa64store256_mask:
12083 case X86::BI__builtin_ia32_storeaps256_mask:
12084 case X86::BI__builtin_ia32_storeapd256_mask:
12085 case X86::BI__builtin_ia32_movdqa32store512_mask:
12086 case X86::BI__builtin_ia32_movdqa64store512_mask:
12087 case X86::BI__builtin_ia32_storeaps512_mask:
12088 case X86::BI__builtin_ia32_storeapd512_mask:
12089 return EmitX86MaskedStore(
12090 *this, Ops,
12091 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12092
12093 case X86::BI__builtin_ia32_loadups128_mask:
12094 case X86::BI__builtin_ia32_loadups256_mask:
12095 case X86::BI__builtin_ia32_loadups512_mask:
12096 case X86::BI__builtin_ia32_loadupd128_mask:
12097 case X86::BI__builtin_ia32_loadupd256_mask:
12098 case X86::BI__builtin_ia32_loadupd512_mask:
12099 case X86::BI__builtin_ia32_loaddquqi128_mask:
12100 case X86::BI__builtin_ia32_loaddquqi256_mask:
12101 case X86::BI__builtin_ia32_loaddquqi512_mask:
12102 case X86::BI__builtin_ia32_loaddquhi128_mask:
12103 case X86::BI__builtin_ia32_loaddquhi256_mask:
12104 case X86::BI__builtin_ia32_loaddquhi512_mask:
12105 case X86::BI__builtin_ia32_loaddqusi128_mask:
12106 case X86::BI__builtin_ia32_loaddqusi256_mask:
12107 case X86::BI__builtin_ia32_loaddqusi512_mask:
12108 case X86::BI__builtin_ia32_loaddqudi128_mask:
12109 case X86::BI__builtin_ia32_loaddqudi256_mask:
12110 case X86::BI__builtin_ia32_loaddqudi512_mask:
12111 return EmitX86MaskedLoad(*this, Ops, Align(1));
12112
12113 case X86::BI__builtin_ia32_loadss128_mask:
12114 case X86::BI__builtin_ia32_loadsd128_mask:
12115 return EmitX86MaskedLoad(*this, Ops, Align(1));
12116
12117 case X86::BI__builtin_ia32_loadaps128_mask:
12118 case X86::BI__builtin_ia32_loadaps256_mask:
12119 case X86::BI__builtin_ia32_loadaps512_mask:
12120 case X86::BI__builtin_ia32_loadapd128_mask:
12121 case X86::BI__builtin_ia32_loadapd256_mask:
12122 case X86::BI__builtin_ia32_loadapd512_mask:
12123 case X86::BI__builtin_ia32_movdqa32load128_mask:
12124 case X86::BI__builtin_ia32_movdqa32load256_mask:
12125 case X86::BI__builtin_ia32_movdqa32load512_mask:
12126 case X86::BI__builtin_ia32_movdqa64load128_mask:
12127 case X86::BI__builtin_ia32_movdqa64load256_mask:
12128 case X86::BI__builtin_ia32_movdqa64load512_mask:
12129 return EmitX86MaskedLoad(
12130 *this, Ops,
12131 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12132
12133 case X86::BI__builtin_ia32_expandloaddf128_mask:
12134 case X86::BI__builtin_ia32_expandloaddf256_mask:
12135 case X86::BI__builtin_ia32_expandloaddf512_mask:
12136 case X86::BI__builtin_ia32_expandloadsf128_mask:
12137 case X86::BI__builtin_ia32_expandloadsf256_mask:
12138 case X86::BI__builtin_ia32_expandloadsf512_mask:
12139 case X86::BI__builtin_ia32_expandloaddi128_mask:
12140 case X86::BI__builtin_ia32_expandloaddi256_mask:
12141 case X86::BI__builtin_ia32_expandloaddi512_mask:
12142 case X86::BI__builtin_ia32_expandloadsi128_mask:
12143 case X86::BI__builtin_ia32_expandloadsi256_mask:
12144 case X86::BI__builtin_ia32_expandloadsi512_mask:
12145 case X86::BI__builtin_ia32_expandloadhi128_mask:
12146 case X86::BI__builtin_ia32_expandloadhi256_mask:
12147 case X86::BI__builtin_ia32_expandloadhi512_mask:
12148 case X86::BI__builtin_ia32_expandloadqi128_mask:
12149 case X86::BI__builtin_ia32_expandloadqi256_mask:
12150 case X86::BI__builtin_ia32_expandloadqi512_mask:
12151 return EmitX86ExpandLoad(*this, Ops);
12152
12153 case X86::BI__builtin_ia32_compressstoredf128_mask:
12154 case X86::BI__builtin_ia32_compressstoredf256_mask:
12155 case X86::BI__builtin_ia32_compressstoredf512_mask:
12156 case X86::BI__builtin_ia32_compressstoresf128_mask:
12157 case X86::BI__builtin_ia32_compressstoresf256_mask:
12158 case X86::BI__builtin_ia32_compressstoresf512_mask:
12159 case X86::BI__builtin_ia32_compressstoredi128_mask:
12160 case X86::BI__builtin_ia32_compressstoredi256_mask:
12161 case X86::BI__builtin_ia32_compressstoredi512_mask:
12162 case X86::BI__builtin_ia32_compressstoresi128_mask:
12163 case X86::BI__builtin_ia32_compressstoresi256_mask:
12164 case X86::BI__builtin_ia32_compressstoresi512_mask:
12165 case X86::BI__builtin_ia32_compressstorehi128_mask:
12166 case X86::BI__builtin_ia32_compressstorehi256_mask:
12167 case X86::BI__builtin_ia32_compressstorehi512_mask:
12168 case X86::BI__builtin_ia32_compressstoreqi128_mask:
12169 case X86::BI__builtin_ia32_compressstoreqi256_mask:
12170 case X86::BI__builtin_ia32_compressstoreqi512_mask:
12171 return EmitX86CompressStore(*this, Ops);
12172
12173 case X86::BI__builtin_ia32_expanddf128_mask:
12174 case X86::BI__builtin_ia32_expanddf256_mask:
12175 case X86::BI__builtin_ia32_expanddf512_mask:
12176 case X86::BI__builtin_ia32_expandsf128_mask:
12177 case X86::BI__builtin_ia32_expandsf256_mask:
12178 case X86::BI__builtin_ia32_expandsf512_mask:
12179 case X86::BI__builtin_ia32_expanddi128_mask:
12180 case X86::BI__builtin_ia32_expanddi256_mask:
12181 case X86::BI__builtin_ia32_expanddi512_mask:
12182 case X86::BI__builtin_ia32_expandsi128_mask:
12183 case X86::BI__builtin_ia32_expandsi256_mask:
12184 case X86::BI__builtin_ia32_expandsi512_mask:
12185 case X86::BI__builtin_ia32_expandhi128_mask:
12186 case X86::BI__builtin_ia32_expandhi256_mask:
12187 case X86::BI__builtin_ia32_expandhi512_mask:
12188 case X86::BI__builtin_ia32_expandqi128_mask:
12189 case X86::BI__builtin_ia32_expandqi256_mask:
12190 case X86::BI__builtin_ia32_expandqi512_mask:
12191 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
12192
12193 case X86::BI__builtin_ia32_compressdf128_mask:
12194 case X86::BI__builtin_ia32_compressdf256_mask:
12195 case X86::BI__builtin_ia32_compressdf512_mask:
12196 case X86::BI__builtin_ia32_compresssf128_mask:
12197 case X86::BI__builtin_ia32_compresssf256_mask:
12198 case X86::BI__builtin_ia32_compresssf512_mask:
12199 case X86::BI__builtin_ia32_compressdi128_mask:
12200 case X86::BI__builtin_ia32_compressdi256_mask:
12201 case X86::BI__builtin_ia32_compressdi512_mask:
12202 case X86::BI__builtin_ia32_compresssi128_mask:
12203 case X86::BI__builtin_ia32_compresssi256_mask:
12204 case X86::BI__builtin_ia32_compresssi512_mask:
12205 case X86::BI__builtin_ia32_compresshi128_mask:
12206 case X86::BI__builtin_ia32_compresshi256_mask:
12207 case X86::BI__builtin_ia32_compresshi512_mask:
12208 case X86::BI__builtin_ia32_compressqi128_mask:
12209 case X86::BI__builtin_ia32_compressqi256_mask:
12210 case X86::BI__builtin_ia32_compressqi512_mask:
12211 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
12212
12213 case X86::BI__builtin_ia32_gather3div2df:
12214 case X86::BI__builtin_ia32_gather3div2di:
12215 case X86::BI__builtin_ia32_gather3div4df:
12216 case X86::BI__builtin_ia32_gather3div4di:
12217 case X86::BI__builtin_ia32_gather3div4sf:
12218 case X86::BI__builtin_ia32_gather3div4si:
12219 case X86::BI__builtin_ia32_gather3div8sf:
12220 case X86::BI__builtin_ia32_gather3div8si:
12221 case X86::BI__builtin_ia32_gather3siv2df:
12222 case X86::BI__builtin_ia32_gather3siv2di:
12223 case X86::BI__builtin_ia32_gather3siv4df:
12224 case X86::BI__builtin_ia32_gather3siv4di:
12225 case X86::BI__builtin_ia32_gather3siv4sf:
12226 case X86::BI__builtin_ia32_gather3siv4si:
12227 case X86::BI__builtin_ia32_gather3siv8sf:
12228 case X86::BI__builtin_ia32_gather3siv8si:
12229 case X86::BI__builtin_ia32_gathersiv8df:
12230 case X86::BI__builtin_ia32_gathersiv16sf:
12231 case X86::BI__builtin_ia32_gatherdiv8df:
12232 case X86::BI__builtin_ia32_gatherdiv16sf:
12233 case X86::BI__builtin_ia32_gathersiv8di:
12234 case X86::BI__builtin_ia32_gathersiv16si:
12235 case X86::BI__builtin_ia32_gatherdiv8di:
12236 case X86::BI__builtin_ia32_gatherdiv16si: {
12237 Intrinsic::ID IID;
12238 switch (BuiltinID) {
12239 default: llvm_unreachable("Unexpected builtin");
12240 case X86::BI__builtin_ia32_gather3div2df:
12241 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
12242 break;
12243 case X86::BI__builtin_ia32_gather3div2di:
12244 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
12245 break;
12246 case X86::BI__builtin_ia32_gather3div4df:
12247 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
12248 break;
12249 case X86::BI__builtin_ia32_gather3div4di:
12250 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
12251 break;
12252 case X86::BI__builtin_ia32_gather3div4sf:
12253 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
12254 break;
12255 case X86::BI__builtin_ia32_gather3div4si:
12256 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
12257 break;
12258 case X86::BI__builtin_ia32_gather3div8sf:
12259 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
12260 break;
12261 case X86::BI__builtin_ia32_gather3div8si:
12262 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
12263 break;
12264 case X86::BI__builtin_ia32_gather3siv2df:
12265 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
12266 break;
12267 case X86::BI__builtin_ia32_gather3siv2di:
12268 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
12269 break;
12270 case X86::BI__builtin_ia32_gather3siv4df:
12271 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
12272 break;
12273 case X86::BI__builtin_ia32_gather3siv4di:
12274 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
12275 break;
12276 case X86::BI__builtin_ia32_gather3siv4sf:
12277 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
12278 break;
12279 case X86::BI__builtin_ia32_gather3siv4si:
12280 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
12281 break;
12282 case X86::BI__builtin_ia32_gather3siv8sf:
12283 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
12284 break;
12285 case X86::BI__builtin_ia32_gather3siv8si:
12286 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
12287 break;
12288 case X86::BI__builtin_ia32_gathersiv8df:
12289 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
12290 break;
12291 case X86::BI__builtin_ia32_gathersiv16sf:
12292 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
12293 break;
12294 case X86::BI__builtin_ia32_gatherdiv8df:
12295 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
12296 break;
12297 case X86::BI__builtin_ia32_gatherdiv16sf:
12298 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
12299 break;
12300 case X86::BI__builtin_ia32_gathersiv8di:
12301 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
12302 break;
12303 case X86::BI__builtin_ia32_gathersiv16si:
12304 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
12305 break;
12306 case X86::BI__builtin_ia32_gatherdiv8di:
12307 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
12308 break;
12309 case X86::BI__builtin_ia32_gatherdiv16si:
12310 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
12311 break;
12312 }
12313
12314 unsigned MinElts = std::min(
12315 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
12316 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
12317 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
12318 Function *Intr = CGM.getIntrinsic(IID);
12319 return Builder.CreateCall(Intr, Ops);
12320 }
12321
12322 case X86::BI__builtin_ia32_scattersiv8df:
12323 case X86::BI__builtin_ia32_scattersiv16sf:
12324 case X86::BI__builtin_ia32_scatterdiv8df:
12325 case X86::BI__builtin_ia32_scatterdiv16sf:
12326 case X86::BI__builtin_ia32_scattersiv8di:
12327 case X86::BI__builtin_ia32_scattersiv16si:
12328 case X86::BI__builtin_ia32_scatterdiv8di:
12329 case X86::BI__builtin_ia32_scatterdiv16si:
12330 case X86::BI__builtin_ia32_scatterdiv2df:
12331 case X86::BI__builtin_ia32_scatterdiv2di:
12332 case X86::BI__builtin_ia32_scatterdiv4df:
12333 case X86::BI__builtin_ia32_scatterdiv4di:
12334 case X86::BI__builtin_ia32_scatterdiv4sf:
12335 case X86::BI__builtin_ia32_scatterdiv4si:
12336 case X86::BI__builtin_ia32_scatterdiv8sf:
12337 case X86::BI__builtin_ia32_scatterdiv8si:
12338 case X86::BI__builtin_ia32_scattersiv2df:
12339 case X86::BI__builtin_ia32_scattersiv2di:
12340 case X86::BI__builtin_ia32_scattersiv4df:
12341 case X86::BI__builtin_ia32_scattersiv4di:
12342 case X86::BI__builtin_ia32_scattersiv4sf:
12343 case X86::BI__builtin_ia32_scattersiv4si:
12344 case X86::BI__builtin_ia32_scattersiv8sf:
12345 case X86::BI__builtin_ia32_scattersiv8si: {
12346 Intrinsic::ID IID;
12347 switch (BuiltinID) {
12348 default: llvm_unreachable("Unexpected builtin");
12349 case X86::BI__builtin_ia32_scattersiv8df:
12350 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
12351 break;
12352 case X86::BI__builtin_ia32_scattersiv16sf:
12353 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
12354 break;
12355 case X86::BI__builtin_ia32_scatterdiv8df:
12356 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
12357 break;
12358 case X86::BI__builtin_ia32_scatterdiv16sf:
12359 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
12360 break;
12361 case X86::BI__builtin_ia32_scattersiv8di:
12362 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
12363 break;
12364 case X86::BI__builtin_ia32_scattersiv16si:
12365 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
12366 break;
12367 case X86::BI__builtin_ia32_scatterdiv8di:
12368 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
12369 break;
12370 case X86::BI__builtin_ia32_scatterdiv16si:
12371 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
12372 break;
12373 case X86::BI__builtin_ia32_scatterdiv2df:
12374 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
12375 break;
12376 case X86::BI__builtin_ia32_scatterdiv2di:
12377 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
12378 break;
12379 case X86::BI__builtin_ia32_scatterdiv4df:
12380 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
12381 break;
12382 case X86::BI__builtin_ia32_scatterdiv4di:
12383 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
12384 break;
12385 case X86::BI__builtin_ia32_scatterdiv4sf:
12386 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
12387 break;
12388 case X86::BI__builtin_ia32_scatterdiv4si:
12389 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
12390 break;
12391 case X86::BI__builtin_ia32_scatterdiv8sf:
12392 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
12393 break;
12394 case X86::BI__builtin_ia32_scatterdiv8si:
12395 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
12396 break;
12397 case X86::BI__builtin_ia32_scattersiv2df:
12398 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
12399 break;
12400 case X86::BI__builtin_ia32_scattersiv2di:
12401 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
12402 break;
12403 case X86::BI__builtin_ia32_scattersiv4df:
12404 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
12405 break;
12406 case X86::BI__builtin_ia32_scattersiv4di:
12407 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
12408 break;
12409 case X86::BI__builtin_ia32_scattersiv4sf:
12410 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
12411 break;
12412 case X86::BI__builtin_ia32_scattersiv4si:
12413 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
12414 break;
12415 case X86::BI__builtin_ia32_scattersiv8sf:
12416 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
12417 break;
12418 case X86::BI__builtin_ia32_scattersiv8si:
12419 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
12420 break;
12421 }
12422
12423 unsigned MinElts = std::min(
12424 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
12425 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
12426 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
12427 Function *Intr = CGM.getIntrinsic(IID);
12428 return Builder.CreateCall(Intr, Ops);
12429 }
12430
12431 case X86::BI__builtin_ia32_vextractf128_pd256:
12432 case X86::BI__builtin_ia32_vextractf128_ps256:
12433 case X86::BI__builtin_ia32_vextractf128_si256:
12434 case X86::BI__builtin_ia32_extract128i256:
12435 case X86::BI__builtin_ia32_extractf64x4_mask:
12436 case X86::BI__builtin_ia32_extractf32x4_mask:
12437 case X86::BI__builtin_ia32_extracti64x4_mask:
12438 case X86::BI__builtin_ia32_extracti32x4_mask:
12439 case X86::BI__builtin_ia32_extractf32x8_mask:
12440 case X86::BI__builtin_ia32_extracti32x8_mask:
12441 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12442 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12443 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12444 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12445 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12446 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
12447 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
12448 unsigned NumElts = DstTy->getNumElements();
12449 unsigned SrcNumElts =
12450 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12451 unsigned SubVectors = SrcNumElts / NumElts;
12452 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12453 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12454 Index &= SubVectors - 1; // Remove any extra bits.
12455 Index *= NumElts;
12456
12457 int Indices[16];
12458 for (unsigned i = 0; i != NumElts; ++i)
12459 Indices[i] = i + Index;
12460
12461 Value *Res = Builder.CreateShuffleVector(Ops[0],
12462 UndefValue::get(Ops[0]->getType()),
12463 makeArrayRef(Indices, NumElts),
12464 "extract");
12465
12466 if (Ops.size() == 4)
12467 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
12468
12469 return Res;
12470 }
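The extract intrinsics above reduce to a single shufflevector whose mask is derived from the immediate. A minimal standalone sketch of the same index arithmetic (illustrative only, not part of CGBuiltin.cpp; extractIndices is a hypothetical helper), assuming a 512-bit source of sixteen floats and a 128-bit result of four:

    #include <cstdio>
    #include <vector>

    // Illustrative sketch only: mirrors the index math used above.
    static std::vector<int> extractIndices(unsigned SrcNumElts, unsigned NumElts,
                                           unsigned Imm) {
      unsigned SubVectors = SrcNumElts / NumElts; // e.g. 16 / 4 = 4 subvectors
      unsigned Index = Imm & (SubVectors - 1);    // wrap out-of-range immediates
      Index *= NumElts;                           // first source element of the lane
      std::vector<int> Indices(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Indices[i] = i + Index;
      return Indices;
    }

    int main() {
      for (int Idx : extractIndices(16, 4, 2)) // imm = 2 selects elements 8..11
        std::printf("%d ", Idx);                // prints: 8 9 10 11
    }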
12471 case X86::BI__builtin_ia32_vinsertf128_pd256:
12472 case X86::BI__builtin_ia32_vinsertf128_ps256:
12473 case X86::BI__builtin_ia32_vinsertf128_si256:
12474 case X86::BI__builtin_ia32_insert128i256:
12475 case X86::BI__builtin_ia32_insertf64x4:
12476 case X86::BI__builtin_ia32_insertf32x4:
12477 case X86::BI__builtin_ia32_inserti64x4:
12478 case X86::BI__builtin_ia32_inserti32x4:
12479 case X86::BI__builtin_ia32_insertf32x8:
12480 case X86::BI__builtin_ia32_inserti32x8:
12481 case X86::BI__builtin_ia32_insertf32x4_256:
12482 case X86::BI__builtin_ia32_inserti32x4_256:
12483 case X86::BI__builtin_ia32_insertf64x2_256:
12484 case X86::BI__builtin_ia32_inserti64x2_256:
12485 case X86::BI__builtin_ia32_insertf64x2_512:
12486 case X86::BI__builtin_ia32_inserti64x2_512: {
12487 unsigned DstNumElts =
12488 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12489 unsigned SrcNumElts =
12490 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
12491 unsigned SubVectors = DstNumElts / SrcNumElts;
12492 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12493 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12494 Index &= SubVectors - 1; // Remove any extra bits.
12495 Index *= SrcNumElts;
12496
12497 int Indices[16];
12498 for (unsigned i = 0; i != DstNumElts; ++i)
12499 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
12500
12501 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
12502 UndefValue::get(Ops[1]->getType()),
12503 makeArrayRef(Indices, DstNumElts),
12504 "widen");
12505
12506 for (unsigned i = 0; i != DstNumElts; ++i) {
12507 if (i >= Index && i < (Index + SrcNumElts))
12508 Indices[i] = (i - Index) + DstNumElts;
12509 else
12510 Indices[i] = i;
12511 }
12512
12513 return Builder.CreateShuffleVector(Ops[0], Op1,
12514 makeArrayRef(Indices, DstNumElts),
12515 "insert");
12516 }
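The insert intrinsics use two shuffles: the source is first widened to the destination width, then spliced in at the lane selected by the immediate. A standalone sketch of the final mask (illustrative only; insertIndices is a hypothetical helper), again assuming a 4-element source and a 16-element destination:

    #include <cstdio>
    #include <vector>

    // Illustrative sketch only: the second (combining) shuffle mask built above.
    static std::vector<int> insertIndices(unsigned DstNumElts, unsigned SrcNumElts,
                                          unsigned Imm) {
      unsigned SubVectors = DstNumElts / SrcNumElts;
      unsigned Index = (Imm & (SubVectors - 1)) * SrcNumElts;
      std::vector<int> Indices(DstNumElts);
      for (unsigned i = 0; i != DstNumElts; ++i)
        Indices[i] = (i >= Index && i < Index + SrcNumElts)
                         ? int(i - Index + DstNumElts) // element of the widened source
                         : int(i);                     // element of the destination
      return Indices;
    }

    int main() {
      for (int Idx : insertIndices(16, 4, 1)) // imm = 1 overwrites elements 4..7
        std::printf("%d ", Idx); // 0 1 2 3 16 17 18 19 8 9 10 11 12 13 14 15
    }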
12517 case X86::BI__builtin_ia32_pmovqd512_mask:
12518 case X86::BI__builtin_ia32_pmovwb512_mask: {
12519 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12520 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12521 }
12522 case X86::BI__builtin_ia32_pmovdb512_mask:
12523 case X86::BI__builtin_ia32_pmovdw512_mask:
12524 case X86::BI__builtin_ia32_pmovqw512_mask: {
12525 if (const auto *C = dyn_cast<Constant>(Ops[2]))
12526 if (C->isAllOnesValue())
12527 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12528
12529 Intrinsic::ID IID;
12530 switch (BuiltinID) {
12531 default: llvm_unreachable("Unsupported intrinsic!");
12532 case X86::BI__builtin_ia32_pmovdb512_mask:
12533 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
12534 break;
12535 case X86::BI__builtin_ia32_pmovdw512_mask:
12536 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
12537 break;
12538 case X86::BI__builtin_ia32_pmovqw512_mask:
12539 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
12540 break;
12541 }
12542
12543 Function *Intr = CGM.getIntrinsic(IID);
12544 return Builder.CreateCall(Intr, Ops);
12545 }
12546 case X86::BI__builtin_ia32_pblendw128:
12547 case X86::BI__builtin_ia32_blendpd:
12548 case X86::BI__builtin_ia32_blendps:
12549 case X86::BI__builtin_ia32_blendpd256:
12550 case X86::BI__builtin_ia32_blendps256:
12551 case X86::BI__builtin_ia32_pblendw256:
12552 case X86::BI__builtin_ia32_pblendd128:
12553 case X86::BI__builtin_ia32_pblendd256: {
12554 unsigned NumElts =
12555 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12556 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12557
12558 int Indices[16];
12559 // If there are more than 8 elements, the immediate is used twice so make
12560 // sure we handle that.
12561 for (unsigned i = 0; i != NumElts; ++i)
12562 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
12563
12564 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12565 makeArrayRef(Indices, NumElts),
12566 "blend");
12567 }
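Each bit of the blend immediate decides, per element, whether to read from the first or the second operand. A tiny sketch (illustrative only, not from CGBuiltin.cpp) for a hypothetical 4-element blend with immediate 0b0101:

    #include <cstdio>

    // Illustrative sketch only: set bits pick the matching element of the
    // second operand (mask value NumElts + i), clear bits keep the element
    // of the first operand (mask value i).
    int main() {
      const unsigned NumElts = 4, Imm = 0x5; // 0b0101
      for (unsigned i = 0; i != NumElts; ++i)
        std::printf("%u ", ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i);
      // prints: 4 1 6 3
    }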
12568 case X86::BI__builtin_ia32_pshuflw:
12569 case X86::BI__builtin_ia32_pshuflw256:
12570 case X86::BI__builtin_ia32_pshuflw512: {
12571 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12572 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12573 unsigned NumElts = Ty->getNumElements();
12574
12575 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12576 Imm = (Imm & 0xff) * 0x01010101;
12577
12578 int Indices[32];
12579 for (unsigned l = 0; l != NumElts; l += 8) {
12580 for (unsigned i = 0; i != 4; ++i) {
12581 Indices[l + i] = l + (Imm & 3);
12582 Imm >>= 2;
12583 }
12584 for (unsigned i = 4; i != 8; ++i)
12585 Indices[l + i] = l + i;
12586 }
12587
12588 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12589 makeArrayRef(Indices, NumElts),
12590 "pshuflw");
12591 }
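Multiplying the 8-bit immediate by 0x01010101 replicates it into every byte, so each 8-element lane of the loop consumes its own copy two bits at a time. A standalone sketch (illustrative only) for a 128-bit pshuflw with immediate 0x1B, which reverses the low four words:

    #include <cstdio>

    // Illustrative sketch only: same loop as above for a single 128-bit lane.
    int main() {
      const unsigned NumElts = 8;
      unsigned Imm = (0x1B & 0xff) * 0x01010101; // one copy of the byte per lane
      int Indices[8];
      for (unsigned l = 0; l != NumElts; l += 8) {
        for (unsigned i = 0; i != 4; ++i) {
          Indices[l + i] = l + (Imm & 3); // 2 bits select one of the low 4 words
          Imm >>= 2;
        }
        for (unsigned i = 4; i != 8; ++i)
          Indices[l + i] = l + i;         // high words pass through unchanged
      }
      for (int Idx : Indices)
        std::printf("%d ", Idx); // prints: 3 2 1 0 4 5 6 7
    }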
12592 case X86::BI__builtin_ia32_pshufhw:
12593 case X86::BI__builtin_ia32_pshufhw256:
12594 case X86::BI__builtin_ia32_pshufhw512: {
12595 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12596 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12597 unsigned NumElts = Ty->getNumElements();
12598
12599 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12600 Imm = (Imm & 0xff) * 0x01010101;
12601
12602 int Indices[32];
12603 for (unsigned l = 0; l != NumElts; l += 8) {
12604 for (unsigned i = 0; i != 4; ++i)
12605 Indices[l + i] = l + i;
12606 for (unsigned i = 4; i != 8; ++i) {
12607 Indices[l + i] = l + 4 + (Imm & 3);
12608 Imm >>= 2;
12609 }
12610 }
12611
12612 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12613 makeArrayRef(Indices, NumElts),
12614 "pshufhw");
12615 }
12616 case X86::BI__builtin_ia32_pshufd:
12617 case X86::BI__builtin_ia32_pshufd256:
12618 case X86::BI__builtin_ia32_pshufd512:
12619 case X86::BI__builtin_ia32_vpermilpd:
12620 case X86::BI__builtin_ia32_vpermilps:
12621 case X86::BI__builtin_ia32_vpermilpd256:
12622 case X86::BI__builtin_ia32_vpermilps256:
12623 case X86::BI__builtin_ia32_vpermilpd512:
12624 case X86::BI__builtin_ia32_vpermilps512: {
12625 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12626 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12627 unsigned NumElts = Ty->getNumElements();
12628 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12629 unsigned NumLaneElts = NumElts / NumLanes;
12630
12631 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12632 Imm = (Imm & 0xff) * 0x01010101;
12633
12634 int Indices[16];
12635 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12636 for (unsigned i = 0; i != NumLaneElts; ++i) {
12637 Indices[i + l] = (Imm % NumLaneElts) + l;
12638 Imm /= NumLaneElts;
12639 }
12640 }
12641
12642 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12643 makeArrayRef(Indices, NumElts),
12644 "permil");
12645 }
12646 case X86::BI__builtin_ia32_shufpd:
12647 case X86::BI__builtin_ia32_shufpd256:
12648 case X86::BI__builtin_ia32_shufpd512:
12649 case X86::BI__builtin_ia32_shufps:
12650 case X86::BI__builtin_ia32_shufps256:
12651 case X86::BI__builtin_ia32_shufps512: {
12652 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12653 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12654 unsigned NumElts = Ty->getNumElements();
12655 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12656 unsigned NumLaneElts = NumElts / NumLanes;
12657
12658 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
12659 Imm = (Imm & 0xff) * 0x01010101;
12660
12661 int Indices[16];
12662 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12663 for (unsigned i = 0; i != NumLaneElts; ++i) {
12664 unsigned Index = Imm % NumLaneElts;
12665 Imm /= NumLaneElts;
12666 if (i >= (NumLaneElts / 2))
12667 Index += NumElts;
12668 Indices[l + i] = l + Index;
12669 }
12670 }
12671
12672 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12673 makeArrayRef(Indices, NumElts),
12674 "shufp");
12675 }
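shufps/shufpd fill the low half of every lane from the first operand and the high half from the second, with each element chosen by successive bit fields of the splatted immediate. A standalone sketch (illustrative only) for a 128-bit shufps with immediate 0x1B:

    #include <cstdio>

    // Illustrative sketch only: per-lane element selection for shufps.
    int main() {
      const unsigned NumElts = 4, NumLaneElts = 4; // one 128-bit lane of floats
      unsigned Imm = (0x1B & 0xff) * 0x01010101;
      int Indices[4];
      for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
        for (unsigned i = 0; i != NumLaneElts; ++i) {
          unsigned Index = Imm % NumLaneElts;
          Imm /= NumLaneElts;
          if (i >= (NumLaneElts / 2))
            Index += NumElts; // high half of the lane reads the second operand
          Indices[l + i] = l + Index;
        }
      }
      for (int Idx : Indices)
        std::printf("%d ", Idx); // prints: 3 2 5 4
    }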
12676 case X86::BI__builtin_ia32_permdi256:
12677 case X86::BI__builtin_ia32_permdf256:
12678 case X86::BI__builtin_ia32_permdi512:
12679 case X86::BI__builtin_ia32_permdf512: {
12680 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12681 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12682 unsigned NumElts = Ty->getNumElements();
12683
12684 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
12685 int Indices[8];
12686 for (unsigned l = 0; l != NumElts; l += 4)
12687 for (unsigned i = 0; i != 4; ++i)
12688 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
12689
12690 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
12691 makeArrayRef(Indices, NumElts),
12692 "perm");
12693 }
12694 case X86::BI__builtin_ia32_palignr128:
12695 case X86::BI__builtin_ia32_palignr256:
12696 case X86::BI__builtin_ia32_palignr512: {
12697 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12698
12699 unsigned NumElts =
12700 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12701 assert(NumElts % 16 == 0);
12702
12703 // If palignr is shifting the pair of vectors more than the size of two
12704 // lanes, emit zero.
12705 if (ShiftVal >= 32)
12706 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12707
12708 // If palignr is shifting the pair of input vectors more than one lane,
12709 // but less than two lanes, convert to shifting in zeroes.
12710 if (ShiftVal > 16) {
12711 ShiftVal -= 16;
12712 Ops[1] = Ops[0];
12713 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
12714 }
12715
12716 int Indices[64];
12717 // 256-bit palignr operates on 128-bit lanes so we need to handle that
12718 for (unsigned l = 0; l != NumElts; l += 16) {
12719 for (unsigned i = 0; i != 16; ++i) {
12720 unsigned Idx = ShiftVal + i;
12721 if (Idx >= 16)
12722 Idx += NumElts - 16; // End of lane, switch operand.
12723 Indices[l + i] = Idx + l;
12724 }
12725 }
12726
12727 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12728 makeArrayRef(Indices, NumElts),
12729 "palignr");
12730 }
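palignr concatenates the two inputs and shifts right by the immediate, handled one 128-bit lane at a time; because the shuffle above is emitted with the operands swapped, mask values below NumElts read the second builtin operand and values at or above NumElts read the first. A sketch of the mask (illustrative only) for a 128-bit palignr with a 4-byte shift:

    #include <cstdio>

    // Illustrative sketch only: mask values 0..15 read the second builtin
    // operand, 16..31 read the first, because the shuffle above swaps them.
    int main() {
      const unsigned NumElts = 16, ShiftVal = 4;
      int Indices[16];
      for (unsigned l = 0; l != NumElts; l += 16) {
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = ShiftVal + i;
          if (Idx >= 16)
            Idx += NumElts - 16; // end of lane, switch operand
          Indices[l + i] = Idx + l;
        }
      }
      for (int Idx : Indices)
        std::printf("%d ", Idx); // bytes 4..15 of op2, then bytes 0..3 of op1
    }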
12731 case X86::BI__builtin_ia32_alignd128:
12732 case X86::BI__builtin_ia32_alignd256:
12733 case X86::BI__builtin_ia32_alignd512:
12734 case X86::BI__builtin_ia32_alignq128:
12735 case X86::BI__builtin_ia32_alignq256:
12736 case X86::BI__builtin_ia32_alignq512: {
12737 unsigned NumElts =
12738 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12739 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
12740
12741 // Mask the shift amount to width of two vectors.
12742 ShiftVal &= (2 * NumElts) - 1;
12743
12744 int Indices[16];
12745 for (unsigned i = 0; i != NumElts; ++i)
12746 Indices[i] = i + ShiftVal;
12747
12748 return Builder.CreateShuffleVector(Ops[1], Ops[0],
12749 makeArrayRef(Indices, NumElts),
12750 "valign");
12751 }
12752 case X86::BI__builtin_ia32_shuf_f32x4_256:
12753 case X86::BI__builtin_ia32_shuf_f64x2_256:
12754 case X86::BI__builtin_ia32_shuf_i32x4_256:
12755 case X86::BI__builtin_ia32_shuf_i64x2_256:
12756 case X86::BI__builtin_ia32_shuf_f32x4:
12757 case X86::BI__builtin_ia32_shuf_f64x2:
12758 case X86::BI__builtin_ia32_shuf_i32x4:
12759 case X86::BI__builtin_ia32_shuf_i64x2: {
12760 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12761 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12762 unsigned NumElts = Ty->getNumElements();
12763 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
12764 unsigned NumLaneElts = NumElts / NumLanes;
12765
12766 int Indices[16];
12767 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12768 unsigned Index = (Imm % NumLanes) * NumLaneElts;
12769 Imm /= NumLanes; // Discard the bits we just used.
12770 if (l >= (NumElts / 2))
12771 Index += NumElts; // Switch to other source.
12772 for (unsigned i = 0; i != NumLaneElts; ++i) {
12773 Indices[l + i] = Index + i;
12774 }
12775 }
12776
12777 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12778 makeArrayRef(Indices, NumElts),
12779 "shuf");
12780 }
12781
12782 case X86::BI__builtin_ia32_vperm2f128_pd256:
12783 case X86::BI__builtin_ia32_vperm2f128_ps256:
12784 case X86::BI__builtin_ia32_vperm2f128_si256:
12785 case X86::BI__builtin_ia32_permti256: {
12786 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12787 unsigned NumElts =
12788 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12789
12790 // This takes a very simple approach since there are two lanes and a
12791 // shuffle can have 2 inputs. So we reserve the first input for the first
12792 // lane and the second input for the second lane. This may result in
12793 // duplicate sources, but this can be dealt with in the backend.
12794
12795 Value *OutOps[2];
12796 int Indices[8];
12797 for (unsigned l = 0; l != 2; ++l) {
12798 // Determine the source for this lane.
12799 if (Imm & (1 << ((l * 4) + 3)))
12800 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
12801 else if (Imm & (1 << ((l * 4) + 1)))
12802 OutOps[l] = Ops[1];
12803 else
12804 OutOps[l] = Ops[0];
12805
12806 for (unsigned i = 0; i != NumElts/2; ++i) {
12807 // Start with ith element of the source for this lane.
12808 unsigned Idx = (l * NumElts) + i;
12809 // If bit 0 of the immediate half is set, switch to the high half of
12810 // the source.
12811 if (Imm & (1 << (l * 4)))
12812 Idx += NumElts/2;
12813 Indices[(l * (NumElts/2)) + i] = Idx;
12814 }
12815 }
12816
12817 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
12818 makeArrayRef(Indices, NumElts),
12819 "vperm");
12820 }
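Each 4-bit field of the vperm2f128 immediate selects the source for one output lane: bit 3 zeroes the lane, bit 1 picks the second input, and bit 0 selects that input's high half. A standalone sketch decoding the common immediate 0x31 (illustrative only; the strings stand in for the actual operands):

    #include <cstdio>

    // Illustrative sketch only: decode the per-lane source selection.
    // "zero" stands in for the ConstantAggregateZero operand.
    int main() {
      const unsigned Imm = 0x31; // e.g. _mm256_permute2f128_ps(a, b, 0x31)
      for (unsigned l = 0; l != 2; ++l) {
        const char *Src;
        if (Imm & (1u << (l * 4 + 3)))
          Src = "zero";
        else if (Imm & (1u << (l * 4 + 1)))
          Src = "Ops[1]";
        else
          Src = "Ops[0]";
        bool High = Imm & (1u << (l * 4)); // bit 0 of the nibble: take the high half
        std::printf("result lane %u <- %s half of %s\n", l,
                    High ? "high" : "low", Src);
      }
      // result lane 0 <- high half of Ops[0]
      // result lane 1 <- high half of Ops[1]
    }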
12821
12822 case X86::BI__builtin_ia32_pslldqi128_byteshift:
12823 case X86::BI__builtin_ia32_pslldqi256_byteshift:
12824 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
12825 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12826 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
12827 // Builtin type is vXi64 so multiply by 8 to get bytes.
12828 unsigned NumElts = ResultType->getNumElements() * 8;
12829
12830 // If pslldq is shifting the vector more than 15 bytes, emit zero.
12831 if (ShiftVal >= 16)
12832 return llvm::Constant::getNullValue(ResultType);
12833
12834 int Indices[64];
12835 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
12836 for (unsigned l = 0; l != NumElts; l += 16) {
12837 for (unsigned i = 0; i != 16; ++i) {
12838 unsigned Idx = NumElts + i - ShiftVal;
12839 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
12840 Indices[l + i] = Idx + l;
12841 }
12842 }
12843
12844 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
12845 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
12846 Value *Zero = llvm::Constant::getNullValue(VecTy);
12847 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
12848 makeArrayRef(Indices, NumElts),
12849 "pslldq");
12850 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
12851 }
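pslldq shifts each 128-bit lane left by whole bytes, so the mask reads shifted-in zeros from the zero operand and the remaining bytes from the bitcast source. A sketch of the byte mapping (illustrative only) for a 128-bit pslldq by 3 bytes:

    #include <cstdio>

    // Illustrative sketch only: mask values below NumElts read the zero vector,
    // larger values read byte (Idx - NumElts) of the bitcast source.
    int main() {
      const unsigned NumElts = 16, ShiftVal = 3; // one 128-bit lane, 3-byte shift
      for (unsigned l = 0; l != NumElts; l += 16) {
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = NumElts + i - ShiftVal;
          if (Idx < NumElts)
            Idx -= NumElts - 16; // end of lane, switch operand
          std::printf("%u ", Idx + l); // 13 14 15, then 16 through 28
        }
      }
    }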
12852 case X86::BI__builtin_ia32_psrldqi128_byteshift:
12853 case X86::BI__builtin_ia32_psrldqi256_byteshift:
12854 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
12855 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12856 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
12857 // Builtin type is vXi64 so multiply by 8 to get bytes.
12858 unsigned NumElts = ResultType->getNumElements() * 8;
12859
12860 // If psrldq is shifting the vector more than 15 bytes, emit zero.
12861 if (ShiftVal >= 16)
12862 return llvm::Constant::getNullValue(ResultType);
12863
12864 int Indices[64];
12865 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
12866 for (unsigned l = 0; l != NumElts; l += 16) {
12867 for (unsigned i = 0; i != 16; ++i) {
12868 unsigned Idx = i + ShiftVal;
12869 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
12870 Indices[l + i] = Idx + l;
12871 }
12872 }
12873
12874 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
12875 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
12876 Value *Zero = llvm::Constant::getNullValue(VecTy);
12877 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
12878 makeArrayRef(Indices, NumElts),
12879 "psrldq");
12880 return Builder.CreateBitCast(SV, ResultType, "cast");
12881 }
12882 case X86::BI__builtin_ia32_kshiftliqi:
12883 case X86::BI__builtin_ia32_kshiftlihi:
12884 case X86::BI__builtin_ia32_kshiftlisi:
12885 case X86::BI__builtin_ia32_kshiftlidi: {
12886 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12887 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12888
12889 if (ShiftVal >= NumElts)
12890 return llvm::Constant::getNullValue(Ops[0]->getType());
12891
12892 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
12893
12894 int Indices[64];
12895 for (unsigned i = 0; i != NumElts; ++i)
12896 Indices[i] = NumElts + i - ShiftVal;
12897
12898 Value *Zero = llvm::Constant::getNullValue(In->getType());
12899 Value *SV = Builder.CreateShuffleVector(Zero, In,
12900 makeArrayRef(Indices, NumElts),
12901 "kshiftl");
12902 return Builder.CreateBitCast(SV, Ops[0]->getType());
12903 }
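Shifting a mask register left is modelled as a shuffle of the vXi1 mask against an all-zero vector: low positions come from the zero operand, the rest are the original bits moved up by ShiftVal. A sketch of the resulting bit mapping (illustrative only) for an 8-bit kshiftl by 3:

    #include <cstdio>

    // Illustrative sketch only: where each result bit of an 8-bit kshiftl by 3
    // comes from, following the shuffle mask built above.
    int main() {
      const int NumElts = 8, ShiftVal = 3;
      for (int i = 0; i != NumElts; ++i) {
        int Idx = NumElts + i - ShiftVal;
        if (Idx < NumElts)
          std::printf("bit %d <- 0\n", i);                    // shifted-in zero
        else
          std::printf("bit %d <- bit %d\n", i, Idx - NumElts); // original mask bit
      }
      // bits 0..2 become 0, bit 3 <- bit 0, ..., bit 7 <- bit 4
    }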
12904 case X86::BI__builtin_ia32_kshiftriqi:
12905 case X86::BI__builtin_ia32_kshiftrihi:
12906 case X86::BI__builtin_ia32_kshiftrisi:
12907 case X86::BI__builtin_ia32_kshiftridi: {
12908 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
12909 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12910
12911 if (ShiftVal >= NumElts)
12912 return llvm::Constant::getNullValue(Ops[0]->getType());
12913
12914 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
12915
12916 int Indices[64];
12917 for (unsigned i = 0; i != NumElts; ++i)
12918 Indices[i] = i + ShiftVal;
12919
12920 Value *Zero = llvm::Constant::getNullValue(In->getType());
12921 Value *SV = Builder.CreateShuffleVector(In, Zero,
12922 makeArrayRef(Indices, NumElts),
12923 "kshiftr");
12924 return Builder.CreateBitCast(SV, Ops[0]->getType());
12925 }
12926 case X86::BI__builtin_ia32_movnti:
12927 case X86::BI__builtin_ia32_movnti64:
12928 case X86::BI__builtin_ia32_movntsd:
12929 case X86::BI__builtin_ia32_movntss: {
12930 llvm::MDNode *Node = llvm::MDNode::get(
12931 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
12932
12933 Value *Ptr = Ops[0];
12934 Value *Src = Ops[1];
12935
12936 // Extract the 0'th element of the source vector.
12937 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
12938 BuiltinID == X86::BI__builtin_ia32_movntss)
12939 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
12940
12941 // Convert the type of the pointer to a pointer to the stored type.
12942 Value *BC = Builder.CreateBitCast(
12943 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
12944
12945 // Unaligned nontemporal store of the scalar value.
12946 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
12947 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
12948 SI->setAlignment(llvm::Align(1));
12949 return SI;
12950 }
12951 // Rotate is a special case of funnel shift - 1st 2 args are the same.
12952 case X86::BI__builtin_ia32_vprotb:
12953 case X86::BI__builtin_ia32_vprotw:
12954 case X86::BI__builtin_ia32_vprotd:
12955 case X86::BI__builtin_ia32_vprotq:
12956 case X86::BI__builtin_ia32_vprotbi:
12957 case X86::BI__builtin_ia32_vprotwi:
12958 case X86::BI__builtin_ia32_vprotdi:
12959 case X86::BI__builtin_ia32_vprotqi:
12960 case X86::BI__builtin_ia32_prold128:
12961 case X86::BI__builtin_ia32_prold256:
12962 case X86::BI__builtin_ia32_prold512:
12963 case X86::BI__builtin_ia32_prolq128:
12964 case X86::BI__builtin_ia32_prolq256:
12965 case X86::BI__builtin_ia32_prolq512:
12966 case X86::BI__builtin_ia32_prolvd128:
12967 case X86::BI__builtin_ia32_prolvd256:
12968 case X86::BI__builtin_ia32_prolvd512:
12969 case X86::BI__builtin_ia32_prolvq128:
12970 case X86::BI__builtin_ia32_prolvq256:
12971 case X86::BI__builtin_ia32_prolvq512:
12972 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
12973 case X86::BI__builtin_ia32_prord128:
12974 case X86::BI__builtin_ia32_prord256:
12975 case X86::BI__builtin_ia32_prord512:
12976 case X86::BI__builtin_ia32_prorq128:
12977 case X86::BI__builtin_ia32_prorq256:
12978 case X86::BI__builtin_ia32_prorq512:
12979 case X86::BI__builtin_ia32_prorvd128:
12980 case X86::BI__builtin_ia32_prorvd256:
12981 case X86::BI__builtin_ia32_prorvd512:
12982 case X86::BI__builtin_ia32_prorvq128:
12983 case X86::BI__builtin_ia32_prorvq256:
12984 case X86::BI__builtin_ia32_prorvq512:
12985 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
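Passing the same value as both data operands of a funnel shift turns it into a rotate: a funnel shift left keeps the high bits of the concatenation A:B shifted by S, and with A == B those are exactly A's bits rotated. A scalar sketch (illustrative only, on a plain uint8_t rather than a vector; fshl8 is a hypothetical helper):

    #include <cstdint>
    #include <cstdio>

    // Illustrative sketch only: a funnel shift left with both data operands
    // equal behaves as a rotate left.
    static uint8_t fshl8(uint8_t A, uint8_t B, unsigned S) {
      S &= 7; // the shift amount is taken modulo the bit width
      return S ? uint8_t((A << S) | (B >> (8 - S))) : A;
    }

    int main() {
      uint8_t X = 0x91; // 0b10010001
      std::printf("%#x\n", (unsigned)fshl8(X, X, 3)); // 0x8c == 0b10001100, rotl by 3
    }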
12986 case X86::BI__builtin_ia32_selectb_128:
12987 case X86::BI__builtin_ia32_selectb_256:
12988 case X86::BI__builtin_ia32_selectb_512:
12989 case X86::BI__builtin_ia32_selectw_128:
12990 case X86::BI__builtin_ia32_selectw_256:
12991 case X86::BI__builtin_ia32_selectw_512:
12992 case X86::BI__builtin_ia32_selectd_128:
12993 case X86::BI__builtin_ia32_selectd_256:
12994 case X86::BI__builtin_ia32_selectd_512:
12995 case X86::BI__builtin_ia32_selectq_128:
12996 case X86::BI__builtin_ia32_selectq_256:
12997 case X86::BI__builtin_ia32_selectq_512:
12998 case X86::BI__builtin_ia32_selectps_128:
12999 case X86::BI__builtin_ia32_selectps_256:
13000 case X86::BI__builtin_ia32_selectps_512:
13001 case X86::BI__builtin_ia32_selectpd_128:
13002 case X86::BI__builtin_ia32_selectpd_256:
13003 case X86::BI__builtin_ia32_selectpd_512:
13004 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13005 case X86::BI__builtin_ia32_selectss_128:
13006 case X86::BI__builtin_ia32_selectsd_128: {
13007 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13008 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13009 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13010 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13011 }
13012 case X86::BI__builtin_ia32_cmpb128_mask:
13013 case X86::BI__builtin_ia32_cmpb256_mask:
13014 case X86::BI__builtin_ia32_cmpb512_mask:
13015 case X86::BI__builtin_ia32_cmpw128_mask:
13016 case X86::BI__builtin_ia32_cmpw256_mask:
13017 case X86::BI__builtin_ia32_cmpw512_mask:
13018 case X86::BI__builtin_ia32_cmpd128_mask:
13019 case X86::BI__builtin_ia32_cmpd256_mask:
13020 case X86::BI__builtin_ia32_cmpd512_mask:
13021 case X86::BI__builtin_ia32_cmpq128_mask:
13022 case X86::BI__builtin_ia32_cmpq256_mask:
13023 case X86::BI__builtin_ia32_cmpq512_mask: {
13024 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13025 return EmitX86MaskedCompare(*this, CC, true, Ops);
13026 }
13027 case X86::BI__builtin_ia32_ucmpb128_mask:
13028 case X86::BI__builtin_ia32_ucmpb256_mask:
13029 case X86::BI__builtin_ia32_ucmpb512_mask:
13030 case X86::BI__builtin_ia32_ucmpw128_mask:
13031 case X86::BI__builtin_ia32_ucmpw256_mask:
13032 case X86::BI__builtin_ia32_ucmpw512_mask:
13033 case X86::BI__builtin_ia32_ucmpd128_mask:
13034 case X86::BI__builtin_ia32_ucmpd256_mask:
13035 case X86::BI__builtin_ia32_ucmpd512_mask:
13036 case X86::BI__builtin_ia32_ucmpq128_mask:
13037 case X86::BI__builtin_ia32_ucmpq256_mask:
13038 case X86::BI__builtin_ia32_ucmpq512_mask: {
13039 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13040 return EmitX86MaskedCompare(*this, CC, false, Ops);
13041 }
13042 case X86::BI__builtin_ia32_vpcomb:
13043 case X86::BI__builtin_ia32_vpcomw:
13044 case X86::BI__builtin_ia32_vpcomd:
13045 case X86::BI__builtin_ia32_vpcomq:
13046 return EmitX86vpcom(*this, Ops, true);
13047 case X86::BI__builtin_ia32_vpcomub:
13048 case X86::BI__builtin_ia32_vpcomuw:
13049 case X86::BI__builtin_ia32_vpcomud:
13050 case X86::BI__builtin_ia32_vpcomuq:
13051 return EmitX86vpcom(*this, Ops, false);
13052
13053 case X86::BI__builtin_ia32_kortestcqi:
13054 case X86::BI__builtin_ia32_kortestchi:
13055 case X86::BI__builtin_ia32_kortestcsi:
13056 case X86::BI__builtin_ia32_kortestcdi: {
13057 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13058 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13059 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13060 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13061 }
13062 case X86::BI__builtin_ia32_kortestzqi:
13063 case X86::BI__builtin_ia32_kortestzhi:
13064 case X86::BI__builtin_ia32_kortestzsi:
13065 case X86::BI__builtin_ia32_kortestzdi: {
13066 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13067 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13068 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13069 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13070 }
13071
13072 case X86::BI__builtin_ia32_ktestcqi:
13073 case X86::BI__builtin_ia32_ktestzqi:
13074 case X86::BI__builtin_ia32_ktestchi:
13075 case X86::BI__builtin_ia32_ktestzhi:
13076 case X86::BI__builtin_ia32_ktestcsi:
13077 case X86::BI__builtin_ia32_ktestzsi:
13078 case X86::BI__builtin_ia32_ktestcdi:
13079 case X86::BI__builtin_ia32_ktestzdi: {
13080 Intrinsic::ID IID;
13081 switch (BuiltinID) {
13082 default: llvm_unreachable("Unsupported intrinsic!");
13083 case X86::BI__builtin_ia32_ktestcqi:
13084 IID = Intrinsic::x86_avx512_ktestc_b;
13085 break;
13086 case X86::BI__builtin_ia32_ktestzqi:
13087 IID = Intrinsic::x86_avx512_ktestz_b;
13088 break;
13089 case X86::BI__builtin_ia32_ktestchi:
13090 IID = Intrinsic::x86_avx512_ktestc_w;
13091 break;
13092 case X86::BI__builtin_ia32_ktestzhi:
13093 IID = Intrinsic::x86_avx512_ktestz_w;
13094 break;
13095 case X86::BI__builtin_ia32_ktestcsi:
13096 IID = Intrinsic::x86_avx512_ktestc_d;
13097 break;
13098 case X86::BI__builtin_ia32_ktestzsi:
13099 IID = Intrinsic::x86_avx512_ktestz_d;
13100 break;
13101 case X86::BI__builtin_ia32_ktestcdi:
13102 IID = Intrinsic::x86_avx512_ktestc_q;
13103 break;
13104 case X86::BI__builtin_ia32_ktestzdi:
13105 IID = Intrinsic::x86_avx512_ktestz_q;
13106 break;
13107 }
13108
13109 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13110 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13111 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13112 Function *Intr = CGM.getIntrinsic(IID);
13113 return Builder.CreateCall(Intr, {LHS, RHS});
13114 }
13115
13116 case X86::BI__builtin_ia32_kaddqi:
13117 case X86::BI__builtin_ia32_kaddhi:
13118 case X86::BI__builtin_ia32_kaddsi:
13119 case X86::BI__builtin_ia32_kadddi: {
13120 Intrinsic::ID IID;
13121 switch (BuiltinID) {
13122 default: llvm_unreachable("Unsupported intrinsic!");
13123 case X86::BI__builtin_ia32_kaddqi:
13124 IID = Intrinsic::x86_avx512_kadd_b;
13125 break;
13126 case X86::BI__builtin_ia32_kaddhi:
13127 IID = Intrinsic::x86_avx512_kadd_w;
13128 break;
13129 case X86::BI__builtin_ia32_kaddsi:
13130 IID = Intrinsic::x86_avx512_kadd_d;
13131 break;
13132 case X86::BI__builtin_ia32_kadddi:
13133 IID = Intrinsic::x86_avx512_kadd_q;
13134 break;
13135 }
13136
13137 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13138 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13139 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13140 Function *Intr = CGM.getIntrinsic(IID);
13141 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
13142 return Builder.CreateBitCast(Res, Ops[0]->getType());
13143 }
13144 case X86::BI__builtin_ia32_kandqi:
13145 case X86::BI__builtin_ia32_kandhi:
13146 case X86::BI__builtin_ia32_kandsi:
13147 case X86::BI__builtin_ia32_kanddi:
13148 return EmitX86MaskLogic(*this, Instruction::And, Ops);
13149 case X86::BI__builtin_ia32_kandnqi:
13150 case X86::BI__builtin_ia32_kandnhi:
13151 case X86::BI__builtin_ia32_kandnsi:
13152 case X86::BI__builtin_ia32_kandndi:
13153 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
13154 case X86::BI__builtin_ia32_korqi:
13155 case X86::BI__builtin_ia32_korhi:
13156 case X86::BI__builtin_ia32_korsi:
13157 case X86::BI__builtin_ia32_kordi:
13158 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
13159 case X86::BI__builtin_ia32_kxnorqi:
13160 case X86::BI__builtin_ia32_kxnorhi:
13161 case X86::BI__builtin_ia32_kxnorsi:
13162 case X86::BI__builtin_ia32_kxnordi:
13163 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
13164 case X86::BI__builtin_ia32_kxorqi:
13165 case X86::BI__builtin_ia32_kxorhi:
13166 case X86::BI__builtin_ia32_kxorsi:
13167 case X86::BI__builtin_ia32_kxordi:
13168 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
13169 case X86::BI__builtin_ia32_knotqi:
13170 case X86::BI__builtin_ia32_knothi:
13171 case X86::BI__builtin_ia32_knotsi:
13172 case X86::BI__builtin_ia32_knotdi: {
13173 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13174 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13175 return Builder.CreateBitCast(Builder.CreateNot(Res),
13176 Ops[0]->getType());
13177 }
13178 case X86::BI__builtin_ia32_kmovb:
13179 case X86::BI__builtin_ia32_kmovw:
13180 case X86::BI__builtin_ia32_kmovd:
13181 case X86::BI__builtin_ia32_kmovq: {
13182 // Bitcast to vXi1 type and then back to integer. This gets the mask
13183 // register type into the IR, but might be optimized out depending on
13184 // what's around it.
13185 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13186 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13187 return Builder.CreateBitCast(Res, Ops[0]->getType());
13188 }
13189
13190 case X86::BI__builtin_ia32_kunpckdi:
13191 case X86::BI__builtin_ia32_kunpcksi:
13192 case X86::BI__builtin_ia32_kunpckhi: {
13193 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13194 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13195 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13196 int Indices[64];
13197 for (unsigned i = 0; i != NumElts; ++i)
13198 Indices[i] = i;
13199
13200 // First extract half of each vector. This gives better codegen than
13201 // doing it in a single shuffle.
13202 LHS = Builder.CreateShuffleVector(LHS, LHS,
13203 makeArrayRef(Indices, NumElts / 2));
13204 RHS = Builder.CreateShuffleVector(RHS, RHS,
13205 makeArrayRef(Indices, NumElts / 2));
13206 // Concat the vectors.
13207 // NOTE: Operands are swapped to match the intrinsic definition.
13208 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
13209 makeArrayRef(Indices, NumElts));
13210 return Builder.CreateBitCast(Res, Ops[0]->getType());
13211 }
13212
13213 case X86::BI__builtin_ia32_vplzcntd_128:
13214 case X86::BI__builtin_ia32_vplzcntd_256:
13215 case X86::BI__builtin_ia32_vplzcntd_512:
13216 case X86::BI__builtin_ia32_vplzcntq_128:
13217 case X86::BI__builtin_ia32_vplzcntq_256:
13218 case X86::BI__builtin_ia32_vplzcntq_512: {
13219 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13220 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
13221 }
13222 case X86::BI__builtin_ia32_sqrtss:
13223 case X86::BI__builtin_ia32_sqrtsd: {
13224 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13225 Function *F;
13226 if (Builder.getIsFPConstrained()) {
13227 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13228 A->getType());
13229 A = Builder.CreateConstrainedFPCall(F, {A});
13230 } else {
13231 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13232 A = Builder.CreateCall(F, {A});
13233 }
13234 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13235 }
13236 case X86::BI__builtin_ia32_sqrtsd_round_mask:
13237 case X86::BI__builtin_ia32_sqrtss_round_mask: {
13238 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13239 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
13240 // otherwise keep the intrinsic.
13241 if (CC != 4) {
13242 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
13243 Intrinsic::x86_avx512_mask_sqrt_sd :
13244 Intrinsic::x86_avx512_mask_sqrt_ss;
13245 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13246 }
13247 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13248 Function *F;
13249 if (Builder.getIsFPConstrained()) {
13250 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13251 A->getType());
13252 A = Builder.CreateConstrainedFPCall(F, A);
13253 } else {
13254 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13255 A = Builder.CreateCall(F, A);
13256 }
13257 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13258 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
13259 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13260 }
13261 case X86::BI__builtin_ia32_sqrtpd256:
13262 case X86::BI__builtin_ia32_sqrtpd:
13263 case X86::BI__builtin_ia32_sqrtps256:
13264 case X86::BI__builtin_ia32_sqrtps:
13265 case X86::BI__builtin_ia32_sqrtps512:
13266 case X86::BI__builtin_ia32_sqrtpd512: {
13267 if (Ops.size() == 2) {
13268 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13269 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
13270 // otherwise keep the intrinsic.
13271 if (CC != 4) {
13272 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
13273 Intrinsic::x86_avx512_sqrt_ps_512 :
13274 Intrinsic::x86_avx512_sqrt_pd_512;
13275 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13276 }
13277 }
13278 if (Builder.getIsFPConstrained()) {
13279 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13280 Ops[0]->getType());
13281 return Builder.CreateConstrainedFPCall(F, Ops[0]);
13282 } else {
13283 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
13284 return Builder.CreateCall(F, Ops[0]);
13285 }
13286 }
13287 case X86::BI__builtin_ia32_pabsb128:
13288 case X86::BI__builtin_ia32_pabsw128:
13289 case X86::BI__builtin_ia32_pabsd128:
13290 case X86::BI__builtin_ia32_pabsb256:
13291 case X86::BI__builtin_ia32_pabsw256:
13292 case X86::BI__builtin_ia32_pabsd256:
13293 case X86::BI__builtin_ia32_pabsq128:
13294 case X86::BI__builtin_ia32_pabsq256:
13295 case X86::BI__builtin_ia32_pabsb512:
13296 case X86::BI__builtin_ia32_pabsw512:
13297 case X86::BI__builtin_ia32_pabsd512:
13298 case X86::BI__builtin_ia32_pabsq512: {
13299 Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
13300 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13301 }
13302 case X86::BI__builtin_ia32_pmaxsb128:
13303 case X86::BI__builtin_ia32_pmaxsw128:
13304 case X86::BI__builtin_ia32_pmaxsd128:
13305 case X86::BI__builtin_ia32_pmaxsq128:
13306 case X86::BI__builtin_ia32_pmaxsb256:
13307 case X86::BI__builtin_ia32_pmaxsw256:
13308 case X86::BI__builtin_ia32_pmaxsd256:
13309 case X86::BI__builtin_ia32_pmaxsq256:
13310 case X86::BI__builtin_ia32_pmaxsb512:
13311 case X86::BI__builtin_ia32_pmaxsw512:
13312 case X86::BI__builtin_ia32_pmaxsd512:
13313 case X86::BI__builtin_ia32_pmaxsq512:
13314 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
13315 case X86::BI__builtin_ia32_pmaxub128:
13316 case X86::BI__builtin_ia32_pmaxuw128:
13317 case X86::BI__builtin_ia32_pmaxud128:
13318 case X86::BI__builtin_ia32_pmaxuq128:
13319 case X86::BI__builtin_ia32_pmaxub256:
13320 case X86::BI__builtin_ia32_pmaxuw256:
13321 case X86::BI__builtin_ia32_pmaxud256:
13322 case X86::BI__builtin_ia32_pmaxuq256:
13323 case X86::BI__builtin_ia32_pmaxub512:
13324 case X86::BI__builtin_ia32_pmaxuw512:
13325 case X86::BI__builtin_ia32_pmaxud512:
13326 case X86::BI__builtin_ia32_pmaxuq512:
13327 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
13328 case X86::BI__builtin_ia32_pminsb128:
13329 case X86::BI__builtin_ia32_pminsw128:
13330 case X86::BI__builtin_ia32_pminsd128:
13331 case X86::BI__builtin_ia32_pminsq128:
13332 case X86::BI__builtin_ia32_pminsb256:
13333 case X86::BI__builtin_ia32_pminsw256:
13334 case X86::BI__builtin_ia32_pminsd256:
13335 case X86::BI__builtin_ia32_pminsq256:
13336 case X86::BI__builtin_ia32_pminsb512:
13337 case X86::BI__builtin_ia32_pminsw512:
13338 case X86::BI__builtin_ia32_pminsd512:
13339 case X86::BI__builtin_ia32_pminsq512:
13340 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
13341 case X86::BI__builtin_ia32_pminub128:
13342 case X86::BI__builtin_ia32_pminuw128:
13343 case X86::BI__builtin_ia32_pminud128:
13344 case X86::BI__builtin_ia32_pminuq128:
13345 case X86::BI__builtin_ia32_pminub256:
13346 case X86::BI__builtin_ia32_pminuw256:
13347 case X86::BI__builtin_ia32_pminud256:
13348 case X86::BI__builtin_ia32_pminuq256:
13349 case X86::BI__builtin_ia32_pminub512:
13350 case X86::BI__builtin_ia32_pminuw512:
13351 case X86::BI__builtin_ia32_pminud512:
13352 case X86::BI__builtin_ia32_pminuq512:
13353 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
13354
13355 case X86::BI__builtin_ia32_pmuludq128:
13356 case X86::BI__builtin_ia32_pmuludq256:
13357 case X86::BI__builtin_ia32_pmuludq512:
13358 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
13359
13360 case X86::BI__builtin_ia32_pmuldq128:
13361 case X86::BI__builtin_ia32_pmuldq256:
13362 case X86::BI__builtin_ia32_pmuldq512:
13363 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
13364
13365 case X86::BI__builtin_ia32_pternlogd512_mask:
13366 case X86::BI__builtin_ia32_pternlogq512_mask:
13367 case X86::BI__builtin_ia32_pternlogd128_mask:
13368 case X86::BI__builtin_ia32_pternlogd256_mask:
13369 case X86::BI__builtin_ia32_pternlogq128_mask:
13370 case X86::BI__builtin_ia32_pternlogq256_mask:
13371 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
13372
13373 case X86::BI__builtin_ia32_pternlogd512_maskz:
13374 case X86::BI__builtin_ia32_pternlogq512_maskz:
13375 case X86::BI__builtin_ia32_pternlogd128_maskz:
13376 case X86::BI__builtin_ia32_pternlogd256_maskz:
13377 case X86::BI__builtin_ia32_pternlogq128_maskz:
13378 case X86::BI__builtin_ia32_pternlogq256_maskz:
13379 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
13380
13381 case X86::BI__builtin_ia32_vpshldd128:
13382 case X86::BI__builtin_ia32_vpshldd256:
13383 case X86::BI__builtin_ia32_vpshldd512:
13384 case X86::BI__builtin_ia32_vpshldq128:
13385 case X86::BI__builtin_ia32_vpshldq256:
13386 case X86::BI__builtin_ia32_vpshldq512:
13387 case X86::BI__builtin_ia32_vpshldw128:
13388 case X86::BI__builtin_ia32_vpshldw256:
13389 case X86::BI__builtin_ia32_vpshldw512:
13390 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13391
13392 case X86::BI__builtin_ia32_vpshrdd128:
13393 case X86::BI__builtin_ia32_vpshrdd256:
13394 case X86::BI__builtin_ia32_vpshrdd512:
13395 case X86::BI__builtin_ia32_vpshrdq128:
13396 case X86::BI__builtin_ia32_vpshrdq256:
13397 case X86::BI__builtin_ia32_vpshrdq512:
13398 case X86::BI__builtin_ia32_vpshrdw128:
13399 case X86::BI__builtin_ia32_vpshrdw256:
13400 case X86::BI__builtin_ia32_vpshrdw512:
13401 // Ops 0 and 1 are swapped.
13402 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
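// Rough sketch (assumed lowering; EmitX86FunnelShift is expected to splat
// the immediate shift amount): __builtin_ia32_vpshldd128(a, b, 5) becomes
// roughly
//   %r = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                        <4 x i32> <5, 5, 5, 5>)
// and the vpshrd forms map to @llvm.fshr with the first two operands
// swapped, as noted above.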
13403
13404 case X86::BI__builtin_ia32_vpshldvd128:
13405 case X86::BI__builtin_ia32_vpshldvd256:
13406 case X86::BI__builtin_ia32_vpshldvd512:
13407 case X86::BI__builtin_ia32_vpshldvq128:
13408 case X86::BI__builtin_ia32_vpshldvq256:
13409 case X86::BI__builtin_ia32_vpshldvq512:
13410 case X86::BI__builtin_ia32_vpshldvw128:
13411 case X86::BI__builtin_ia32_vpshldvw256:
13412 case X86::BI__builtin_ia32_vpshldvw512:
13413 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13414
13415 case X86::BI__builtin_ia32_vpshrdvd128:
13416 case X86::BI__builtin_ia32_vpshrdvd256:
13417 case X86::BI__builtin_ia32_vpshrdvd512:
13418 case X86::BI__builtin_ia32_vpshrdvq128:
13419 case X86::BI__builtin_ia32_vpshrdvq256:
13420 case X86::BI__builtin_ia32_vpshrdvq512:
13421 case X86::BI__builtin_ia32_vpshrdvw128:
13422 case X86::BI__builtin_ia32_vpshrdvw256:
13423 case X86::BI__builtin_ia32_vpshrdvw512:
13424 // Ops 0 and 1 are swapped.
13425 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13426
13427 // Reductions
13428 case X86::BI__builtin_ia32_reduce_add_d512:
13429 case X86::BI__builtin_ia32_reduce_add_q512: {
13430 Function *F =
13431 CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
13432 return Builder.CreateCall(F, {Ops[0]});
13433 }
13434 case X86::BI__builtin_ia32_reduce_and_d512:
13435 case X86::BI__builtin_ia32_reduce_and_q512: {
13436 Function *F =
13437 CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
13438 return Builder.CreateCall(F, {Ops[0]});
13439 }
13440 case X86::BI__builtin_ia32_reduce_mul_d512:
13441 case X86::BI__builtin_ia32_reduce_mul_q512: {
13442 Function *F =
13443 CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
13444 return Builder.CreateCall(F, {Ops[0]});
13445 }
13446 case X86::BI__builtin_ia32_reduce_or_d512:
13447 case X86::BI__builtin_ia32_reduce_or_q512: {
13448 Function *F =
13449 CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
13450 return Builder.CreateCall(F, {Ops[0]});
13451 }
13452 case X86::BI__builtin_ia32_reduce_smax_d512:
13453 case X86::BI__builtin_ia32_reduce_smax_q512: {
13454 Function *F =
13455 CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
13456 return Builder.CreateCall(F, {Ops[0]});
13457 }
13458 case X86::BI__builtin_ia32_reduce_smin_d512:
13459 case X86::BI__builtin_ia32_reduce_smin_q512: {
13460 Function *F =
13461 CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
13462 return Builder.CreateCall(F, {Ops[0]});
13463 }
13464 case X86::BI__builtin_ia32_reduce_umax_d512:
13465 case X86::BI__builtin_ia32_reduce_umax_q512: {
13466 Function *F =
13467 CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
13468 return Builder.CreateCall(F, {Ops[0]});
13469 }
13470 case X86::BI__builtin_ia32_reduce_umin_d512:
13471 case X86::BI__builtin_ia32_reduce_umin_q512: {
13472 Function *F =
13473 CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
13474 return Builder.CreateCall(F, {Ops[0]});
13475 }
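// Rough sketch (assumed lowering): __builtin_ia32_reduce_add_d512 on a
// <16 x i32> %v becomes roughly
//   %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %v)
// and the other reductions differ only in the llvm.vector.reduce.* flavor.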
13476
13477 // 3DNow!
13478 case X86::BI__builtin_ia32_pswapdsf:
13479 case X86::BI__builtin_ia32_pswapdsi: {
13480 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
13481 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
13482 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
13483 return Builder.CreateCall(F, Ops, "pswapd");
13484 }
13485 case X86::BI__builtin_ia32_rdrand16_step:
13486 case X86::BI__builtin_ia32_rdrand32_step:
13487 case X86::BI__builtin_ia32_rdrand64_step:
13488 case X86::BI__builtin_ia32_rdseed16_step:
13489 case X86::BI__builtin_ia32_rdseed32_step:
13490 case X86::BI__builtin_ia32_rdseed64_step: {
13491 Intrinsic::ID ID;
13492 switch (BuiltinID) {
13493 default: llvm_unreachable("Unsupported intrinsic!");
13494 case X86::BI__builtin_ia32_rdrand16_step:
13495 ID = Intrinsic::x86_rdrand_16;
13496 break;
13497 case X86::BI__builtin_ia32_rdrand32_step:
13498 ID = Intrinsic::x86_rdrand_32;
13499 break;
13500 case X86::BI__builtin_ia32_rdrand64_step:
13501 ID = Intrinsic::x86_rdrand_64;
13502 break;
13503 case X86::BI__builtin_ia32_rdseed16_step:
13504 ID = Intrinsic::x86_rdseed_16;
13505 break;
13506 case X86::BI__builtin_ia32_rdseed32_step:
13507 ID = Intrinsic::x86_rdseed_32;
13508 break;
13509 case X86::BI__builtin_ia32_rdseed64_step:
13510 ID = Intrinsic::x86_rdseed_64;
13511 break;
13512 }
13513
13514 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
13515 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
13516 Ops[0]);
13517 return Builder.CreateExtractValue(Call, 1);
13518 }
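// Rough sketch (assumed lowering): __builtin_ia32_rdrand32_step(&v) becomes
// roughly
//   %call = call { i32, i32 } @llvm.x86.rdrand.32()
//   %val  = extractvalue { i32, i32 } %call, 0   ; stored through the pointer
//   %ok   = extractvalue { i32, i32 } %call, 1   ; returned success flag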
13519 case X86::BI__builtin_ia32_addcarryx_u32:
13520 case X86::BI__builtin_ia32_addcarryx_u64:
13521 case X86::BI__builtin_ia32_subborrow_u32:
13522 case X86::BI__builtin_ia32_subborrow_u64: {
13523 Intrinsic::ID IID;
13524 switch (BuiltinID) {
13525 default: llvm_unreachable("Unsupported intrinsic!");
13526 case X86::BI__builtin_ia32_addcarryx_u32:
13527 IID = Intrinsic::x86_addcarry_32;
13528 break;
13529 case X86::BI__builtin_ia32_addcarryx_u64:
13530 IID = Intrinsic::x86_addcarry_64;
13531 break;
13532 case X86::BI__builtin_ia32_subborrow_u32:
13533 IID = Intrinsic::x86_subborrow_32;
13534 break;
13535 case X86::BI__builtin_ia32_subborrow_u64:
13536 IID = Intrinsic::x86_subborrow_64;
13537 break;
13538 }
13539
13540 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
13541 { Ops[0], Ops[1], Ops[2] });
13542 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13543 Ops[3]);
13544 return Builder.CreateExtractValue(Call, 0);
13545 }
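// Rough sketch (assumed lowering): _addcarryx_u32(c, a, b, &out) becomes
// roughly
//   %call = call { i8, i32 } @llvm.x86.addcarry.32(i8 %c, i32 %a, i32 %b)
//   %sum  = extractvalue { i8, i32 } %call, 1   ; stored through &out
//   %cf   = extractvalue { i8, i32 } %call, 0   ; returned carry-out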
13546
13547 case X86::BI__builtin_ia32_fpclassps128_mask:
13548 case X86::BI__builtin_ia32_fpclassps256_mask:
13549 case X86::BI__builtin_ia32_fpclassps512_mask:
13550 case X86::BI__builtin_ia32_fpclasspd128_mask:
13551 case X86::BI__builtin_ia32_fpclasspd256_mask:
13552 case X86::BI__builtin_ia32_fpclasspd512_mask: {
13553 unsigned NumElts =
13554 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13555 Value *MaskIn = Ops[2];
13556 Ops.erase(&Ops[2]);
13557
13558 Intrinsic::ID ID;
13559 switch (BuiltinID) {
13560 default: llvm_unreachable("Unsupported intrinsic!");
13561 case X86::BI__builtin_ia32_fpclassps128_mask:
13562 ID = Intrinsic::x86_avx512_fpclass_ps_128;
13563 break;
13564 case X86::BI__builtin_ia32_fpclassps256_mask:
13565 ID = Intrinsic::x86_avx512_fpclass_ps_256;
13566 break;
13567 case X86::BI__builtin_ia32_fpclassps512_mask:
13568 ID = Intrinsic::x86_avx512_fpclass_ps_512;
13569 break;
13570 case X86::BI__builtin_ia32_fpclasspd128_mask:
13571 ID = Intrinsic::x86_avx512_fpclass_pd_128;
13572 break;
13573 case X86::BI__builtin_ia32_fpclasspd256_mask:
13574 ID = Intrinsic::x86_avx512_fpclass_pd_256;
13575 break;
13576 case X86::BI__builtin_ia32_fpclasspd512_mask:
13577 ID = Intrinsic::x86_avx512_fpclass_pd_512;
13578 break;
13579 }
13580
13581 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13582 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
13583 }
13584
13585 case X86::BI__builtin_ia32_vp2intersect_q_512:
13586 case X86::BI__builtin_ia32_vp2intersect_q_256:
13587 case X86::BI__builtin_ia32_vp2intersect_q_128:
13588 case X86::BI__builtin_ia32_vp2intersect_d_512:
13589 case X86::BI__builtin_ia32_vp2intersect_d_256:
13590 case X86::BI__builtin_ia32_vp2intersect_d_128: {
13591 unsigned NumElts =
13592 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13593 Intrinsic::ID ID;
13594
13595 switch (BuiltinID) {
13596 default: llvm_unreachable("Unsupported intrinsic!");
13597 case X86::BI__builtin_ia32_vp2intersect_q_512:
13598 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
13599 break;
13600 case X86::BI__builtin_ia32_vp2intersect_q_256:
13601 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
13602 break;
13603 case X86::BI__builtin_ia32_vp2intersect_q_128:
13604 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
13605 break;
13606 case X86::BI__builtin_ia32_vp2intersect_d_512:
13607 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
13608 break;
13609 case X86::BI__builtin_ia32_vp2intersect_d_256:
13610 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
13611 break;
13612 case X86::BI__builtin_ia32_vp2intersect_d_128:
13613 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
13614 break;
13615 }
13616
13617 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
13618 Value *Result = Builder.CreateExtractValue(Call, 0);
13619 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13620 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
13621
13622 Result = Builder.CreateExtractValue(Call, 1);
13623 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13624 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
13625 }
13626
13627 case X86::BI__builtin_ia32_vpmultishiftqb128:
13628 case X86::BI__builtin_ia32_vpmultishiftqb256:
13629 case X86::BI__builtin_ia32_vpmultishiftqb512: {
13630 Intrinsic::ID ID;
13631 switch (BuiltinID) {
13632 default: llvm_unreachable("Unsupported intrinsic!");
13633 case X86::BI__builtin_ia32_vpmultishiftqb128:
13634 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
13635 break;
13636 case X86::BI__builtin_ia32_vpmultishiftqb256:
13637 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
13638 break;
13639 case X86::BI__builtin_ia32_vpmultishiftqb512:
13640 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
13641 break;
13642 }
13643
13644 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13645 }
13646
13647 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13648 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13649 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
13650 unsigned NumElts =
13651 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13652 Value *MaskIn = Ops[2];
13653 Ops.erase(&Ops[2]);
13654
13655 Intrinsic::ID ID;
13656 switch (BuiltinID) {
13657 default: llvm_unreachable("Unsupported intrinsic!");
13658 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
13659 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
13660 break;
13661 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
13662 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
13663 break;
13664 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
13665 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
13666 break;
13667 }
13668
13669 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13670 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
13671 }
13672
13673 // packed comparison intrinsics
13674 case X86::BI__builtin_ia32_cmpeqps:
13675 case X86::BI__builtin_ia32_cmpeqpd:
13676 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
13677 case X86::BI__builtin_ia32_cmpltps:
13678 case X86::BI__builtin_ia32_cmpltpd:
13679 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
13680 case X86::BI__builtin_ia32_cmpleps:
13681 case X86::BI__builtin_ia32_cmplepd:
13682 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
13683 case X86::BI__builtin_ia32_cmpunordps:
13684 case X86::BI__builtin_ia32_cmpunordpd:
13685 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
13686 case X86::BI__builtin_ia32_cmpneqps:
13687 case X86::BI__builtin_ia32_cmpneqpd:
13688 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
13689 case X86::BI__builtin_ia32_cmpnltps:
13690 case X86::BI__builtin_ia32_cmpnltpd:
13691 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
13692 case X86::BI__builtin_ia32_cmpnleps:
13693 case X86::BI__builtin_ia32_cmpnlepd:
13694 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
13695 case X86::BI__builtin_ia32_cmpordps:
13696 case X86::BI__builtin_ia32_cmpordpd:
13697 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
13698 case X86::BI__builtin_ia32_cmpps128_mask:
13699 case X86::BI__builtin_ia32_cmpps256_mask:
13700 case X86::BI__builtin_ia32_cmpps512_mask:
13701 case X86::BI__builtin_ia32_cmppd128_mask:
13702 case X86::BI__builtin_ia32_cmppd256_mask:
13703 case X86::BI__builtin_ia32_cmppd512_mask:
13704 IsMaskFCmp = true;
13705 LLVM_FALLTHROUGH;
13706 case X86::BI__builtin_ia32_cmpps:
13707 case X86::BI__builtin_ia32_cmpps256:
13708 case X86::BI__builtin_ia32_cmppd:
13709 case X86::BI__builtin_ia32_cmppd256: {
13710 // Lowering vector comparisons to fcmp instructions, while
13711 // ignoring the requested signalling behaviour and
13712 // the requested rounding mode.
13713 // This is only possible as long as FENV_ACCESS is not implemented.
13714 // See also: https://reviews.llvm.org/D45616
13715
13716 // The third argument is the comparison condition, an integer in the
13717 // range [0, 31].
13718 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
13719
13720 // Lowering to IR fcmp instruction.
13721 // Ignoring requested signaling behaviour,
13722 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
13723 FCmpInst::Predicate Pred;
13724 bool IsSignaling;
13725 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
13726 // behavior is inverted. We'll handle that after the switch.
13727 switch (CC & 0xf) {
13728 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
13729 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
13730 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
13731 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
13732 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
13733 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
13734 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
13735 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
13736 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
13737 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
13738 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
13739 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
13740 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
13741 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
13742 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
13743 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
13744 default: llvm_unreachable("Unhandled CC");
13745 }
13746
13747 // Invert the signalling behavior for 16-31.
13748 if (CC & 0x10)
13749 IsSignaling = !IsSignaling;
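// For example, _CMP_GT_OS (0x0e) selects FCMP_OGT with IsSignaling = true,
// while _CMP_GT_OQ (0x1e) selects the same predicate but the 0x10 bit flips
// IsSignaling back to false (a quiet comparison).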
13750
13751 // If the predicate is true or false and we're using constrained intrinsics,
13752 // we don't have a compare intrinsic we can use. Just use the legacy X86
13753 // specific intrinsic.
13754 // If the intrinsic is mask enabled and we're using constrained intrinsics,
13755 // use the legacy X86 specific intrinsic.
13756 if (Builder.getIsFPConstrained() &&
13757 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
13758 IsMaskFCmp)) {
13759
13760 Intrinsic::ID IID;
13761 switch (BuiltinID) {
13762 default: llvm_unreachable("Unexpected builtin");
13763 case X86::BI__builtin_ia32_cmpps:
13764 IID = Intrinsic::x86_sse_cmp_ps;
13765 break;
13766 case X86::BI__builtin_ia32_cmpps256:
13767 IID = Intrinsic::x86_avx_cmp_ps_256;
13768 break;
13769 case X86::BI__builtin_ia32_cmppd:
13770 IID = Intrinsic::x86_sse2_cmp_pd;
13771 break;
13772 case X86::BI__builtin_ia32_cmppd256:
13773 IID = Intrinsic::x86_avx_cmp_pd_256;
13774 break;
13775 case X86::BI__builtin_ia32_cmpps512_mask:
13776 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
13777 break;
13778 case X86::BI__builtin_ia32_cmppd512_mask:
13779 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
13780 break;
13781 case X86::BI__builtin_ia32_cmpps128_mask:
13782 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
13783 break;
13784 case X86::BI__builtin_ia32_cmpps256_mask:
13785 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
13786 break;
13787 case X86::BI__builtin_ia32_cmppd128_mask:
13788 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
13789 break;
13790 case X86::BI__builtin_ia32_cmppd256_mask:
13791 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
13792 break;
13793 }
13794
13795 Function *Intr = CGM.getIntrinsic(IID);
13796 if (IsMaskFCmp) {
13797 unsigned NumElts =
13798 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13799 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
13800 Value *Cmp = Builder.CreateCall(Intr, Ops);
13801 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
13802 }
13803
13804 return Builder.CreateCall(Intr, Ops);
13805 }
13806
13807 // Builtins without the _mask suffix return a vector of integers
13808 // of the same width as the input vectors
13809 if (IsMaskFCmp) {
13810 // We ignore SAE if strict FP is disabled. We only keep precise
13811 // exception behavior under strict FP.
13812 unsigned NumElts =
13813 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13814 Value *Cmp;
13815 if (IsSignaling)
13816 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
13817 else
13818 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
13819 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
13820 }
13821
13822 return getVectorFCmpIR(Pred, IsSignaling);
13823 }
13824
13825 // SSE scalar comparison intrinsics
13826 case X86::BI__builtin_ia32_cmpeqss:
13827 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
13828 case X86::BI__builtin_ia32_cmpltss:
13829 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
13830 case X86::BI__builtin_ia32_cmpless:
13831 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
13832 case X86::BI__builtin_ia32_cmpunordss:
13833 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
13834 case X86::BI__builtin_ia32_cmpneqss:
13835 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
13836 case X86::BI__builtin_ia32_cmpnltss:
13837 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
13838 case X86::BI__builtin_ia32_cmpnless:
13839 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
13840 case X86::BI__builtin_ia32_cmpordss:
13841 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
13842 case X86::BI__builtin_ia32_cmpeqsd:
13843 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
13844 case X86::BI__builtin_ia32_cmpltsd:
13845 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
13846 case X86::BI__builtin_ia32_cmplesd:
13847 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
13848 case X86::BI__builtin_ia32_cmpunordsd:
13849 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
13850 case X86::BI__builtin_ia32_cmpneqsd:
13851 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
13852 case X86::BI__builtin_ia32_cmpnltsd:
13853 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
13854 case X86::BI__builtin_ia32_cmpnlesd:
13855 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
13856 case X86::BI__builtin_ia32_cmpordsd:
13857 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
13858
13859 // f16c half2float intrinsics
13860 case X86::BI__builtin_ia32_vcvtph2ps:
13861 case X86::BI__builtin_ia32_vcvtph2ps256:
13862 case X86::BI__builtin_ia32_vcvtph2ps_mask:
13863 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
13864 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
13865 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
13866
13867 // AVX512 bf16 intrinsics
13868 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
13869 Ops[2] = getMaskVecValue(
13870 *this, Ops[2],
13871 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
13872 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
13873 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13874 }
13875 case X86::BI__builtin_ia32_cvtsbf162ss_32:
13876 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
13877
13878 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
13879 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
13880 Intrinsic::ID IID;
13881 switch (BuiltinID) {
13882 default: llvm_unreachable("Unsupported intrinsic!");
13883 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
13884 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
13885 break;
13886 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
13887 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
13888 break;
13889 }
13890 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
13891 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
13892 }
13893
13894 case X86::BI__emul:
13895 case X86::BI__emulu: {
13896 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
13897 bool isSigned = (BuiltinID == X86::BI__emul);
13898 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
13899 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
13900 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
13901 }
13902 case X86::BI__mulh:
13903 case X86::BI__umulh:
13904 case X86::BI_mul128:
13905 case X86::BI_umul128: {
13906 llvm::Type *ResType = ConvertType(E->getType());
13907 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
13908
13909 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
13910 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
13911 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
13912
13913 Value *MulResult, *HigherBits;
13914 if (IsSigned) {
13915 MulResult = Builder.CreateNSWMul(LHS, RHS);
13916 HigherBits = Builder.CreateAShr(MulResult, 64);
13917 } else {
13918 MulResult = Builder.CreateNUWMul(LHS, RHS);
13919 HigherBits = Builder.CreateLShr(MulResult, 64);
13920 }
13921 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
13922
13923 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
13924 return HigherBits;
13925
13926 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
13927 Builder.CreateStore(HigherBits, HighBitsAddress);
13928 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
13929 }
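// Rough sketch (assumed lowering): __umulh(a, b) becomes roughly
//   %l = zext i64 %a to i128
//   %r = zext i64 %b to i128
//   %m = mul nuw i128 %l, %r
//   %h = lshr i128 %m, 64
//   %h64 = trunc i128 %h to i64   ; returned high half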
13930
13931 case X86::BI__faststorefence: {
13932 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
13933 llvm::SyncScope::System);
13934 }
13935 case X86::BI__shiftleft128:
13936 case X86::BI__shiftright128: {
13937 llvm::Function *F = CGM.getIntrinsic(
13938 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
13939 Int64Ty);
13940 // Flip low/high ops and zero-extend amount to matching type.
13941 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
13942 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
13943 std::swap(Ops[0], Ops[1]);
13944 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
13945 return Builder.CreateCall(F, Ops);
13946 }
13947 case X86::BI_ReadWriteBarrier:
13948 case X86::BI_ReadBarrier:
13949 case X86::BI_WriteBarrier: {
13950 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
13951 llvm::SyncScope::SingleThread);
13952 }
13953 case X86::BI_BitScanForward:
13954 case X86::BI_BitScanForward64:
13955 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
13956 case X86::BI_BitScanReverse:
13957 case X86::BI_BitScanReverse64:
13958 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
13959
13960 case X86::BI_InterlockedAnd64:
13961 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
13962 case X86::BI_InterlockedExchange64:
13963 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
13964 case X86::BI_InterlockedExchangeAdd64:
13965 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
13966 case X86::BI_InterlockedExchangeSub64:
13967 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
13968 case X86::BI_InterlockedOr64:
13969 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
13970 case X86::BI_InterlockedXor64:
13971 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
13972 case X86::BI_InterlockedDecrement64:
13973 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
13974 case X86::BI_InterlockedIncrement64:
13975 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
13976 case X86::BI_InterlockedCompareExchange128: {
13977 // InterlockedCompareExchange128 doesn't directly refer to 128-bit ints;
13978 // instead it takes pointers to 64-bit ints for Destination and
13979 // ComparandResult, and the exchange value is passed as two 64-bit ints
13980 // (high & low). The previous value is written to ComparandResult, and
13981 // success is returned.
13982
13983 llvm::Type *Int128Ty = Builder.getInt128Ty();
13984 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
13985
13986 Value *Destination =
13987 Builder.CreateBitCast(Ops[0], Int128PtrTy);
13988 Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
13989 Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
13990 Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
13991 getContext().toCharUnitsFromBits(128));
13992
13993 Value *Exchange = Builder.CreateOr(
13994 Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
13995 ExchangeLow128);
13996
13997 Value *Comparand = Builder.CreateLoad(ComparandResult);
13998
13999 AtomicCmpXchgInst *CXI =
14000 Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
14001 AtomicOrdering::SequentiallyConsistent,
14002 AtomicOrdering::SequentiallyConsistent);
14003 CXI->setVolatile(true);
14004
14005 // Write the result back to the inout pointer.
14006 Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
14007
14008 // Get the success boolean and zero extend it to i8.
14009 Value *Success = Builder.CreateExtractValue(CXI, 1);
14010 return Builder.CreateZExt(Success, ConvertType(E->getType()));
14011 }
14012
14013 case X86::BI_AddressOfReturnAddress: {
14014 Function *F =
14015 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14016 return Builder.CreateCall(F);
14017 }
14018 case X86::BI__stosb: {
14019 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
14020 // instruction, but it will create a memset that won't be optimized away.
14021 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14022 }
14023 case X86::BI__ud2:
14024 // llvm.trap makes a ud2a instruction on x86.
14025 return EmitTrapCall(Intrinsic::trap);
14026 case X86::BI__int2c: {
14027 // This syscall signals a driver assertion failure in x86 NT kernels.
14028 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14029 llvm::InlineAsm *IA =
14030 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14031 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14032 getLLVMContext(), llvm::AttributeList::FunctionIndex,
14033 llvm::Attribute::NoReturn);
14034 llvm::CallInst *CI = Builder.CreateCall(IA);
14035 CI->setAttributes(NoReturnAttr);
14036 return CI;
14037 }
14038 case X86::BI__readfsbyte:
14039 case X86::BI__readfsword:
14040 case X86::BI__readfsdword:
14041 case X86::BI__readfsqword: {
14042 llvm::Type *IntTy = ConvertType(E->getType());
14043 Value *Ptr =
14044 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14045 LoadInst *Load = Builder.CreateAlignedLoad(
14046 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14047 Load->setVolatile(true);
14048 return Load;
14049 }
14050 case X86::BI__readgsbyte:
14051 case X86::BI__readgsword:
14052 case X86::BI__readgsdword:
14053 case X86::BI__readgsqword: {
14054 llvm::Type *IntTy = ConvertType(E->getType());
14055 Value *Ptr =
14056 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14057 LoadInst *Load = Builder.CreateAlignedLoad(
14058 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14059 Load->setVolatile(true);
14060 return Load;
14061 }
14062 case X86::BI__builtin_ia32_paddsb512:
14063 case X86::BI__builtin_ia32_paddsw512:
14064 case X86::BI__builtin_ia32_paddsb256:
14065 case X86::BI__builtin_ia32_paddsw256:
14066 case X86::BI__builtin_ia32_paddsb128:
14067 case X86::BI__builtin_ia32_paddsw128:
14068 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14069 case X86::BI__builtin_ia32_paddusb512:
14070 case X86::BI__builtin_ia32_paddusw512:
14071 case X86::BI__builtin_ia32_paddusb256:
14072 case X86::BI__builtin_ia32_paddusw256:
14073 case X86::BI__builtin_ia32_paddusb128:
14074 case X86::BI__builtin_ia32_paddusw128:
14075 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14076 case X86::BI__builtin_ia32_psubsb512:
14077 case X86::BI__builtin_ia32_psubsw512:
14078 case X86::BI__builtin_ia32_psubsb256:
14079 case X86::BI__builtin_ia32_psubsw256:
14080 case X86::BI__builtin_ia32_psubsb128:
14081 case X86::BI__builtin_ia32_psubsw128:
14082 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14083 case X86::BI__builtin_ia32_psubusb512:
14084 case X86::BI__builtin_ia32_psubusw512:
14085 case X86::BI__builtin_ia32_psubusb256:
14086 case X86::BI__builtin_ia32_psubusw256:
14087 case X86::BI__builtin_ia32_psubusb128:
14088 case X86::BI__builtin_ia32_psubusw128:
14089 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
14090 case X86::BI__builtin_ia32_encodekey128_u32: {
14091 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
14092
14093 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
14094
14095 for (int i = 0; i < 6; ++i) {
14096 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14097 Value *Ptr = Builder.CreateConstGEP1_32(Ops[2], i * 16);
14098 Ptr = Builder.CreateBitCast(
14099 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14100 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14101 }
14102
14103 return Builder.CreateExtractValue(Call, 0);
14104 }
14105 case X86::BI__builtin_ia32_encodekey256_u32: {
14106 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
14107
14108 Value *Call =
14109 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
14110
14111 for (int i = 0; i < 7; ++i) {
14112 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14113 Value *Ptr = Builder.CreateConstGEP1_32(Ops[3], i * 16);
14114 Ptr = Builder.CreateBitCast(
14115 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14116 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14117 }
14118
14119 return Builder.CreateExtractValue(Call, 0);
14120 }
14121 case X86::BI__builtin_ia32_aesenc128kl_u8:
14122 case X86::BI__builtin_ia32_aesdec128kl_u8:
14123 case X86::BI__builtin_ia32_aesenc256kl_u8:
14124 case X86::BI__builtin_ia32_aesdec256kl_u8: {
14125 Intrinsic::ID IID;
14126 switch (BuiltinID) {
14127 default: llvm_unreachable("Unexpected builtin");
14128 case X86::BI__builtin_ia32_aesenc128kl_u8:
14129 IID = Intrinsic::x86_aesenc128kl;
14130 break;
14131 case X86::BI__builtin_ia32_aesdec128kl_u8:
14132 IID = Intrinsic::x86_aesdec128kl;
14133 break;
14134 case X86::BI__builtin_ia32_aesenc256kl_u8:
14135 IID = Intrinsic::x86_aesenc256kl;
14136 break;
14137 case X86::BI__builtin_ia32_aesdec256kl_u8:
14138 IID = Intrinsic::x86_aesdec256kl;
14139 break;
14140 }
14141
14142 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
14143
14144 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14145 Ops[0]);
14146
14147 return Builder.CreateExtractValue(Call, 0);
14148 }
14149 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14150 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14151 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14152 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
14153 Intrinsic::ID IID;
14154 switch (BuiltinID) {
14155 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14156 IID = Intrinsic::x86_aesencwide128kl;
14157 break;
14158 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14159 IID = Intrinsic::x86_aesdecwide128kl;
14160 break;
14161 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14162 IID = Intrinsic::x86_aesencwide256kl;
14163 break;
14164 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
14165 IID = Intrinsic::x86_aesdecwide256kl;
14166 break;
14167 }
14168
14169 Value *InOps[9];
14170 InOps[0] = Ops[2];
14171 for (int i = 0; i != 8; ++i) {
14172 Value *Ptr = Builder.CreateConstGEP1_32(Ops[1], i);
14173 InOps[i + 1] = Builder.CreateAlignedLoad(Ptr, Align(16));
14174 }
14175
14176 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
14177
14178 for (int i = 0; i != 8; ++i) {
14179 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14180 Value *Ptr = Builder.CreateConstGEP1_32(Ops[0], i);
14181 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
14182 }
14183
14184 return Builder.CreateExtractValue(Call, 0);
14185 }
14186 }
14187 }
14188
14189 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
14190 const CallExpr *E) {
14191 SmallVector<Value*, 4> Ops;
14192
14193 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
14194 Ops.push_back(EmitScalarExpr(E->getArg(i)));
14195
14196 Intrinsic::ID ID = Intrinsic::not_intrinsic;
14197
14198 switch (BuiltinID) {
14199 default: return nullptr;
14200
14201 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
14202 // call __builtin_readcyclecounter.
14203 case PPC::BI__builtin_ppc_get_timebase:
14204 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
14205
14206 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
14207 case PPC::BI__builtin_altivec_lvx:
14208 case PPC::BI__builtin_altivec_lvxl:
14209 case PPC::BI__builtin_altivec_lvebx:
14210 case PPC::BI__builtin_altivec_lvehx:
14211 case PPC::BI__builtin_altivec_lvewx:
14212 case PPC::BI__builtin_altivec_lvsl:
14213 case PPC::BI__builtin_altivec_lvsr:
14214 case PPC::BI__builtin_vsx_lxvd2x:
14215 case PPC::BI__builtin_vsx_lxvw4x:
14216 case PPC::BI__builtin_vsx_lxvd2x_be:
14217 case PPC::BI__builtin_vsx_lxvw4x_be:
14218 case PPC::BI__builtin_vsx_lxvl:
14219 case PPC::BI__builtin_vsx_lxvll:
14220 {
14221 if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
14222 BuiltinID == PPC::BI__builtin_vsx_lxvll){
14223 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
14224 }else {
14225 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14226 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14227 Ops.pop_back();
14228 }
14229
14230 switch (BuiltinID) {
14231 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
14232 case PPC::BI__builtin_altivec_lvx:
14233 ID = Intrinsic::ppc_altivec_lvx;
14234 break;
14235 case PPC::BI__builtin_altivec_lvxl:
14236 ID = Intrinsic::ppc_altivec_lvxl;
14237 break;
14238 case PPC::BI__builtin_altivec_lvebx:
14239 ID = Intrinsic::ppc_altivec_lvebx;
14240 break;
14241 case PPC::BI__builtin_altivec_lvehx:
14242 ID = Intrinsic::ppc_altivec_lvehx;
14243 break;
14244 case PPC::BI__builtin_altivec_lvewx:
14245 ID = Intrinsic::ppc_altivec_lvewx;
14246 break;
14247 case PPC::BI__builtin_altivec_lvsl:
14248 ID = Intrinsic::ppc_altivec_lvsl;
14249 break;
14250 case PPC::BI__builtin_altivec_lvsr:
14251 ID = Intrinsic::ppc_altivec_lvsr;
14252 break;
14253 case PPC::BI__builtin_vsx_lxvd2x:
14254 ID = Intrinsic::ppc_vsx_lxvd2x;
14255 break;
14256 case PPC::BI__builtin_vsx_lxvw4x:
14257 ID = Intrinsic::ppc_vsx_lxvw4x;
14258 break;
14259 case PPC::BI__builtin_vsx_lxvd2x_be:
14260 ID = Intrinsic::ppc_vsx_lxvd2x_be;
14261 break;
14262 case PPC::BI__builtin_vsx_lxvw4x_be:
14263 ID = Intrinsic::ppc_vsx_lxvw4x_be;
14264 break;
14265 case PPC::BI__builtin_vsx_lxvl:
14266 ID = Intrinsic::ppc_vsx_lxvl;
14267 break;
14268 case PPC::BI__builtin_vsx_lxvll:
14269 ID = Intrinsic::ppc_vsx_lxvll;
14270 break;
14271 }
14272 llvm::Function *F = CGM.getIntrinsic(ID);
14273 return Builder.CreateCall(F, Ops, "");
14274 }
14275
14276 // vec_st, vec_xst_be
14277 case PPC::BI__builtin_altivec_stvx:
14278 case PPC::BI__builtin_altivec_stvxl:
14279 case PPC::BI__builtin_altivec_stvebx:
14280 case PPC::BI__builtin_altivec_stvehx:
14281 case PPC::BI__builtin_altivec_stvewx:
14282 case PPC::BI__builtin_vsx_stxvd2x:
14283 case PPC::BI__builtin_vsx_stxvw4x:
14284 case PPC::BI__builtin_vsx_stxvd2x_be:
14285 case PPC::BI__builtin_vsx_stxvw4x_be:
14286 case PPC::BI__builtin_vsx_stxvl:
14287 case PPC::BI__builtin_vsx_stxvll:
14288 {
14289 if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
14290 BuiltinID == PPC::BI__builtin_vsx_stxvll ){
14291 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14292 }else {
14293 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14294 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14295 Ops.pop_back();
14296 }
14297
14298 switch (BuiltinID) {
14299 default: llvm_unreachable("Unsupported st intrinsic!");
14300 case PPC::BI__builtin_altivec_stvx:
14301 ID = Intrinsic::ppc_altivec_stvx;
14302 break;
14303 case PPC::BI__builtin_altivec_stvxl:
14304 ID = Intrinsic::ppc_altivec_stvxl;
14305 break;
14306 case PPC::BI__builtin_altivec_stvebx:
14307 ID = Intrinsic::ppc_altivec_stvebx;
14308 break;
14309 case PPC::BI__builtin_altivec_stvehx:
14310 ID = Intrinsic::ppc_altivec_stvehx;
14311 break;
14312 case PPC::BI__builtin_altivec_stvewx:
14313 ID = Intrinsic::ppc_altivec_stvewx;
14314 break;
14315 case PPC::BI__builtin_vsx_stxvd2x:
14316 ID = Intrinsic::ppc_vsx_stxvd2x;
14317 break;
14318 case PPC::BI__builtin_vsx_stxvw4x:
14319 ID = Intrinsic::ppc_vsx_stxvw4x;
14320 break;
14321 case PPC::BI__builtin_vsx_stxvd2x_be:
14322 ID = Intrinsic::ppc_vsx_stxvd2x_be;
14323 break;
14324 case PPC::BI__builtin_vsx_stxvw4x_be:
14325 ID = Intrinsic::ppc_vsx_stxvw4x_be;
14326 break;
14327 case PPC::BI__builtin_vsx_stxvl:
14328 ID = Intrinsic::ppc_vsx_stxvl;
14329 break;
14330 case PPC::BI__builtin_vsx_stxvll:
14331 ID = Intrinsic::ppc_vsx_stxvll;
14332 break;
14333 }
14334 llvm::Function *F = CGM.getIntrinsic(ID);
14335 return Builder.CreateCall(F, Ops, "");
14336 }
14337 // Square root
14338 case PPC::BI__builtin_vsx_xvsqrtsp:
14339 case PPC::BI__builtin_vsx_xvsqrtdp: {
14340 llvm::Type *ResultType = ConvertType(E->getType());
14341 Value *X = EmitScalarExpr(E->getArg(0));
14342 if (Builder.getIsFPConstrained()) {
14343 llvm::Function *F = CGM.getIntrinsic(
14344 Intrinsic::experimental_constrained_sqrt, ResultType);
14345 return Builder.CreateConstrainedFPCall(F, X);
14346 } else {
14347 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14348 return Builder.CreateCall(F, X);
14349 }
14350 }
14351 // Count leading zeros
14352 case PPC::BI__builtin_altivec_vclzb:
14353 case PPC::BI__builtin_altivec_vclzh:
14354 case PPC::BI__builtin_altivec_vclzw:
14355 case PPC::BI__builtin_altivec_vclzd: {
14356 llvm::Type *ResultType = ConvertType(E->getType());
14357 Value *X = EmitScalarExpr(E->getArg(0));
14358 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14359 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14360 return Builder.CreateCall(F, {X, Undef});
14361 }
14362 case PPC::BI__builtin_altivec_vctzb:
14363 case PPC::BI__builtin_altivec_vctzh:
14364 case PPC::BI__builtin_altivec_vctzw:
14365 case PPC::BI__builtin_altivec_vctzd: {
14366 llvm::Type *ResultType = ConvertType(E->getType());
14367 Value *X = EmitScalarExpr(E->getArg(0));
14368 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14369 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14370 return Builder.CreateCall(F, {X, Undef});
14371 }
14372 case PPC::BI__builtin_altivec_vec_replace_elt:
14373 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
14374 // The third argument of vec_replace_elt and vec_replace_unaligned must
14375 // be a compile time constant and will be emitted as either a vinsw
14376 // or a vinsd instruction.
14377 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14378 assert(ArgCI &&
14379 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
14380 llvm::Type *ResultType = ConvertType(E->getType());
14381 llvm::Function *F = nullptr;
14382 Value *Call = nullptr;
14383 int64_t ConstArg = ArgCI->getSExtValue();
14384 unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
14385 bool Is32Bit = false;
14386 assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
14387 // The input to vec_replace_elt is an element index, not a byte index.
14388 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
14389 ConstArg *= ArgWidth / 8;
14390 if (ArgWidth == 32) {
14391 Is32Bit = true;
14392 // When the second argument is 32 bits, it can either be an integer or
14393 // a float. The vinsw intrinsic is used in this case.
14394 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
14395 // Fix the constant according to endianness.
14396 if (getTarget().isLittleEndian())
14397 ConstArg = 12 - ConstArg;
14398 } else {
14399 // When the second argument is 64 bits, it can either be a long long or
14400 // a double. The vinsd intrinsic is used in this case.
14401 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
14402 // Fix the constant for little endian.
14403 if (getTarget().isLittleEndian())
14404 ConstArg = 8 - ConstArg;
14405 }
14406 Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
14407 // Depending on ArgWidth, the input vector could be a float or a double.
14408 // If the input vector is a float type, bitcast the inputs to integers. Or,
14409 // if the input vector is a double, bitcast the inputs to 64-bit integers.
14410 if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
14411 Ops[0] = Builder.CreateBitCast(
14412 Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
14413 : llvm::FixedVectorType::get(Int64Ty, 2));
14414 Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
14415 }
14416 // Emit the call to vinsw or vinsd.
14417 Call = Builder.CreateCall(F, Ops);
14418 // Depending on the builtin, bitcast to the appropriate result type.
14419 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
14420 !Ops[1]->getType()->isIntegerTy())
14421 return Builder.CreateBitCast(Call, ResultType);
14422 else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
14423 Ops[1]->getType()->isIntegerTy())
14424 return Call;
14425 else
14426 return Builder.CreateBitCast(Call,
14427 llvm::FixedVectorType::get(Int8Ty, 16));
14428 }
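// Worked example: vec_replace_elt(v, x, 1) with 32-bit elements gives
// ConstArg = 1 * 4 = 4; on little-endian targets this is adjusted to
// 12 - 4 = 8, the byte position passed to the vinsw intrinsic.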
14429 case PPC::BI__builtin_altivec_vpopcntb:
14430 case PPC::BI__builtin_altivec_vpopcnth:
14431 case PPC::BI__builtin_altivec_vpopcntw:
14432 case PPC::BI__builtin_altivec_vpopcntd: {
14433 llvm::Type *ResultType = ConvertType(E->getType());
14434 Value *X = EmitScalarExpr(E->getArg(0));
14435 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14436 return Builder.CreateCall(F, X);
14437 }
14438 // Copy sign
14439 case PPC::BI__builtin_vsx_xvcpsgnsp:
14440 case PPC::BI__builtin_vsx_xvcpsgndp: {
14441 llvm::Type *ResultType = ConvertType(E->getType());
14442 Value *X = EmitScalarExpr(E->getArg(0));
14443 Value *Y = EmitScalarExpr(E->getArg(1));
14444 ID = Intrinsic::copysign;
14445 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14446 return Builder.CreateCall(F, {X, Y});
14447 }
14448 // Rounding/truncation
14449 case PPC::BI__builtin_vsx_xvrspip:
14450 case PPC::BI__builtin_vsx_xvrdpip:
14451 case PPC::BI__builtin_vsx_xvrdpim:
14452 case PPC::BI__builtin_vsx_xvrspim:
14453 case PPC::BI__builtin_vsx_xvrdpi:
14454 case PPC::BI__builtin_vsx_xvrspi:
14455 case PPC::BI__builtin_vsx_xvrdpic:
14456 case PPC::BI__builtin_vsx_xvrspic:
14457 case PPC::BI__builtin_vsx_xvrdpiz:
14458 case PPC::BI__builtin_vsx_xvrspiz: {
14459 llvm::Type *ResultType = ConvertType(E->getType());
14460 Value *X = EmitScalarExpr(E->getArg(0));
14461 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
14462 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
14463 ID = Builder.getIsFPConstrained()
14464 ? Intrinsic::experimental_constrained_floor
14465 : Intrinsic::floor;
14466 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
14467 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
14468 ID = Builder.getIsFPConstrained()
14469 ? Intrinsic::experimental_constrained_round
14470 : Intrinsic::round;
14471 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
14472 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
14473 ID = Builder.getIsFPConstrained()
14474 ? Intrinsic::experimental_constrained_rint
14475 : Intrinsic::rint;
14476 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
14477 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
14478 ID = Builder.getIsFPConstrained()
14479 ? Intrinsic::experimental_constrained_ceil
14480 : Intrinsic::ceil;
14481 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
14482 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
14483 ID = Builder.getIsFPConstrained()
14484 ? Intrinsic::experimental_constrained_trunc
14485 : Intrinsic::trunc;
14486 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14487 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
14488 : Builder.CreateCall(F, X);
14489 }
14490
14491 // Absolute value
14492 case PPC::BI__builtin_vsx_xvabsdp:
14493 case PPC::BI__builtin_vsx_xvabssp: {
14494 llvm::Type *ResultType = ConvertType(E->getType());
14495 Value *X = EmitScalarExpr(E->getArg(0));
14496 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
14497 return Builder.CreateCall(F, X);
14498 }
14499
14500 // FMA variations
14501 case PPC::BI__builtin_vsx_xvmaddadp:
14502 case PPC::BI__builtin_vsx_xvmaddasp:
14503 case PPC::BI__builtin_vsx_xvnmaddadp:
14504 case PPC::BI__builtin_vsx_xvnmaddasp:
14505 case PPC::BI__builtin_vsx_xvmsubadp:
14506 case PPC::BI__builtin_vsx_xvmsubasp:
14507 case PPC::BI__builtin_vsx_xvnmsubadp:
14508 case PPC::BI__builtin_vsx_xvnmsubasp: {
14509 llvm::Type *ResultType = ConvertType(E->getType());
14510 Value *X = EmitScalarExpr(E->getArg(0));
14511 Value *Y = EmitScalarExpr(E->getArg(1));
14512 Value *Z = EmitScalarExpr(E->getArg(2));
14513 llvm::Function *F;
14514 if (Builder.getIsFPConstrained())
14515 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14516 else
14517 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14518 switch (BuiltinID) {
14519 case PPC::BI__builtin_vsx_xvmaddadp:
14520 case PPC::BI__builtin_vsx_xvmaddasp:
14521 if (Builder.getIsFPConstrained())
14522 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
14523 else
14524 return Builder.CreateCall(F, {X, Y, Z});
14525 case PPC::BI__builtin_vsx_xvnmaddadp:
14526 case PPC::BI__builtin_vsx_xvnmaddasp:
14527 if (Builder.getIsFPConstrained())
14528 return Builder.CreateFNeg(
14529 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
14530 else
14531 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
14532 case PPC::BI__builtin_vsx_xvmsubadp:
14533 case PPC::BI__builtin_vsx_xvmsubasp:
14534 if (Builder.getIsFPConstrained())
14535 return Builder.CreateConstrainedFPCall(
14536 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14537 else
14538 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14539 case PPC::BI__builtin_vsx_xvnmsubadp:
14540 case PPC::BI__builtin_vsx_xvnmsubasp:
14541 if (Builder.getIsFPConstrained())
14542 return Builder.CreateFNeg(
14543 Builder.CreateConstrainedFPCall(
14544 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14545 "neg");
14546 else
14547 return Builder.CreateFNeg(
14548 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14549 "neg");
14550 }
14551 llvm_unreachable("Unknown FMA operation");
14552 return nullptr; // Suppress no-return warning
14553 }
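// Rough sketch (assumed lowering): xvnmsubadp(X, Y, Z) is emitted as
//   fneg(call @llvm.fma.v2f64(X, Y, fneg(Z)))
// i.e. -(X*Y - Z); the constrained variant substitutes
// llvm.experimental.constrained.fma.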
14554
14555 case PPC::BI__builtin_vsx_insertword: {
14556 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
14557
14558 // Third argument is a compile time constant int. It must be clamped
14559 // to the range [0, 12].
14560 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14561 assert(ArgCI &&
14562 "Third arg to xxinsertw intrinsic must be constant integer");
14563 const int64_t MaxIndex = 12;
14564 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14565
14566 // The builtin semantics don't exactly match the xxinsertw instruction's
14567 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
14568 // word from the first argument, and inserts it in the second argument. The
14569 // instruction extracts the word from its second input register and inserts
14570 // it into its first input register, so swap the first and second arguments.
14571 std::swap(Ops[0], Ops[1]);
14572
14573 // Need to cast the second argument from a vector of unsigned int to a
14574 // vector of long long.
14575 Ops[1] =
14576 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14577
14578 if (getTarget().isLittleEndian()) {
14579 // Reverse the double words in the vector we will extract from.
14580 Ops[0] =
14581 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14582 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
14583
14584 // Reverse the index.
14585 Index = MaxIndex - Index;
14586 }
14587
14588 // Intrinsic expects the first arg to be a vector of int.
14589 Ops[0] =
14590 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14591 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
14592 return Builder.CreateCall(F, Ops);
14593 }
14594
14595 case PPC::BI__builtin_vsx_extractuword: {
14596 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
14597
14598 // Intrinsic expects the first argument to be a vector of doublewords.
14599 Ops[0] =
14600 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14601
14602 // The second argument is a compile time constant int that needs to
14603 // be clamped to the range [0, 12].
14604 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
14605 assert(ArgCI &&
14606 "Second Arg to xxextractuw intrinsic must be a constant integer!");
14607 const int64_t MaxIndex = 12;
14608 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14609
14610 if (getTarget().isLittleEndian()) {
14611 // Reverse the index.
14612 Index = MaxIndex - Index;
14613 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14614
14615 // Emit the call, then reverse the double words of the results vector.
14616 Value *Call = Builder.CreateCall(F, Ops);
14617
14618 Value *ShuffleCall =
14619 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
14620 return ShuffleCall;
14621 } else {
14622 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14623 return Builder.CreateCall(F, Ops);
14624 }
14625 }
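// Worked example: __builtin_vsx_extractuword(v, 4) on a little-endian
// target reverses the index to 12 - 4 = 8 and then swaps the two
// doublewords of the result, so both endiannesses observe the same word.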
14626
14627 case PPC::BI__builtin_vsx_xxpermdi: {
14628 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14629 assert(ArgCI && "Third arg must be constant integer!")((ArgCI && "Third arg must be constant integer!") ? static_cast
<void> (0) : __assert_fail ("ArgCI && \"Third arg must be constant integer!\""
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 14629, __PRETTY_FUNCTION__))
;
14630
14631 unsigned Index = ArgCI->getZExtValue();
14632 Ops[0] =
14633 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14634 Ops[1] =
14635 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14636
14637 // Account for endianness by treating this as just a shuffle. So we use the
14638 // same indices for both LE and BE in order to produce expected results in
14639 // both cases.
14640 int ElemIdx0 = (Index & 2) >> 1;
14641 int ElemIdx1 = 2 + (Index & 1);
14642
14643 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
14644 Value *ShuffleCall =
14645 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14646 QualType BIRetType = E->getType();
14647 auto RetTy = ConvertType(BIRetType);
14648 return Builder.CreateBitCast(ShuffleCall, RetTy);
14649 }
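The selector-to-shuffle-mask arithmetic above enumerates as follows (a worked example; the mask indexes the two concatenated <2 x i64> operands):

    // Index = 0 -> ShuffleElts = {0, 2}
    // Index = 1 -> ShuffleElts = {0, 3}
    // Index = 2 -> ShuffleElts = {1, 2}
    // Index = 3 -> ShuffleElts = {1, 3}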
14650
14651 case PPC::BI__builtin_vsx_xxsldwi: {
14652 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14653    assert(ArgCI && "Third argument must be a compile time constant");
14654 unsigned Index = ArgCI->getZExtValue() & 0x3;
14655 Ops[0] =
14656 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14657 Ops[1] =
14658 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
14659
14660 // Create a shuffle mask
14661 int ElemIdx0;
14662 int ElemIdx1;
14663 int ElemIdx2;
14664 int ElemIdx3;
14665 if (getTarget().isLittleEndian()) {
14666 // Little endian element N comes from element 8+N-Index of the
14667 // concatenated wide vector (of course, using modulo arithmetic on
14668 // the total number of elements).
14669 ElemIdx0 = (8 - Index) % 8;
14670 ElemIdx1 = (9 - Index) % 8;
14671 ElemIdx2 = (10 - Index) % 8;
14672 ElemIdx3 = (11 - Index) % 8;
14673 } else {
14674 // Big endian ElemIdx<N> = Index + N
14675 ElemIdx0 = Index;
14676 ElemIdx1 = Index + 1;
14677 ElemIdx2 = Index + 2;
14678 ElemIdx3 = Index + 3;
14679 }
14680
14681 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
14682 Value *ShuffleCall =
14683 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14684 QualType BIRetType = E->getType();
14685 auto RetTy = ConvertType(BIRetType);
14686 return Builder.CreateBitCast(ShuffleCall, RetTy);
14687 }
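A worked instance of the xxsldwi mask computation above, for Index = 1 over the concatenated eight-word vector {A0..A3, B0..B3}:

    // big endian:    ShuffleElts = {1, 2, 3, 4}   // Index + N
    // little endian: ShuffleElts = {7, 0, 1, 2}   // (8 + N - Index) % 8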
14688
14689 case PPC::BI__builtin_pack_vector_int128: {
14690 bool isLittleEndian = getTarget().isLittleEndian();
14691 Value *UndefValue =
14692 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
14693 Value *Res = Builder.CreateInsertElement(
14694 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
14695 Res = Builder.CreateInsertElement(Res, Ops[1],
14696 (uint64_t)(isLittleEndian ? 0 : 1));
14697 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
14698 }
14699
14700 case PPC::BI__builtin_unpack_vector_int128: {
14701 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
14702 Value *Unpacked = Builder.CreateBitCast(
14703 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
14704
14705 if (getTarget().isLittleEndian())
14706 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
14707
14708 return Builder.CreateExtractElement(Unpacked, Index);
14709 }
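To summarize the lane placement in the pack/unpack cases above (derived directly from the insert/extract indices in the code):

    // pack:   little-endian puts Ops[0] in element 1 and Ops[1] in element 0;
    //         big-endian uses elements 0 and 1 respectively.
    // unpack: little-endian mirrors the requested Index (1 - Index), so Index = 0
    //         reads element 1 of the bitcast two-element vector.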
14710 }
14711}
14712
14713namespace {
14714 // If \p E is not a null pointer, insert an address space cast to match the return
14715// type of \p E if necessary.
14716Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
14717 const CallExpr *E = nullptr) {
14718 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
14719 auto *Call = CGF.Builder.CreateCall(F);
14720 Call->addAttribute(
14721 AttributeList::ReturnIndex,
14722 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
14723 Call->addAttribute(AttributeList::ReturnIndex,
14724 Attribute::getWithAlignment(Call->getContext(), Align(4)));
14725 if (!E)
14726 return Call;
14727 QualType BuiltinRetType = E->getType();
14728 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
14729 if (RetTy == Call->getType())
14730 return Call;
14731 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
14732}
14733
14734 // \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
14735Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
14736 const unsigned XOffset = 4;
14737 auto *DP = EmitAMDGPUDispatchPtr(CGF);
14738 // Indexing the HSA kernel_dispatch_packet struct.
14739 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
14740 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
14741 auto *DstTy =
14742 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
14743 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
14744 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
14745 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
14746 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
14747 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
14748 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
14749 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
14750 llvm::MDNode::get(CGF.getLLVMContext(), None));
14751 return LD;
14752}
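The XOffset + Index * 2 arithmetic above walks the leading 16-bit fields of the dispatch packet. A sketch of the assumed layout (a hypothetical mirror of the first bytes of HSA's hsa_kernel_dispatch_packet_t, shown only for orientation):

    struct DispatchPacketPrefix {   // hypothetical; field names follow the HSA spec
      uint16_t header;              // byte offset 0
      uint16_t setup;               // byte offset 2
      uint16_t workgroup_size_x;    // byte offset 4 = XOffset + 0 * 2
      uint16_t workgroup_size_y;    // byte offset 6 = XOffset + 1 * 2
      uint16_t workgroup_size_z;    // byte offset 8 = XOffset + 2 * 2
    };

The i16 load is then annotated with a [1, max-workgroup-size + 1) range and marked invariant, as the function above shows.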
14753} // namespace
14754
14755// For processing memory ordering and memory scope arguments of various
14756// amdgcn builtins.
14757 // \p Order takes a C++11-compatible memory-ordering specifier and converts
14758// it into LLVM's memory ordering specifier using atomic C ABI, and writes
14759// to \p AO. \p Scope takes a const char * and converts it into AMDGCN
14760// specific SyncScopeID and writes it to \p SSID.
14761bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
14762 llvm::AtomicOrdering &AO,
14763 llvm::SyncScope::ID &SSID) {
14764 if (isa<llvm::ConstantInt>(Order)) {
3. Assuming 'Order' is not a 'ConstantInt'
4. Taking false branch
14765 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
14766
14767 // Map C11/C++11 memory ordering to LLVM memory ordering
14768 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
14769 case llvm::AtomicOrderingCABI::acquire:
14770 AO = llvm::AtomicOrdering::Acquire;
14771 break;
14772 case llvm::AtomicOrderingCABI::release:
14773 AO = llvm::AtomicOrdering::Release;
14774 break;
14775 case llvm::AtomicOrderingCABI::acq_rel:
14776 AO = llvm::AtomicOrdering::AcquireRelease;
14777 break;
14778 case llvm::AtomicOrderingCABI::seq_cst:
14779 AO = llvm::AtomicOrdering::SequentiallyConsistent;
14780 break;
14781 case llvm::AtomicOrderingCABI::consume:
14782 case llvm::AtomicOrderingCABI::relaxed:
14783 break;
14784 }
14785
14786 StringRef scp;
14787 llvm::getConstantStringInfo(Scope, scp);
14788 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
14789 return true;
14790 }
14791 return false;
5. Returning zero, which participates in a condition later
14792}
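A usage sketch for this helper, assuming the builtin spelling used by the fence case later in this function:

    // Constant ordering: the helper returns true and the caller emits a fence.
    __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
    // If the ordering operand does not fold to a ConstantInt, the helper returns
    // false and the caller falls through -- the path the analyzer follows below.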
14793
14794Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
14795 const CallExpr *E) {
14796 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
14797 llvm::SyncScope::ID SSID;
14798 switch (BuiltinID) {
1. Control jumps to 'case BI__builtin_amdgcn_fence:' at line 15031
14799 case AMDGPU::BI__builtin_amdgcn_div_scale:
14800 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
14801    // Translate from the intrinsic's struct return to the builtin's out
14802 // argument.
14803
14804 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
14805
14806 llvm::Value *X = EmitScalarExpr(E->getArg(0));
14807 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
14808 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
14809
14810 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
14811 X->getType());
14812
14813 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
14814
14815 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
14816 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
14817
14818 llvm::Type *RealFlagType
14819 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
14820
14821 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
14822 Builder.CreateStore(FlagExt, FlagOutPtr);
14823 return Result;
14824 }
14825 case AMDGPU::BI__builtin_amdgcn_div_fmas:
14826 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
14827 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14828 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14829 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14830 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
14831
14832 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
14833 Src0->getType());
14834 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
14835 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
14836 }
14837
14838 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
14839 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
14840 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
14841 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
14842 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
14843 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
14844 llvm::SmallVector<llvm::Value *, 6> Args;
14845 for (unsigned I = 0; I != E->getNumArgs(); ++I)
14846 Args.push_back(EmitScalarExpr(E->getArg(I)));
14847    assert(Args.size() == 5 || Args.size() == 6);
14848 if (Args.size() == 5)
14849 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
14850 Function *F =
14851 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
14852 return Builder.CreateCall(F, Args);
14853 }
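The arity check above distinguishes the two builtins; both are funneled into the six-operand update_dpp intrinsic (the builtin argument lists below are assumptions, shown only to illustrate the undef-prepend step):

    // 5 arguments: no 'old' value, so an undef is prepended before the call.
    v = __builtin_amdgcn_mov_dpp(src, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
    // 6 arguments: 'old' is supplied explicitly and passed through unchanged.
    v = __builtin_amdgcn_update_dpp(old, src, dpp_ctrl, row_mask, bank_mask, bound_ctrl);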
14854 case AMDGPU::BI__builtin_amdgcn_div_fixup:
14855 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
14856 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
14857 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
14858 case AMDGPU::BI__builtin_amdgcn_trig_preop:
14859 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
14860 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
14861 case AMDGPU::BI__builtin_amdgcn_rcp:
14862 case AMDGPU::BI__builtin_amdgcn_rcpf:
14863 case AMDGPU::BI__builtin_amdgcn_rcph:
14864 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
14865 case AMDGPU::BI__builtin_amdgcn_sqrt:
14866 case AMDGPU::BI__builtin_amdgcn_sqrtf:
14867 case AMDGPU::BI__builtin_amdgcn_sqrth:
14868 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
14869 case AMDGPU::BI__builtin_amdgcn_rsq:
14870 case AMDGPU::BI__builtin_amdgcn_rsqf:
14871 case AMDGPU::BI__builtin_amdgcn_rsqh:
14872 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
14873 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
14874 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
14875 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
14876 case AMDGPU::BI__builtin_amdgcn_sinf:
14877 case AMDGPU::BI__builtin_amdgcn_sinh:
14878 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
14879 case AMDGPU::BI__builtin_amdgcn_cosf:
14880 case AMDGPU::BI__builtin_amdgcn_cosh:
14881 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
14882 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
14883 return EmitAMDGPUDispatchPtr(*this, E);
14884 case AMDGPU::BI__builtin_amdgcn_log_clampf:
14885 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
14886 case AMDGPU::BI__builtin_amdgcn_ldexp:
14887 case AMDGPU::BI__builtin_amdgcn_ldexpf:
14888 case AMDGPU::BI__builtin_amdgcn_ldexph:
14889 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
14890 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
14891 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
14892 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
14893 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
14894 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
14895 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
14896 Value *Src0 = EmitScalarExpr(E->getArg(0));
14897 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
14898 { Builder.getInt32Ty(), Src0->getType() });
14899 return Builder.CreateCall(F, Src0);
14900 }
14901 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
14902 Value *Src0 = EmitScalarExpr(E->getArg(0));
14903 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
14904 { Builder.getInt16Ty(), Src0->getType() });
14905 return Builder.CreateCall(F, Src0);
14906 }
14907 case AMDGPU::BI__builtin_amdgcn_fract:
14908 case AMDGPU::BI__builtin_amdgcn_fractf:
14909 case AMDGPU::BI__builtin_amdgcn_fracth:
14910 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
14911 case AMDGPU::BI__builtin_amdgcn_lerp:
14912 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
14913 case AMDGPU::BI__builtin_amdgcn_ubfe:
14914 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
14915 case AMDGPU::BI__builtin_amdgcn_sbfe:
14916 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
14917 case AMDGPU::BI__builtin_amdgcn_uicmp:
14918 case AMDGPU::BI__builtin_amdgcn_uicmpl:
14919 case AMDGPU::BI__builtin_amdgcn_sicmp:
14920 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
14921 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14922 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14923 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14924
14925 // FIXME-GFX10: How should 32 bit mask be handled?
14926 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
14927 { Builder.getInt64Ty(), Src0->getType() });
14928 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14929 }
14930 case AMDGPU::BI__builtin_amdgcn_fcmp:
14931 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
14932 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14933 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14934 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14935
14936 // FIXME-GFX10: How should 32 bit mask be handled?
14937 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
14938 { Builder.getInt64Ty(), Src0->getType() });
14939 return Builder.CreateCall(F, { Src0, Src1, Src2 });
14940 }
14941 case AMDGPU::BI__builtin_amdgcn_class:
14942 case AMDGPU::BI__builtin_amdgcn_classf:
14943 case AMDGPU::BI__builtin_amdgcn_classh:
14944 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
14945 case AMDGPU::BI__builtin_amdgcn_fmed3f:
14946 case AMDGPU::BI__builtin_amdgcn_fmed3h:
14947 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
14948 case AMDGPU::BI__builtin_amdgcn_ds_append:
14949 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
14950 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
14951 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
14952 Value *Src0 = EmitScalarExpr(E->getArg(0));
14953 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
14954 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
14955 }
14956 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
14957 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
14958 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
14959 Intrinsic::ID Intrin;
14960 switch (BuiltinID) {
14961 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
14962 Intrin = Intrinsic::amdgcn_ds_fadd;
14963 break;
14964 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
14965 Intrin = Intrinsic::amdgcn_ds_fmin;
14966 break;
14967 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
14968 Intrin = Intrinsic::amdgcn_ds_fmax;
14969 break;
14970 }
14971 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
14972 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
14973 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
14974 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
14975 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
14976 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
14977 llvm::FunctionType *FTy = F->getFunctionType();
14978 llvm::Type *PTy = FTy->getParamType(0);
14979 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
14980 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
14981 }
14982 case AMDGPU::BI__builtin_amdgcn_read_exec: {
14983 CallInst *CI = cast<CallInst>(
14984 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
14985 CI->setConvergent();
14986 return CI;
14987 }
14988 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
14989 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
14990 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
14991 "exec_lo" : "exec_hi";
14992 CallInst *CI = cast<CallInst>(
14993 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
14994 CI->setConvergent();
14995 return CI;
14996 }
14997 // amdgcn workitem
14998 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
14999 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
15000 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
15001 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
15002 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
15003 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
15004
15005 // amdgcn workgroup size
15006 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
15007 return EmitAMDGPUWorkGroupSize(*this, 0);
15008 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
15009 return EmitAMDGPUWorkGroupSize(*this, 1);
15010 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
15011 return EmitAMDGPUWorkGroupSize(*this, 2);
15012
15013 // r600 intrinsics
15014 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
15015 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
15016 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
15017 case AMDGPU::BI__builtin_r600_read_tidig_x:
15018 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
15019 case AMDGPU::BI__builtin_r600_read_tidig_y:
15020 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
15021 case AMDGPU::BI__builtin_r600_read_tidig_z:
15022 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
15023 case AMDGPU::BI__builtin_amdgcn_alignbit: {
15024 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15025 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15026 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15027 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
15028 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15029 }
15030
15031 case AMDGPU::BI__builtin_amdgcn_fence: {
15032 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
2. Calling 'CodeGenFunction::ProcessOrderScopeAMDGCN'
6. Returning from 'CodeGenFunction::ProcessOrderScopeAMDGCN'
7. Taking false branch
15033 EmitScalarExpr(E->getArg(1)), AO, SSID))
15034 return Builder.CreateFence(AO, SSID);
15035    LLVM_FALLTHROUGH;
15036 }
15037 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15038 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15039 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15040 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
15041 unsigned BuiltinAtomicOp;
8. 'BuiltinAtomicOp' declared without an initial value
15042 llvm::Type *ResultType = ConvertType(E->getType());
15043
15044 switch (BuiltinID) {
9. 'Default' branch taken. Execution continues on line 15055
15045 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15046 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15047 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
15048 break;
15049 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15050 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
15051 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
15052 break;
15053 }
15054
15055 Value *Ptr = EmitScalarExpr(E->getArg(0));
15056 Value *Val = EmitScalarExpr(E->getArg(1));
15057
15058 llvm::Function *F =
15059 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
10. 1st function call argument is an uninitialized value
15060
15061 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
15062 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
15063
15064      // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
15065      // scope as unsigned values.
15066 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
15067 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
15068
15069 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
15070 bool Volatile =
15071 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
15072 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
15073
15074 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
15075 }
15076    LLVM_FALLTHROUGH;
15077 }
15078 default:
15079 return nullptr;
15080 }
15081}
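The warning above is reached because the inner switch has no default case: if control falls through from the fence case (step 7), BuiltinAtomicOp is still uninitialized at the CGM.getIntrinsic call. A possible remedy, sketched here purely for illustration and not taken from any actual upstream patch:

    unsigned BuiltinAtomicOp = Intrinsic::not_intrinsic;  // defined fallback value
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
    case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
      BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
      break;
    case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
    case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
      BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
      break;
    default:
      // Reaching here would mean the fence fall-through path was taken with a
      // non-constant ordering argument; whether that can actually happen depends
      // on Sema checks the analyzer does not model.
      llvm_unreachable("unexpected builtin in atomic inc/dec lowering");
    }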
15082
15083/// Handle a SystemZ function in which the final argument is a pointer
15084/// to an int that receives the post-instruction CC value. At the LLVM level
15085/// this is represented as a function that returns a {result, cc} pair.
15086static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
15087 unsigned IntrinsicID,
15088 const CallExpr *E) {
15089 unsigned NumArgs = E->getNumArgs() - 1;
15090 SmallVector<Value *, 8> Args(NumArgs);
15091 for (unsigned I = 0; I < NumArgs; ++I)
15092 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
15093 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
15094 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
15095 Value *Call = CGF.Builder.CreateCall(F, Args);
15096 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
15097 CGF.Builder.CreateStore(CC, CCPtr);
15098 return CGF.Builder.CreateExtractValue(Call, 0);
15099}
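A usage sketch of the {result, cc} convention (the builtin name and its vector element types are assumptions taken from the INTRINSIC_WITH_CC list further below):

    // vector signed char a, b;  int cc;
    // vector signed char eq = __builtin_s390_vceqbs(a, b, &cc);
    // The lowering above calls the intrinsic, stores extractvalue {..., 1} (the
    // condition code) through &cc, and returns extractvalue {..., 0} as the result.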
15100
15101Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
15102 const CallExpr *E) {
15103 switch (BuiltinID) {
15104 case SystemZ::BI__builtin_tbegin: {
15105 Value *TDB = EmitScalarExpr(E->getArg(0));
15106 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15107 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
15108 return Builder.CreateCall(F, {TDB, Control});
15109 }
15110 case SystemZ::BI__builtin_tbegin_nofloat: {
15111 Value *TDB = EmitScalarExpr(E->getArg(0));
15112 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15113 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
15114 return Builder.CreateCall(F, {TDB, Control});
15115 }
15116 case SystemZ::BI__builtin_tbeginc: {
15117 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
15118 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
15119 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
15120 return Builder.CreateCall(F, {TDB, Control});
15121 }
15122 case SystemZ::BI__builtin_tabort: {
15123 Value *Data = EmitScalarExpr(E->getArg(0));
15124 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
15125 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
15126 }
15127 case SystemZ::BI__builtin_non_tx_store: {
15128 Value *Address = EmitScalarExpr(E->getArg(0));
15129 Value *Data = EmitScalarExpr(E->getArg(1));
15130 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
15131 return Builder.CreateCall(F, {Data, Address});
15132 }
15133
15134 // Vector builtins. Note that most vector builtins are mapped automatically
15135 // to target-specific LLVM intrinsics. The ones handled specially here can
15136 // be represented via standard LLVM IR, which is preferable to enable common
15137 // LLVM optimizations.
15138
15139 case SystemZ::BI__builtin_s390_vpopctb:
15140 case SystemZ::BI__builtin_s390_vpopcth:
15141 case SystemZ::BI__builtin_s390_vpopctf:
15142 case SystemZ::BI__builtin_s390_vpopctg: {
15143 llvm::Type *ResultType = ConvertType(E->getType());
15144 Value *X = EmitScalarExpr(E->getArg(0));
15145 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15146 return Builder.CreateCall(F, X);
15147 }
15148
15149 case SystemZ::BI__builtin_s390_vclzb:
15150 case SystemZ::BI__builtin_s390_vclzh:
15151 case SystemZ::BI__builtin_s390_vclzf:
15152 case SystemZ::BI__builtin_s390_vclzg: {
15153 llvm::Type *ResultType = ConvertType(E->getType());
15154 Value *X = EmitScalarExpr(E->getArg(0));
15155 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15156 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
15157 return Builder.CreateCall(F, {X, Undef});
15158 }
15159
15160 case SystemZ::BI__builtin_s390_vctzb:
15161 case SystemZ::BI__builtin_s390_vctzh:
15162 case SystemZ::BI__builtin_s390_vctzf:
15163 case SystemZ::BI__builtin_s390_vctzg: {
15164 llvm::Type *ResultType = ConvertType(E->getType());
15165 Value *X = EmitScalarExpr(E->getArg(0));
15166 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15167 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15168 return Builder.CreateCall(F, {X, Undef});
15169 }
15170
15171 case SystemZ::BI__builtin_s390_vfsqsb:
15172 case SystemZ::BI__builtin_s390_vfsqdb: {
15173 llvm::Type *ResultType = ConvertType(E->getType());
15174 Value *X = EmitScalarExpr(E->getArg(0));
15175 if (Builder.getIsFPConstrained()) {
15176 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
15177 return Builder.CreateConstrainedFPCall(F, { X });
15178 } else {
15179 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15180 return Builder.CreateCall(F, X);
15181 }
15182 }
15183 case SystemZ::BI__builtin_s390_vfmasb:
15184 case SystemZ::BI__builtin_s390_vfmadb: {
15185 llvm::Type *ResultType = ConvertType(E->getType());
15186 Value *X = EmitScalarExpr(E->getArg(0));
15187 Value *Y = EmitScalarExpr(E->getArg(1));
15188 Value *Z = EmitScalarExpr(E->getArg(2));
15189 if (Builder.getIsFPConstrained()) {
15190 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15191 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15192 } else {
15193 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15194 return Builder.CreateCall(F, {X, Y, Z});
15195 }
15196 }
15197 case SystemZ::BI__builtin_s390_vfmssb:
15198 case SystemZ::BI__builtin_s390_vfmsdb: {
15199 llvm::Type *ResultType = ConvertType(E->getType());
15200 Value *X = EmitScalarExpr(E->getArg(0));
15201 Value *Y = EmitScalarExpr(E->getArg(1));
15202 Value *Z = EmitScalarExpr(E->getArg(2));
15203 if (Builder.getIsFPConstrained()) {
15204 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15205 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15206 } else {
15207 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15208 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15209 }
15210 }
15211 case SystemZ::BI__builtin_s390_vfnmasb:
15212 case SystemZ::BI__builtin_s390_vfnmadb: {
15213 llvm::Type *ResultType = ConvertType(E->getType());
15214 Value *X = EmitScalarExpr(E->getArg(0));
15215 Value *Y = EmitScalarExpr(E->getArg(1));
15216 Value *Z = EmitScalarExpr(E->getArg(2));
15217 if (Builder.getIsFPConstrained()) {
15218 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15219 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15220 } else {
15221 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15222 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15223 }
15224 }
15225 case SystemZ::BI__builtin_s390_vfnmssb:
15226 case SystemZ::BI__builtin_s390_vfnmsdb: {
15227 llvm::Type *ResultType = ConvertType(E->getType());
15228 Value *X = EmitScalarExpr(E->getArg(0));
15229 Value *Y = EmitScalarExpr(E->getArg(1));
15230 Value *Z = EmitScalarExpr(E->getArg(2));
15231 if (Builder.getIsFPConstrained()) {
15232 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15233 Value *NegZ = Builder.CreateFNeg(Z, "sub");
15234 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
15235 } else {
15236 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15237 Value *NegZ = Builder.CreateFNeg(Z, "neg");
15238 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
15239 }
15240 }
15241 case SystemZ::BI__builtin_s390_vflpsb:
15242 case SystemZ::BI__builtin_s390_vflpdb: {
15243 llvm::Type *ResultType = ConvertType(E->getType());
15244 Value *X = EmitScalarExpr(E->getArg(0));
15245 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15246 return Builder.CreateCall(F, X);
15247 }
15248 case SystemZ::BI__builtin_s390_vflnsb:
15249 case SystemZ::BI__builtin_s390_vflndb: {
15250 llvm::Type *ResultType = ConvertType(E->getType());
15251 Value *X = EmitScalarExpr(E->getArg(0));
15252 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15253 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
15254 }
15255 case SystemZ::BI__builtin_s390_vfisb:
15256 case SystemZ::BI__builtin_s390_vfidb: {
15257 llvm::Type *ResultType = ConvertType(E->getType());
15258 Value *X = EmitScalarExpr(E->getArg(0));
15259 // Constant-fold the M4 and M5 mask arguments.
15260 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
15261 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15262    // Check whether this instance can be represented via an LLVM standard
15263 // intrinsic. We only support some combinations of M4 and M5.
15264 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15265 Intrinsic::ID CI;
15266 switch (M4.getZExtValue()) {
15267 default: break;
15268 case 0: // IEEE-inexact exception allowed
15269 switch (M5.getZExtValue()) {
15270 default: break;
15271 case 0: ID = Intrinsic::rint;
15272 CI = Intrinsic::experimental_constrained_rint; break;
15273 }
15274 break;
15275 case 4: // IEEE-inexact exception suppressed
15276 switch (M5.getZExtValue()) {
15277 default: break;
15278 case 0: ID = Intrinsic::nearbyint;
15279 CI = Intrinsic::experimental_constrained_nearbyint; break;
15280 case 1: ID = Intrinsic::round;
15281 CI = Intrinsic::experimental_constrained_round; break;
15282 case 5: ID = Intrinsic::trunc;
15283 CI = Intrinsic::experimental_constrained_trunc; break;
15284 case 6: ID = Intrinsic::ceil;
15285 CI = Intrinsic::experimental_constrained_ceil; break;
15286 case 7: ID = Intrinsic::floor;
15287 CI = Intrinsic::experimental_constrained_floor; break;
15288 }
15289 break;
15290 }
15291 if (ID != Intrinsic::not_intrinsic) {
15292 if (Builder.getIsFPConstrained()) {
15293 Function *F = CGM.getIntrinsic(CI, ResultType);
15294 return Builder.CreateConstrainedFPCall(F, X);
15295 } else {
15296 Function *F = CGM.getIntrinsic(ID, ResultType);
15297 return Builder.CreateCall(F, X);
15298 }
15299 }
15300 switch (BuiltinID) { // FIXME: constrained version?
15301 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
15302 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
15303    default: llvm_unreachable("Unknown BuiltinID");
15304 }
15305 Function *F = CGM.getIntrinsic(ID);
15306 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15307 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
15308 return Builder.CreateCall(F, {X, M4Value, M5Value});
15309 }
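As a concrete instance of the mask mapping above (the call spelling is assumed from the builtin name):

    // vector double x = ...;
    // vector double r = __builtin_s390_vfidb(x, /*M4=*/4, /*M5=*/7);
    // M4 = 4 (inexact suppressed) with M5 = 7 selects llvm.floor (or its
    // constrained variant); unlisted (M4, M5) pairs fall back to s390_vfidb.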
15310 case SystemZ::BI__builtin_s390_vfmaxsb:
15311 case SystemZ::BI__builtin_s390_vfmaxdb: {
15312 llvm::Type *ResultType = ConvertType(E->getType());
15313 Value *X = EmitScalarExpr(E->getArg(0));
15314 Value *Y = EmitScalarExpr(E->getArg(1));
15315 // Constant-fold the M4 mask argument.
15316 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15317    // Check whether this instance can be represented via an LLVM standard
15318 // intrinsic. We only support some values of M4.
15319 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15320 Intrinsic::ID CI;
15321 switch (M4.getZExtValue()) {
15322 default: break;
15323 case 4: ID = Intrinsic::maxnum;
15324 CI = Intrinsic::experimental_constrained_maxnum; break;
15325 }
15326 if (ID != Intrinsic::not_intrinsic) {
15327 if (Builder.getIsFPConstrained()) {
15328 Function *F = CGM.getIntrinsic(CI, ResultType);
15329 return Builder.CreateConstrainedFPCall(F, {X, Y});
15330 } else {
15331 Function *F = CGM.getIntrinsic(ID, ResultType);
15332 return Builder.CreateCall(F, {X, Y});
15333 }
15334 }
15335 switch (BuiltinID) {
15336 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
15337 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
15338    default: llvm_unreachable("Unknown BuiltinID");
15339 }
15340 Function *F = CGM.getIntrinsic(ID);
15341 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15342 return Builder.CreateCall(F, {X, Y, M4Value});
15343 }
15344 case SystemZ::BI__builtin_s390_vfminsb:
15345 case SystemZ::BI__builtin_s390_vfmindb: {
15346 llvm::Type *ResultType = ConvertType(E->getType());
15347 Value *X = EmitScalarExpr(E->getArg(0));
15348 Value *Y = EmitScalarExpr(E->getArg(1));
15349 // Constant-fold the M4 mask argument.
15350 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15351    // Check whether this instance can be represented via an LLVM standard
15352 // intrinsic. We only support some values of M4.
15353 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15354 Intrinsic::ID CI;
15355 switch (M4.getZExtValue()) {
15356 default: break;
15357 case 4: ID = Intrinsic::minnum;
15358 CI = Intrinsic::experimental_constrained_minnum; break;
15359 }
15360 if (ID != Intrinsic::not_intrinsic) {
15361 if (Builder.getIsFPConstrained()) {
15362 Function *F = CGM.getIntrinsic(CI, ResultType);
15363 return Builder.CreateConstrainedFPCall(F, {X, Y});
15364 } else {
15365 Function *F = CGM.getIntrinsic(ID, ResultType);
15366 return Builder.CreateCall(F, {X, Y});
15367 }
15368 }
15369 switch (BuiltinID) {
15370 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
15371 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
15372    default: llvm_unreachable("Unknown BuiltinID");
15373 }
15374 Function *F = CGM.getIntrinsic(ID);
15375 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15376 return Builder.CreateCall(F, {X, Y, M4Value});
15377 }
15378
15379 case SystemZ::BI__builtin_s390_vlbrh:
15380 case SystemZ::BI__builtin_s390_vlbrf:
15381 case SystemZ::BI__builtin_s390_vlbrg: {
15382 llvm::Type *ResultType = ConvertType(E->getType());
15383 Value *X = EmitScalarExpr(E->getArg(0));
15384 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
15385 return Builder.CreateCall(F, X);
15386 }
15387
15388 // Vector intrinsics that output the post-instruction CC value.
15389
15390#define INTRINSIC_WITH_CC(NAME) \
15391 case SystemZ::BI__builtin_##NAME: \
15392 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
15393
15394 INTRINSIC_WITH_CC(s390_vpkshs);
15395 INTRINSIC_WITH_CC(s390_vpksfs);
15396 INTRINSIC_WITH_CC(s390_vpksgs);
15397
15398 INTRINSIC_WITH_CC(s390_vpklshs);
15399 INTRINSIC_WITH_CC(s390_vpklsfs);
15400 INTRINSIC_WITH_CC(s390_vpklsgs);
15401
15402 INTRINSIC_WITH_CC(s390_vceqbs);
15403 INTRINSIC_WITH_CC(s390_vceqhs);
15404 INTRINSIC_WITH_CC(s390_vceqfs);
15405 INTRINSIC_WITH_CC(s390_vceqgs);
15406
15407 INTRINSIC_WITH_CC(s390_vchbs);
15408 INTRINSIC_WITH_CC(s390_vchhs);
15409 INTRINSIC_WITH_CC(s390_vchfs);
15410 INTRINSIC_WITH_CC(s390_vchgs);
15411
15412 INTRINSIC_WITH_CC(s390_vchlbs);
15413 INTRINSIC_WITH_CC(s390_vchlhs);
15414 INTRINSIC_WITH_CC(s390_vchlfs);
15415 INTRINSIC_WITH_CC(s390_vchlgs);
15416
15417 INTRINSIC_WITH_CC(s390_vfaebs);
15418 INTRINSIC_WITH_CC(s390_vfaehs);
15419 INTRINSIC_WITH_CC(s390_vfaefs);
15420
15421 INTRINSIC_WITH_CC(s390_vfaezbs);
15422 INTRINSIC_WITH_CC(s390_vfaezhs);
15423 INTRINSIC_WITH_CC(s390_vfaezfs);
15424
15425 INTRINSIC_WITH_CC(s390_vfeebs);
15426 INTRINSIC_WITH_CC(s390_vfeehs);
15427 INTRINSIC_WITH_CC(s390_vfeefs);
15428
15429 INTRINSIC_WITH_CC(s390_vfeezbs);
15430 INTRINSIC_WITH_CC(s390_vfeezhs);
15431 INTRINSIC_WITH_CC(s390_vfeezfs);
15432
15433 INTRINSIC_WITH_CC(s390_vfenebs);
15434 INTRINSIC_WITH_CC(s390_vfenehs);
15435 INTRINSIC_WITH_CC(s390_vfenefs);
15436
15437 INTRINSIC_WITH_CC(s390_vfenezbs);
15438 INTRINSIC_WITH_CC(s390_vfenezhs);
15439 INTRINSIC_WITH_CC(s390_vfenezfs);
15440
15441 INTRINSIC_WITH_CC(s390_vistrbs);
15442 INTRINSIC_WITH_CC(s390_vistrhs);
15443 INTRINSIC_WITH_CC(s390_vistrfs);
15444
15445 INTRINSIC_WITH_CC(s390_vstrcbs);
15446 INTRINSIC_WITH_CC(s390_vstrchs);
15447 INTRINSIC_WITH_CC(s390_vstrcfs);
15448
15449 INTRINSIC_WITH_CC(s390_vstrczbs);
15450 INTRINSIC_WITH_CC(s390_vstrczhs);
15451 INTRINSIC_WITH_CC(s390_vstrczfs);
15452
15453 INTRINSIC_WITH_CC(s390_vfcesbs);
15454 INTRINSIC_WITH_CC(s390_vfcedbs);
15455 INTRINSIC_WITH_CC(s390_vfchsbs);
15456 INTRINSIC_WITH_CC(s390_vfchdbs);
15457 INTRINSIC_WITH_CC(s390_vfchesbs);
15458 INTRINSIC_WITH_CC(s390_vfchedbs);
15459
15460 INTRINSIC_WITH_CC(s390_vftcisb);
15461 INTRINSIC_WITH_CC(s390_vftcidb);
15462
15463 INTRINSIC_WITH_CC(s390_vstrsb);
15464 INTRINSIC_WITH_CC(s390_vstrsh);
15465 INTRINSIC_WITH_CC(s390_vstrsf);
15466
15467 INTRINSIC_WITH_CC(s390_vstrszb);
15468 INTRINSIC_WITH_CC(s390_vstrszh);
15469 INTRINSIC_WITH_CC(s390_vstrszf);
15470
15471#undef INTRINSIC_WITH_CC
15472
15473 default:
15474 return nullptr;
15475 }
15476}
15477
15478namespace {
15479// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
15480struct NVPTXMmaLdstInfo {
15481 unsigned NumResults; // Number of elements to load/store
15482 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
15483 unsigned IID_col;
15484 unsigned IID_row;
15485};
15486
15487#define MMA_INTR(geom_op_type, layout) \
15488 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
15489#define MMA_LDST(n, geom_op_type) \
15490 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
15491
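For example, the first FP load case below expands under these macros to (a mechanical expansion, shown here for readability):

    // MMA_LDST(8, m16n16k16_load_a_f16) ==
    //   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
    //        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }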
15492static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
15493 switch (BuiltinID) {
15494 // FP MMA loads
15495 case NVPTX::BI__hmma_m16n16k16_ld_a:
15496 return MMA_LDST(8, m16n16k16_load_a_f16);
15497 case NVPTX::BI__hmma_m16n16k16_ld_b:
15498 return MMA_LDST(8, m16n16k16_load_b_f16);
15499 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15500 return MMA_LDST(4, m16n16k16_load_c_f16);
15501 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15502 return MMA_LDST(8, m16n16k16_load_c_f32);
15503 case NVPTX::BI__hmma_m32n8k16_ld_a:
15504 return MMA_LDST(8, m32n8k16_load_a_f16);
15505 case NVPTX::BI__hmma_m32n8k16_ld_b:
15506 return MMA_LDST(8, m32n8k16_load_b_f16);
15507 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15508 return MMA_LDST(4, m32n8k16_load_c_f16);
15509 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15510 return MMA_LDST(8, m32n8k16_load_c_f32);
15511 case NVPTX::BI__hmma_m8n32k16_ld_a:
15512 return MMA_LDST(8, m8n32k16_load_a_f16);
15513 case NVPTX::BI__hmma_m8n32k16_ld_b:
15514 return MMA_LDST(8, m8n32k16_load_b_f16);
15515 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15516 return MMA_LDST(4, m8n32k16_load_c_f16);
15517 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15518 return MMA_LDST(8, m8n32k16_load_c_f32);
15519
15520 // Integer MMA loads
15521 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15522 return MMA_LDST(2, m16n16k16_load_a_s8);
15523 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15524 return MMA_LDST(2, m16n16k16_load_a_u8);
15525 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15526 return MMA_LDST(2, m16n16k16_load_b_s8);
15527 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15528 return MMA_LDST(2, m16n16k16_load_b_u8);
15529 case NVPTX::BI__imma_m16n16k16_ld_c:
15530 return MMA_LDST(8, m16n16k16_load_c_s32);
15531 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15532 return MMA_LDST(4, m32n8k16_load_a_s8);
15533 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15534 return MMA_LDST(4, m32n8k16_load_a_u8);
15535 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15536 return MMA_LDST(1, m32n8k16_load_b_s8);
15537 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15538 return MMA_LDST(1, m32n8k16_load_b_u8);
15539 case NVPTX::BI__imma_m32n8k16_ld_c:
15540 return MMA_LDST(8, m32n8k16_load_c_s32);
15541 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15542 return MMA_LDST(1, m8n32k16_load_a_s8);
15543 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15544 return MMA_LDST(1, m8n32k16_load_a_u8);
15545 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15546 return MMA_LDST(4, m8n32k16_load_b_s8);
15547 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15548 return MMA_LDST(4, m8n32k16_load_b_u8);
15549 case NVPTX::BI__imma_m8n32k16_ld_c:
15550 return MMA_LDST(8, m8n32k16_load_c_s32);
15551
15552 // Sub-integer MMA loads.
15553 // Only row/col layout is supported by A/B fragments.
15554 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15555 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
15556 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15557 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
15558 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15559 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
15560 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15561 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
15562 case NVPTX::BI__imma_m8n8k32_ld_c:
15563 return MMA_LDST(2, m8n8k32_load_c_s32);
15564 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15565 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
15566 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15567 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
15568 case NVPTX::BI__bmma_m8n8k128_ld_c:
15569 return MMA_LDST(2, m8n8k128_load_c_s32);
15570
15571  // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
15572 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
15573 // use fragment C for both loads and stores.
15574 // FP MMA stores.
15575 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
15576 return MMA_LDST(4, m16n16k16_store_d_f16);
15577 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
15578 return MMA_LDST(8, m16n16k16_store_d_f32);
15579 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
15580 return MMA_LDST(4, m32n8k16_store_d_f16);
15581 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
15582 return MMA_LDST(8, m32n8k16_store_d_f32);
15583 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
15584 return MMA_LDST(4, m8n32k16_store_d_f16);
15585 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
15586 return MMA_LDST(8, m8n32k16_store_d_f32);
15587
15588 // Integer and sub-integer MMA stores.
15589 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
15590 // name, integer loads/stores use LLVM's i32.
15591 case NVPTX::BI__imma_m16n16k16_st_c_i32:
15592 return MMA_LDST(8, m16n16k16_store_d_s32);
15593 case NVPTX::BI__imma_m32n8k16_st_c_i32:
15594 return MMA_LDST(8, m32n8k16_store_d_s32);
15595 case NVPTX::BI__imma_m8n32k16_st_c_i32:
15596 return MMA_LDST(8, m8n32k16_store_d_s32);
15597 case NVPTX::BI__imma_m8n8k32_st_c_i32:
15598 return MMA_LDST(2, m8n8k32_store_d_s32);
15599 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
15600 return MMA_LDST(2, m8n8k128_store_d_s32);
15601
15602 default:
15603 llvm_unreachable("Unknown MMA builtin")::llvm::llvm_unreachable_internal("Unknown MMA builtin", "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 15603)
;
15604 }
15605}
15606#undef MMA_LDST
15607#undef MMA_INTR
15608
15609
15610struct NVPTXMmaInfo {
15611 unsigned NumEltsA;
15612 unsigned NumEltsB;
15613 unsigned NumEltsC;
15614 unsigned NumEltsD;
15615 std::array<unsigned, 8> Variants;
15616
15617 unsigned getMMAIntrinsic(int Layout, bool Satf) {
15618 unsigned Index = Layout * 2 + Satf;
15619 if (Index >= Variants.size())
15620 return 0;
15621 return Variants[Index];
15622 }
15623};
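Given the element order produced by the MMA_VARIANTS macros below, the lookup above is a simple row-major index; for example:

    // Layout: 0 = row_row, 1 = row_col, 2 = col_row, 3 = col_col
    // Index = Layout * 2 + Satf, so Layout = 1 with Satf = true gives Index = 3,
    // selecting ..._mma_row_col_<type>_satfinite; a 0 entry means "unsupported".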
15624
15625  // Returns the intrinsic that matches Layout and Satf for valid
15626  // combinations, and 0 otherwise.
15627static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
15628 // clang-format off
15629#define MMA_VARIANTS(geom, type) {{ \
15630 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
15631 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
15632 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15633 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15634 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
15635 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
15636 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
15637 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
15638 }}
15639// Sub-integer MMA only supports row.col layout.
15640#define MMA_VARIANTS_I4(geom, type) {{ \
15641 0, \
15642 0, \
15643 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15644 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
15645 0, \
15646 0, \
15647 0, \
15648 0 \
15649 }}
15650// b1 MMA does not support .satfinite.
15651#define MMA_VARIANTS_B1(geom, type) {{ \
15652 0, \
15653 0, \
15654 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
15655 0, \
15656 0, \
15657 0, \
15658 0, \
15659 0 \
15660 }}
15661 // clang-format on
15662 switch (BuiltinID) {
15663 // FP MMA
15664  // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
15665  // the NumEltsN fields of the return value are ordered as A,B,C,D.
15666 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
15667 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
15668 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
15669 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
15670 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
15671 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
15672 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
15673 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
15674 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
15675 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
15676 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
15677 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
15678 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
15679 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
15680 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
15681 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
15682 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
15683 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
15684 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
15685 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
15686 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
15687 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
15688 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
15689 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
15690
15691 // Integer MMA
15692 case NVPTX::BI__imma_m16n16k16_mma_s8:
15693 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
15694 case NVPTX::BI__imma_m16n16k16_mma_u8:
15695 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
15696 case NVPTX::BI__imma_m32n8k16_mma_s8:
15697 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
15698 case NVPTX::BI__imma_m32n8k16_mma_u8:
15699 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
15700 case NVPTX::BI__imma_m8n32k16_mma_s8:
15701 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
15702 case NVPTX::BI__imma_m8n32k16_mma_u8:
15703 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
15704
15705 // Sub-integer MMA
15706 case NVPTX::BI__imma_m8n8k32_mma_s4:
15707 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
15708 case NVPTX::BI__imma_m8n8k32_mma_u4:
15709 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
15710 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
15711 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
15712 default:
15713 llvm_unreachable("Unexpected builtin ID.")::llvm::llvm_unreachable_internal("Unexpected builtin ID.", "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/clang/lib/CodeGen/CGBuiltin.cpp"
, 15713)
;
15714 }
15715#undef MMA_VARIANTS
15716#undef MMA_VARIANTS_I4
15717#undef MMA_VARIANTS_B1
15718}
15719
15720} // namespace
15721
15722Value *
15723CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
15724 auto MakeLdg = [&](unsigned IntrinsicID) {
15725 Value *Ptr = EmitScalarExpr(E->getArg(0));
15726 clang::CharUnits Align =
15727 CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
15728 return Builder.CreateCall(
15729 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15730 Ptr->getType()}),
15731 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
15732 };
15733 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
15734 Value *Ptr = EmitScalarExpr(E->getArg(0));
15735 return Builder.CreateCall(
15736 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
15737 Ptr->getType()}),
15738 {Ptr, EmitScalarExpr(E->getArg(1))});
15739 };
15740 switch (BuiltinID) {
15741 case NVPTX::BI__nvvm_atom_add_gen_i:
15742 case NVPTX::BI__nvvm_atom_add_gen_l:
15743 case NVPTX::BI__nvvm_atom_add_gen_ll:
15744 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
15745
15746 case NVPTX::BI__nvvm_atom_sub_gen_i:
15747 case NVPTX::BI__nvvm_atom_sub_gen_l:
15748 case NVPTX::BI__nvvm_atom_sub_gen_ll:
15749 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
15750
15751 case NVPTX::BI__nvvm_atom_and_gen_i:
15752 case NVPTX::BI__nvvm_atom_and_gen_l:
15753 case NVPTX::BI__nvvm_atom_and_gen_ll:
15754 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
15755
15756 case NVPTX::BI__nvvm_atom_or_gen_i:
15757 case NVPTX::BI__nvvm_atom_or_gen_l:
15758 case NVPTX::BI__nvvm_atom_or_gen_ll:
15759 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
15760
15761 case NVPTX::BI__nvvm_atom_xor_gen_i:
15762 case NVPTX::BI__nvvm_atom_xor_gen_l:
15763 case NVPTX::BI__nvvm_atom_xor_gen_ll:
15764 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
15765
15766 case NVPTX::BI__nvvm_atom_xchg_gen_i:
15767 case NVPTX::BI__nvvm_atom_xchg_gen_l:
15768 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
15769 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
15770
15771 case NVPTX::BI__nvvm_atom_max_gen_i:
15772 case NVPTX::BI__nvvm_atom_max_gen_l:
15773 case NVPTX::BI__nvvm_atom_max_gen_ll:
15774 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
15775
15776 case NVPTX::BI__nvvm_atom_max_gen_ui:
15777 case NVPTX::BI__nvvm_atom_max_gen_ul:
15778 case NVPTX::BI__nvvm_atom_max_gen_ull:
15779 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
15780
15781 case NVPTX::BI__nvvm_atom_min_gen_i:
15782 case NVPTX::BI__nvvm_atom_min_gen_l:
15783 case NVPTX::BI__nvvm_atom_min_gen_ll:
15784 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
15785
15786 case NVPTX::BI__nvvm_atom_min_gen_ui:
15787 case NVPTX::BI__nvvm_atom_min_gen_ul:
15788 case NVPTX::BI__nvvm_atom_min_gen_ull:
15789 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
15790
15791 case NVPTX::BI__nvvm_atom_cas_gen_i:
15792 case NVPTX::BI__nvvm_atom_cas_gen_l:
15793 case NVPTX::BI__nvvm_atom_cas_gen_ll:
15794 // __nvvm_atom_cas_gen_* should return the old value rather than the
15795 // success flag.
15796 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
15797
15798 case NVPTX::BI__nvvm_atom_add_gen_f:
15799 case NVPTX::BI__nvvm_atom_add_gen_d: {
15800 Value *Ptr = EmitScalarExpr(E->getArg(0));
15801 Value *Val = EmitScalarExpr(E->getArg(1));
15802 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
15803 AtomicOrdering::SequentiallyConsistent);
15804 }
15805
15806 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
15807 Value *Ptr = EmitScalarExpr(E->getArg(0));
15808 Value *Val = EmitScalarExpr(E->getArg(1));
15809 Function *FnALI32 =
15810 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
15811 return Builder.CreateCall(FnALI32, {Ptr, Val});
15812 }
15813
15814 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
15815 Value *Ptr = EmitScalarExpr(E->getArg(0));
15816 Value *Val = EmitScalarExpr(E->getArg(1));
15817 Function *FnALD32 =
15818 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
15819 return Builder.CreateCall(FnALD32, {Ptr, Val});
15820 }
15821
15822 case NVPTX::BI__nvvm_ldg_c:
15823 case NVPTX::BI__nvvm_ldg_c2:
15824 case NVPTX::BI__nvvm_ldg_c4:
15825 case NVPTX::BI__nvvm_ldg_s:
15826 case NVPTX::BI__nvvm_ldg_s2:
15827 case NVPTX::BI__nvvm_ldg_s4:
15828 case NVPTX::BI__nvvm_ldg_i:
15829 case NVPTX::BI__nvvm_ldg_i2:
15830 case NVPTX::BI__nvvm_ldg_i4:
15831 case NVPTX::BI__nvvm_ldg_l:
15832 case NVPTX::BI__nvvm_ldg_ll:
15833 case NVPTX::BI__nvvm_ldg_ll2:
15834 case NVPTX::BI__nvvm_ldg_uc:
15835 case NVPTX::BI__nvvm_ldg_uc2:
15836 case NVPTX::BI__nvvm_ldg_uc4:
15837 case NVPTX::BI__nvvm_ldg_us:
15838 case NVPTX::BI__nvvm_ldg_us2:
15839 case NVPTX::BI__nvvm_ldg_us4:
15840 case NVPTX::BI__nvvm_ldg_ui:
15841 case NVPTX::BI__nvvm_ldg_ui2:
15842 case NVPTX::BI__nvvm_ldg_ui4:
15843 case NVPTX::BI__nvvm_ldg_ul:
15844 case NVPTX::BI__nvvm_ldg_ull:
15845 case NVPTX::BI__nvvm_ldg_ull2:
15846 // PTX Interoperability section 2.2: "For a vector with an even number of
15847 // elements, its alignment is set to number of elements times the alignment
15848 // of its member: n*alignof(t)."
15849 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
15850 case NVPTX::BI__nvvm_ldg_f:
15851 case NVPTX::BI__nvvm_ldg_f2:
15852 case NVPTX::BI__nvvm_ldg_f4:
15853 case NVPTX::BI__nvvm_ldg_d:
15854 case NVPTX::BI__nvvm_ldg_d2:
15855 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
15856
15857 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
15858 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
15859 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
15860 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
15861 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
15862 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
15863 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
15864 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
15865 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
15866 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
15867 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
15868 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
15869 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
15870 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
15871 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
15872 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
15873 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
15874 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
15875 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
15876 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
15877 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
15878 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
15879 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
15880 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
15881 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
15882 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
15883 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
15884 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
15885 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
15886 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
15887 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
15888 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
15889 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
15890 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
15891 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
15892 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
15893 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
15894 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
15895 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
15896 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
15897 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
15898 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
15899 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
15900 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
15901 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
15902 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
15903 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
15904 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
15905 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
15906 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
15907 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
15908 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
15909 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
15910 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
15911 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
15912 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
15913 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
15914 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
15915 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
15916 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
15917 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
15918 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
15919 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
15920 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
15921 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
15922 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
15923 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
15924 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
15925 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
15926 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
15927 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
15928 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
15929 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
15930 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
15931 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
15932 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
15933 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
15934 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
15935 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
15936 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
15937 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
15938 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
15939 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
15940 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
15941 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
15942 Value *Ptr = EmitScalarExpr(E->getArg(0));
15943 return Builder.CreateCall(
15944 CGM.getIntrinsic(
15945 Intrinsic::nvvm_atomic_cas_gen_i_cta,
15946 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
15947 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
15948 }
15949 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
15950 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
15951 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
15952 Value *Ptr = EmitScalarExpr(E->getArg(0));
15953 return Builder.CreateCall(
15954 CGM.getIntrinsic(
15955 Intrinsic::nvvm_atomic_cas_gen_i_sys,
15956 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
15957 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
15958 }
15959 case NVPTX::BI__nvvm_match_all_sync_i32p:
15960 case NVPTX::BI__nvvm_match_all_sync_i64p: {
15961 Value *Mask = EmitScalarExpr(E->getArg(0));
15962 Value *Val = EmitScalarExpr(E->getArg(1));
15963 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
15964 Value *ResultPair = Builder.CreateCall(
15965 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
15966 ? Intrinsic::nvvm_match_all_sync_i32p
15967 : Intrinsic::nvvm_match_all_sync_i64p),
15968 {Mask, Val});
15969 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
15970 PredOutPtr.getElementType());
15971 Builder.CreateStore(Pred, PredOutPtr);
15972 return Builder.CreateExtractValue(ResultPair, 0);
15973 }
15974
15975 // FP MMA loads
15976 case NVPTX::BI__hmma_m16n16k16_ld_a:
15977 case NVPTX::BI__hmma_m16n16k16_ld_b:
15978 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15979 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15980 case NVPTX::BI__hmma_m32n8k16_ld_a:
15981 case NVPTX::BI__hmma_m32n8k16_ld_b:
15982 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15983 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15984 case NVPTX::BI__hmma_m8n32k16_ld_a:
15985 case NVPTX::BI__hmma_m8n32k16_ld_b:
15986 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15987 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15988 // Integer MMA loads.
15989 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15990 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15991 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15992 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15993 case NVPTX::BI__imma_m16n16k16_ld_c:
15994 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15995 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15996 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15997 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15998 case NVPTX::BI__imma_m32n8k16_ld_c:
15999 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
16000 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
16001 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
16002 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
16003 case NVPTX::BI__imma_m8n32k16_ld_c:
16004 // Sub-integer MMA loads.
16005 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
16006 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
16007 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
16008 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
16009 case NVPTX::BI__imma_m8n8k32_ld_c:
16010 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
16011 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
16012 case NVPTX::BI__bmma_m8n8k128_ld_c:
16013 {
16014 Address Dst = EmitPointerWithAlignment(E->getArg(0));
16015 Value *Src = EmitScalarExpr(E->getArg(1));
16016 Value *Ldm = EmitScalarExpr(E->getArg(2));
16017 Optional<llvm::APSInt> isColMajorArg =
16018 E->getArg(3)->getIntegerConstantExpr(getContext());
16019 if (!isColMajorArg)
16020 return nullptr;
16021 bool isColMajor = isColMajorArg->getSExtValue();
16022 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
16023 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
16024 if (IID == 0)
16025 return nullptr;
16026
16027 Value *Result =
16028 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
16029
16030 // Save returned values.
16031 assert(II.NumResults);
16032 if (II.NumResults == 1) {
16033 Builder.CreateAlignedStore(Result, Dst.getPointer(),
16034 CharUnits::fromQuantity(4));
16035 } else {
16036 for (unsigned i = 0; i < II.NumResults; ++i) {
16037 Builder.CreateAlignedStore(
16038 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
16039 Dst.getElementType()),
16040 Builder.CreateGEP(Dst.getPointer(),
16041 llvm::ConstantInt::get(IntTy, i)),
16042 CharUnits::fromQuantity(4));
16043 }
16044 }
16045 return Result;
16046 }
16047
16048 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
16049 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
16050 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
16051 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
16052 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
16053 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
16054 case NVPTX::BI__imma_m16n16k16_st_c_i32:
16055 case NVPTX::BI__imma_m32n8k16_st_c_i32:
16056 case NVPTX::BI__imma_m8n32k16_st_c_i32:
16057 case NVPTX::BI__imma_m8n8k32_st_c_i32:
16058 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
16059 Value *Dst = EmitScalarExpr(E->getArg(0));
16060 Address Src = EmitPointerWithAlignment(E->getArg(1));
16061 Value *Ldm = EmitScalarExpr(E->getArg(2));
16062 Optional<llvm::APSInt> isColMajorArg =
16063 E->getArg(3)->getIntegerConstantExpr(getContext());
16064 if (!isColMajorArg)
16065 return nullptr;
16066 bool isColMajor = isColMajorArg->getSExtValue();
16067 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
16068 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
16069 if (IID == 0)
16070 return nullptr;
16071 Function *Intrinsic =
16072 CGM.getIntrinsic(IID, Dst->getType());
16073 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
16074 SmallVector<Value *, 10> Values = {Dst};
16075 for (unsigned i = 0; i < II.NumResults; ++i) {
16076 Value *V = Builder.CreateAlignedLoad(
16077 Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
16078 CharUnits::fromQuantity(4));
16079 Values.push_back(Builder.CreateBitCast(V, ParamType));
16080 }
16081 Values.push_back(Ldm);
16082 Value *Result = Builder.CreateCall(Intrinsic, Values);
16083 return Result;
16084 }
16085
16086 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
16087 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
16088 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
16089 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
16090 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
16091 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
16092 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
16093 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
16094 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
16095 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
16096 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
16097 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
16098 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
16099 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
16100 case NVPTX::BI__imma_m16n16k16_mma_s8:
16101 case NVPTX::BI__imma_m16n16k16_mma_u8:
16102 case NVPTX::BI__imma_m32n8k16_mma_s8:
16103 case NVPTX::BI__imma_m32n8k16_mma_u8:
16104 case NVPTX::BI__imma_m8n32k16_mma_s8:
16105 case NVPTX::BI__imma_m8n32k16_mma_u8:
16106 case NVPTX::BI__imma_m8n8k32_mma_s4:
16107 case NVPTX::BI__imma_m8n8k32_mma_u4:
16108 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
16109 Address Dst = EmitPointerWithAlignment(E->getArg(0));
16110 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
16111 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
16112 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
16113 Optional<llvm::APSInt> LayoutArg =
16114 E->getArg(4)->getIntegerConstantExpr(getContext());
16115 if (!LayoutArg)
16116 return nullptr;
16117 int Layout = LayoutArg->getSExtValue();
16118 if (Layout < 0 || Layout > 3)
16119 return nullptr;
16120 llvm::APSInt SatfArg;
16121 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
16122 SatfArg = 0; // .b1 does not have a satf argument.
16123 else if (Optional<llvm::APSInt> OptSatfArg =
16124 E->getArg(5)->getIntegerConstantExpr(getContext()))
16125 SatfArg = *OptSatfArg;
16126 else
16127 return nullptr;
16128 bool Satf = SatfArg.getSExtValue();
16129 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
16130 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
16131 if (IID == 0) // Unsupported combination of Layout/Satf.
16132 return nullptr;
16133
16134 SmallVector<Value *, 24> Values;
16135 Function *Intrinsic = CGM.getIntrinsic(IID);
16136 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
16137 // Load A
16138 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
16139 Value *V = Builder.CreateAlignedLoad(
16140 Builder.CreateGEP(SrcA.getPointer(),
16141 llvm::ConstantInt::get(IntTy, i)),
16142 CharUnits::fromQuantity(4));
16143 Values.push_back(Builder.CreateBitCast(V, AType));
16144 }
16145 // Load B
16146 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
16147 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
16148 Value *V = Builder.CreateAlignedLoad(
16149 Builder.CreateGEP(SrcB.getPointer(),
16150 llvm::ConstantInt::get(IntTy, i)),
16151 CharUnits::fromQuantity(4));
16152 Values.push_back(Builder.CreateBitCast(V, BType));
16153 }
16154 // Load C
16155 llvm::Type *CType =
16156 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
16157 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
16158 Value *V = Builder.CreateAlignedLoad(
16159 Builder.CreateGEP(SrcC.getPointer(),
16160 llvm::ConstantInt::get(IntTy, i)),
16161 CharUnits::fromQuantity(4));
16162 Values.push_back(Builder.CreateBitCast(V, CType));
16163 }
16164 Value *Result = Builder.CreateCall(Intrinsic, Values);
16165 llvm::Type *DType = Dst.getElementType();
16166 for (unsigned i = 0; i < MI.NumEltsD; ++i)
16167 Builder.CreateAlignedStore(
16168 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
16169 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
16170 CharUnits::fromQuantity(4));
16171 return Result;
16172 }
16173 default:
16174 return nullptr;
16175 }
16176}
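// Editor's sketch (not part of CGBuiltin.cpp): the shape of the
// __nvvm_match_all_sync_i32p lowering above. The intrinsic yields a
// {mask, predicate} pair; the predicate is widened and stored through the
// out-pointer argument, and the mask becomes the call's result. MatchAllPair
// and MatchAll are illustrative stand-ins, not Clang or CUDA APIs.
#include <cstdint>
#include <utility>

static std::pair<uint32_t, bool> MatchAllPair(uint32_t /*Mask*/, uint32_t /*Val*/) {
  return {0u, false}; // stub; the real result comes from the hardware instruction
}

uint32_t MatchAll(uint32_t Mask, uint32_t Val, int *PredOut) {
  std::pair<uint32_t, bool> Result = MatchAllPair(Mask, Val);
  *PredOut = static_cast<int>(Result.second); // zext i1 -> i32, store to *PredOut
  return Result.first;                        // the match mask is returned
}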
16177
16178namespace {
16179struct BuiltinAlignArgs {
16180 llvm::Value *Src = nullptr;
16181 llvm::Type *SrcType = nullptr;
16182 llvm::Value *Alignment = nullptr;
16183 llvm::Value *Mask = nullptr;
16184 llvm::IntegerType *IntType = nullptr;
16185
16186 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
16187 QualType AstType = E->getArg(0)->getType();
16188 if (AstType->isArrayType())
16189 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
16190 else
16191 Src = CGF.EmitScalarExpr(E->getArg(0));
16192 SrcType = Src->getType();
16193 if (SrcType->isPointerTy()) {
16194 IntType = IntegerType::get(
16195 CGF.getLLVMContext(),
16196 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
16197 } else {
16198 assert(SrcType->isIntegerTy());
16199 IntType = cast<llvm::IntegerType>(SrcType);
16200 }
16201 Alignment = CGF.EmitScalarExpr(E->getArg(1));
16202 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
16203 auto *One = llvm::ConstantInt::get(IntType, 1);
16204 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
16205 }
16206};
16207} // namespace
16208
16209/// Generate (x & (y-1)) == 0.
16210RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
16211 BuiltinAlignArgs Args(E, *this);
16212 llvm::Value *SrcAddress = Args.Src;
16213 if (Args.SrcType->isPointerTy())
16214 SrcAddress =
16215 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
16216 return RValue::get(Builder.CreateICmpEQ(
16217 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
16218 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
16219}
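// Editor's sketch (not part of CGBuiltin.cpp): what the predicate emitted by
// EmitBuiltinIsAligned looks like at the source level. IsAligned16 and
// IsAligned16Manual are illustrative names; the alignment argument must be a
// constant power of two.
#include <cstdint>

bool IsAligned16(const void *P) {
  return __builtin_is_aligned(P, 16); // lowers to (ptrtoint P & 15) == 0
}

bool IsAligned16Manual(const void *P) {
  return (reinterpret_cast<std::uintptr_t>(P) & (16 - 1)) == 0; // same test, spelled out
}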
16220
16221/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
16222/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
16223 /// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
16224/// TODO: actually use ptrmask once most optimization passes know about it.
16225RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
16226 BuiltinAlignArgs Args(E, *this);
16227 llvm::Value *SrcAddr = Args.Src;
16228 if (Args.Src->getType()->isPointerTy())
16229 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
16230 llvm::Value *SrcForMask = SrcAddr;
16231 if (AlignUp) {
16232 // When aligning up we have to first add the mask to ensure we go over the
16233 // next alignment value and then align down to the next valid multiple.
16234 // By adding the mask, we ensure that align_up on an already aligned
16235 // value will not change the value.
16236 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
16237 }
16238 // Invert the mask to only clear the lower bits.
16239 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
16240 llvm::Value *Result =
16241 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
16242 if (Args.Src->getType()->isPointerTy()) {
16243 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
16244 // Result = Builder.CreateIntrinsic(
16245 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
16246 // {SrcForMask, NegatedMask}, nullptr, "aligned_result");
16247 Result->setName("aligned_intptr");
16248 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
16249 // The result must point to the same underlying allocation. This means we
16250 // can use an inbounds GEP to enable better optimization.
16251 Value *Base = EmitCastToVoidPtr(Args.Src);
16252 if (getLangOpts().isSignedOverflowDefined())
16253 Result = Builder.CreateGEP(Base, Difference, "aligned_result");
16254 else
16255 Result = EmitCheckedInBoundsGEP(Base, Difference,
16256 /*SignedIndices=*/true,
16257 /*isSubtraction=*/!AlignUp,
16258 E->getExprLoc(), "aligned_result");
16259 Result = Builder.CreatePointerCast(Result, Args.SrcType);
16260 // Emit an alignment assumption to ensure that the new alignment is
16261 // propagated to loads/stores, etc.
16262 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
16263 }
16264 assert(Result->getType() == Args.SrcType);
16265 return RValue::get(Result);
16266}
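// Editor's sketch (not part of CGBuiltin.cpp): the integer arithmetic that
// EmitBuiltinAlignTo emits for __builtin_align_down / __builtin_align_up,
// assuming a power-of-two alignment (which the builtins require). AlignDown
// and AlignUp are illustrative helpers, not Clang APIs.
#include <cstdint>

constexpr uint64_t AlignDown(uint64_t X, uint64_t Align) {
  return X & ~(Align - 1); // clear the low bits
}

constexpr uint64_t AlignUp(uint64_t X, uint64_t Align) {
  return (X + (Align - 1)) & ~(Align - 1); // step over the boundary, then clear
}

static_assert(AlignDown(13, 8) == 8, "");
static_assert(AlignUp(13, 8) == 16, "");
static_assert(AlignUp(16, 8) == 16, "an already aligned value is unchanged");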
16267
16268Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
16269 const CallExpr *E) {
16270 switch (BuiltinID) {
16271 case WebAssembly::BI__builtin_wasm_memory_size: {
16272 llvm::Type *ResultType = ConvertType(E->getType());
16273 Value *I = EmitScalarExpr(E->getArg(0));
16274 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
16275 return Builder.CreateCall(Callee, I);
16276 }
16277 case WebAssembly::BI__builtin_wasm_memory_grow: {
16278 llvm::Type *ResultType = ConvertType(E->getType());
16279 Value *Args[] = {
16280 EmitScalarExpr(E->getArg(0)),
16281 EmitScalarExpr(E->getArg(1))
16282 };
16283 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
16284 return Builder.CreateCall(Callee, Args);
16285 }
16286 case WebAssembly::BI__builtin_wasm_tls_size: {
16287 llvm::Type *ResultType = ConvertType(E->getType());
16288 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
16289 return Builder.CreateCall(Callee);
16290 }
16291 case WebAssembly::BI__builtin_wasm_tls_align: {
16292 llvm::Type *ResultType = ConvertType(E->getType());
16293 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
16294 return Builder.CreateCall(Callee);
16295 }
16296 case WebAssembly::BI__builtin_wasm_tls_base: {
16297 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
16298 return Builder.CreateCall(Callee);
16299 }
16300 case WebAssembly::BI__builtin_wasm_throw: {
16301 Value *Tag = EmitScalarExpr(E->getArg(0));
16302 Value *Obj = EmitScalarExpr(E->getArg(1));
16303 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
16304 return Builder.CreateCall(Callee, {Tag, Obj});
16305 }
16306 case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
16307 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
16308 return Builder.CreateCall(Callee);
16309 }
16310 case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
16311 Value *Addr = EmitScalarExpr(E->getArg(0));
16312 Value *Expected = EmitScalarExpr(E->getArg(1));
16313 Value *Timeout = EmitScalarExpr(E->getArg(2));
16314 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
16315 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16316 }
16317 case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
16318 Value *Addr = EmitScalarExpr(E->getArg(0));
16319 Value *Expected = EmitScalarExpr(E->getArg(1));
16320 Value *Timeout = EmitScalarExpr(E->getArg(2));
16321 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
16322 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
16323 }
16324 case WebAssembly::BI__builtin_wasm_atomic_notify: {
16325 Value *Addr = EmitScalarExpr(E->getArg(0));
16326 Value *Count = EmitScalarExpr(E->getArg(1));
16327 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
16328 return Builder.CreateCall(Callee, {Addr, Count});
16329 }
16330 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
16331 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
16332 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
16333 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
16334 Value *Src = EmitScalarExpr(E->getArg(0));
16335 llvm::Type *ResT = ConvertType(E->getType());
16336 Function *Callee =
16337 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
16338 return Builder.CreateCall(Callee, {Src});
16339 }
16340 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
16341 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
16342 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
16343 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
16344 Value *Src = EmitScalarExpr(E->getArg(0));
16345 llvm::Type *ResT = ConvertType(E->getType());
16346 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
16347 {ResT, Src->getType()});
16348 return Builder.CreateCall(Callee, {Src});
16349 }
16350 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
16351 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
16352 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
16353 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
16354 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
16355 Value *Src = EmitScalarExpr(E->getArg(0));
16356 llvm::Type *ResT = ConvertType(E->getType());
16357 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
16358 {ResT, Src->getType()});
16359 return Builder.CreateCall(Callee, {Src});
16360 }
16361 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
16362 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
16363 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
16364 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
16365 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
16366 Value *Src = EmitScalarExpr(E->getArg(0));
16367 llvm::Type *ResT = ConvertType(E->getType());
16368 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
16369 {ResT, Src->getType()});
16370 return Builder.CreateCall(Callee, {Src});
16371 }
16372 case WebAssembly::BI__builtin_wasm_min_f32:
16373 case WebAssembly::BI__builtin_wasm_min_f64:
16374 case WebAssembly::BI__builtin_wasm_min_f32x4:
16375 case WebAssembly::BI__builtin_wasm_min_f64x2: {
16376 Value *LHS = EmitScalarExpr(E->getArg(0));
16377 Value *RHS = EmitScalarExpr(E->getArg(1));
16378 Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
16379 ConvertType(E->getType()));
16380 return Builder.CreateCall(Callee, {LHS, RHS});
16381 }
16382 case WebAssembly::BI__builtin_wasm_max_f32:
16383 case WebAssembly::BI__builtin_wasm_max_f64:
16384 case WebAssembly::BI__builtin_wasm_max_f32x4:
16385 case WebAssembly::BI__builtin_wasm_max_f64x2: {
16386 Value *LHS = EmitScalarExpr(E->getArg(0));
16387 Value *RHS = EmitScalarExpr(E->getArg(1));
16388 Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
16389 ConvertType(E->getType()));
16390 return Builder.CreateCall(Callee, {LHS, RHS});
16391 }
16392 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
16393 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
16394 Value *LHS = EmitScalarExpr(E->getArg(0));
16395 Value *RHS = EmitScalarExpr(E->getArg(1));
16396 Function *Callee =
16397 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
16398 return Builder.CreateCall(Callee, {LHS, RHS});
16399 }
16400 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
16401 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
16402 Value *LHS = EmitScalarExpr(E->getArg(0));
16403 Value *RHS = EmitScalarExpr(E->getArg(1));
16404 Function *Callee =
16405 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
16406 return Builder.CreateCall(Callee, {LHS, RHS});
16407 }
16408 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16409 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16410 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16411 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16412 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16413 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16414 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16415 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
16416 unsigned IntNo;
16417 switch (BuiltinID) {
16418 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
16419 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
16420 IntNo = Intrinsic::wasm_ceil;
16421 break;
16422 case WebAssembly::BI__builtin_wasm_floor_f32x4:
16423 case WebAssembly::BI__builtin_wasm_floor_f64x2:
16424 IntNo = Intrinsic::wasm_floor;
16425 break;
16426 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
16427 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
16428 IntNo = Intrinsic::wasm_trunc;
16429 break;
16430 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
16431 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
16432 IntNo = Intrinsic::wasm_nearest;
16433 break;
16434 default:
16435 llvm_unreachable("unexpected builtin ID");
16436 }
16437 Value *Value = EmitScalarExpr(E->getArg(0));
16438 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16439 return Builder.CreateCall(Callee, Value);
16440 }
16441 case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
16442 Value *Src = EmitScalarExpr(E->getArg(0));
16443 Value *Indices = EmitScalarExpr(E->getArg(1));
16444 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
16445 return Builder.CreateCall(Callee, {Src, Indices});
16446 }
16447 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16448 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16449 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16450 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16451 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16452 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16453 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16454 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
16455 llvm::APSInt LaneConst =
16456 *E->getArg(1)->getIntegerConstantExpr(getContext());
16457 Value *Vec = EmitScalarExpr(E->getArg(0));
16458 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16459 Value *Extract = Builder.CreateExtractElement(Vec, Lane);
16460 switch (BuiltinID) {
16461 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
16462 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
16463 return Builder.CreateSExt(Extract, ConvertType(E->getType()));
16464 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
16465 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
16466 return Builder.CreateZExt(Extract, ConvertType(E->getType()));
16467 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
16468 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
16469 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
16470 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
16471 return Extract;
16472 default:
16473 llvm_unreachable("unexpected builtin ID");
16474 }
16475 }
16476 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16477 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
16478 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16479 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16480 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16481 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
16482 llvm::APSInt LaneConst =
16483 *E->getArg(1)->getIntegerConstantExpr(getContext());
16484 Value *Vec = EmitScalarExpr(E->getArg(0));
16485 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
16486 Value *Val = EmitScalarExpr(E->getArg(2));
16487 switch (BuiltinID) {
16488 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
16489 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
16490 llvm::Type *ElemType =
16491 cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
16492 Value *Trunc = Builder.CreateTrunc(Val, ElemType);
16493 return Builder.CreateInsertElement(Vec, Trunc, Lane);
16494 }
16495 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
16496 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
16497 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
16498 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
16499 return Builder.CreateInsertElement(Vec, Val, Lane);
16500 default:
16501 llvm_unreachable("unexpected builtin ID");
16502 }
16503 }
16504 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16505 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16506 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16507 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16508 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16509 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16510 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16511 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
16512 unsigned IntNo;
16513 switch (BuiltinID) {
16514 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
16515 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
16516 IntNo = Intrinsic::sadd_sat;
16517 break;
16518 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
16519 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
16520 IntNo = Intrinsic::uadd_sat;
16521 break;
16522 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
16523 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
16524 IntNo = Intrinsic::wasm_sub_saturate_signed;
16525 break;
16526 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
16527 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
16528 IntNo = Intrinsic::wasm_sub_saturate_unsigned;
16529 break;
16530 default:
16531 llvm_unreachable("unexpected builtin ID");
16532 }
16533 Value *LHS = EmitScalarExpr(E->getArg(0));
16534 Value *RHS = EmitScalarExpr(E->getArg(1));
16535 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
16536 return Builder.CreateCall(Callee, {LHS, RHS});
16537 }
16538 case WebAssembly::BI__builtin_wasm_abs_i8x16:
16539 case WebAssembly::BI__builtin_wasm_abs_i16x8:
16540 case WebAssembly::BI__builtin_wasm_abs_i32x4: {
16541 Value *Vec = EmitScalarExpr(E->getArg(0));
16542 Value *Neg = Builder.CreateNeg(Vec, "neg");
16543 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
16544 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
16545 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
16546 }
16547 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16548 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16549 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16550 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16551 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16552 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16553 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16554 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16555 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16556 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16557 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16558 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
16559 Value *LHS = EmitScalarExpr(E->getArg(0));
16560 Value *RHS = EmitScalarExpr(E->getArg(1));
16561 Value *ICmp;
16562 switch (BuiltinID) {
16563 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
16564 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
16565 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
16566 ICmp = Builder.CreateICmpSLT(LHS, RHS);
16567 break;
16568 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
16569 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
16570 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
16571 ICmp = Builder.CreateICmpULT(LHS, RHS);
16572 break;
16573 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
16574 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
16575 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
16576 ICmp = Builder.CreateICmpSGT(LHS, RHS);
16577 break;
16578 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
16579 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
16580 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
16581 ICmp = Builder.CreateICmpUGT(LHS, RHS);
16582 break;
16583 default:
16584 llvm_unreachable("unexpected builtin ID");
16585 }
16586 return Builder.CreateSelect(ICmp, LHS, RHS);
16587 }
16588 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
16589 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
16590 Value *LHS = EmitScalarExpr(E->getArg(0));
16591 Value *RHS = EmitScalarExpr(E->getArg(1));
16592 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
16593 ConvertType(E->getType()));
16594 return Builder.CreateCall(Callee, {LHS, RHS});
16595 }
16596 case WebAssembly::BI__builtin_wasm_q15mulr_saturate_s_i8x16: {
16597 Value *LHS = EmitScalarExpr(E->getArg(0));
16598 Value *RHS = EmitScalarExpr(E->getArg(1));
16599 Function *Callee =
16600 CGM.getIntrinsic(Intrinsic::wasm_q15mulr_saturate_signed);
16601 return Builder.CreateCall(Callee, {LHS, RHS});
16602 }
16603 case WebAssembly::BI__builtin_wasm_bitselect: {
16604 Value *V1 = EmitScalarExpr(E->getArg(0));
16605 Value *V2 = EmitScalarExpr(E->getArg(1));
16606 Value *C = EmitScalarExpr(E->getArg(2));
16607 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
16608 ConvertType(E->getType()));
16609 return Builder.CreateCall(Callee, {V1, V2, C});
16610 }
16611 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
16612 Value *LHS = EmitScalarExpr(E->getArg(0));
16613 Value *RHS = EmitScalarExpr(E->getArg(1));
16614 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
16615 return Builder.CreateCall(Callee, {LHS, RHS});
16616 }
16617 case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
16618 Value *Vec = EmitScalarExpr(E->getArg(0));
16619 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
16620 return Builder.CreateCall(Callee, {Vec});
16621 }
16622 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16623 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16624 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16625 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16626 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16627 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16628 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16629 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
16630 unsigned IntNo;
16631 switch (BuiltinID) {
16632 case WebAssembly::BI__builtin_wasm_any_true_i8x16:
16633 case WebAssembly::BI__builtin_wasm_any_true_i16x8:
16634 case WebAssembly::BI__builtin_wasm_any_true_i32x4:
16635 case WebAssembly::BI__builtin_wasm_any_true_i64x2:
16636 IntNo = Intrinsic::wasm_anytrue;
16637 break;
16638 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
16639 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
16640 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
16641 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
16642 IntNo = Intrinsic::wasm_alltrue;
16643 break;
16644 default:
16645 llvm_unreachable("unexpected builtin ID");
16646 }
16647 Value *Vec = EmitScalarExpr(E->getArg(0));
16648 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
16649 return Builder.CreateCall(Callee, {Vec});
16650 }
16651 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
16652 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
16653 case WebAssembly::BI__builtin_wasm_bitmask_i32x4: {
16654 Value *Vec = EmitScalarExpr(E->getArg(0));
16655 Function *Callee =
16656 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
16657 return Builder.CreateCall(Callee, {Vec});
16658 }
16659 case WebAssembly::BI__builtin_wasm_abs_f32x4:
16660 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
16661 Value *Vec = EmitScalarExpr(E->getArg(0));
16662 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
16663 return Builder.CreateCall(Callee, {Vec});
16664 }
16665 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
16666 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
16667 Value *Vec = EmitScalarExpr(E->getArg(0));
16668 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
16669 return Builder.CreateCall(Callee, {Vec});
16670 }
16671 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16672 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16673 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16674 case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
16675 Value *A = EmitScalarExpr(E->getArg(0));
16676 Value *B = EmitScalarExpr(E->getArg(1));
16677 Value *C = EmitScalarExpr(E->getArg(2));
16678 unsigned IntNo;
16679 switch (BuiltinID) {
16680 case WebAssembly::BI__builtin_wasm_qfma_f32x4:
16681 case WebAssembly::BI__builtin_wasm_qfma_f64x2:
16682 IntNo = Intrinsic::wasm_qfma;
16683 break;
16684 case WebAssembly::BI__builtin_wasm_qfms_f32x4:
16685 case WebAssembly::BI__builtin_wasm_qfms_f64x2:
16686 IntNo = Intrinsic::wasm_qfms;
16687 break;
16688 default:
16689 llvm_unreachable("unexpected builtin ID");
16690 }
16691 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
16692 return Builder.CreateCall(Callee, {A, B, C});
16693 }
16694 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16695 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16696 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16697 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
16698 Value *Low = EmitScalarExpr(E->getArg(0));
16699 Value *High = EmitScalarExpr(E->getArg(1));
16700 unsigned IntNo;
16701 switch (BuiltinID) {
16702 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
16703 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
16704 IntNo = Intrinsic::wasm_narrow_signed;
16705 break;
16706 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
16707 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
16708 IntNo = Intrinsic::wasm_narrow_unsigned;
16709 break;
16710 default:
16711 llvm_unreachable("unexpected builtin ID");
16712 }
16713 Function *Callee =
16714 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
16715 return Builder.CreateCall(Callee, {Low, High});
16716 }
16717 case WebAssembly::BI__builtin_wasm_load32_zero: {
16718 Value *Ptr = EmitScalarExpr(E->getArg(0));
16719 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
16720 return Builder.CreateCall(Callee, {Ptr});
16721 }
16722 case WebAssembly::BI__builtin_wasm_load64_zero: {
16723 Value *Ptr = EmitScalarExpr(E->getArg(0));
16724 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
16725 return Builder.CreateCall(Callee, {Ptr});
16726 }
16727 case WebAssembly::BI__builtin_wasm_load8_lane:
16728 case WebAssembly::BI__builtin_wasm_load16_lane:
16729 case WebAssembly::BI__builtin_wasm_load32_lane:
16730 case WebAssembly::BI__builtin_wasm_load64_lane:
16731 case WebAssembly::BI__builtin_wasm_store8_lane:
16732 case WebAssembly::BI__builtin_wasm_store16_lane:
16733 case WebAssembly::BI__builtin_wasm_store32_lane:
16734 case WebAssembly::BI__builtin_wasm_store64_lane: {
16735 Value *Ptr = EmitScalarExpr(E->getArg(0));
16736 Value *Vec = EmitScalarExpr(E->getArg(1));
16737 Optional<llvm::APSInt> LaneIdxConst =
16738 E->getArg(2)->getIntegerConstantExpr(getContext());
16739 assert(LaneIdxConst && "Constant arg isn't actually constant?");
16740 Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
16741 unsigned IntNo;
16742 switch (BuiltinID) {
16743 case WebAssembly::BI__builtin_wasm_load8_lane:
16744 IntNo = Intrinsic::wasm_load8_lane;
16745 break;
16746 case WebAssembly::BI__builtin_wasm_load16_lane:
16747 IntNo = Intrinsic::wasm_load16_lane;
16748 break;
16749 case WebAssembly::BI__builtin_wasm_load32_lane:
16750 IntNo = Intrinsic::wasm_load32_lane;
16751 break;
16752 case WebAssembly::BI__builtin_wasm_load64_lane:
16753 IntNo = Intrinsic::wasm_load64_lane;
16754 break;
16755 case WebAssembly::BI__builtin_wasm_store8_lane:
16756 IntNo = Intrinsic::wasm_store8_lane;
16757 break;
16758 case WebAssembly::BI__builtin_wasm_store16_lane:
16759 IntNo = Intrinsic::wasm_store16_lane;
16760 break;
16761 case WebAssembly::BI__builtin_wasm_store32_lane:
16762 IntNo = Intrinsic::wasm_store32_lane;
16763 break;
16764 case WebAssembly::BI__builtin_wasm_store64_lane:
16765 IntNo = Intrinsic::wasm_store64_lane;
16766 break;
16767 default:
16768 llvm_unreachable("unexpected builtin ID");
16769 }
16770 Function *Callee = CGM.getIntrinsic(IntNo);
16771 return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
16772 }
16773 case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
16774 Value *Ops[18];
16775 size_t OpIdx = 0;
16776 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
16777 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
16778 while (OpIdx < 18) {
16779 Optional<llvm::APSInt> LaneConst =
16780 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
16781 assert(LaneConst && "Constant arg isn't actually constant?");
16782 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
16783 }
16784 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
16785 return Builder.CreateCall(Callee, Ops);
16786 }
16787 default:
16788 return nullptr;
16789 }
16790}
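// Editor's sketch (not part of CGBuiltin.cpp): scalar, per-lane equivalents of
// the compare+select lowering used above for the WebAssembly integer min/max
// and abs builtins. The function names are illustrative only.
#include <cstdint>

inline int32_t WasmMinS(int32_t A, int32_t B) { return A < B ? A : B; }     // ICmpSLT + select
inline uint32_t WasmMaxU(uint32_t A, uint32_t B) { return A > B ? A : B; }  // ICmpUGT + select
inline int32_t WasmAbs(int32_t A) { return A < 0 ? -A : A; }                // neg + ICmpSLT + select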
16791
16792static std::pair<Intrinsic::ID, unsigned>
16793getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
16794 struct Info {
16795 unsigned BuiltinID;
16796 Intrinsic::ID IntrinsicID;
16797 unsigned VecLen;
16798 };
16799 Info Infos[] = {
16800#define CUSTOM_BUILTIN_MAPPING(x,s) \
16801 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
16802 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
16803 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
16804 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
16805 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
16806 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
16807 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
16808 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
16809 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
16810 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
16811 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
16812 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
16813 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
16814 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
16815 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
16816 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
16817 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
16818 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
16819 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
16820 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
16821 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
16822 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
16823 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
16824 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
16825 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
16826 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
16827 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
16828 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
16829 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
16830 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
16831 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
16832#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
16833#undef CUSTOM_BUILTIN_MAPPING
16834 };
16835
16836 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
16837 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
16838 (void)SortOnce;
16839
16840 const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
16841 Info{BuiltinID, 0, 0}, CmpInfo);
16842 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
16843 return {Intrinsic::not_intrinsic, 0};
16844
16845 return {F->IntrinsicID, F->VecLen};
16846}
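// Editor's sketch (not part of CGBuiltin.cpp): the sort-once, then
// binary-search table lookup pattern used by getIntrinsicForHexagonNonGCCBuiltin,
// shown on a standalone table. Entry and Lookup are illustrative names.
#include <algorithm>
#include <iterator>

struct Entry { unsigned Key; unsigned Payload; };

unsigned Lookup(unsigned Key) {
  static Entry Table[] = {{30, 3}, {10, 1}, {20, 2}};
  auto Cmp = [](const Entry &A, const Entry &B) { return A.Key < B.Key; };
  // The initializer of a function-local static runs once, so the table is
  // sorted exactly one time, on first use.
  static const bool Sorted = (std::sort(std::begin(Table), std::end(Table), Cmp), true);
  (void)Sorted;
  const Entry *F =
      std::lower_bound(std::begin(Table), std::end(Table), Entry{Key, 0}, Cmp);
  return (F != std::end(Table) && F->Key == Key) ? F->Payload : 0;
}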
16847
16848Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
16849 const CallExpr *E) {
16850 Intrinsic::ID ID;
16851 unsigned VecLen;
16852 std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
16853
16854 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
16855 // The base pointer is passed by address, so it needs to be loaded.
16856 Address A = EmitPointerWithAlignment(E->getArg(0));
16857 Address BP = Address(
16858 Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
16859 llvm::Value *Base = Builder.CreateLoad(BP);
16860 // The treatment of both loads and stores is the same: the arguments for
16861 // the builtin are the same as the arguments for the intrinsic.
16862 // Load:
16863 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
16864 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
16865 // Store:
16866 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
16867 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
16868 SmallVector<llvm::Value*,5> Ops = { Base };
16869 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
16870 Ops.push_back(EmitScalarExpr(E->getArg(i)));
16871
16872 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
16873 // The load intrinsics generate two results (Value, NewBase), stores
16874 // generate one (NewBase). The new base address needs to be stored.
16875 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
16876 : Result;
16877 llvm::Value *LV = Builder.CreateBitCast(
16878 EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
16879 Address Dest = EmitPointerWithAlignment(E->getArg(0));
16880 llvm::Value *RetVal =
16881 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
16882 if (IsLoad)
16883 RetVal = Builder.CreateExtractValue(Result, 0);
16884 return RetVal;
16885 };
16886
16887 // Handle the conversion of bit-reverse load intrinsics to bit code.
16888 // The intrinsic call after this function only reads from memory and the
16889 // write to memory is handled by the store instruction.
16890 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
16891 // The intrinsic generates one result, which is the new value for the base
16892 // pointer. It needs to be returned. The result of the load instruction is
16893 // passed to the intrinsic by address, so the value needs to be stored.
16894 llvm::Value *BaseAddress =
16895 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
16896
16897 // Expressions like &(*pt++) will be incremented per evaluation.
16898 // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
16899 // per call.
16900 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
16901 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
16902 DestAddr.getAlignment());
16903 llvm::Value *DestAddress = DestAddr.getPointer();
16904
16905 // Operands are Base, Dest, Modifier.
16906 // The intrinsic format in LLVM IR is defined as
16907 // { ValueType, i8* } (i8*, i32).
16908 llvm::Value *Result = Builder.CreateCall(
16909 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
16910
16911 // The value needs to be stored as the variable is passed by reference.
16912 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
16913
16914 // The store needs to be truncated to fit the destination type.
16915 // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
16916 // to be handled with stores of the respective destination type.
16917 DestVal = Builder.CreateTrunc(DestVal, DestTy);
16918
16919 llvm::Value *DestForStore =
16920 Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
16921 Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
16922 // The updated value of the base pointer is returned.
16923 return Builder.CreateExtractValue(Result, 1);
16924 };
16925
16926 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
16927 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
16928 : Intrinsic::hexagon_V6_vandvrt;
16929 return Builder.CreateCall(CGM.getIntrinsic(ID),
16930 {Vec, Builder.getInt32(-1)});
16931 };
16932 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
16933 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
16934 : Intrinsic::hexagon_V6_vandqrt;
16935 return Builder.CreateCall(CGM.getIntrinsic(ID),
16936 {Pred, Builder.getInt32(-1)});
16937 };
16938
16939 switch (BuiltinID) {
16940 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
16941 // and the corresponding C/C++ builtins use loads/stores to update
16942 // the predicate.
16943 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
16944 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
16945 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
16946 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
16947 // Get the type from the 0-th argument.
16948 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
16949 Address PredAddr = Builder.CreateBitCast(
16950 EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
16951 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
16952 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
16953 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
16954
16955 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
16956 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
16957 PredAddr.getAlignment());
16958 return Builder.CreateExtractValue(Result, 0);
16959 }
16960
16961 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
16962 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
16963 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
16964 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
16965 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
16966 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
16967 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
16968 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
16969 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
16970 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
16971 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
16972 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
16973 return MakeCircOp(ID, /*IsLoad=*/true);
16974 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
16975 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
16976 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
16977 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
16978 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
16979 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
16980 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
16981 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
16982 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
16983 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
16984 return MakeCircOp(ID, /*IsLoad=*/false);
16985 case Hexagon::BI__builtin_brev_ldub:
16986 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
16987 case Hexagon::BI__builtin_brev_ldb:
16988 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
16989 case Hexagon::BI__builtin_brev_lduh:
16990 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
16991 case Hexagon::BI__builtin_brev_ldh:
16992 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
16993 case Hexagon::BI__builtin_brev_ldw:
16994 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
16995 case Hexagon::BI__builtin_brev_ldd:
16996 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
16997
16998 default: {
16999 if (ID == Intrinsic::not_intrinsic)
17000 return nullptr;
17001
17002 auto IsVectorPredTy = [](llvm::Type *T) {
17003 return T->isVectorTy() &&
17004 cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
17005 };
17006
17007 llvm::Function *IntrFn = CGM.getIntrinsic(ID);
17008 llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
17009 SmallVector<llvm::Value*,4> Ops;
17010 for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
17011 llvm::Type *T = IntrTy->getParamType(i);
17012 const Expr *A = E->getArg(i);
17013 if (IsVectorPredTy(T)) {
17014 // There will be an implicit cast to a boolean vector. Strip it.
17015 if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
17016 if (Cast->getCastKind() == CK_BitCast)
17017 A = Cast->getSubExpr();
17018 }
17019 Ops.push_back(V2Q(EmitScalarExpr(A)));
17020 } else {
17021 Ops.push_back(EmitScalarExpr(A));
17022 }
17023 }
17024
17025 llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
17026 if (IsVectorPredTy(IntrTy->getReturnType()))
17027 Call = Q2V(Call);
17028
17029 return Call;
17030 } // default
17031 } // switch
17032
17033 return nullptr;
17034}