Bug Summary

File: clang/lib/CodeGen/CGBuiltin.cpp
Warning: line 3027, column 56
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGBuiltin.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/build-llvm/tools/clang/lib/CodeGen -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D CLANG_ROUND_TRIP_CC1_ARGS=ON -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/include -I /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-17-010711-25934-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp

/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp

1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCUDARuntime.h"
14#include "CGCXXABI.h"
15#include "CGObjCRuntime.h"
16#include "CGOpenCLRuntime.h"
17#include "CGRecordLayout.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "PatternInit.h"
22#include "TargetInfo.h"
23#include "clang/AST/ASTContext.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/OSLog.h"
27#include "clang/Basic/TargetBuiltins.h"
28#include "clang/Basic/TargetInfo.h"
29#include "clang/CodeGen/CGFunctionInfo.h"
30#include "llvm/ADT/APFloat.h"
31#include "llvm/ADT/APInt.h"
32#include "llvm/ADT/SmallPtrSet.h"
33#include "llvm/ADT/StringExtras.h"
34#include "llvm/Analysis/ValueTracking.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/InlineAsm.h"
37#include "llvm/IR/Intrinsics.h"
38#include "llvm/IR/IntrinsicsAArch64.h"
39#include "llvm/IR/IntrinsicsAMDGPU.h"
40#include "llvm/IR/IntrinsicsARM.h"
41#include "llvm/IR/IntrinsicsBPF.h"
42#include "llvm/IR/IntrinsicsHexagon.h"
43#include "llvm/IR/IntrinsicsNVPTX.h"
44#include "llvm/IR/IntrinsicsPowerPC.h"
45#include "llvm/IR/IntrinsicsR600.h"
46#include "llvm/IR/IntrinsicsRISCV.h"
47#include "llvm/IR/IntrinsicsS390.h"
48#include "llvm/IR/IntrinsicsWebAssembly.h"
49#include "llvm/IR/IntrinsicsX86.h"
50#include "llvm/IR/MDBuilder.h"
51#include "llvm/IR/MatrixBuilder.h"
52#include "llvm/Support/ConvertUTF.h"
53#include "llvm/Support/ScopedPrinter.h"
54#include "llvm/Support/X86TargetParser.h"
55#include <sstream>
56
57using namespace clang;
58using namespace CodeGen;
59using namespace llvm;
60
61static
62int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
63 return std::min(High, std::max(Low, Value));
64}
65
66static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
67 Align AlignmentInBytes) {
68 ConstantInt *Byte;
69 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
70 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
71 // Nothing to initialize.
72 return;
73 case LangOptions::TrivialAutoVarInitKind::Zero:
74 Byte = CGF.Builder.getInt8(0x00);
75 break;
76 case LangOptions::TrivialAutoVarInitKind::Pattern: {
77 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
78 Byte = llvm::dyn_cast<llvm::ConstantInt>(
79 initializationPatternFor(CGF.CGM, Int8));
80 break;
81 }
82 }
83 if (CGF.CGM.stopAutoInit())
84 return;
85 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
86 I->addAnnotationMetadata("auto-init");
87}
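A minimal standalone sketch (not part of CGBuiltin.cpp) of the source pattern this helper covers: when -ftrivial-auto-var-init=zero or =pattern is enabled, the memory returned by __builtin_alloca is memset with the chosen byte by initializeAlloca.

#include <cstdio>

int main() {
  // With -ftrivial-auto-var-init=zero, initializeAlloca emits a memset of
  // 0x00 over this buffer; with =pattern, a repeated pattern byte instead.
  // Reading Buf[0] is only meaningful when one of those flags is enabled.
  unsigned char *Buf = static_cast<unsigned char *>(__builtin_alloca(16));
  std::printf("first byte: 0x%02x\n", Buf[0]);
  return 0;
}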
88
89/// getBuiltinLibFunction - Given a builtin id for a function like
90/// "__builtin_fabsf", return a Function* for "fabsf".
91llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
92 unsigned BuiltinID) {
93 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
94
95 // Get the name, skip over the __builtin_ prefix (if necessary).
96 StringRef Name;
97 GlobalDecl D(FD);
98
99 // If the builtin has been declared explicitly with an assembler label,
100 // use the mangled name. This differs from the plain label on platforms
101 // that prefix labels.
102 if (FD->hasAttr<AsmLabelAttr>())
103 Name = getMangledName(D);
104 else
105 Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
106
107 llvm::FunctionType *Ty =
108 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
109
110 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
111}
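The "+ 10" above skips the "__builtin_" prefix when mapping a builtin like "__builtin_fabsf" to the library function "fabsf". A minimal sketch of that assumption (illustration only, not part of the file):

#include <cassert>
#include <cstring>

int main() {
  // getBuiltinLibFunction's "Name = ... + 10" relies on every library builtin
  // name starting with the 10-character prefix "__builtin_".
  const char *BuiltinName = "__builtin_fabsf";
  assert(std::strlen("__builtin_") == 10);
  assert(std::strcmp(BuiltinName + 10, "fabsf") == 0);
  return 0;
}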
112
113/// Emit the conversions required to turn the given value into an
114/// integer of the given size.
115static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
116 QualType T, llvm::IntegerType *IntType) {
117 V = CGF.EmitToMemory(V, T);
118
119 if (V->getType()->isPointerTy())
120 return CGF.Builder.CreatePtrToInt(V, IntType);
121
122 assert(V->getType() == IntType);
123 return V;
124}
125
126static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
127 QualType T, llvm::Type *ResultType) {
128 V = CGF.EmitFromMemory(V, T);
129
130 if (ResultType->isPointerTy())
131 return CGF.Builder.CreateIntToPtr(V, ResultType);
132
133 assert(V->getType() == ResultType);
134 return V;
135}
136
137/// Utility to insert an atomic instruction based on Intrinsic::ID
138/// and the expression node.
139static Value *MakeBinaryAtomicValue(
140 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
141 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
142 QualType T = E->getType();
143 assert(E->getArg(0)->getType()->isPointerType());
144 assert(CGF.getContext().hasSameUnqualifiedType(T,
145 E->getArg(0)->getType()->getPointeeType()));
146 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
147
148 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
149 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
150
151 llvm::IntegerType *IntType =
152 llvm::IntegerType::get(CGF.getLLVMContext(),
153 CGF.getContext().getTypeSize(T));
154 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
155
156 llvm::Value *Args[2];
157 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
158 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
159 llvm::Type *ValueType = Args[1]->getType();
160 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
161
162 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
163 Kind, Args[0], Args[1], Ordering);
164 return EmitFromInt(CGF, Result, T, ValueType);
165}
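A minimal sketch of the kind of source-level call this utility lowers, assuming the GCC-compatible __sync builtins (illustration only):

#include <cassert>

int main() {
  int Counter = 41;
  // __sync_fetch_and_add is lowered through MakeBinaryAtomicValue to an
  // 'atomicrmw add ... seq_cst' and returns the value seen before the add.
  int Old = __sync_fetch_and_add(&Counter, 1);
  assert(Old == 41 && Counter == 42);
  return 0;
}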
166
167static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
168 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
169 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
170
171 // Convert the type of the pointer to a pointer to the stored type.
172 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
173 Value *BC = CGF.Builder.CreateBitCast(
174 Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
175 LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
176 LV.setNontemporal(true);
177 CGF.EmitStoreOfScalar(Val, LV, false);
178 return nullptr;
179}
180
181static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
182 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
183
184 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
185 LV.setNontemporal(true);
186 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
187}
188
189static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
190 llvm::AtomicRMWInst::BinOp Kind,
191 const CallExpr *E) {
192 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
193}
194
195/// Utility to insert an atomic instruction based on Intrinsic::ID and
196/// the expression node, where the return value is the result of the
197/// operation.
198static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
199 llvm::AtomicRMWInst::BinOp Kind,
200 const CallExpr *E,
201 Instruction::BinaryOps Op,
202 bool Invert = false) {
203 QualType T = E->getType();
204 assert(E->getArg(0)->getType()->isPointerType());
205 assert(CGF.getContext().hasSameUnqualifiedType(T,
206 E->getArg(0)->getType()->getPointeeType()));
207 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
208
209 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
210 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
211
212 llvm::IntegerType *IntType =
213 llvm::IntegerType::get(CGF.getLLVMContext(),
214 CGF.getContext().getTypeSize(T));
215 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
216
217 llvm::Value *Args[2];
218 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
219 llvm::Type *ValueType = Args[1]->getType();
220 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
221 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
222
223 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
224 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
225 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
226 if (Invert)
227 Result =
228 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
229 llvm::ConstantInt::getAllOnesValue(IntType));
230 Result = EmitFromInt(CGF, Result, T, ValueType);
231 return RValue::get(Result);
232}
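The post-op variant re-applies the operation to the loaded value so the builtin returns the updated result, as the __sync_*_and_fetch family requires. A minimal sketch (illustration only):

#include <cassert>

int main() {
  int Counter = 41;
  // __sync_add_and_fetch goes through EmitBinaryAtomicPost: the atomicrmw
  // result (the old value) is re-combined with the operand by CreateBinOp,
  // so the builtin returns the updated value instead of the old one.
  int New = __sync_add_and_fetch(&Counter, 1);
  assert(New == 42 && Counter == 42);
  return 0;
}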
233
234/// Utility to insert an atomic cmpxchg instruction.
235///
236/// @param CGF The current codegen function.
237/// @param E Builtin call expression to convert to cmpxchg.
238/// arg0 - address to operate on
239/// arg1 - value to compare with
240/// arg2 - new value
241/// @param ReturnBool Specifies whether to return success flag of
242/// cmpxchg result or the old value.
243///
244/// @returns result of cmpxchg, according to ReturnBool
245///
246/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
247/// invoke the function EmitAtomicCmpXchgForMSIntrin.
248static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
249 bool ReturnBool) {
250 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
251 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
252 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
253
254 llvm::IntegerType *IntType = llvm::IntegerType::get(
255 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
256 llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
257
258 Value *Args[3];
259 Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
260 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
261 llvm::Type *ValueType = Args[1]->getType();
262 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
263 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
264
265 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
266 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
267 llvm::AtomicOrdering::SequentiallyConsistent);
268 if (ReturnBool)
269 // Extract boolean success flag and zext it to int.
270 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
271 CGF.ConvertType(E->getType()));
272 else
273 // Extract old value and emit it using the same type as compare value.
274 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
275 ValueType);
276}
277
278/// This function should be invoked to emit atomic cmpxchg for Microsoft's
279/// _InterlockedCompareExchange* intrinsics which have the following signature:
280/// T _InterlockedCompareExchange(T volatile *Destination,
281/// T Exchange,
282/// T Comparand);
283///
284/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
285/// cmpxchg *Destination, Comparand, Exchange.
286/// So we need to swap Comparand and Exchange when invoking
287/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
288/// function MakeAtomicCmpXchgValue since it expects the arguments to be
289/// already swapped.
290
291static
292Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
293 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
294 assert(E->getArg(0)->getType()->isPointerType());
295 assert(CGF.getContext().hasSameUnqualifiedType(
296 E->getType(), E->getArg(0)->getType()->getPointeeType()));
297 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
298 E->getArg(1)->getType()));
299 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
300 E->getArg(2)->getType()));
301
302 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
303 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
304 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
305
306 // For Release ordering, the failure ordering should be Monotonic.
307 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
308 AtomicOrdering::Monotonic :
309 SuccessOrdering;
310
311 // The atomic instruction is marked volatile for consistency with MSVC. This
312 // blocks the few atomics optimizations that LLVM has. If we want to optimize
313 // _Interlocked* operations in the future, we will have to remove the volatile
314 // marker.
315 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
316 Destination, Comparand, Exchange,
317 SuccessOrdering, FailureOrdering);
318 Result->setVolatile(true);
319 return CGF.Builder.CreateExtractValue(Result, 0);
320}
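A portable stand-in sketching the operand-order point made in the comment above: the MSVC-style intrinsic takes (Destination, Exchange, Comparand) and returns the old value, while the underlying compare-exchange takes (pointer, comparand, new value), hence the swap. This uses the GCC/Clang __atomic builtin purely for illustration; it is not what MSVC or this codegen path emits.

#include <cassert>

// Illustration only: mimic the MSVC argument order on top of
// __atomic_compare_exchange_n to show why Comparand and Exchange are swapped.
long InterlockedCompareExchangeLike(volatile long *Destination, long Exchange,
                                    long Comparand) {
  long Expected = Comparand;
  __atomic_compare_exchange_n(const_cast<long *>(Destination), &Expected,
                              Exchange, /*weak=*/false, __ATOMIC_SEQ_CST,
                              __ATOMIC_SEQ_CST);
  return Expected; // old value on success and on failure, like _Interlocked*
}

int main() {
  long Value = 5;
  assert(InterlockedCompareExchangeLike(&Value, 7, 5) == 5 && Value == 7);
  assert(InterlockedCompareExchangeLike(&Value, 9, 5) == 7 && Value == 7);
  return 0;
}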
321
322// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
323// prototyped like this:
324//
325// unsigned char _InterlockedCompareExchange128...(
326// __int64 volatile * _Destination,
327// __int64 _ExchangeHigh,
328// __int64 _ExchangeLow,
329// __int64 * _ComparandResult);
330static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
331 const CallExpr *E,
332 AtomicOrdering SuccessOrdering) {
333 assert(E->getNumArgs() == 4);
334 llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
335 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
336 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
337 llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
338
339 assert(Destination->getType()->isPointerTy());
340 assert(!ExchangeHigh->getType()->isPointerTy());
341 assert(!ExchangeLow->getType()->isPointerTy());
342 assert(ComparandPtr->getType()->isPointerTy());
343
344 // For Release ordering, the failure ordering should be Monotonic.
345 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
346 ? AtomicOrdering::Monotonic
347 : SuccessOrdering;
348
349 // Convert to i128 pointers and values.
350 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
351 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
352 Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
353 Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
354 CGF.getContext().toCharUnitsFromBits(128));
355
356 // (((i128)hi) << 64) | ((i128)lo)
357 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
358 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
359 ExchangeHigh =
360 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
361 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
362
363 // Load the comparand for the instruction.
364 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
365
366 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
367 SuccessOrdering, FailureOrdering);
368
369 // The atomic instruction is marked volatile for consistency with MSVC. This
370 // blocks the few atomics optimizations that LLVM has. If we want to optimize
371 // _Interlocked* operations in the future, we will have to remove the volatile
372 // marker.
373 CXI->setVolatile(true);
374
375 // Store the result as an outparameter.
376 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
377 ComparandResult);
378
379 // Get the success boolean and zero extend it to i8.
380 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
381 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
382}
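The (((i128)hi) << 64) | ((i128)lo) construction above can be sketched directly with the compiler's 128-bit integer extension (assumption: __int128 is available on the 64-bit targets this intrinsic exists for; illustration only):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t ExchangeHigh = 0x0123456789abcdefULL;
  uint64_t ExchangeLow = 0xfedcba9876543210ULL;
  // Same combine that EmitAtomicCmpXchg128ForMSIntrin emits in IR:
  // zext both halves to i128, shift the high half left by 64, then OR.
  unsigned __int128 Exchange =
      ((unsigned __int128)ExchangeHigh << 64) | (unsigned __int128)ExchangeLow;
  assert((uint64_t)(Exchange >> 64) == ExchangeHigh);
  assert((uint64_t)Exchange == ExchangeLow);
  return 0;
}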
383
384static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
385 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
386 assert(E->getArg(0)->getType()->isPointerType());
387
388 auto *IntTy = CGF.ConvertType(E->getType());
389 auto *Result = CGF.Builder.CreateAtomicRMW(
390 AtomicRMWInst::Add,
391 CGF.EmitScalarExpr(E->getArg(0)),
392 ConstantInt::get(IntTy, 1),
393 Ordering);
394 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
395}
396
397static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
398 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
399 assert(E->getArg(0)->getType()->isPointerType());
400
401 auto *IntTy = CGF.ConvertType(E->getType());
402 auto *Result = CGF.Builder.CreateAtomicRMW(
403 AtomicRMWInst::Sub,
404 CGF.EmitScalarExpr(E->getArg(0)),
405 ConstantInt::get(IntTy, 1),
406 Ordering);
407 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
408}
409
410// Build a plain volatile load.
411static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
412 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
413 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
414 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
415 llvm::Type *ITy =
416 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
417 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
418 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
419 Load->setVolatile(true);
420 return Load;
421}
422
423// Build a plain volatile store.
424static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
425 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
426 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
427 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
428 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
429 llvm::Type *ITy =
430 llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
431 Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
432 llvm::StoreInst *Store =
433 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
434 Store->setVolatile(true);
435 return Store;
436}
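These helpers back the __iso_volatile_load*/store* builtins, which force a volatile access of the pointee's width. A minimal sketch of the equivalent plain volatile access in standard C++ (illustration only, not the MSVC builtins themselves):

#include <cassert>
#include <cstdint>

int main() {
  int32_t Slot = 0;
  volatile int32_t *P = &Slot;
  *P = 42;        // comparable to __iso_volatile_store32(&Slot, 42)
  int32_t V = *P; // comparable to __iso_volatile_load32(&Slot)
  assert(V == 42);
  return 0;
}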
437
438// Emit a simple mangled intrinsic that has 1 argument and a return type
439// matching the argument type. Depending on mode, this may be a constrained
440// floating-point intrinsic.
441static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
442 const CallExpr *E, unsigned IntrinsicID,
443 unsigned ConstrainedIntrinsicID) {
444 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
445
446 if (CGF.Builder.getIsFPConstrained()) {
447 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
448 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
449 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
450 } else {
451 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
452 return CGF.Builder.CreateCall(F, Src0);
453 }
454}
455
456// Emit an intrinsic that has 2 operands of the same type as its result.
457// Depending on mode, this may be a constrained floating-point intrinsic.
458static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
459 const CallExpr *E, unsigned IntrinsicID,
460 unsigned ConstrainedIntrinsicID) {
461 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
462 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
463
464 if (CGF.Builder.getIsFPConstrained()) {
465 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
466 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
467 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
468 } else {
469 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
470 return CGF.Builder.CreateCall(F, { Src0, Src1 });
471 }
472}
473
474// Emit an intrinsic that has 3 operands of the same type as its result.
475// Depending on mode, this may be a constrained floating-point intrinsic.
476static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
477 const CallExpr *E, unsigned IntrinsicID,
478 unsigned ConstrainedIntrinsicID) {
479 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
480 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
481 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
482
483 if (CGF.Builder.getIsFPConstrained()) {
484 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
485 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
486 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
487 } else {
488 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
489 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
490 }
491}
492
493// Emit an intrinsic where all operands are of the same type as the result.
494// Depending on mode, this may be a constrained floating-point intrinsic.
495static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
496 unsigned IntrinsicID,
497 unsigned ConstrainedIntrinsicID,
498 llvm::Type *Ty,
499 ArrayRef<Value *> Args) {
500 Function *F;
501 if (CGF.Builder.getIsFPConstrained())
502 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
503 else
504 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
505
506 if (CGF.Builder.getIsFPConstrained())
507 return CGF.Builder.CreateConstrainedFPCall(F, Args);
508 else
509 return CGF.Builder.CreateCall(F, Args);
510}
511
512// Emit a simple mangled intrinsic that has 1 argument and a return type
513// matching the argument type.
514static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
515 const CallExpr *E,
516 unsigned IntrinsicID) {
517 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
518
519 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
520 return CGF.Builder.CreateCall(F, Src0);
521}
522
523// Emit an intrinsic that has 2 operands of the same type as its result.
524static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
525 const CallExpr *E,
526 unsigned IntrinsicID) {
527 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
528 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
529
530 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
531 return CGF.Builder.CreateCall(F, { Src0, Src1 });
532}
533
534// Emit an intrinsic that has 3 operands of the same type as its result.
535static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
536 const CallExpr *E,
537 unsigned IntrinsicID) {
538 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
539 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
540 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
541
542 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
543 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
544}
545
546// Emit an intrinsic that has 1 float or double operand, and 1 integer.
547static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
548 const CallExpr *E,
549 unsigned IntrinsicID) {
550 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
551 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
552
553 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
554 return CGF.Builder.CreateCall(F, {Src0, Src1});
555}
556
557// Emit an intrinsic that has overloaded integer result and fp operand.
558static Value *
559emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
560 unsigned IntrinsicID,
561 unsigned ConstrainedIntrinsicID) {
562 llvm::Type *ResultType = CGF.ConvertType(E->getType());
563 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
564
565 if (CGF.Builder.getIsFPConstrained()) {
566 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
567 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
568 {ResultType, Src0->getType()});
569 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
570 } else {
571 Function *F =
572 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
573 return CGF.Builder.CreateCall(F, Src0);
574 }
575}
576
577/// EmitFAbs - Emit a call to @llvm.fabs().
578static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
579 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
580 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
581 Call->setDoesNotAccessMemory();
582 return Call;
583}
584
585/// Emit the computation of the sign bit for a floating point value. Returns
586/// the i1 sign bit value.
587static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
588 LLVMContext &C = CGF.CGM.getLLVMContext();
589
590 llvm::Type *Ty = V->getType();
591 int Width = Ty->getPrimitiveSizeInBits();
592 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
593 V = CGF.Builder.CreateBitCast(V, IntTy);
594 if (Ty->isPPC_FP128Ty()) {
595 // We want the sign bit of the higher-order double. The bitcast we just
596 // did works as if the double-double was stored to memory and then
597 // read as an i128. The "store" will put the higher-order double in the
598 // lower address in both little- and big-Endian modes, but the "load"
599 // will treat those bits as a different part of the i128: the low bits in
600 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
601 // we need to shift the high bits down to the low before truncating.
602 Width >>= 1;
603 if (CGF.getTarget().isBigEndian()) {
604 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
605 V = CGF.Builder.CreateLShr(V, ShiftCst);
606 }
607 // We are truncating value in order to extract the higher-order
608 // double, which we will be using to extract the sign from.
609 IntTy = llvm::IntegerType::get(C, Width);
610 V = CGF.Builder.CreateTrunc(V, IntTy);
611 }
612 Value *Zero = llvm::Constant::getNullValue(IntTy);
613 return CGF.Builder.CreateICmpSLT(V, Zero);
614}
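The ICmpSLT-against-zero trick works because, for IEEE types, the sign bit is the top bit of the same-width integer. A standalone sketch of the same idea for an ordinary double, using memcpy as the bitcast (illustration only; the PPC double-double special case above is not covered):

#include <cassert>
#include <cstdint>
#include <cstring>

// Same idea as EmitSignBit: reinterpret the bits as a same-width signed
// integer and test whether that integer is negative.
static bool SignBitOf(double V) {
  int64_t Bits;
  static_assert(sizeof(Bits) == sizeof(V), "width must match");
  std::memcpy(&Bits, &V, sizeof(Bits));
  return Bits < 0;
}

int main() {
  assert(SignBitOf(-0.0) == true); // negative zero still has the sign bit set
  assert(SignBitOf(1.5) == false);
  assert(SignBitOf(-3.0) == true);
  return 0;
}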
615
616static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
617 const CallExpr *E, llvm::Constant *calleeValue) {
618 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
619 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
620}
621
622/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
623/// depending on IntrinsicID.
624///
625/// \arg CGF The current codegen function.
626/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
627/// \arg X The first argument to the llvm.*.with.overflow.*.
628/// \arg Y The second argument to the llvm.*.with.overflow.*.
629/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
630/// \returns The result (i.e. sum/product) returned by the intrinsic.
631static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
632 const llvm::Intrinsic::ID IntrinsicID,
633 llvm::Value *X, llvm::Value *Y,
634 llvm::Value *&Carry) {
635 // Make sure we have integers of the same width.
636 assert(X->getType() == Y->getType() &&
637 "Arguments must be the same type. (Did you forget to make sure both "
638 "arguments have the same integer width?)");
639
640 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
641 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
642 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
643 return CGF.Builder.CreateExtractValue(Tmp, 0);
644}
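These llvm.*.with.overflow intrinsics back the __builtin_*_overflow builtins; a minimal sketch of the corresponding source-level call (illustration only):

#include <cassert>
#include <climits>

int main() {
  int Sum = 0;
  // Lowered to llvm.sadd.with.overflow.i32; the returned flag is the "Carry"
  // value that EmitOverflowIntrinsic extracts from the result pair.
  bool Overflowed = __builtin_sadd_overflow(INT_MAX, 1, &Sum);
  assert(Overflowed);
  assert(!__builtin_sadd_overflow(1, 2, &Sum) && Sum == 3);
  return 0;
}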
645
646static Value *emitRangedBuiltin(CodeGenFunction &CGF,
647 unsigned IntrinsicID,
648 int low, int high) {
649 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
650 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
651 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
652 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
653 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
654 return Call;
655}
656
657namespace {
658 struct WidthAndSignedness {
659 unsigned Width;
660 bool Signed;
661 };
662}
663
664static WidthAndSignedness
665getIntegerWidthAndSignedness(const clang::ASTContext &context,
666 const clang::QualType Type) {
667 assert(Type->isIntegerType() && "Given type is not an integer.");
668 unsigned Width = Type->isBooleanType() ? 1
669 : Type->isExtIntType() ? context.getIntWidth(Type)
670 : context.getTypeInfo(Type).Width;
671 bool Signed = Type->isSignedIntegerType();
672 return {Width, Signed};
673}
674
675// Given one or more integer types, this function produces an integer type that
676// encompasses them: any value in one of the given types could be expressed in
677// the encompassing type.
678static struct WidthAndSignedness
679EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
680 assert(Types.size() > 0 && "Empty list of types.");
681
682 // If any of the given types is signed, we must return a signed type.
683 bool Signed = false;
684 for (const auto &Type : Types) {
685 Signed |= Type.Signed;
686 }
687
688 // The encompassing type must have a width greater than or equal to the width
689 // of the specified types. Additionally, if the encompassing type is signed,
690 // its width must be strictly greater than the width of any unsigned types
691 // given.
692 unsigned Width = 0;
693 for (const auto &Type : Types) {
694 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
695 if (Width < MinWidth) {
696 Width = MinWidth;
697 }
698 }
699
700 return {Width, Signed};
701}
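A standalone sketch of the encompassing-type rule documented above: combining a signed 32-bit type with an unsigned 32-bit type requires a signed 33-bit result, because the unsigned values need one extra bit inside a signed type. The struct and helper here are re-declared only for illustration; they are not the ones in this file.

#include <cassert>
#include <vector>

struct WidthAndSignedness { unsigned Width; bool Signed; };

// Re-implementation of the rule for illustration only.
static WidthAndSignedness Encompass(const std::vector<WidthAndSignedness> &Types) {
  bool Signed = false;
  for (const auto &T : Types)
    Signed |= T.Signed;
  unsigned Width = 0;
  for (const auto &T : Types) {
    // An unsigned type needs one extra bit to fit inside a signed result.
    unsigned MinWidth = T.Width + (Signed && !T.Signed);
    if (Width < MinWidth)
      Width = MinWidth;
  }
  return {Width, Signed};
}

int main() {
  WidthAndSignedness R = Encompass({{32, true}, {32, false}});
  assert(R.Width == 33 && R.Signed); // i33 holds both int32_t and uint32_t values
  return 0;
}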
702
703Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
704 llvm::Type *DestType = Int8PtrTy;
705 if (ArgValue->getType() != DestType)
706 ArgValue =
707 Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
708
709 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
710 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
711}
712
713/// Checks if using the result of __builtin_object_size(p, @p From) in place of
714/// __builtin_object_size(p, @p To) is correct
715static bool areBOSTypesCompatible(int From, int To) {
716 // Note: Our __builtin_object_size implementation currently treats Type=0 and
717 // Type=2 identically. Encoding this implementation detail here may make
718 // improving __builtin_object_size difficult in the future, so it's omitted.
719 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
720}
721
722static llvm::Value *
723getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
724 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
725}
726
727llvm::Value *
728CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
729 llvm::IntegerType *ResType,
730 llvm::Value *EmittedE,
731 bool IsDynamic) {
732 uint64_t ObjectSize;
733 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
734 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
735 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
736}
737
738/// Returns a Value corresponding to the size of the given expression.
739/// This Value may be either of the following:
740/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
741/// it)
742/// - A call to the @llvm.objectsize intrinsic
743///
744/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
745/// and we wouldn't otherwise try to reference a pass_object_size parameter,
746/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
747llvm::Value *
748CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
749 llvm::IntegerType *ResType,
750 llvm::Value *EmittedE, bool IsDynamic) {
751 // We need to reference an argument if the pointer is a parameter with the
752 // pass_object_size attribute.
753 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
754 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
755 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
756 if (Param != nullptr && PS != nullptr &&
757 areBOSTypesCompatible(PS->getType(), Type)) {
758 auto Iter = SizeArguments.find(Param);
759 assert(Iter != SizeArguments.end());
760
761 const ImplicitParamDecl *D = Iter->second;
762 auto DIter = LocalDeclMap.find(D);
763 assert(DIter != LocalDeclMap.end());
764
765 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
766 getContext().getSizeType(), E->getBeginLoc());
767 }
768 }
769
770 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
771 // evaluate E for side-effects. In either case, we shouldn't lower to
772 // @llvm.objectsize.
773 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
774 return getDefaultBuiltinObjectSizeResult(Type, ResType);
775
776 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
777 assert(Ptr->getType()->isPointerTy() &&
778 "Non-pointer passed to __builtin_object_size?");
779
780 Function *F =
781 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
782
783 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
784 Value *Min = Builder.getInt1((Type & 2) != 0);
785 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
786 Value *NullIsUnknown = Builder.getTrue();
787 Value *Dynamic = Builder.getInt1(IsDynamic);
788 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
789}
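A minimal sketch of a call that takes this path when no pass_object_size parameter is involved (illustration only):

#include <cassert>
#include <cstddef>

int main() {
  char Buf[32];
  // Either folded by the frontend (tryEvaluateObjectSize) or lowered to
  // @llvm.objectsize with min=false, nullunknown=true, dynamic=false;
  // for type 0 the "unknown" result is (size_t)-1.
  size_t Size = __builtin_object_size(Buf, 0);
  assert(Size == sizeof(Buf) || Size == (size_t)-1);
  return 0;
}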
790
791namespace {
792/// A struct to generically describe a bit test intrinsic.
793struct BitTest {
794 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
795 enum InterlockingKind : uint8_t {
796 Unlocked,
797 Sequential,
798 Acquire,
799 Release,
800 NoFence
801 };
802
803 ActionKind Action;
804 InterlockingKind Interlocking;
805 bool Is64Bit;
806
807 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
808};
809} // namespace
810
811BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
812 switch (BuiltinID) {
813 // Main portable variants.
814 case Builtin::BI_bittest:
815 return {TestOnly, Unlocked, false};
816 case Builtin::BI_bittestandcomplement:
817 return {Complement, Unlocked, false};
818 case Builtin::BI_bittestandreset:
819 return {Reset, Unlocked, false};
820 case Builtin::BI_bittestandset:
821 return {Set, Unlocked, false};
822 case Builtin::BI_interlockedbittestandreset:
823 return {Reset, Sequential, false};
824 case Builtin::BI_interlockedbittestandset:
825 return {Set, Sequential, false};
826
827 // X86-specific 64-bit variants.
828 case Builtin::BI_bittest64:
829 return {TestOnly, Unlocked, true};
830 case Builtin::BI_bittestandcomplement64:
831 return {Complement, Unlocked, true};
832 case Builtin::BI_bittestandreset64:
833 return {Reset, Unlocked, true};
834 case Builtin::BI_bittestandset64:
835 return {Set, Unlocked, true};
836 case Builtin::BI_interlockedbittestandreset64:
837 return {Reset, Sequential, true};
838 case Builtin::BI_interlockedbittestandset64:
839 return {Set, Sequential, true};
840
841 // ARM/AArch64-specific ordering variants.
842 case Builtin::BI_interlockedbittestandset_acq:
843 return {Set, Acquire, false};
844 case Builtin::BI_interlockedbittestandset_rel:
845 return {Set, Release, false};
846 case Builtin::BI_interlockedbittestandset_nf:
847 return {Set, NoFence, false};
848 case Builtin::BI_interlockedbittestandreset_acq:
849 return {Reset, Acquire, false};
850 case Builtin::BI_interlockedbittestandreset_rel:
851 return {Reset, Release, false};
852 case Builtin::BI_interlockedbittestandreset_nf:
853 return {Reset, NoFence, false};
854 }
855 llvm_unreachable("expected only bittest intrinsics");
856}
857
858static char bitActionToX86BTCode(BitTest::ActionKind A) {
859 switch (A) {
860 case BitTest::TestOnly: return '\0';
861 case BitTest::Complement: return 'c';
862 case BitTest::Reset: return 'r';
863 case BitTest::Set: return 's';
864 }
865 llvm_unreachable("invalid action");
866}
867
868static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
869 BitTest BT,
870 const CallExpr *E, Value *BitBase,
871 Value *BitPos) {
872 char Action = bitActionToX86BTCode(BT.Action);
873 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
874
875 // Build the assembly.
876 SmallString<64> Asm;
877 raw_svector_ostream AsmOS(Asm);
878 if (BT.Interlocking != BitTest::Unlocked)
879 AsmOS << "lock ";
880 AsmOS << "bt";
881 if (Action)
882 AsmOS << Action;
883 AsmOS << SizeSuffix << " $2, ($1)";
884
885 // Build the constraints. FIXME: We should support immediates when possible.
886 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
887 std::string MachineClobbers = CGF.getTarget().getClobbers();
888 if (!MachineClobbers.empty()) {
889 Constraints += ',';
890 Constraints += MachineClobbers;
891 }
892 llvm::IntegerType *IntType = llvm::IntegerType::get(
893 CGF.getLLVMContext(),
894 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
895 llvm::Type *IntPtrType = IntType->getPointerTo();
896 llvm::FunctionType *FTy =
897 llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
898
899 llvm::InlineAsm *IA =
900 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
901 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
902}
903
904static llvm::AtomicOrdering
905getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
906 switch (I) {
907 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
908 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
909 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
910 case BitTest::Release: return llvm::AtomicOrdering::Release;
911 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
912 }
913 llvm_unreachable("invalid interlocking");
914}
915
916/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
917/// bits and a bit position and read and optionally modify the bit at that
918/// position. The position index can be arbitrarily large, i.e. it can be larger
919/// than 31 or 63, so we need an indexed load in the general case.
920static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
921 unsigned BuiltinID,
922 const CallExpr *E) {
923 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
924 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
925
926 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
927
928 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
929 // indexing operation internally. Use them if possible.
930 if (CGF.getTarget().getTriple().isX86())
931 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
932
933 // Otherwise, use generic code to load one byte and test the bit. Use all but
934 // the bottom three bits as the array index, and the bottom three bits to form
935 // a mask.
936 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
937 Value *ByteIndex = CGF.Builder.CreateAShr(
938 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
939 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
940 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
941 ByteIndex, "bittest.byteaddr"),
942 CharUnits::One());
943 Value *PosLow =
944 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
945 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
946
947 // The updating instructions will need a mask.
948 Value *Mask = nullptr;
949 if (BT.Action != BitTest::TestOnly) {
950 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
951 "bittest.mask");
952 }
953
954 // Check the action and ordering of the interlocked intrinsics.
955 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
956
957 Value *OldByte = nullptr;
958 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
959 // Emit a combined atomicrmw load/store operation for the interlocked
960 // intrinsics.
961 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
962 if (BT.Action == BitTest::Reset) {
963 Mask = CGF.Builder.CreateNot(Mask);
964 RMWOp = llvm::AtomicRMWInst::And;
965 }
966 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
967 Ordering);
968 } else {
969 // Emit a plain load for the non-interlocked intrinsics.
970 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
971 Value *NewByte = nullptr;
972 switch (BT.Action) {
973 case BitTest::TestOnly:
974 // Don't store anything.
975 break;
976 case BitTest::Complement:
977 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
978 break;
979 case BitTest::Reset:
980 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
981 break;
982 case BitTest::Set:
983 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
984 break;
985 }
986 if (NewByte)
987 CGF.Builder.CreateStore(NewByte, ByteAddr);
988 }
989
990 // However we loaded the old byte, either by plain load or atomicrmw, shift
991 // the bit into the low position and mask it to 0 or 1.
992 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
993 return CGF.Builder.CreateAnd(
994 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
995}
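A standalone sketch of the generic (non-x86) lowering formula quoted in the comment above, indexing the byte array with BitPos >> 3 and masking with BitPos & 0x7 (illustration only):

#include <cassert>
#include <cstdint>

// Mirrors the generic lowering: byte index = BitPos >> 3, bit = BitPos & 0x7.
static int BitTestLike(const uint8_t *BitBase, int64_t BitPos) {
  uint8_t Byte = BitBase[BitPos >> 3];
  return (Byte >> (BitPos & 0x7)) & 1;
}

int main() {
  uint8_t Bits[8] = {0};
  Bits[5] = 0x10; // sets overall bit 44 (byte 5, bit 4)
  assert(BitTestLike(Bits, 44) == 1); // positions above 31/63 still work
  assert(BitTestLike(Bits, 43) == 0);
  return 0;
}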
996
997namespace {
998enum class MSVCSetJmpKind {
999 _setjmpex,
1000 _setjmp3,
1001 _setjmp
1002};
1003}
1004
1005/// MSVC handles setjmp a bit differently on different platforms. On every
1006/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1007/// parameters can be passed as variadic arguments, but we always pass none.
1008static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1009 const CallExpr *E) {
1010 llvm::Value *Arg1 = nullptr;
1011 llvm::Type *Arg1Ty = nullptr;
1012 StringRef Name;
1013 bool IsVarArg = false;
1014 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1015 Name = "_setjmp3";
1016 Arg1Ty = CGF.Int32Ty;
1017 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1018 IsVarArg = true;
1019 } else {
1020 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1021 Arg1Ty = CGF.Int8PtrTy;
1022 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1023 Arg1 = CGF.Builder.CreateCall(
1024 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1025 } else
1026 Arg1 = CGF.Builder.CreateCall(
1027 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1028 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1029 }
1030
1031 // Mark the call site and declaration with ReturnsTwice.
1032 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1033 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1034 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1035 llvm::Attribute::ReturnsTwice);
1036 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1037 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1038 ReturnsTwiceAttr, /*Local=*/true);
1039
1040 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1041 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1042 llvm::Value *Args[] = {Buf, Arg1};
1043 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1044 CB->setAttributes(ReturnsTwiceAttr);
1045 return RValue::get(CB);
1046}
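Illustrative summary (the per-target kind selection happens in the caller and is not shown here; the shapes below follow only from the code above): given a kind, the emitted runtime call is roughly

    // _setjmp3:            _setjmp3(buf, 0)   as a variadic call
    // _setjmp/_setjmpex:   _setjmp*(buf, <frame address>), where the second
    //                      argument is llvm.sponentry() on AArch64 and
    //                      llvm.frameaddress(0) on other targets
    // In every case the call site and declaration are marked returns_twice.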
1047
1048// Many of the MSVC builtins are available on x64, ARM and AArch64; to avoid
1049// repeating code, we handle them here.
1050enum class CodeGenFunction::MSVCIntrin {
1051 _BitScanForward,
1052 _BitScanReverse,
1053 _InterlockedAnd,
1054 _InterlockedDecrement,
1055 _InterlockedExchange,
1056 _InterlockedExchangeAdd,
1057 _InterlockedExchangeSub,
1058 _InterlockedIncrement,
1059 _InterlockedOr,
1060 _InterlockedXor,
1061 _InterlockedExchangeAdd_acq,
1062 _InterlockedExchangeAdd_rel,
1063 _InterlockedExchangeAdd_nf,
1064 _InterlockedExchange_acq,
1065 _InterlockedExchange_rel,
1066 _InterlockedExchange_nf,
1067 _InterlockedCompareExchange_acq,
1068 _InterlockedCompareExchange_rel,
1069 _InterlockedCompareExchange_nf,
1070 _InterlockedCompareExchange128,
1071 _InterlockedCompareExchange128_acq,
1072 _InterlockedCompareExchange128_rel,
1073 _InterlockedCompareExchange128_nf,
1074 _InterlockedOr_acq,
1075 _InterlockedOr_rel,
1076 _InterlockedOr_nf,
1077 _InterlockedXor_acq,
1078 _InterlockedXor_rel,
1079 _InterlockedXor_nf,
1080 _InterlockedAnd_acq,
1081 _InterlockedAnd_rel,
1082 _InterlockedAnd_nf,
1083 _InterlockedIncrement_acq,
1084 _InterlockedIncrement_rel,
1085 _InterlockedIncrement_nf,
1086 _InterlockedDecrement_acq,
1087 _InterlockedDecrement_rel,
1088 _InterlockedDecrement_nf,
1089 __fastfail,
1090};
1091
1092static Optional<CodeGenFunction::MSVCIntrin>
1093translateArmToMsvcIntrin(unsigned BuiltinID) {
1094 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1095 switch (BuiltinID) {
1096 default:
1097 return None;
1098 case ARM::BI_BitScanForward:
1099 case ARM::BI_BitScanForward64:
1100 return MSVCIntrin::_BitScanForward;
1101 case ARM::BI_BitScanReverse:
1102 case ARM::BI_BitScanReverse64:
1103 return MSVCIntrin::_BitScanReverse;
1104 case ARM::BI_InterlockedAnd64:
1105 return MSVCIntrin::_InterlockedAnd;
1106 case ARM::BI_InterlockedExchange64:
1107 return MSVCIntrin::_InterlockedExchange;
1108 case ARM::BI_InterlockedExchangeAdd64:
1109 return MSVCIntrin::_InterlockedExchangeAdd;
1110 case ARM::BI_InterlockedExchangeSub64:
1111 return MSVCIntrin::_InterlockedExchangeSub;
1112 case ARM::BI_InterlockedOr64:
1113 return MSVCIntrin::_InterlockedOr;
1114 case ARM::BI_InterlockedXor64:
1115 return MSVCIntrin::_InterlockedXor;
1116 case ARM::BI_InterlockedDecrement64:
1117 return MSVCIntrin::_InterlockedDecrement;
1118 case ARM::BI_InterlockedIncrement64:
1119 return MSVCIntrin::_InterlockedIncrement;
1120 case ARM::BI_InterlockedExchangeAdd8_acq:
1121 case ARM::BI_InterlockedExchangeAdd16_acq:
1122 case ARM::BI_InterlockedExchangeAdd_acq:
1123 case ARM::BI_InterlockedExchangeAdd64_acq:
1124 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1125 case ARM::BI_InterlockedExchangeAdd8_rel:
1126 case ARM::BI_InterlockedExchangeAdd16_rel:
1127 case ARM::BI_InterlockedExchangeAdd_rel:
1128 case ARM::BI_InterlockedExchangeAdd64_rel:
1129 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1130 case ARM::BI_InterlockedExchangeAdd8_nf:
1131 case ARM::BI_InterlockedExchangeAdd16_nf:
1132 case ARM::BI_InterlockedExchangeAdd_nf:
1133 case ARM::BI_InterlockedExchangeAdd64_nf:
1134 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1135 case ARM::BI_InterlockedExchange8_acq:
1136 case ARM::BI_InterlockedExchange16_acq:
1137 case ARM::BI_InterlockedExchange_acq:
1138 case ARM::BI_InterlockedExchange64_acq:
1139 return MSVCIntrin::_InterlockedExchange_acq;
1140 case ARM::BI_InterlockedExchange8_rel:
1141 case ARM::BI_InterlockedExchange16_rel:
1142 case ARM::BI_InterlockedExchange_rel:
1143 case ARM::BI_InterlockedExchange64_rel:
1144 return MSVCIntrin::_InterlockedExchange_rel;
1145 case ARM::BI_InterlockedExchange8_nf:
1146 case ARM::BI_InterlockedExchange16_nf:
1147 case ARM::BI_InterlockedExchange_nf:
1148 case ARM::BI_InterlockedExchange64_nf:
1149 return MSVCIntrin::_InterlockedExchange_nf;
1150 case ARM::BI_InterlockedCompareExchange8_acq:
1151 case ARM::BI_InterlockedCompareExchange16_acq:
1152 case ARM::BI_InterlockedCompareExchange_acq:
1153 case ARM::BI_InterlockedCompareExchange64_acq:
1154 return MSVCIntrin::_InterlockedCompareExchange_acq;
1155 case ARM::BI_InterlockedCompareExchange8_rel:
1156 case ARM::BI_InterlockedCompareExchange16_rel:
1157 case ARM::BI_InterlockedCompareExchange_rel:
1158 case ARM::BI_InterlockedCompareExchange64_rel:
1159 return MSVCIntrin::_InterlockedCompareExchange_rel;
1160 case ARM::BI_InterlockedCompareExchange8_nf:
1161 case ARM::BI_InterlockedCompareExchange16_nf:
1162 case ARM::BI_InterlockedCompareExchange_nf:
1163 case ARM::BI_InterlockedCompareExchange64_nf:
1164 return MSVCIntrin::_InterlockedCompareExchange_nf;
1165 case ARM::BI_InterlockedOr8_acq:
1166 case ARM::BI_InterlockedOr16_acq:
1167 case ARM::BI_InterlockedOr_acq:
1168 case ARM::BI_InterlockedOr64_acq:
1169 return MSVCIntrin::_InterlockedOr_acq;
1170 case ARM::BI_InterlockedOr8_rel:
1171 case ARM::BI_InterlockedOr16_rel:
1172 case ARM::BI_InterlockedOr_rel:
1173 case ARM::BI_InterlockedOr64_rel:
1174 return MSVCIntrin::_InterlockedOr_rel;
1175 case ARM::BI_InterlockedOr8_nf:
1176 case ARM::BI_InterlockedOr16_nf:
1177 case ARM::BI_InterlockedOr_nf:
1178 case ARM::BI_InterlockedOr64_nf:
1179 return MSVCIntrin::_InterlockedOr_nf;
1180 case ARM::BI_InterlockedXor8_acq:
1181 case ARM::BI_InterlockedXor16_acq:
1182 case ARM::BI_InterlockedXor_acq:
1183 case ARM::BI_InterlockedXor64_acq:
1184 return MSVCIntrin::_InterlockedXor_acq;
1185 case ARM::BI_InterlockedXor8_rel:
1186 case ARM::BI_InterlockedXor16_rel:
1187 case ARM::BI_InterlockedXor_rel:
1188 case ARM::BI_InterlockedXor64_rel:
1189 return MSVCIntrin::_InterlockedXor_rel;
1190 case ARM::BI_InterlockedXor8_nf:
1191 case ARM::BI_InterlockedXor16_nf:
1192 case ARM::BI_InterlockedXor_nf:
1193 case ARM::BI_InterlockedXor64_nf:
1194 return MSVCIntrin::_InterlockedXor_nf;
1195 case ARM::BI_InterlockedAnd8_acq:
1196 case ARM::BI_InterlockedAnd16_acq:
1197 case ARM::BI_InterlockedAnd_acq:
1198 case ARM::BI_InterlockedAnd64_acq:
1199 return MSVCIntrin::_InterlockedAnd_acq;
1200 case ARM::BI_InterlockedAnd8_rel:
1201 case ARM::BI_InterlockedAnd16_rel:
1202 case ARM::BI_InterlockedAnd_rel:
1203 case ARM::BI_InterlockedAnd64_rel:
1204 return MSVCIntrin::_InterlockedAnd_rel;
1205 case ARM::BI_InterlockedAnd8_nf:
1206 case ARM::BI_InterlockedAnd16_nf:
1207 case ARM::BI_InterlockedAnd_nf:
1208 case ARM::BI_InterlockedAnd64_nf:
1209 return MSVCIntrin::_InterlockedAnd_nf;
1210 case ARM::BI_InterlockedIncrement16_acq:
1211 case ARM::BI_InterlockedIncrement_acq:
1212 case ARM::BI_InterlockedIncrement64_acq:
1213 return MSVCIntrin::_InterlockedIncrement_acq;
1214 case ARM::BI_InterlockedIncrement16_rel:
1215 case ARM::BI_InterlockedIncrement_rel:
1216 case ARM::BI_InterlockedIncrement64_rel:
1217 return MSVCIntrin::_InterlockedIncrement_rel;
1218 case ARM::BI_InterlockedIncrement16_nf:
1219 case ARM::BI_InterlockedIncrement_nf:
1220 case ARM::BI_InterlockedIncrement64_nf:
1221 return MSVCIntrin::_InterlockedIncrement_nf;
1222 case ARM::BI_InterlockedDecrement16_acq:
1223 case ARM::BI_InterlockedDecrement_acq:
1224 case ARM::BI_InterlockedDecrement64_acq:
1225 return MSVCIntrin::_InterlockedDecrement_acq;
1226 case ARM::BI_InterlockedDecrement16_rel:
1227 case ARM::BI_InterlockedDecrement_rel:
1228 case ARM::BI_InterlockedDecrement64_rel:
1229 return MSVCIntrin::_InterlockedDecrement_rel;
1230 case ARM::BI_InterlockedDecrement16_nf:
1231 case ARM::BI_InterlockedDecrement_nf:
1232 case ARM::BI_InterlockedDecrement64_nf:
1233 return MSVCIntrin::_InterlockedDecrement_nf;
1234 }
1235 llvm_unreachable("must return from switch");
1236}
1237
1238static Optional<CodeGenFunction::MSVCIntrin>
1239translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
1240 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1241 switch (BuiltinID) {
1242 default:
1243 return None;
1244 case AArch64::BI_BitScanForward:
1245 case AArch64::BI_BitScanForward64:
1246 return MSVCIntrin::_BitScanForward;
1247 case AArch64::BI_BitScanReverse:
1248 case AArch64::BI_BitScanReverse64:
1249 return MSVCIntrin::_BitScanReverse;
1250 case AArch64::BI_InterlockedAnd64:
1251 return MSVCIntrin::_InterlockedAnd;
1252 case AArch64::BI_InterlockedExchange64:
1253 return MSVCIntrin::_InterlockedExchange;
1254 case AArch64::BI_InterlockedExchangeAdd64:
1255 return MSVCIntrin::_InterlockedExchangeAdd;
1256 case AArch64::BI_InterlockedExchangeSub64:
1257 return MSVCIntrin::_InterlockedExchangeSub;
1258 case AArch64::BI_InterlockedOr64:
1259 return MSVCIntrin::_InterlockedOr;
1260 case AArch64::BI_InterlockedXor64:
1261 return MSVCIntrin::_InterlockedXor;
1262 case AArch64::BI_InterlockedDecrement64:
1263 return MSVCIntrin::_InterlockedDecrement;
1264 case AArch64::BI_InterlockedIncrement64:
1265 return MSVCIntrin::_InterlockedIncrement;
1266 case AArch64::BI_InterlockedExchangeAdd8_acq:
1267 case AArch64::BI_InterlockedExchangeAdd16_acq:
1268 case AArch64::BI_InterlockedExchangeAdd_acq:
1269 case AArch64::BI_InterlockedExchangeAdd64_acq:
1270 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1271 case AArch64::BI_InterlockedExchangeAdd8_rel:
1272 case AArch64::BI_InterlockedExchangeAdd16_rel:
1273 case AArch64::BI_InterlockedExchangeAdd_rel:
1274 case AArch64::BI_InterlockedExchangeAdd64_rel:
1275 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1276 case AArch64::BI_InterlockedExchangeAdd8_nf:
1277 case AArch64::BI_InterlockedExchangeAdd16_nf:
1278 case AArch64::BI_InterlockedExchangeAdd_nf:
1279 case AArch64::BI_InterlockedExchangeAdd64_nf:
1280 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1281 case AArch64::BI_InterlockedExchange8_acq:
1282 case AArch64::BI_InterlockedExchange16_acq:
1283 case AArch64::BI_InterlockedExchange_acq:
1284 case AArch64::BI_InterlockedExchange64_acq:
1285 return MSVCIntrin::_InterlockedExchange_acq;
1286 case AArch64::BI_InterlockedExchange8_rel:
1287 case AArch64::BI_InterlockedExchange16_rel:
1288 case AArch64::BI_InterlockedExchange_rel:
1289 case AArch64::BI_InterlockedExchange64_rel:
1290 return MSVCIntrin::_InterlockedExchange_rel;
1291 case AArch64::BI_InterlockedExchange8_nf:
1292 case AArch64::BI_InterlockedExchange16_nf:
1293 case AArch64::BI_InterlockedExchange_nf:
1294 case AArch64::BI_InterlockedExchange64_nf:
1295 return MSVCIntrin::_InterlockedExchange_nf;
1296 case AArch64::BI_InterlockedCompareExchange8_acq:
1297 case AArch64::BI_InterlockedCompareExchange16_acq:
1298 case AArch64::BI_InterlockedCompareExchange_acq:
1299 case AArch64::BI_InterlockedCompareExchange64_acq:
1300 return MSVCIntrin::_InterlockedCompareExchange_acq;
1301 case AArch64::BI_InterlockedCompareExchange8_rel:
1302 case AArch64::BI_InterlockedCompareExchange16_rel:
1303 case AArch64::BI_InterlockedCompareExchange_rel:
1304 case AArch64::BI_InterlockedCompareExchange64_rel:
1305 return MSVCIntrin::_InterlockedCompareExchange_rel;
1306 case AArch64::BI_InterlockedCompareExchange8_nf:
1307 case AArch64::BI_InterlockedCompareExchange16_nf:
1308 case AArch64::BI_InterlockedCompareExchange_nf:
1309 case AArch64::BI_InterlockedCompareExchange64_nf:
1310 return MSVCIntrin::_InterlockedCompareExchange_nf;
1311 case AArch64::BI_InterlockedCompareExchange128:
1312 return MSVCIntrin::_InterlockedCompareExchange128;
1313 case AArch64::BI_InterlockedCompareExchange128_acq:
1314 return MSVCIntrin::_InterlockedCompareExchange128_acq;
1315 case AArch64::BI_InterlockedCompareExchange128_nf:
1316 return MSVCIntrin::_InterlockedCompareExchange128_nf;
1317 case AArch64::BI_InterlockedCompareExchange128_rel:
1318 return MSVCIntrin::_InterlockedCompareExchange128_rel;
1319 case AArch64::BI_InterlockedOr8_acq:
1320 case AArch64::BI_InterlockedOr16_acq:
1321 case AArch64::BI_InterlockedOr_acq:
1322 case AArch64::BI_InterlockedOr64_acq:
1323 return MSVCIntrin::_InterlockedOr_acq;
1324 case AArch64::BI_InterlockedOr8_rel:
1325 case AArch64::BI_InterlockedOr16_rel:
1326 case AArch64::BI_InterlockedOr_rel:
1327 case AArch64::BI_InterlockedOr64_rel:
1328 return MSVCIntrin::_InterlockedOr_rel;
1329 case AArch64::BI_InterlockedOr8_nf:
1330 case AArch64::BI_InterlockedOr16_nf:
1331 case AArch64::BI_InterlockedOr_nf:
1332 case AArch64::BI_InterlockedOr64_nf:
1333 return MSVCIntrin::_InterlockedOr_nf;
1334 case AArch64::BI_InterlockedXor8_acq:
1335 case AArch64::BI_InterlockedXor16_acq:
1336 case AArch64::BI_InterlockedXor_acq:
1337 case AArch64::BI_InterlockedXor64_acq:
1338 return MSVCIntrin::_InterlockedXor_acq;
1339 case AArch64::BI_InterlockedXor8_rel:
1340 case AArch64::BI_InterlockedXor16_rel:
1341 case AArch64::BI_InterlockedXor_rel:
1342 case AArch64::BI_InterlockedXor64_rel:
1343 return MSVCIntrin::_InterlockedXor_rel;
1344 case AArch64::BI_InterlockedXor8_nf:
1345 case AArch64::BI_InterlockedXor16_nf:
1346 case AArch64::BI_InterlockedXor_nf:
1347 case AArch64::BI_InterlockedXor64_nf:
1348 return MSVCIntrin::_InterlockedXor_nf;
1349 case AArch64::BI_InterlockedAnd8_acq:
1350 case AArch64::BI_InterlockedAnd16_acq:
1351 case AArch64::BI_InterlockedAnd_acq:
1352 case AArch64::BI_InterlockedAnd64_acq:
1353 return MSVCIntrin::_InterlockedAnd_acq;
1354 case AArch64::BI_InterlockedAnd8_rel:
1355 case AArch64::BI_InterlockedAnd16_rel:
1356 case AArch64::BI_InterlockedAnd_rel:
1357 case AArch64::BI_InterlockedAnd64_rel:
1358 return MSVCIntrin::_InterlockedAnd_rel;
1359 case AArch64::BI_InterlockedAnd8_nf:
1360 case AArch64::BI_InterlockedAnd16_nf:
1361 case AArch64::BI_InterlockedAnd_nf:
1362 case AArch64::BI_InterlockedAnd64_nf:
1363 return MSVCIntrin::_InterlockedAnd_nf;
1364 case AArch64::BI_InterlockedIncrement16_acq:
1365 case AArch64::BI_InterlockedIncrement_acq:
1366 case AArch64::BI_InterlockedIncrement64_acq:
1367 return MSVCIntrin::_InterlockedIncrement_acq;
1368 case AArch64::BI_InterlockedIncrement16_rel:
1369 case AArch64::BI_InterlockedIncrement_rel:
1370 case AArch64::BI_InterlockedIncrement64_rel:
1371 return MSVCIntrin::_InterlockedIncrement_rel;
1372 case AArch64::BI_InterlockedIncrement16_nf:
1373 case AArch64::BI_InterlockedIncrement_nf:
1374 case AArch64::BI_InterlockedIncrement64_nf:
1375 return MSVCIntrin::_InterlockedIncrement_nf;
1376 case AArch64::BI_InterlockedDecrement16_acq:
1377 case AArch64::BI_InterlockedDecrement_acq:
1378 case AArch64::BI_InterlockedDecrement64_acq:
1379 return MSVCIntrin::_InterlockedDecrement_acq;
1380 case AArch64::BI_InterlockedDecrement16_rel:
1381 case AArch64::BI_InterlockedDecrement_rel:
1382 case AArch64::BI_InterlockedDecrement64_rel:
1383 return MSVCIntrin::_InterlockedDecrement_rel;
1384 case AArch64::BI_InterlockedDecrement16_nf:
1385 case AArch64::BI_InterlockedDecrement_nf:
1386 case AArch64::BI_InterlockedDecrement64_nf:
1387 return MSVCIntrin::_InterlockedDecrement_nf;
1388 }
1389 llvm_unreachable("must return from switch");
1390}
1391
1392static Optional<CodeGenFunction::MSVCIntrin>
1393translateX86ToMsvcIntrin(unsigned BuiltinID) {
1394 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1395 switch (BuiltinID) {
1396 default:
1397 return None;
1398 case clang::X86::BI_BitScanForward:
1399 case clang::X86::BI_BitScanForward64:
1400 return MSVCIntrin::_BitScanForward;
1401 case clang::X86::BI_BitScanReverse:
1402 case clang::X86::BI_BitScanReverse64:
1403 return MSVCIntrin::_BitScanReverse;
1404 case clang::X86::BI_InterlockedAnd64:
1405 return MSVCIntrin::_InterlockedAnd;
1406 case clang::X86::BI_InterlockedCompareExchange128:
1407 return MSVCIntrin::_InterlockedCompareExchange128;
1408 case clang::X86::BI_InterlockedExchange64:
1409 return MSVCIntrin::_InterlockedExchange;
1410 case clang::X86::BI_InterlockedExchangeAdd64:
1411 return MSVCIntrin::_InterlockedExchangeAdd;
1412 case clang::X86::BI_InterlockedExchangeSub64:
1413 return MSVCIntrin::_InterlockedExchangeSub;
1414 case clang::X86::BI_InterlockedOr64:
1415 return MSVCIntrin::_InterlockedOr;
1416 case clang::X86::BI_InterlockedXor64:
1417 return MSVCIntrin::_InterlockedXor;
1418 case clang::X86::BI_InterlockedDecrement64:
1419 return MSVCIntrin::_InterlockedDecrement;
1420 case clang::X86::BI_InterlockedIncrement64:
1421 return MSVCIntrin::_InterlockedIncrement;
1422 }
1423 llvm_unreachable("must return from switch");
1424}
1425
1426// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1427Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1428 const CallExpr *E) {
1429 switch (BuiltinID) {
1430 case MSVCIntrin::_BitScanForward:
1431 case MSVCIntrin::_BitScanReverse: {
1432 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1433 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1434
1435 llvm::Type *ArgType = ArgValue->getType();
1436 llvm::Type *IndexType =
1437 IndexAddress.getPointer()->getType()->getPointerElementType();
1438 llvm::Type *ResultType = ConvertType(E->getType());
1439
1440 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1441 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1442 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1443
1444 BasicBlock *Begin = Builder.GetInsertBlock();
1445 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1446 Builder.SetInsertPoint(End);
1447 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1448
1449 Builder.SetInsertPoint(Begin);
1450 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1451 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1452 Builder.CreateCondBr(IsZero, End, NotZero);
1453 Result->addIncoming(ResZero, Begin);
1454
1455 Builder.SetInsertPoint(NotZero);
1456
1457 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1458 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1459 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1460 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1461 Builder.CreateStore(ZeroCount, IndexAddress, false);
1462 } else {
1463 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1464 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1465
1466 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1467 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1468 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1469 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1470 Builder.CreateStore(Index, IndexAddress, false);
1471 }
1472 Builder.CreateBr(End);
1473 Result->addIncoming(ResOne, NotZero);
1474
1475 Builder.SetInsertPoint(End);
1476 return Result;
1477 }
1478 case MSVCIntrin::_InterlockedAnd:
1479 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1480 case MSVCIntrin::_InterlockedExchange:
1481 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1482 case MSVCIntrin::_InterlockedExchangeAdd:
1483 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1484 case MSVCIntrin::_InterlockedExchangeSub:
1485 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1486 case MSVCIntrin::_InterlockedOr:
1487 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1488 case MSVCIntrin::_InterlockedXor:
1489 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1490 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1491 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1492 AtomicOrdering::Acquire);
1493 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1494 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1495 AtomicOrdering::Release);
1496 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1497 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1498 AtomicOrdering::Monotonic);
1499 case MSVCIntrin::_InterlockedExchange_acq:
1500 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1501 AtomicOrdering::Acquire);
1502 case MSVCIntrin::_InterlockedExchange_rel:
1503 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1504 AtomicOrdering::Release);
1505 case MSVCIntrin::_InterlockedExchange_nf:
1506 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1507 AtomicOrdering::Monotonic);
1508 case MSVCIntrin::_InterlockedCompareExchange_acq:
1509 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1510 case MSVCIntrin::_InterlockedCompareExchange_rel:
1511 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1512 case MSVCIntrin::_InterlockedCompareExchange_nf:
1513 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1514 case MSVCIntrin::_InterlockedCompareExchange128:
1515 return EmitAtomicCmpXchg128ForMSIntrin(
1516 *this, E, AtomicOrdering::SequentiallyConsistent);
1517 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1518 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1519 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1520 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1521 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1522 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1523 case MSVCIntrin::_InterlockedOr_acq:
1524 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1525 AtomicOrdering::Acquire);
1526 case MSVCIntrin::_InterlockedOr_rel:
1527 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1528 AtomicOrdering::Release);
1529 case MSVCIntrin::_InterlockedOr_nf:
1530 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1531 AtomicOrdering::Monotonic);
1532 case MSVCIntrin::_InterlockedXor_acq:
1533 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1534 AtomicOrdering::Acquire);
1535 case MSVCIntrin::_InterlockedXor_rel:
1536 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1537 AtomicOrdering::Release);
1538 case MSVCIntrin::_InterlockedXor_nf:
1539 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1540 AtomicOrdering::Monotonic);
1541 case MSVCIntrin::_InterlockedAnd_acq:
1542 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1543 AtomicOrdering::Acquire);
1544 case MSVCIntrin::_InterlockedAnd_rel:
1545 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1546 AtomicOrdering::Release);
1547 case MSVCIntrin::_InterlockedAnd_nf:
1548 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1549 AtomicOrdering::Monotonic);
1550 case MSVCIntrin::_InterlockedIncrement_acq:
1551 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1552 case MSVCIntrin::_InterlockedIncrement_rel:
1553 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1554 case MSVCIntrin::_InterlockedIncrement_nf:
1555 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1556 case MSVCIntrin::_InterlockedDecrement_acq:
1557 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1558 case MSVCIntrin::_InterlockedDecrement_rel:
1559 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1560 case MSVCIntrin::_InterlockedDecrement_nf:
1561 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1562
1563 case MSVCIntrin::_InterlockedDecrement:
1564 return EmitAtomicDecrementValue(*this, E);
1565 case MSVCIntrin::_InterlockedIncrement:
1566 return EmitAtomicIncrementValue(*this, E);
1567
1568 case MSVCIntrin::__fastfail: {
1569 // Request immediate process termination from the kernel. The instruction
1570 // sequences to do this are documented on MSDN:
1571 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1572 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1573 StringRef Asm, Constraints;
1574 switch (ISA) {
1575 default:
1576 ErrorUnsupported(E, "__fastfail call for this architecture");
1577 break;
1578 case llvm::Triple::x86:
1579 case llvm::Triple::x86_64:
1580 Asm = "int $$0x29";
1581 Constraints = "{cx}";
1582 break;
1583 case llvm::Triple::thumb:
1584 Asm = "udf #251";
1585 Constraints = "{r0}";
1586 break;
1587 case llvm::Triple::aarch64:
1588 Asm = "brk #0xF003";
1589 Constraints = "{w0}";
1590 }
1591 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1592 llvm::InlineAsm *IA =
1593 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1594 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1595 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1596 llvm::Attribute::NoReturn);
1597 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1598 CI->setAttributes(NoReturnAttr);
1599 return CI;
1600 }
1601 }
1602 llvm_unreachable("Incorrect MSVC intrinsic!");
1603}
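A hedged usage example (not from the analyzed source): the _acq/_rel/_nf suffixes select the LLVM atomic ordering (acquire, release, monotonic), while the unsuffixed forms use the function's default ordering (sequentially consistent, not shown in this excerpt). For instance:

    volatile long Counter = 0;
    long Old = _InterlockedExchangeAdd_acq(&Counter, 1);
    // emitted as an 'atomicrmw add ... acquire' on Counter, returning the
    // previous value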
1604
1605namespace {
1606// ARC cleanup for __builtin_os_log_format
1607struct CallObjCArcUse final : EHScopeStack::Cleanup {
1608 CallObjCArcUse(llvm::Value *object) : object(object) {}
1609 llvm::Value *object;
1610
1611 void Emit(CodeGenFunction &CGF, Flags flags) override {
1612 CGF.EmitARCIntrinsicUse(object);
1613 }
1614};
1615}
1616
1617Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1618 BuiltinCheckKind Kind) {
1619 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1620 && "Unsupported builtin check kind");
1621
1622 Value *ArgValue = EmitScalarExpr(E);
1623 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1624 return ArgValue;
1625
1626 SanitizerScope SanScope(this);
1627 Value *Cond = Builder.CreateICmpNE(
1628 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1629 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1630 SanitizerHandler::InvalidBuiltin,
1631 {EmitCheckSourceLocation(E->getExprLoc()),
1632 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1633 None);
1634 return ArgValue;
1635}
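Illustrative example (hedged): on targets where __builtin_clz/__builtin_ctz are undefined for a zero argument, enabling -fsanitize=builtin makes the code above emit a non-zero check before the intrinsic is used:

    unsigned lz = __builtin_clz(x);
    // with the sanitizer enabled, x is compared against zero and the
    // InvalidBuiltin handler is invoked if the check fails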
1636
1637/// Get the argument type for arguments to os_log_helper.
1638static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1639 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1640 return C.getCanonicalType(UnsignedTy);
1641}
1642
1643llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1644 const analyze_os_log::OSLogBufferLayout &Layout,
1645 CharUnits BufferAlignment) {
1646 ASTContext &Ctx = getContext();
1647
1648 llvm::SmallString<64> Name;
1649 {
1650 raw_svector_ostream OS(Name);
1651 OS << "__os_log_helper";
1652 OS << "_" << BufferAlignment.getQuantity();
1653 OS << "_" << int(Layout.getSummaryByte());
1654 OS << "_" << int(Layout.getNumArgsByte());
1655 for (const auto &Item : Layout.Items)
1656 OS << "_" << int(Item.getSizeByte()) << "_"
1657 << int(Item.getDescriptorByte());
1658 }
1659
1660 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1661 return F;
1662
1663 llvm::SmallVector<QualType, 4> ArgTys;
1664 FunctionArgList Args;
1665 Args.push_back(ImplicitParamDecl::Create(
1666 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1667 ImplicitParamDecl::Other));
1668 ArgTys.emplace_back(Ctx.VoidPtrTy);
1669
1670 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1671 char Size = Layout.Items[I].getSizeByte();
1672 if (!Size)
1673 continue;
1674
1675 QualType ArgTy = getOSLogArgType(Ctx, Size);
1676 Args.push_back(ImplicitParamDecl::Create(
1677 Ctx, nullptr, SourceLocation(),
1678 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1679 ImplicitParamDecl::Other));
1680 ArgTys.emplace_back(ArgTy);
1681 }
1682
1683 QualType ReturnTy = Ctx.VoidTy;
1684 QualType FuncionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1685
1686 // The helper function has linkonce_odr linkage to enable the linker to merge
1687 // identical functions. To ensure the merging always happens, 'noinline' is
1688 // attached to the function when compiling with -Oz.
1689 const CGFunctionInfo &FI =
1690 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1691 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1692 llvm::Function *Fn = llvm::Function::Create(
1693 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1694 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1695 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
1696 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1697 Fn->setDoesNotThrow();
1698
1699 // Attach 'noinline' at -Oz.
1700 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1701 Fn->addFnAttr(llvm::Attribute::NoInline);
1702
1703 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1704 IdentifierInfo *II = &Ctx.Idents.get(Name);
1705 FunctionDecl *FD = FunctionDecl::Create(
1706 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
1707 FuncionTy, nullptr, SC_PrivateExtern, false, false);
1708 // Avoid generating debug location info for the function.
1709 FD->setImplicit();
1710
1711 StartFunction(FD, ReturnTy, Fn, FI, Args);
1712
1713 // Create a scope with an artificial location for the body of this function.
1714 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1715
1716 CharUnits Offset;
1717 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1718 BufferAlignment);
1719 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1720 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1721 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1722 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1723
1724 unsigned I = 1;
1725 for (const auto &Item : Layout.Items) {
1726 Builder.CreateStore(
1727 Builder.getInt8(Item.getDescriptorByte()),
1728 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1729 Builder.CreateStore(
1730 Builder.getInt8(Item.getSizeByte()),
1731 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1732
1733 CharUnits Size = Item.size();
1734 if (!Size.getQuantity())
1735 continue;
1736
1737 Address Arg = GetAddrOfLocalVar(Args[I]);
1738 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1739 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1740 "argDataCast");
1741 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1742 Offset += Size;
1743 ++I;
1744 }
1745
1746 FinishFunction();
1747
1748 return Fn;
1749}
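For illustration only (the concrete numbers are made up): the helper name encodes the buffer alignment, the summary and argument-count bytes, and each item's size/descriptor pair. A two-argument layout with 8-byte alignment, summary byte 2, and items of 4 and 8 bytes with descriptors 0 and 32 would reuse or create a helper named

    __os_log_helper_8_2_2_4_0_8_32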
1750
1751RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1752 assert(E.getNumArgs() >= 2 &&
1753 "__builtin_os_log_format takes at least 2 arguments");
1754 ASTContext &Ctx = getContext();
1755 analyze_os_log::OSLogBufferLayout Layout;
1756 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1757 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1758 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1759
1760 // Ignore argument 1, the format string. It is not currently used.
1761 CallArgList Args;
1762 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1763
1764 for (const auto &Item : Layout.Items) {
1765 int Size = Item.getSizeByte();
1766 if (!Size)
1767 continue;
1768
1769 llvm::Value *ArgVal;
1770
1771 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1772 uint64_t Val = 0;
1773 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1774 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1775 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1776 } else if (const Expr *TheExpr = Item.getExpr()) {
1777 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1778
1779 // If a temporary object that requires destruction after the full
1780 // expression is passed, push a lifetime-extended cleanup to extend its
1781 // lifetime to the end of the enclosing block scope.
1782 auto LifetimeExtendObject = [&](const Expr *E) {
1783 E = E->IgnoreParenCasts();
1784 // Extend lifetimes of objects returned by function calls and message
1785 // sends.
1786
1787 // FIXME: We should do this in other cases in which temporaries are
1788 // created including arguments of non-ARC types (e.g., C++
1789 // temporaries).
1790 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1791 return true;
1792 return false;
1793 };
1794
1795 if (TheExpr->getType()->isObjCRetainableType() &&
1796 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1797 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1798 "Only scalar can be a ObjC retainable type");
1799 if (!isa<Constant>(ArgVal)) {
1800 CleanupKind Cleanup = getARCCleanupKind();
1801 QualType Ty = TheExpr->getType();
1802 Address Alloca = Address::invalid();
1803 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1804 ArgVal = EmitARCRetain(Ty, ArgVal);
1805 Builder.CreateStore(ArgVal, Addr);
1806 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1807 CodeGenFunction::destroyARCStrongPrecise,
1808 Cleanup & EHCleanup);
1809
1810 // Push a clang.arc.use call to ensure the ARC optimizer knows that the
1811 // argument has to be kept alive.
1812 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1813 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1814 }
1815 }
1816 } else {
1817 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1818 }
1819
1820 unsigned ArgValSize =
1821 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1822 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1823 ArgValSize);
1824 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1825 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1826 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1827 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1828 Args.add(RValue::get(ArgVal), ArgTy);
1829 }
1830
1831 const CGFunctionInfo &FI =
1832 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1833 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1834 Layout, BufAddr.getAlignment());
1835 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1836 return RValue::get(BufAddr.getPointer());
1837}
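A usage sketch (the fixed buffer size below is an assumption; real callers size the buffer from the computed layout):

    char buf[64];
    __builtin_os_log_format(buf, "%d %s", 42, "hi");
    // Serializes the argument layout into buf via the helper generated above
    // and returns the buffer pointer; the format string itself is ignored at
    // this point.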
1838
1839static bool isSpecialUnsignedMultiplySignedResult(
1840 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
1841 WidthAndSignedness ResultInfo) {
1842 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1843 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
1844 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
1845}
1846
1847static RValue EmitCheckedUnsignedMultiplySignedResult(
1848 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
1849 const clang::Expr *Op2, WidthAndSignedness Op2Info,
1850 const clang::Expr *ResultArg, QualType ResultQTy,
1851 WidthAndSignedness ResultInfo) {
1852 assert(isSpecialUnsignedMultiplySignedResult(
1853 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
1854 "Cannot specialize this multiply");
1855
1856 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
1857 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
1858
1859 llvm::Value *HasOverflow;
1860 llvm::Value *Result = EmitOverflowIntrinsic(
1861 CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
1862
1863 // The intrinsic call will detect overflow when the value is > UINT_MAX,
1864 // however, since the original builtin had a signed result, we need to report
1865 // an overflow when the result is greater than INT_MAX.
1866 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
1867 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
1868
1869 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
1870 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
1871
1872 bool isVolatile =
1873 ResultArg->getType()->getPointeeType().isVolatileQualified();
1874 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1875 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1876 isVolatile);
1877 return RValue::get(HasOverflow);
1878}
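Illustrative usage (not from the analyzed file): this specialization handles the unsigned * unsigned -> signed form, where overflow must also be reported when the product merely exceeds INT_MAX:

    unsigned a = 0x40000000u, b = 2u;   // product is 2^31
    int r;
    bool ovf = __builtin_mul_overflow(a, b, &r);
    // ovf is true: the unsigned multiply does not wrap, but the result does
    // not fit in the signed destination.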
1879
1880/// Determine if a binop is a checked mixed-sign multiply we can specialize.
1881static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1882 WidthAndSignedness Op1Info,
1883 WidthAndSignedness Op2Info,
1884 WidthAndSignedness ResultInfo) {
1885 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1886 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1887 Op1Info.Signed != Op2Info.Signed;
1888}
1889
1890/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1891/// the generic checked-binop irgen.
1892static RValue
1893EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1894 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1895 WidthAndSignedness Op2Info,
1896 const clang::Expr *ResultArg, QualType ResultQTy,
1897 WidthAndSignedness ResultInfo) {
1898 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1899 Op2Info, ResultInfo) &&
1900 "Not a mixed-sign multiplication we can specialize");
1901
1902 // Emit the signed and unsigned operands.
1903 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1904 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1905 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1906 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1907 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1908 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1909
1910 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1911 if (SignedOpWidth < UnsignedOpWidth)
1912 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1913 if (UnsignedOpWidth < SignedOpWidth)
1914 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1915
1916 llvm::Type *OpTy = Signed->getType();
1917 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1918 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1919 llvm::Type *ResTy = ResultPtr.getElementType();
1920 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1921
1922 // Take the absolute value of the signed operand.
1923 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1924 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1925 llvm::Value *AbsSigned =
1926 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1927
1928 // Perform a checked unsigned multiplication.
1929 llvm::Value *UnsignedOverflow;
1930 llvm::Value *UnsignedResult =
1931 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1932 Unsigned, UnsignedOverflow);
1933
1934 llvm::Value *Overflow, *Result;
1935 if (ResultInfo.Signed) {
1936 // Signed overflow occurs if the result is greater than INT_MAX or less
1937 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1938 auto IntMax =
1939 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1940 llvm::Value *MaxResult =
1941 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1942 CGF.Builder.CreateZExt(IsNegative, OpTy));
1943 llvm::Value *SignedOverflow =
1944 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1945 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1946
1947 // Prepare the signed result (possibly by negating it).
1948 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1949 llvm::Value *SignedResult =
1950 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1951 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1952 } else {
1953 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1954 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1955 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1956 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1957 if (ResultInfo.Width < OpWidth) {
1958 auto IntMax =
1959 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
1960 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
1961 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
1962 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
1963 }
1964
1965 // Negate the product if it would be negative in infinite precision.
1966 Result = CGF.Builder.CreateSelect(
1967 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
1968
1969 Result = CGF.Builder.CreateTrunc(Result, ResTy);
1970 }
1971 assert(Overflow && Result && "Missing overflow or result");
1972
1973 bool isVolatile =
1974 ResultArg->getType()->getPointeeType().isVolatileQualified();
1975 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1976 isVolatile);
1977 return RValue::get(Overflow);
1978}
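A hedged example of the mixed-sign path (values chosen for illustration):

    int s = -3;
    unsigned u = 4u;
    int r;
    bool ovf = __builtin_mul_overflow(s, u, &r);   // r == -12, ovf == false
    // As above: |s| * u is computed with llvm.umul.with.overflow, the bound
    // INT_MAX + IsNegative is checked, and the product is negated when the
    // signed operand was negative.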
1979
1980static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
1981 Value *&RecordPtr, CharUnits Align,
1982 llvm::FunctionCallee Func, int Lvl) {
1983 ASTContext &Context = CGF.getContext();
1984 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
1985 std::string Pad = std::string(Lvl * 4, ' ');
1986
1987 Value *GString =
1988 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
1989 Value *Res = CGF.Builder.CreateCall(Func, {GString});
1990
1991 static llvm::DenseMap<QualType, const char *> Types;
1992 if (Types.empty()) {
1993 Types[Context.CharTy] = "%c";
1994 Types[Context.BoolTy] = "%d";
1995 Types[Context.SignedCharTy] = "%hhd";
1996 Types[Context.UnsignedCharTy] = "%hhu";
1997 Types[Context.IntTy] = "%d";
1998 Types[Context.UnsignedIntTy] = "%u";
1999 Types[Context.LongTy] = "%ld";
2000 Types[Context.UnsignedLongTy] = "%lu";
2001 Types[Context.LongLongTy] = "%lld";
2002 Types[Context.UnsignedLongLongTy] = "%llu";
2003 Types[Context.ShortTy] = "%hd";
2004 Types[Context.UnsignedShortTy] = "%hu";
2005 Types[Context.VoidPtrTy] = "%p";
2006 Types[Context.FloatTy] = "%f";
2007 Types[Context.DoubleTy] = "%f";
2008 Types[Context.LongDoubleTy] = "%Lf";
2009 Types[Context.getPointerType(Context.CharTy)] = "%s";
2010 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
2011 }
2012
2013 for (const auto *FD : RD->fields()) {
2014 Value *FieldPtr = RecordPtr;
2015 if (RD->isUnion())
2016 FieldPtr = CGF.Builder.CreatePointerCast(
2017 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
2018 else
2019 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
2020 FD->getFieldIndex());
2021
2022 GString = CGF.Builder.CreateGlobalStringPtr(
2023 llvm::Twine(Pad)
2024 .concat(FD->getType().getAsString())
2025 .concat(llvm::Twine(' '))
2026 .concat(FD->getNameAsString())
2027 .concat(" : ")
2028 .str());
2029 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2030 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2031
2032 QualType CanonicalType =
2033 FD->getType().getUnqualifiedType().getCanonicalType();
2034
2035 // If the field is itself a record type, recurse into it.
2036 if (CanonicalType->isRecordType()) {
2037 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
2038 Res = CGF.Builder.CreateAdd(TmpRes, Res);
2039 continue;
2040 }
2041
2042 // We try to determine the best format to print the current field
2043 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
2044 ? Types[Context.VoidPtrTy]
2045 : Types[CanonicalType];
2046
2047 Address FieldAddress = Address(FieldPtr, Align);
2048 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
2049
2050 // FIXME Need to handle bitfield here
2051 GString = CGF.Builder.CreateGlobalStringPtr(
2052 Format.concat(llvm::Twine('\n')).str());
2053 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
2054 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2055 }
2056
2057 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
2058 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2059 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2060 return Res;
2061}
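Illustrative note (assumption: this helper implements the record printing behind __builtin_dump_struct): for a simple record, the generated calls to Func (e.g. printf) produce output roughly of the form shown below; the exact padding and format strings follow the table and GEP logic above.

    struct Point { int x; float y; };
    // __builtin_dump_struct(&p, &printf) prints approximately:
    //   struct Point {
    //       int x : <value of x, printed via "%d">
    //       float y : <value of y, printed via "%f">
    //   }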
2062
2063static bool
2064TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2065 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2066 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2067 Ty = Ctx.getBaseElementType(Arr);
2068
2069 const auto *Record = Ty->getAsCXXRecordDecl();
2070 if (!Record)
2071 return false;
2072
2073 // We've already checked this type, or are in the process of checking it.
2074 if (!Seen.insert(Record).second)
2075 return false;
2076
2077 assert(Record->hasDefinition() &&
2078 "Incomplete types should already be diagnosed");
2079
2080 if (Record->isDynamicClass())
2081 return true;
2082
2083 for (FieldDecl *F : Record->fields()) {
2084 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2085 return true;
2086 }
2087 return false;
2088}
2089
2090/// Determine if the specified type requires laundering by checking if it is a
2091/// dynamic class type or contains a subobject which is a dynamic class type.
2092static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2093 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2094 return false;
2095 llvm::SmallPtrSet<const Decl *, 16> Seen;
2096 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2097}
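Illustrative sketch (only the predicate comes from the code above; the statement about folding is an inference): under -fstrict-vtable-pointers, laundering is required for dynamic classes and for aggregates that contain one, directly or through an array:

    struct Plain  { int x; };              // no laundering required
    struct Dyn    { virtual void f(); };   // dynamic class: requires laundering
    struct Holder { Dyn d[2]; };           // contains one: requires laundering
    // For such types __builtin_launder is emitted as a real operation rather
    // than being folded away as it is for Plain.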
2098
2099RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2100 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2101 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2102
2103 // The builtin's shift arg may have a different type than the source arg and
2104 // result, but the LLVM intrinsic uses the same type for all values.
2105 llvm::Type *Ty = Src->getType();
2106 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2107
2108 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2109 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2110 Function *F = CGM.getIntrinsic(IID, Ty);
2111 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2112}
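For example (assuming the rotate builtins reach emitRotate, which is decided outside this excerpt):

    unsigned r = __builtin_rotateleft32(x, 5);
    // lowered to: call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 5)
    // i.e. a funnel shift whose first two operands are the same value.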
2113
2114// Map math builtins for long-double to f128 version.
2115static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2116 switch (BuiltinID) {
2117#define MUTATE_LDBL(func) \
2118 case Builtin::BI__builtin_##func##l: \
2119 return Builtin::BI__builtin_##func##f128;
2120 MUTATE_LDBL(sqrt)
2121 MUTATE_LDBL(cbrt)
2122 MUTATE_LDBL(fabs)
2123 MUTATE_LDBL(log)
2124 MUTATE_LDBL(log2)
2125 MUTATE_LDBL(log10)
2126 MUTATE_LDBL(log1p)
2127 MUTATE_LDBL(logb)
2128 MUTATE_LDBL(exp)
2129 MUTATE_LDBL(exp2)
2130 MUTATE_LDBL(expm1)
2131 MUTATE_LDBL(fdim)
2132 MUTATE_LDBL(hypot)
2133 MUTATE_LDBL(ilogb)
2134 MUTATE_LDBL(pow)
2135 MUTATE_LDBL(fmin)
2136 MUTATE_LDBL(fmax)
2137 MUTATE_LDBL(ceil)
2138 MUTATE_LDBL(trunc)
2139 MUTATE_LDBL(rint)
2140 MUTATE_LDBL(nearbyint)
2141 MUTATE_LDBL(round)
2142 MUTATE_LDBL(floor)
2143 MUTATE_LDBL(lround)
2144 MUTATE_LDBL(llround)
2145 MUTATE_LDBL(lrint)
2146 MUTATE_LDBL(llrint)
2147 MUTATE_LDBL(fmod)
2148 MUTATE_LDBL(modf)
2149 MUTATE_LDBL(nan)
2150 MUTATE_LDBL(nans)
2151 MUTATE_LDBL(inf)
2152 MUTATE_LDBL(fma)
2153 MUTATE_LDBL(sin)
2154 MUTATE_LDBL(cos)
2155 MUTATE_LDBL(tan)
2156 MUTATE_LDBL(sinh)
2157 MUTATE_LDBL(cosh)
2158 MUTATE_LDBL(tanh)
2159 MUTATE_LDBL(asin)
2160 MUTATE_LDBL(acos)
2161 MUTATE_LDBL(atan)
2162 MUTATE_LDBL(asinh)
2163 MUTATE_LDBL(acosh)
2164 MUTATE_LDBL(atanh)
2165 MUTATE_LDBL(atan2)
2166 MUTATE_LDBL(erf)
2167 MUTATE_LDBL(erfc)
2168 MUTATE_LDBL(ldexp)
2169 MUTATE_LDBL(frexp)
2170 MUTATE_LDBL(huge_val)
2171 MUTATE_LDBL(copysign)
2172 MUTATE_LDBL(nextafter)
2173 MUTATE_LDBL(nexttoward)
2174 MUTATE_LDBL(remainder)
2175 MUTATE_LDBL(remquo)
2176 MUTATE_LDBL(scalbln)
2177 MUTATE_LDBL(scalbn)
2178 MUTATE_LDBL(tgamma)
2179 MUTATE_LDBL(lgamma)
2180#undef MUTATE_LDBL
2181 default:
2182 return BuiltinID;
2183 }
2184}
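Illustrative example (the target condition comes from the caller below): when long double uses the IEEE 128-bit format on PPC64, a long-double math builtin is remapped to its f128 twin, e.g.

    long double y = __builtin_sinl(x);
    // treated as __builtin_sinf128 by the switch above (MUTATE_LDBL(sin))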
2185
2186RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2187 const CallExpr *E,
2188 ReturnValueSlot ReturnValue) {
2189 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2190 // See if we can constant fold this builtin. If so, don't emit it at all.
2191 Expr::EvalResult Result;
2192 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
[1] Assuming the condition is false
2193 !Result.hasSideEffects()) {
2194 if (Result.Val.isInt())
2195 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2196 Result.Val.getInt()));
2197 if (Result.Val.isFloat())
2198 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2199 Result.Val.getFloat()));
2200 }
2201
2202 // If current long-double semantics is IEEE 128-bit, replace math builtins
2203 // of long-double with f128 equivalent.
2204 // TODO: This mutation should also be applied to targets other than PPC,
2205 // once the backend supports IEEE 128-bit style libcalls.
2206 if (getTarget().getTriple().isPPC64() &&
2207 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2208 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2209
2210 // If the builtin has been declared explicitly with an assembler label,
2211 // disable the specialized emitting below. Ideally we should communicate the
2212 // rename in IR, or at least avoid generating the intrinsic calls that are
2213 // likely to get lowered to the renamed library functions.
2214 const unsigned BuiltinIDIfNoAsmLabel =
2215 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
[2] '?' condition is false
2216
2217 // There are LLVM math intrinsics/instructions corresponding to math library
2218 // functions, except that the LLVM op never sets errno while the math library
2219 // might. Also, math builtins have the same semantics as their math library
2220 // twins. Thus, we can transform math library and builtin calls to their
2221 // LLVM counterparts if the call is marked 'const' (known to never set errno).
2222 if (FD->hasAttr<ConstAttr>()) {
[3] Calling 'Decl::hasAttr'
[5] Returning from 'Decl::hasAttr'
[6] Taking false branch
2223 switch (BuiltinIDIfNoAsmLabel) {
2224 case Builtin::BIceil:
2225 case Builtin::BIceilf:
2226 case Builtin::BIceill:
2227 case Builtin::BI__builtin_ceil:
2228 case Builtin::BI__builtin_ceilf:
2229 case Builtin::BI__builtin_ceilf16:
2230 case Builtin::BI__builtin_ceill:
2231 case Builtin::BI__builtin_ceilf128:
2232 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2233 Intrinsic::ceil,
2234 Intrinsic::experimental_constrained_ceil));
2235
2236 case Builtin::BIcopysign:
2237 case Builtin::BIcopysignf:
2238 case Builtin::BIcopysignl:
2239 case Builtin::BI__builtin_copysign:
2240 case Builtin::BI__builtin_copysignf:
2241 case Builtin::BI__builtin_copysignf16:
2242 case Builtin::BI__builtin_copysignl:
2243 case Builtin::BI__builtin_copysignf128:
2244 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
2245
2246 case Builtin::BIcos:
2247 case Builtin::BIcosf:
2248 case Builtin::BIcosl:
2249 case Builtin::BI__builtin_cos:
2250 case Builtin::BI__builtin_cosf:
2251 case Builtin::BI__builtin_cosf16:
2252 case Builtin::BI__builtin_cosl:
2253 case Builtin::BI__builtin_cosf128:
2254 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2255 Intrinsic::cos,
2256 Intrinsic::experimental_constrained_cos));
2257
2258 case Builtin::BIexp:
2259 case Builtin::BIexpf:
2260 case Builtin::BIexpl:
2261 case Builtin::BI__builtin_exp:
2262 case Builtin::BI__builtin_expf:
2263 case Builtin::BI__builtin_expf16:
2264 case Builtin::BI__builtin_expl:
2265 case Builtin::BI__builtin_expf128:
2266 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2267 Intrinsic::exp,
2268 Intrinsic::experimental_constrained_exp));
2269
2270 case Builtin::BIexp2:
2271 case Builtin::BIexp2f:
2272 case Builtin::BIexp2l:
2273 case Builtin::BI__builtin_exp2:
2274 case Builtin::BI__builtin_exp2f:
2275 case Builtin::BI__builtin_exp2f16:
2276 case Builtin::BI__builtin_exp2l:
2277 case Builtin::BI__builtin_exp2f128:
2278 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2279 Intrinsic::exp2,
2280 Intrinsic::experimental_constrained_exp2));
2281
2282 case Builtin::BIfabs:
2283 case Builtin::BIfabsf:
2284 case Builtin::BIfabsl:
2285 case Builtin::BI__builtin_fabs:
2286 case Builtin::BI__builtin_fabsf:
2287 case Builtin::BI__builtin_fabsf16:
2288 case Builtin::BI__builtin_fabsl:
2289 case Builtin::BI__builtin_fabsf128:
2290 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
2291
2292 case Builtin::BIfloor:
2293 case Builtin::BIfloorf:
2294 case Builtin::BIfloorl:
2295 case Builtin::BI__builtin_floor:
2296 case Builtin::BI__builtin_floorf:
2297 case Builtin::BI__builtin_floorf16:
2298 case Builtin::BI__builtin_floorl:
2299 case Builtin::BI__builtin_floorf128:
2300 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2301 Intrinsic::floor,
2302 Intrinsic::experimental_constrained_floor));
2303
2304 case Builtin::BIfma:
2305 case Builtin::BIfmaf:
2306 case Builtin::BIfmal:
2307 case Builtin::BI__builtin_fma:
2308 case Builtin::BI__builtin_fmaf:
2309 case Builtin::BI__builtin_fmaf16:
2310 case Builtin::BI__builtin_fmal:
2311 case Builtin::BI__builtin_fmaf128:
2312 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2313 Intrinsic::fma,
2314 Intrinsic::experimental_constrained_fma));
2315
2316 case Builtin::BIfmax:
2317 case Builtin::BIfmaxf:
2318 case Builtin::BIfmaxl:
2319 case Builtin::BI__builtin_fmax:
2320 case Builtin::BI__builtin_fmaxf:
2321 case Builtin::BI__builtin_fmaxf16:
2322 case Builtin::BI__builtin_fmaxl:
2323 case Builtin::BI__builtin_fmaxf128:
2324 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2325 Intrinsic::maxnum,
2326 Intrinsic::experimental_constrained_maxnum));
2327
2328 case Builtin::BIfmin:
2329 case Builtin::BIfminf:
2330 case Builtin::BIfminl:
2331 case Builtin::BI__builtin_fmin:
2332 case Builtin::BI__builtin_fminf:
2333 case Builtin::BI__builtin_fminf16:
2334 case Builtin::BI__builtin_fminl:
2335 case Builtin::BI__builtin_fminf128:
2336 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2337 Intrinsic::minnum,
2338 Intrinsic::experimental_constrained_minnum));
2339
2340 // fmod() is a special-case. It maps to the frem instruction rather than an
2341 // LLVM intrinsic.
2342 case Builtin::BIfmod:
2343 case Builtin::BIfmodf:
2344 case Builtin::BIfmodl:
2345 case Builtin::BI__builtin_fmod:
2346 case Builtin::BI__builtin_fmodf:
2347 case Builtin::BI__builtin_fmodf16:
2348 case Builtin::BI__builtin_fmodl:
2349 case Builtin::BI__builtin_fmodf128: {
2350 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2351 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2352 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2353 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2354 }
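Under -fno-math-errno (which is what lets fmod carry the 'const' attribute here), the call lowers to a bare frem instruction rather than an intrinsic or libcall. A minimal illustration, assuming a hypothetical caller outside this file:

double fmod_demo(double a, double b) {
  // With math-errno disabled this becomes:  %fmod = frem double %a, %b
  return __builtin_fmod(a, b);
}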
2355
2356 case Builtin::BIlog:
2357 case Builtin::BIlogf:
2358 case Builtin::BIlogl:
2359 case Builtin::BI__builtin_log:
2360 case Builtin::BI__builtin_logf:
2361 case Builtin::BI__builtin_logf16:
2362 case Builtin::BI__builtin_logl:
2363 case Builtin::BI__builtin_logf128:
2364 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2365 Intrinsic::log,
2366 Intrinsic::experimental_constrained_log));
2367
2368 case Builtin::BIlog10:
2369 case Builtin::BIlog10f:
2370 case Builtin::BIlog10l:
2371 case Builtin::BI__builtin_log10:
2372 case Builtin::BI__builtin_log10f:
2373 case Builtin::BI__builtin_log10f16:
2374 case Builtin::BI__builtin_log10l:
2375 case Builtin::BI__builtin_log10f128:
2376 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2377 Intrinsic::log10,
2378 Intrinsic::experimental_constrained_log10));
2379
2380 case Builtin::BIlog2:
2381 case Builtin::BIlog2f:
2382 case Builtin::BIlog2l:
2383 case Builtin::BI__builtin_log2:
2384 case Builtin::BI__builtin_log2f:
2385 case Builtin::BI__builtin_log2f16:
2386 case Builtin::BI__builtin_log2l:
2387 case Builtin::BI__builtin_log2f128:
2388 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2389 Intrinsic::log2,
2390 Intrinsic::experimental_constrained_log2));
2391
2392 case Builtin::BInearbyint:
2393 case Builtin::BInearbyintf:
2394 case Builtin::BInearbyintl:
2395 case Builtin::BI__builtin_nearbyint:
2396 case Builtin::BI__builtin_nearbyintf:
2397 case Builtin::BI__builtin_nearbyintl:
2398 case Builtin::BI__builtin_nearbyintf128:
2399 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2400 Intrinsic::nearbyint,
2401 Intrinsic::experimental_constrained_nearbyint));
2402
2403 case Builtin::BIpow:
2404 case Builtin::BIpowf:
2405 case Builtin::BIpowl:
2406 case Builtin::BI__builtin_pow:
2407 case Builtin::BI__builtin_powf:
2408 case Builtin::BI__builtin_powf16:
2409 case Builtin::BI__builtin_powl:
2410 case Builtin::BI__builtin_powf128:
2411 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2412 Intrinsic::pow,
2413 Intrinsic::experimental_constrained_pow));
2414
2415 case Builtin::BIrint:
2416 case Builtin::BIrintf:
2417 case Builtin::BIrintl:
2418 case Builtin::BI__builtin_rint:
2419 case Builtin::BI__builtin_rintf:
2420 case Builtin::BI__builtin_rintf16:
2421 case Builtin::BI__builtin_rintl:
2422 case Builtin::BI__builtin_rintf128:
2423 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2424 Intrinsic::rint,
2425 Intrinsic::experimental_constrained_rint));
2426
2427 case Builtin::BIround:
2428 case Builtin::BIroundf:
2429 case Builtin::BIroundl:
2430 case Builtin::BI__builtin_round:
2431 case Builtin::BI__builtin_roundf:
2432 case Builtin::BI__builtin_roundf16:
2433 case Builtin::BI__builtin_roundl:
2434 case Builtin::BI__builtin_roundf128:
2435 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2436 Intrinsic::round,
2437 Intrinsic::experimental_constrained_round));
2438
2439 case Builtin::BIsin:
2440 case Builtin::BIsinf:
2441 case Builtin::BIsinl:
2442 case Builtin::BI__builtin_sin:
2443 case Builtin::BI__builtin_sinf:
2444 case Builtin::BI__builtin_sinf16:
2445 case Builtin::BI__builtin_sinl:
2446 case Builtin::BI__builtin_sinf128:
2447 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2448 Intrinsic::sin,
2449 Intrinsic::experimental_constrained_sin));
2450
2451 case Builtin::BIsqrt:
2452 case Builtin::BIsqrtf:
2453 case Builtin::BIsqrtl:
2454 case Builtin::BI__builtin_sqrt:
2455 case Builtin::BI__builtin_sqrtf:
2456 case Builtin::BI__builtin_sqrtf16:
2457 case Builtin::BI__builtin_sqrtl:
2458 case Builtin::BI__builtin_sqrtf128:
2459 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2460 Intrinsic::sqrt,
2461 Intrinsic::experimental_constrained_sqrt));
2462
2463 case Builtin::BItrunc:
2464 case Builtin::BItruncf:
2465 case Builtin::BItruncl:
2466 case Builtin::BI__builtin_trunc:
2467 case Builtin::BI__builtin_truncf:
2468 case Builtin::BI__builtin_truncf16:
2469 case Builtin::BI__builtin_truncl:
2470 case Builtin::BI__builtin_truncf128:
2471 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2472 Intrinsic::trunc,
2473 Intrinsic::experimental_constrained_trunc));
2474
2475 case Builtin::BIlround:
2476 case Builtin::BIlroundf:
2477 case Builtin::BIlroundl:
2478 case Builtin::BI__builtin_lround:
2479 case Builtin::BI__builtin_lroundf:
2480 case Builtin::BI__builtin_lroundl:
2481 case Builtin::BI__builtin_lroundf128:
2482 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2483 *this, E, Intrinsic::lround,
2484 Intrinsic::experimental_constrained_lround));
2485
2486 case Builtin::BIllround:
2487 case Builtin::BIllroundf:
2488 case Builtin::BIllroundl:
2489 case Builtin::BI__builtin_llround:
2490 case Builtin::BI__builtin_llroundf:
2491 case Builtin::BI__builtin_llroundl:
2492 case Builtin::BI__builtin_llroundf128:
2493 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2494 *this, E, Intrinsic::llround,
2495 Intrinsic::experimental_constrained_llround));
2496
2497 case Builtin::BIlrint:
2498 case Builtin::BIlrintf:
2499 case Builtin::BIlrintl:
2500 case Builtin::BI__builtin_lrint:
2501 case Builtin::BI__builtin_lrintf:
2502 case Builtin::BI__builtin_lrintl:
2503 case Builtin::BI__builtin_lrintf128:
2504 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2505 *this, E, Intrinsic::lrint,
2506 Intrinsic::experimental_constrained_lrint));
2507
2508 case Builtin::BIllrint:
2509 case Builtin::BIllrintf:
2510 case Builtin::BIllrintl:
2511 case Builtin::BI__builtin_llrint:
2512 case Builtin::BI__builtin_llrintf:
2513 case Builtin::BI__builtin_llrintl:
2514 case Builtin::BI__builtin_llrintf128:
2515 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2516 *this, E, Intrinsic::llrint,
2517 Intrinsic::experimental_constrained_llrint));
2518
2519 default:
2520 break;
2521 }
2522 }
2523
2524 switch (BuiltinIDIfNoAsmLabel) {
7. Control jumps to 'case BI__builtin_matrix_transpose:' at line 3023
2525 default: break;
2526 case Builtin::BI__builtin___CFStringMakeConstantString:
2527 case Builtin::BI__builtin___NSStringMakeConstantString:
2528 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
2529 case Builtin::BI__builtin_stdarg_start:
2530 case Builtin::BI__builtin_va_start:
2531 case Builtin::BI__va_start:
2532 case Builtin::BI__builtin_va_end:
2533 return RValue::get(
2534 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2535 ? EmitScalarExpr(E->getArg(0))
2536 : EmitVAListRef(E->getArg(0)).getPointer(),
2537 BuiltinID != Builtin::BI__builtin_va_end));
2538 case Builtin::BI__builtin_va_copy: {
2539 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2540 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2541
2542 llvm::Type *Type = Int8PtrTy;
2543
2544 DstPtr = Builder.CreateBitCast(DstPtr, Type);
2545 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
2546 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
2547 {DstPtr, SrcPtr}));
2548 }
2549 case Builtin::BI__builtin_abs:
2550 case Builtin::BI__builtin_labs:
2551 case Builtin::BI__builtin_llabs: {
2552 // X < 0 ? -X : X
2553 // The negation has 'nsw' because abs of INT_MIN is undefined.
2554 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2555 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
2556 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
2557 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2558 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
2559 return RValue::get(Result);
2560 }
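For reference, the select-based sequence built above corresponds to the following source-level pattern (a sketch for a 32-bit argument; abs_ref is a hypothetical name, not part of this file):

#include <cstdint>

int32_t abs_ref(int32_t x) {
  int32_t neg = -x;        // the 'nsw' negation: abs(INT32_MIN) stays undefined
  return x < 0 ? neg : x;  // icmp slt + select, as emitted above
}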
2561 case Builtin::BI__builtin_complex: {
2562 Value *Real = EmitScalarExpr(E->getArg(0));
2563 Value *Imag = EmitScalarExpr(E->getArg(1));
2564 return RValue::getComplex({Real, Imag});
2565 }
2566 case Builtin::BI__builtin_conj:
2567 case Builtin::BI__builtin_conjf:
2568 case Builtin::BI__builtin_conjl:
2569 case Builtin::BIconj:
2570 case Builtin::BIconjf:
2571 case Builtin::BIconjl: {
2572 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2573 Value *Real = ComplexVal.first;
2574 Value *Imag = ComplexVal.second;
2575 Imag = Builder.CreateFNeg(Imag, "neg");
2576 return RValue::getComplex(std::make_pair(Real, Imag));
2577 }
2578 case Builtin::BI__builtin_creal:
2579 case Builtin::BI__builtin_crealf:
2580 case Builtin::BI__builtin_creall:
2581 case Builtin::BIcreal:
2582 case Builtin::BIcrealf:
2583 case Builtin::BIcreall: {
2584 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2585 return RValue::get(ComplexVal.first);
2586 }
2587
2588 case Builtin::BI__builtin_dump_struct: {
2589 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2590 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2591 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2592
2593 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2594 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2595
2596 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2597 QualType Arg0Type = Arg0->getType()->getPointeeType();
2598
2599 Value *RecordPtr = EmitScalarExpr(Arg0);
2600 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2601 {LLVMFuncType, Func}, 0);
2602 return RValue::get(Res);
2603 }
2604
2605 case Builtin::BI__builtin_preserve_access_index: {
 2606 // Only enable the preserved access index region when debuginfo
 2607 // is available, as debuginfo is needed to preserve the user-level
2608 // access pattern.
2609 if (!getDebugInfo()) {
2610 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2611 return RValue::get(EmitScalarExpr(E->getArg(0)));
2612 }
2613
2614 // Nested builtin_preserve_access_index() not supported
2615 if (IsInPreservedAIRegion) {
2616 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2617 return RValue::get(EmitScalarExpr(E->getArg(0)));
2618 }
2619
2620 IsInPreservedAIRegion = true;
2621 Value *Res = EmitScalarExpr(E->getArg(0));
2622 IsInPreservedAIRegion = false;
2623 return RValue::get(Res);
2624 }
2625
2626 case Builtin::BI__builtin_cimag:
2627 case Builtin::BI__builtin_cimagf:
2628 case Builtin::BI__builtin_cimagl:
2629 case Builtin::BIcimag:
2630 case Builtin::BIcimagf:
2631 case Builtin::BIcimagl: {
2632 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2633 return RValue::get(ComplexVal.second);
2634 }
2635
2636 case Builtin::BI__builtin_clrsb:
2637 case Builtin::BI__builtin_clrsbl:
2638 case Builtin::BI__builtin_clrsbll: {
 2639 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2640 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2641
2642 llvm::Type *ArgType = ArgValue->getType();
2643 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2644
2645 llvm::Type *ResultType = ConvertType(E->getType());
2646 Value *Zero = llvm::Constant::getNullValue(ArgType);
2647 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2648 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2649 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2650 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2651 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2652 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2653 "cast");
2654 return RValue::get(Result);
2655 }
2656 case Builtin::BI__builtin_ctzs:
2657 case Builtin::BI__builtin_ctz:
2658 case Builtin::BI__builtin_ctzl:
2659 case Builtin::BI__builtin_ctzll: {
2660 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2661
2662 llvm::Type *ArgType = ArgValue->getType();
2663 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2664
2665 llvm::Type *ResultType = ConvertType(E->getType());
2666 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2667 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2668 if (Result->getType() != ResultType)
2669 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2670 "cast");
2671 return RValue::get(Result);
2672 }
2673 case Builtin::BI__builtin_clzs:
2674 case Builtin::BI__builtin_clz:
2675 case Builtin::BI__builtin_clzl:
2676 case Builtin::BI__builtin_clzll: {
2677 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2678
2679 llvm::Type *ArgType = ArgValue->getType();
2680 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2681
2682 llvm::Type *ResultType = ConvertType(E->getType());
2683 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2684 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2685 if (Result->getType() != ResultType)
2686 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2687 "cast");
2688 return RValue::get(Result);
2689 }
2690 case Builtin::BI__builtin_ffs:
2691 case Builtin::BI__builtin_ffsl:
2692 case Builtin::BI__builtin_ffsll: {
2693 // ffs(x) -> x ? cttz(x) + 1 : 0
2694 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2695
2696 llvm::Type *ArgType = ArgValue->getType();
2697 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2698
2699 llvm::Type *ResultType = ConvertType(E->getType());
2700 Value *Tmp =
2701 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2702 llvm::ConstantInt::get(ArgType, 1));
2703 Value *Zero = llvm::Constant::getNullValue(ArgType);
2704 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2705 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2706 if (Result->getType() != ResultType)
2707 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2708 "cast");
2709 return RValue::get(Result);
2710 }
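As the comment says, the lowering is x ? cttz(x) + 1 : 0. A standalone sketch of the same semantics using the __builtin_ctz counterpart of the cttz intrinsic (ffs_ref is an illustrative name, not part of this file):

int ffs_ref(unsigned x) {
  // cttz is only meaningful here when x != 0, exactly as the select guards it
  return x ? __builtin_ctz(x) + 1 : 0;
}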
2711 case Builtin::BI__builtin_parity:
2712 case Builtin::BI__builtin_parityl:
2713 case Builtin::BI__builtin_parityll: {
2714 // parity(x) -> ctpop(x) & 1
2715 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2716
2717 llvm::Type *ArgType = ArgValue->getType();
2718 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2719
2720 llvm::Type *ResultType = ConvertType(E->getType());
2721 Value *Tmp = Builder.CreateCall(F, ArgValue);
2722 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2723 if (Result->getType() != ResultType)
2724 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2725 "cast");
2726 return RValue::get(Result);
2727 }
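Equivalently, in source form (parity_ref is an illustrative name):

int parity_ref(unsigned x) {
  return __builtin_popcount(x) & 1;  // ctpop(x) & 1, as in the comment above
}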
2728 case Builtin::BI__lzcnt16:
2729 case Builtin::BI__lzcnt:
2730 case Builtin::BI__lzcnt64: {
2731 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2732
2733 llvm::Type *ArgType = ArgValue->getType();
2734 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2735
2736 llvm::Type *ResultType = ConvertType(E->getType());
2737 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2738 if (Result->getType() != ResultType)
2739 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2740 "cast");
2741 return RValue::get(Result);
2742 }
2743 case Builtin::BI__popcnt16:
2744 case Builtin::BI__popcnt:
2745 case Builtin::BI__popcnt64:
2746 case Builtin::BI__builtin_popcount:
2747 case Builtin::BI__builtin_popcountl:
2748 case Builtin::BI__builtin_popcountll: {
2749 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2750
2751 llvm::Type *ArgType = ArgValue->getType();
2752 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2753
2754 llvm::Type *ResultType = ConvertType(E->getType());
2755 Value *Result = Builder.CreateCall(F, ArgValue);
2756 if (Result->getType() != ResultType)
2757 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2758 "cast");
2759 return RValue::get(Result);
2760 }
2761 case Builtin::BI__builtin_unpredictable: {
2762 // Always return the argument of __builtin_unpredictable. LLVM does not
2763 // handle this builtin. Metadata for this builtin should be added directly
2764 // to instructions such as branches or switches that use it.
2765 return RValue::get(EmitScalarExpr(E->getArg(0)));
2766 }
2767 case Builtin::BI__builtin_expect: {
2768 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2769 llvm::Type *ArgType = ArgValue->getType();
2770
2771 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2772 // Don't generate llvm.expect on -O0 as the backend won't use it for
2773 // anything.
2774 // Note, we still IRGen ExpectedValue because it could have side-effects.
2775 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2776 return RValue::get(ArgValue);
2777
2778 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2779 Value *Result =
2780 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2781 return RValue::get(Result);
2782 }
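Typical usage that reaches this case (a sketch; as noted above, the llvm.expect call is only emitted when optimizing):

int handle(int err) {
  if (__builtin_expect(err != 0, 0))  // hint that the error path is unlikely
    return -1;
  return 0;
}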
2783 case Builtin::BI__builtin_expect_with_probability: {
2784 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2785 llvm::Type *ArgType = ArgValue->getType();
2786
2787 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2788 llvm::APFloat Probability(0.0);
2789 const Expr *ProbArg = E->getArg(2);
2790 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
 2791 assert(EvalSucceed && "probability should be able to evaluate as float");
2792 (void)EvalSucceed;
2793 bool LoseInfo = false;
2794 Probability.convert(llvm::APFloat::IEEEdouble(),
2795 llvm::RoundingMode::Dynamic, &LoseInfo);
2796 llvm::Type *Ty = ConvertType(ProbArg->getType());
2797 Constant *Confidence = ConstantFP::get(Ty, Probability);
2798 // Don't generate llvm.expect.with.probability on -O0 as the backend
2799 // won't use it for anything.
2800 // Note, we still IRGen ExpectedValue because it could have side-effects.
2801 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2802 return RValue::get(ArgValue);
2803
2804 Function *FnExpect =
2805 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2806 Value *Result = Builder.CreateCall(
2807 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2808 return RValue::get(Result);
2809 }
2810 case Builtin::BI__builtin_assume_aligned: {
2811 const Expr *Ptr = E->getArg(0);
2812 Value *PtrValue = EmitScalarExpr(Ptr);
2813 Value *OffsetValue =
2814 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2815
2816 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2817 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2818 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2819 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2820 llvm::Value::MaximumAlignment);
2821
2822 emitAlignmentAssumption(PtrValue, Ptr,
2823 /*The expr loc is sufficient.*/ SourceLocation(),
2824 AlignmentCI, OffsetValue);
2825 return RValue::get(PtrValue);
2826 }
2827 case Builtin::BI__assume:
2828 case Builtin::BI__builtin_assume: {
2829 if (E->getArg(0)->HasSideEffects(getContext()))
2830 return RValue::get(nullptr);
2831
2832 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2833 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2834 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2835 }
2836 case Builtin::BI__builtin_bswap16:
2837 case Builtin::BI__builtin_bswap32:
2838 case Builtin::BI__builtin_bswap64: {
2839 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2840 }
2841 case Builtin::BI__builtin_bitreverse8:
2842 case Builtin::BI__builtin_bitreverse16:
2843 case Builtin::BI__builtin_bitreverse32:
2844 case Builtin::BI__builtin_bitreverse64: {
2845 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2846 }
2847 case Builtin::BI__builtin_rotateleft8:
2848 case Builtin::BI__builtin_rotateleft16:
2849 case Builtin::BI__builtin_rotateleft32:
2850 case Builtin::BI__builtin_rotateleft64:
2851 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2852 case Builtin::BI_rotl16:
2853 case Builtin::BI_rotl:
2854 case Builtin::BI_lrotl:
2855 case Builtin::BI_rotl64:
2856 return emitRotate(E, false);
2857
2858 case Builtin::BI__builtin_rotateright8:
2859 case Builtin::BI__builtin_rotateright16:
2860 case Builtin::BI__builtin_rotateright32:
2861 case Builtin::BI__builtin_rotateright64:
2862 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2863 case Builtin::BI_rotr16:
2864 case Builtin::BI_rotr:
2865 case Builtin::BI_lrotr:
2866 case Builtin::BI_rotr64:
2867 return emitRotate(E, true);
2868
2869 case Builtin::BI__builtin_constant_p: {
2870 llvm::Type *ResultType = ConvertType(E->getType());
2871
2872 const Expr *Arg = E->getArg(0);
2873 QualType ArgType = Arg->getType();
2874 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2875 // and likely a mistake.
2876 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2877 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2878 // Per the GCC documentation, only numeric constants are recognized after
2879 // inlining.
2880 return RValue::get(ConstantInt::get(ResultType, 0));
2881
2882 if (Arg->HasSideEffects(getContext()))
2883 // The argument is unevaluated, so be conservative if it might have
2884 // side-effects.
2885 return RValue::get(ConstantInt::get(ResultType, 0));
2886
2887 Value *ArgValue = EmitScalarExpr(Arg);
2888 if (ArgType->isObjCObjectPointerType()) {
2889 // Convert Objective-C objects to id because we cannot distinguish between
2890 // LLVM types for Obj-C classes as they are opaque.
2891 ArgType = CGM.getContext().getObjCIdType();
2892 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2893 }
2894 Function *F =
2895 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2896 Value *Result = Builder.CreateCall(F, ArgValue);
2897 if (Result->getType() != ResultType)
2898 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2899 return RValue::get(Result);
2900 }
2901 case Builtin::BI__builtin_dynamic_object_size:
2902 case Builtin::BI__builtin_object_size: {
2903 unsigned Type =
2904 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2905 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2906
2907 // We pass this builtin onto the optimizer so that it can figure out the
2908 // object size in more complex cases.
2909 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2910 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2911 /*EmittedE=*/nullptr, IsDynamic));
2912 }
2913 case Builtin::BI__builtin_prefetch: {
2914 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
 2915 // FIXME: Technically these constants should be of type 'int', yes?
2916 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2917 llvm::ConstantInt::get(Int32Ty, 0);
2918 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2919 llvm::ConstantInt::get(Int32Ty, 3);
2920 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2921 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2922 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2923 }
2924 case Builtin::BI__builtin_readcyclecounter: {
2925 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2926 return RValue::get(Builder.CreateCall(F));
2927 }
2928 case Builtin::BI__builtin___clear_cache: {
2929 Value *Begin = EmitScalarExpr(E->getArg(0));
2930 Value *End = EmitScalarExpr(E->getArg(1));
2931 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
2932 return RValue::get(Builder.CreateCall(F, {Begin, End}));
2933 }
2934 case Builtin::BI__builtin_trap:
2935 return RValue::get(EmitTrapCall(Intrinsic::trap));
2936 case Builtin::BI__debugbreak:
2937 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2938 case Builtin::BI__builtin_unreachable: {
2939 EmitUnreachable(E->getExprLoc());
2940
2941 // We do need to preserve an insertion point.
2942 EmitBlock(createBasicBlock("unreachable.cont"));
2943
2944 return RValue::get(nullptr);
2945 }
2946
2947 case Builtin::BI__builtin_powi:
2948 case Builtin::BI__builtin_powif:
2949 case Builtin::BI__builtin_powil:
2950 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2951 *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
2952
2953 case Builtin::BI__builtin_isgreater:
2954 case Builtin::BI__builtin_isgreaterequal:
2955 case Builtin::BI__builtin_isless:
2956 case Builtin::BI__builtin_islessequal:
2957 case Builtin::BI__builtin_islessgreater:
2958 case Builtin::BI__builtin_isunordered: {
2959 // Ordered comparisons: we know the arguments to these are matching scalar
2960 // floating point values.
2961 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2962 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2963 Value *LHS = EmitScalarExpr(E->getArg(0));
2964 Value *RHS = EmitScalarExpr(E->getArg(1));
2965
2966 switch (BuiltinID) {
 2967 default: llvm_unreachable("Unknown ordered comparison");
2968 case Builtin::BI__builtin_isgreater:
2969 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
2970 break;
2971 case Builtin::BI__builtin_isgreaterequal:
2972 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
2973 break;
2974 case Builtin::BI__builtin_isless:
2975 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
2976 break;
2977 case Builtin::BI__builtin_islessequal:
2978 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
2979 break;
2980 case Builtin::BI__builtin_islessgreater:
2981 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
2982 break;
2983 case Builtin::BI__builtin_isunordered:
2984 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
2985 break;
2986 }
2987 // ZExt bool to int type.
2988 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
2989 }
2990 case Builtin::BI__builtin_isnan: {
2991 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2992 Value *V = EmitScalarExpr(E->getArg(0));
2993 llvm::Type *Ty = V->getType();
2994 const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
2995 if (!Builder.getIsFPConstrained() ||
2996 Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
2997 !Ty->isIEEE()) {
2998 V = Builder.CreateFCmpUNO(V, V, "cmp");
2999 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3000 }
3001
3002 if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
3003 return RValue::get(Result);
3004
3005 // NaN has all exp bits set and a non zero significand. Therefore:
3006 // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
3007 unsigned bitsize = Ty->getScalarSizeInBits();
3008 llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
3009 Value *IntV = Builder.CreateBitCast(V, IntTy);
3010 APInt AndMask = APInt::getSignedMaxValue(bitsize);
3011 Value *AbsV =
3012 Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
3013 APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
3014 Value *Sub =
3015 Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
3016 // V = sign bit (Sub) <=> V = (Sub < 0)
3017 V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
3018 if (bitsize > 32)
3019 V = Builder.CreateTrunc(V, ConvertType(E->getType()));
3020 return RValue::get(V);
3021 }
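The strictfp fallback above implements the formula in the comment with integer arithmetic. A standalone float sketch of the same bit trick (isnan_bits and the use of memcpy are illustrative only):

#include <cstdint>
#include <cstring>

bool isnan_bits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);    // the CreateBitCast step
  uint32_t absBits = bits & 0x7fffffffu;  // clear the sign bit (signed-max mask)
  uint32_t expMask = 0x7f800000u;         // bit pattern of +infinity
  // exp mask - abs(V) has its sign bit set exactly when V is a NaN
  return (expMask - absBits) >> 31;
}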
3022
3023 case Builtin::BI__builtin_matrix_transpose: {
3024 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
8. Assuming the object is not a 'ConstantMatrixType'
9. 'MatrixTy' initialized to a null pointer value
3025 Value *MatValue = EmitScalarExpr(E->getArg(0));
3026 MatrixBuilder<CGBuilderTy> MB(Builder);
3027 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
10. Called C++ object pointer is null
3028 MatrixTy->getNumColumns());
3029 return RValue::get(Result);
3030 }
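This is the case the analyzer flags: getAs<ConstantMatrixType>() returns null when the argument's type is not a ConstantMatrixType, and MatrixTy is then dereferenced at line 3027. A minimal sketch of the kind of guard that would satisfy the checker (not necessarily the upstream fix; Sema is normally expected to reject non-matrix operands before CodeGen runs):

const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
assert(MatrixTy && "__builtin_matrix_transpose operand must be a matrix type");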
3031
3032 case Builtin::BI__builtin_matrix_column_major_load: {
3033 MatrixBuilder<CGBuilderTy> MB(Builder);
3034 // Emit everything that isn't dependent on the first parameter type
3035 Value *Stride = EmitScalarExpr(E->getArg(3));
3036 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
3037 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
 3038 assert(PtrTy && "arg0 must be of pointer type");
3039 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3040
3041 Address Src = EmitPointerWithAlignment(E->getArg(0));
3042 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3043 E->getArg(0)->getExprLoc(), FD, 0);
3044 Value *Result = MB.CreateColumnMajorLoad(
3045 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
3046 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
3047 "matrix");
3048 return RValue::get(Result);
3049 }
3050
3051 case Builtin::BI__builtin_matrix_column_major_store: {
3052 MatrixBuilder<CGBuilderTy> MB(Builder);
3053 Value *Matrix = EmitScalarExpr(E->getArg(0));
3054 Address Dst = EmitPointerWithAlignment(E->getArg(1));
3055 Value *Stride = EmitScalarExpr(E->getArg(2));
3056
3057 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3058 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
 3059 assert(PtrTy && "arg1 must be of pointer type");
3060 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3061
3062 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
3063 E->getArg(1)->getExprLoc(), FD, 0);
3064 Value *Result = MB.CreateColumnMajorStore(
3065 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
3066 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
3067 return RValue::get(Result);
3068 }
3069
3070 case Builtin::BIfinite:
3071 case Builtin::BI__finite:
3072 case Builtin::BIfinitef:
3073 case Builtin::BI__finitef:
3074 case Builtin::BIfinitel:
3075 case Builtin::BI__finitel:
3076 case Builtin::BI__builtin_isinf:
3077 case Builtin::BI__builtin_isfinite: {
3078 // isinf(x) --> fabs(x) == infinity
3079 // isfinite(x) --> fabs(x) != infinity
3080 // x != NaN via the ordered compare in either case.
3081 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3082 Value *V = EmitScalarExpr(E->getArg(0));
3083 llvm::Type *Ty = V->getType();
3084 if (!Builder.getIsFPConstrained() ||
3085 Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
3086 !Ty->isIEEE()) {
3087 Value *Fabs = EmitFAbs(*this, V);
3088 Constant *Infinity = ConstantFP::getInfinity(V->getType());
3089 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
3090 ? CmpInst::FCMP_OEQ
3091 : CmpInst::FCMP_ONE;
3092 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
3093 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
3094 }
3095
3096 if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
3097 return RValue::get(Result);
3098
3099 // Inf values have all exp bits set and a zero significand. Therefore:
3100 // isinf(V) == ((V << 1) == ((exp mask) << 1))
3101 // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
3102 unsigned bitsize = Ty->getScalarSizeInBits();
3103 llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
3104 Value *IntV = Builder.CreateBitCast(V, IntTy);
3105 Value *Shl1 = Builder.CreateShl(IntV, 1);
3106 const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
3107 APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
3108 Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
3109 if (BuiltinID == Builtin::BI__builtin_isinf)
3110 V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
3111 else
3112 V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
3113 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3114 }
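A standalone float sketch of the shift-based comparison described in the comment above (helper names are illustrative only):

#include <cstdint>
#include <cstring>

static uint32_t shl1_bits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits << 1;  // drop the sign bit
}

bool isinf_bits(float f)    { return shl1_bits(f) == (0x7f800000u << 1); }
bool isfinite_bits(float f) { return shl1_bits(f) <  (0x7f800000u << 1); }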
3115
3116 case Builtin::BI__builtin_isinf_sign: {
3117 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
3118 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3119 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3120 Value *Arg = EmitScalarExpr(E->getArg(0));
3121 Value *AbsArg = EmitFAbs(*this, Arg);
3122 Value *IsInf = Builder.CreateFCmpOEQ(
3123 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
3124 Value *IsNeg = EmitSignBit(*this, Arg);
3125
3126 llvm::Type *IntTy = ConvertType(E->getType());
3127 Value *Zero = Constant::getNullValue(IntTy);
3128 Value *One = ConstantInt::get(IntTy, 1);
3129 Value *NegativeOne = ConstantInt::get(IntTy, -1);
3130 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
3131 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
3132 return RValue::get(Result);
3133 }
3134
3135 case Builtin::BI__builtin_isnormal: {
3136 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
3137 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3138 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3139 Value *V = EmitScalarExpr(E->getArg(0));
3140 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
3141
3142 Value *Abs = EmitFAbs(*this, V);
3143 Value *IsLessThanInf =
3144 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
3145 APFloat Smallest = APFloat::getSmallestNormalized(
3146 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
3147 Value *IsNormal =
3148 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
3149 "isnormal");
3150 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
3151 V = Builder.CreateAnd(V, IsNormal, "and");
3152 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3153 }
3154
3155 case Builtin::BI__builtin_flt_rounds: {
3156 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
3157
3158 llvm::Type *ResultType = ConvertType(E->getType());
3159 Value *Result = Builder.CreateCall(F);
3160 if (Result->getType() != ResultType)
3161 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3162 "cast");
3163 return RValue::get(Result);
3164 }
3165
3166 case Builtin::BI__builtin_fpclassify: {
3167 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3168 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3169 Value *V = EmitScalarExpr(E->getArg(5));
3170 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
3171
3172 // Create Result
3173 BasicBlock *Begin = Builder.GetInsertBlock();
3174 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3175 Builder.SetInsertPoint(End);
3176 PHINode *Result =
3177 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3178 "fpclassify_result");
3179
3180 // if (V==0) return FP_ZERO
3181 Builder.SetInsertPoint(Begin);
3182 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3183 "iszero");
3184 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3185 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3186 Builder.CreateCondBr(IsZero, End, NotZero);
3187 Result->addIncoming(ZeroLiteral, Begin);
3188
3189 // if (V != V) return FP_NAN
3190 Builder.SetInsertPoint(NotZero);
3191 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3192 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3193 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3194 Builder.CreateCondBr(IsNan, End, NotNan);
3195 Result->addIncoming(NanLiteral, NotZero);
3196
3197 // if (fabs(V) == infinity) return FP_INFINITY
3198 Builder.SetInsertPoint(NotNan);
3199 Value *VAbs = EmitFAbs(*this, V);
3200 Value *IsInf =
3201 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3202 "isinf");
3203 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3204 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3205 Builder.CreateCondBr(IsInf, End, NotInf);
3206 Result->addIncoming(InfLiteral, NotNan);
3207
3208 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3209 Builder.SetInsertPoint(NotInf);
3210 APFloat Smallest = APFloat::getSmallestNormalized(
3211 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3212 Value *IsNormal =
3213 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3214 "isnormal");
3215 Value *NormalResult =
3216 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3217 EmitScalarExpr(E->getArg(3)));
3218 Builder.CreateBr(End);
3219 Result->addIncoming(NormalResult, NotInf);
3220
3221 // return Result
3222 Builder.SetInsertPoint(End);
3223 return RValue::get(Result);
3224 }
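The control flow above is the usual fpclassify cascade: zero, then NaN, then infinity, then normal vs. subnormal. A source-level sketch with the same ordering (fpclassify_ref is illustrative; the five integer arguments are the class values passed to the builtin, in the same order):

#include <cmath>
#include <limits>

int fpclassify_ref(int nanv, int infv, int normv, int subnormv, int zerov,
                   double x) {
  if (x == 0.0) return zerov;  // checked first, as in the IR above
  if (x != x)  return nanv;    // unordered self-compare detects NaN
  double a = std::fabs(x);
  if (a == std::numeric_limits<double>::infinity()) return infv;
  return a >= std::numeric_limits<double>::min() ? normv : subnormv;
}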
3225
3226 case Builtin::BIalloca:
3227 case Builtin::BI_alloca:
3228 case Builtin::BI__builtin_alloca: {
3229 Value *Size = EmitScalarExpr(E->getArg(0));
3230 const TargetInfo &TI = getContext().getTargetInfo();
3231 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3232 const Align SuitableAlignmentInBytes =
3233 CGM.getContext()
3234 .toCharUnitsFromBits(TI.getSuitableAlign())
3235 .getAsAlign();
3236 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3237 AI->setAlignment(SuitableAlignmentInBytes);
3238 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3239 return RValue::get(AI);
3240 }
3241
3242 case Builtin::BI__builtin_alloca_with_align: {
3243 Value *Size = EmitScalarExpr(E->getArg(0));
3244 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3245 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3246 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3247 const Align AlignmentInBytes =
3248 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3249 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3250 AI->setAlignment(AlignmentInBytes);
3251 initializeAlloca(*this, AI, Size, AlignmentInBytes);
3252 return RValue::get(AI);
3253 }
3254
3255 case Builtin::BIbzero:
3256 case Builtin::BI__builtin_bzero: {
3257 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3258 Value *SizeVal = EmitScalarExpr(E->getArg(1));
3259 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3260 E->getArg(0)->getExprLoc(), FD, 0);
3261 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3262 return RValue::get(nullptr);
3263 }
3264 case Builtin::BImemcpy:
3265 case Builtin::BI__builtin_memcpy:
3266 case Builtin::BImempcpy:
3267 case Builtin::BI__builtin_mempcpy: {
3268 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3269 Address Src = EmitPointerWithAlignment(E->getArg(1));
3270 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3271 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3272 E->getArg(0)->getExprLoc(), FD, 0);
3273 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3274 E->getArg(1)->getExprLoc(), FD, 1);
3275 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3276 if (BuiltinID == Builtin::BImempcpy ||
3277 BuiltinID == Builtin::BI__builtin_mempcpy)
3278 return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
3279 Dest.getPointer(), SizeVal));
3280 else
3281 return RValue::get(Dest.getPointer());
3282 }
3283
3284 case Builtin::BI__builtin_memcpy_inline: {
3285 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3286 Address Src = EmitPointerWithAlignment(E->getArg(1));
3287 uint64_t Size =
3288 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3289 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3290 E->getArg(0)->getExprLoc(), FD, 0);
3291 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3292 E->getArg(1)->getExprLoc(), FD, 1);
3293 Builder.CreateMemCpyInline(Dest, Src, Size);
3294 return RValue::get(nullptr);
3295 }
3296
3297 case Builtin::BI__builtin_char_memchr:
3298 BuiltinID = Builtin::BI__builtin_memchr;
3299 break;
3300
3301 case Builtin::BI__builtin___memcpy_chk: {
3302 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3303 Expr::EvalResult SizeResult, DstSizeResult;
3304 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3305 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3306 break;
3307 llvm::APSInt Size = SizeResult.Val.getInt();
3308 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3309 if (Size.ugt(DstSize))
3310 break;
3311 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3312 Address Src = EmitPointerWithAlignment(E->getArg(1));
3313 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3314 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3315 return RValue::get(Dest.getPointer());
3316 }
3317
3318 case Builtin::BI__builtin_objc_memmove_collectable: {
3319 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3320 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3321 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3322 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3323 DestAddr, SrcAddr, SizeVal);
3324 return RValue::get(DestAddr.getPointer());
3325 }
3326
3327 case Builtin::BI__builtin___memmove_chk: {
3328 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3329 Expr::EvalResult SizeResult, DstSizeResult;
3330 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3331 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3332 break;
3333 llvm::APSInt Size = SizeResult.Val.getInt();
3334 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3335 if (Size.ugt(DstSize))
3336 break;
3337 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3338 Address Src = EmitPointerWithAlignment(E->getArg(1));
3339 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3340 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3341 return RValue::get(Dest.getPointer());
3342 }
3343
3344 case Builtin::BImemmove:
3345 case Builtin::BI__builtin_memmove: {
3346 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3347 Address Src = EmitPointerWithAlignment(E->getArg(1));
3348 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3349 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3350 E->getArg(0)->getExprLoc(), FD, 0);
3351 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3352 E->getArg(1)->getExprLoc(), FD, 1);
3353 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3354 return RValue::get(Dest.getPointer());
3355 }
3356 case Builtin::BImemset:
3357 case Builtin::BI__builtin_memset: {
3358 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3359 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3360 Builder.getInt8Ty());
3361 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3362 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3363 E->getArg(0)->getExprLoc(), FD, 0);
3364 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3365 return RValue::get(Dest.getPointer());
3366 }
3367 case Builtin::BI__builtin___memset_chk: {
3368 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3369 Expr::EvalResult SizeResult, DstSizeResult;
3370 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3371 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3372 break;
3373 llvm::APSInt Size = SizeResult.Val.getInt();
3374 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3375 if (Size.ugt(DstSize))
3376 break;
3377 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3378 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3379 Builder.getInt8Ty());
3380 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3381 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3382 return RValue::get(Dest.getPointer());
3383 }
3384 case Builtin::BI__builtin_wmemchr: {
3385 // The MSVC runtime library does not provide a definition of wmemchr, so we
3386 // need an inline implementation.
3387 if (!getTarget().getTriple().isOSMSVCRT())
3388 break;
3389
3390 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3391 Value *Str = EmitScalarExpr(E->getArg(0));
3392 Value *Chr = EmitScalarExpr(E->getArg(1));
3393 Value *Size = EmitScalarExpr(E->getArg(2));
3394
3395 BasicBlock *Entry = Builder.GetInsertBlock();
3396 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
3397 BasicBlock *Next = createBasicBlock("wmemchr.next");
3398 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
3399 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3400 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
3401
3402 EmitBlock(CmpEq);
3403 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
3404 StrPhi->addIncoming(Str, Entry);
3405 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3406 SizePhi->addIncoming(Size, Entry);
3407 CharUnits WCharAlign =
3408 getContext().getTypeAlignInChars(getContext().WCharTy);
3409 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
3410 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
3411 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
3412 Builder.CreateCondBr(StrEqChr, Exit, Next);
3413
3414 EmitBlock(Next);
3415 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
3416 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3417 Value *NextSizeEq0 =
3418 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3419 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
3420 StrPhi->addIncoming(NextStr, Next);
3421 SizePhi->addIncoming(NextSize, Next);
3422
3423 EmitBlock(Exit);
3424 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
3425 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
3426 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
3427 Ret->addIncoming(FoundChr, CmpEq);
3428 return RValue::get(Ret);
3429 }
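The loop built above matches this portable reference (a sketch; wmemchr_ref is an illustrative name, and the inline expansion exists only because the MSVC runtime provides no wmemchr):

#include <cstddef>

const wchar_t *wmemchr_ref(const wchar_t *s, wchar_t c, std::size_t n) {
  for (; n != 0; --n, ++s)  // wmemchr.next: advance and decrement the count
    if (*s == c)            // wmemchr.eq: compare the loaded element
      return s;
  return nullptr;           // count reached zero without a match
}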
3430 case Builtin::BI__builtin_wmemcmp: {
3431 // The MSVC runtime library does not provide a definition of wmemcmp, so we
3432 // need an inline implementation.
3433 if (!getTarget().getTriple().isOSMSVCRT())
3434 break;
3435
3436 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3437
3438 Value *Dst = EmitScalarExpr(E->getArg(0));
3439 Value *Src = EmitScalarExpr(E->getArg(1));
3440 Value *Size = EmitScalarExpr(E->getArg(2));
3441
3442 BasicBlock *Entry = Builder.GetInsertBlock();
3443 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
3444 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
3445 BasicBlock *Next = createBasicBlock("wmemcmp.next");
3446 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
3447 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3448 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
3449
3450 EmitBlock(CmpGT);
3451 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
3452 DstPhi->addIncoming(Dst, Entry);
3453 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
3454 SrcPhi->addIncoming(Src, Entry);
3455 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3456 SizePhi->addIncoming(Size, Entry);
3457 CharUnits WCharAlign =
3458 getContext().getTypeAlignInChars(getContext().WCharTy);
3459 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
3460 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
3461 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
3462 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
3463
3464 EmitBlock(CmpLT);
3465 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
3466 Builder.CreateCondBr(DstLtSrc, Exit, Next);
3467
3468 EmitBlock(Next);
3469 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
3470 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
3471 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3472 Value *NextSizeEq0 =
3473 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3474 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
3475 DstPhi->addIncoming(NextDst, Next);
3476 SrcPhi->addIncoming(NextSrc, Next);
3477 SizePhi->addIncoming(NextSize, Next);
3478
3479 EmitBlock(Exit);
3480 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
3481 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
3482 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
3483 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
3484 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
3485 return RValue::get(Ret);
3486 }
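Likewise for wmemcmp, a reference sketch of the emitted compare loop (wmemcmp_ref is an illustrative name; elements are compared as unsigned values, matching the ICmpUGT/ICmpULT above, and on MSVC targets wchar_t is already an unsigned 16-bit type):

#include <cstddef>

int wmemcmp_ref(const wchar_t *a, const wchar_t *b, std::size_t n) {
  for (; n != 0; --n, ++a, ++b) {
    if ((unsigned)*a > (unsigned)*b) return 1;
    if ((unsigned)*a < (unsigned)*b) return -1;
  }
  return 0;
}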
3487 case Builtin::BI__builtin_dwarf_cfa: {
3488 // The offset in bytes from the first argument to the CFA.
3489 //
3490 // Why on earth is this in the frontend? Is there any reason at
3491 // all that the backend can't reasonably determine this while
3492 // lowering llvm.eh.dwarf.cfa()?
3493 //
3494 // TODO: If there's a satisfactory reason, add a target hook for
3495 // this instead of hard-coding 0, which is correct for most targets.
3496 int32_t Offset = 0;
3497
3498 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
3499 return RValue::get(Builder.CreateCall(F,
3500 llvm::ConstantInt::get(Int32Ty, Offset)));
3501 }
3502 case Builtin::BI__builtin_return_address: {
3503 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3504 getContext().UnsignedIntTy);
3505 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3506 return RValue::get(Builder.CreateCall(F, Depth));
3507 }
3508 case Builtin::BI_ReturnAddress: {
3509 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3510 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
3511 }
3512 case Builtin::BI__builtin_frame_address: {
3513 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3514 getContext().UnsignedIntTy);
3515 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
3516 return RValue::get(Builder.CreateCall(F, Depth));
3517 }
3518 case Builtin::BI__builtin_extract_return_addr: {
3519 Value *Address = EmitScalarExpr(E->getArg(0));
3520 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
3521 return RValue::get(Result);
3522 }
3523 case Builtin::BI__builtin_frob_return_addr: {
3524 Value *Address = EmitScalarExpr(E->getArg(0));
3525 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
3526 return RValue::get(Result);
3527 }
3528 case Builtin::BI__builtin_dwarf_sp_column: {
3529 llvm::IntegerType *Ty
3530 = cast<llvm::IntegerType>(ConvertType(E->getType()));
3531 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
3532 if (Column == -1) {
3533 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
3534 return RValue::get(llvm::UndefValue::get(Ty));
3535 }
3536 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
3537 }
3538 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
3539 Value *Address = EmitScalarExpr(E->getArg(0));
3540 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
3541 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
3542 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
3543 }
3544 case Builtin::BI__builtin_eh_return: {
3545 Value *Int = EmitScalarExpr(E->getArg(0));
3546 Value *Ptr = EmitScalarExpr(E->getArg(1));
3547
3548 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
 3549 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
 3550        "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3551 Function *F =
3552 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
3553 : Intrinsic::eh_return_i64);
3554 Builder.CreateCall(F, {Int, Ptr});
3555 Builder.CreateUnreachable();
3556
3557 // We do need to preserve an insertion point.
3558 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
3559
3560 return RValue::get(nullptr);
3561 }
3562 case Builtin::BI__builtin_unwind_init: {
3563 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
3564 return RValue::get(Builder.CreateCall(F));
3565 }
3566 case Builtin::BI__builtin_extend_pointer: {
3567 // Extends a pointer to the size of an _Unwind_Word, which is
3568 // uint64_t on all platforms. Generally this gets poked into a
3569 // register and eventually used as an address, so if the
3570 // addressing registers are wider than pointers and the platform
3571 // doesn't implicitly ignore high-order bits when doing
3572 // addressing, we need to make sure we zext / sext based on
3573 // the platform's expectations.
3574 //
3575 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
3576
3577 // Cast the pointer to intptr_t.
3578 Value *Ptr = EmitScalarExpr(E->getArg(0));
3579 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
3580
3581 // If that's 64 bits, we're done.
3582 if (IntPtrTy->getBitWidth() == 64)
3583 return RValue::get(Result);
3584
3585 // Otherwise, ask the codegen data what to do.
3586 if (getTargetHooks().extendPointerWithSExt())
3587 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
3588 else
3589 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
3590 }
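// Illustrative sketch (not part of CGBuiltin.cpp): a hypothetical caller of the
// builtin handled above. On LP64 targets the result is just the pointer bits;
// on 32-bit targets the upper half comes from zext or sext, whichever the
// target hook requests.
unsigned long long as_unwind_word(void *p) {
  return __builtin_extend_pointer(p);   // widened to a 64-bit _Unwind_Word
}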
3591 case Builtin::BI__builtin_setjmp: {
3592 // Buffer is a void**.
3593 Address Buf = EmitPointerWithAlignment(E->getArg(0));
3594
3595 // Store the frame pointer to the setjmp buffer.
3596 Value *FrameAddr = Builder.CreateCall(
3597 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
3598 ConstantInt::get(Int32Ty, 0));
3599 Builder.CreateStore(FrameAddr, Buf);
3600
3601 // Store the stack pointer to the setjmp buffer.
3602 Value *StackAddr =
3603 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
3604 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
3605 Builder.CreateStore(StackAddr, StackSaveSlot);
3606
3607 // Call LLVM's EH setjmp, which is lightweight.
3608 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
3609 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3610 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
3611 }
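// Illustrative sketch (not part of CGBuiltin.cpp): the five-word buffer layout
// this lowering assumes, written as hypothetical user code. Slot 0 holds the
// frame pointer and slot 2 the saved stack pointer (stored above); slot 1 is
// reserved for the resume address that llvm.eh.sjlj.setjmp fills in.
int try_with_builtin_setjmp(void) {
  void *buf[5];
  if (__builtin_setjmp(buf))
    return 1;                  // resumed here via __builtin_longjmp(buf, 1)
  return 0;
}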
3612 case Builtin::BI__builtin_longjmp: {
3613 Value *Buf = EmitScalarExpr(E->getArg(0));
3614 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3615
3616 // Call LLVM's EH longjmp, which is lightweight.
3617 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
3618
3619 // longjmp doesn't return; mark this as unreachable.
3620 Builder.CreateUnreachable();
3621
3622 // We do need to preserve an insertion point.
3623 EmitBlock(createBasicBlock("longjmp.cont"));
3624
3625 return RValue::get(nullptr);
3626 }
3627 case Builtin::BI__builtin_launder: {
3628 const Expr *Arg = E->getArg(0);
3629 QualType ArgTy = Arg->getType()->getPointeeType();
3630 Value *Ptr = EmitScalarExpr(Arg);
3631 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
3632 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
3633
3634 return RValue::get(Ptr);
3635 }
3636 case Builtin::BI__sync_fetch_and_add:
3637 case Builtin::BI__sync_fetch_and_sub:
3638 case Builtin::BI__sync_fetch_and_or:
3639 case Builtin::BI__sync_fetch_and_and:
3640 case Builtin::BI__sync_fetch_and_xor:
3641 case Builtin::BI__sync_fetch_and_nand:
3642 case Builtin::BI__sync_add_and_fetch:
3643 case Builtin::BI__sync_sub_and_fetch:
3644 case Builtin::BI__sync_and_and_fetch:
3645 case Builtin::BI__sync_or_and_fetch:
3646 case Builtin::BI__sync_xor_and_fetch:
3647 case Builtin::BI__sync_nand_and_fetch:
3648 case Builtin::BI__sync_val_compare_and_swap:
3649 case Builtin::BI__sync_bool_compare_and_swap:
3650 case Builtin::BI__sync_lock_test_and_set:
3651 case Builtin::BI__sync_lock_release:
3652 case Builtin::BI__sync_swap:
3653 llvm_unreachable("Shouldn't make it through sema");
3654 case Builtin::BI__sync_fetch_and_add_1:
3655 case Builtin::BI__sync_fetch_and_add_2:
3656 case Builtin::BI__sync_fetch_and_add_4:
3657 case Builtin::BI__sync_fetch_and_add_8:
3658 case Builtin::BI__sync_fetch_and_add_16:
3659 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
3660 case Builtin::BI__sync_fetch_and_sub_1:
3661 case Builtin::BI__sync_fetch_and_sub_2:
3662 case Builtin::BI__sync_fetch_and_sub_4:
3663 case Builtin::BI__sync_fetch_and_sub_8:
3664 case Builtin::BI__sync_fetch_and_sub_16:
3665 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
3666 case Builtin::BI__sync_fetch_and_or_1:
3667 case Builtin::BI__sync_fetch_and_or_2:
3668 case Builtin::BI__sync_fetch_and_or_4:
3669 case Builtin::BI__sync_fetch_and_or_8:
3670 case Builtin::BI__sync_fetch_and_or_16:
3671 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
3672 case Builtin::BI__sync_fetch_and_and_1:
3673 case Builtin::BI__sync_fetch_and_and_2:
3674 case Builtin::BI__sync_fetch_and_and_4:
3675 case Builtin::BI__sync_fetch_and_and_8:
3676 case Builtin::BI__sync_fetch_and_and_16:
3677 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
3678 case Builtin::BI__sync_fetch_and_xor_1:
3679 case Builtin::BI__sync_fetch_and_xor_2:
3680 case Builtin::BI__sync_fetch_and_xor_4:
3681 case Builtin::BI__sync_fetch_and_xor_8:
3682 case Builtin::BI__sync_fetch_and_xor_16:
3683 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
3684 case Builtin::BI__sync_fetch_and_nand_1:
3685 case Builtin::BI__sync_fetch_and_nand_2:
3686 case Builtin::BI__sync_fetch_and_nand_4:
3687 case Builtin::BI__sync_fetch_and_nand_8:
3688 case Builtin::BI__sync_fetch_and_nand_16:
3689 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
3690
3691 // Clang extensions: not overloaded yet.
3692 case Builtin::BI__sync_fetch_and_min:
3693 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3694 case Builtin::BI__sync_fetch_and_max:
3695 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3696 case Builtin::BI__sync_fetch_and_umin:
3697 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3698 case Builtin::BI__sync_fetch_and_umax:
3699 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3700
3701 case Builtin::BI__sync_add_and_fetch_1:
3702 case Builtin::BI__sync_add_and_fetch_2:
3703 case Builtin::BI__sync_add_and_fetch_4:
3704 case Builtin::BI__sync_add_and_fetch_8:
3705 case Builtin::BI__sync_add_and_fetch_16:
3706 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3707 llvm::Instruction::Add);
3708 case Builtin::BI__sync_sub_and_fetch_1:
3709 case Builtin::BI__sync_sub_and_fetch_2:
3710 case Builtin::BI__sync_sub_and_fetch_4:
3711 case Builtin::BI__sync_sub_and_fetch_8:
3712 case Builtin::BI__sync_sub_and_fetch_16:
3713 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3714 llvm::Instruction::Sub);
3715 case Builtin::BI__sync_and_and_fetch_1:
3716 case Builtin::BI__sync_and_and_fetch_2:
3717 case Builtin::BI__sync_and_and_fetch_4:
3718 case Builtin::BI__sync_and_and_fetch_8:
3719 case Builtin::BI__sync_and_and_fetch_16:
3720 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3721 llvm::Instruction::And);
3722 case Builtin::BI__sync_or_and_fetch_1:
3723 case Builtin::BI__sync_or_and_fetch_2:
3724 case Builtin::BI__sync_or_and_fetch_4:
3725 case Builtin::BI__sync_or_and_fetch_8:
3726 case Builtin::BI__sync_or_and_fetch_16:
3727 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3728 llvm::Instruction::Or);
3729 case Builtin::BI__sync_xor_and_fetch_1:
3730 case Builtin::BI__sync_xor_and_fetch_2:
3731 case Builtin::BI__sync_xor_and_fetch_4:
3732 case Builtin::BI__sync_xor_and_fetch_8:
3733 case Builtin::BI__sync_xor_and_fetch_16:
3734 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3735 llvm::Instruction::Xor);
3736 case Builtin::BI__sync_nand_and_fetch_1:
3737 case Builtin::BI__sync_nand_and_fetch_2:
3738 case Builtin::BI__sync_nand_and_fetch_4:
3739 case Builtin::BI__sync_nand_and_fetch_8:
3740 case Builtin::BI__sync_nand_and_fetch_16:
3741 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3742 llvm::Instruction::And, true);
3743
3744 case Builtin::BI__sync_val_compare_and_swap_1:
3745 case Builtin::BI__sync_val_compare_and_swap_2:
3746 case Builtin::BI__sync_val_compare_and_swap_4:
3747 case Builtin::BI__sync_val_compare_and_swap_8:
3748 case Builtin::BI__sync_val_compare_and_swap_16:
3749 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3750
3751 case Builtin::BI__sync_bool_compare_and_swap_1:
3752 case Builtin::BI__sync_bool_compare_and_swap_2:
3753 case Builtin::BI__sync_bool_compare_and_swap_4:
3754 case Builtin::BI__sync_bool_compare_and_swap_8:
3755 case Builtin::BI__sync_bool_compare_and_swap_16:
3756 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3757
3758 case Builtin::BI__sync_swap_1:
3759 case Builtin::BI__sync_swap_2:
3760 case Builtin::BI__sync_swap_4:
3761 case Builtin::BI__sync_swap_8:
3762 case Builtin::BI__sync_swap_16:
3763 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3764
3765 case Builtin::BI__sync_lock_test_and_set_1:
3766 case Builtin::BI__sync_lock_test_and_set_2:
3767 case Builtin::BI__sync_lock_test_and_set_4:
3768 case Builtin::BI__sync_lock_test_and_set_8:
3769 case Builtin::BI__sync_lock_test_and_set_16:
3770 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3771
3772 case Builtin::BI__sync_lock_release_1:
3773 case Builtin::BI__sync_lock_release_2:
3774 case Builtin::BI__sync_lock_release_4:
3775 case Builtin::BI__sync_lock_release_8:
3776 case Builtin::BI__sync_lock_release_16: {
3777 Value *Ptr = EmitScalarExpr(E->getArg(0));
3778 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3779 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3780 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3781 StoreSize.getQuantity() * 8);
3782 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3783 llvm::StoreInst *Store =
3784 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3785 StoreSize);
3786 Store->setAtomic(llvm::AtomicOrdering::Release);
3787 return RValue::get(nullptr);
3788 }
3789
3790 case Builtin::BI__sync_synchronize: {
3791 // We assume this is supposed to correspond to a C++0x-style
3792 // sequentially-consistent fence (i.e. this is only usable for
3793 // synchronization, not device I/O or anything like that). This intrinsic
3794 // is really badly designed in the sense that in theory, there isn't
3795 // any way to safely use it... but in practice, it mostly works
3796 // to use it with non-atomic loads and stores to get acquire/release
3797 // semantics.
3798 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3799 return RValue::get(nullptr);
3800 }
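// Illustrative sketch (not part of CGBuiltin.cpp): the legacy barrier handled
// above is emitted as the same IR as a C11 seq_cst thread fence.
void full_barrier(void) {
  __sync_synchronize();                      // -> fence seq_cst
}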
3801
3802 case Builtin::BI__builtin_nontemporal_load:
3803 return RValue::get(EmitNontemporalLoad(*this, E));
3804 case Builtin::BI__builtin_nontemporal_store:
3805 return RValue::get(EmitNontemporalStore(*this, E));
3806 case Builtin::BI__c11_atomic_is_lock_free:
3807 case Builtin::BI__atomic_is_lock_free: {
3808 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3809 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3810 // _Atomic(T) is always properly-aligned.
3811 const char *LibCallName = "__atomic_is_lock_free";
3812 CallArgList Args;
3813 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3814 getContext().getSizeType());
3815 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3816 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3817 getContext().VoidPtrTy);
3818 else
3819 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3820 getContext().VoidPtrTy);
3821 const CGFunctionInfo &FuncInfo =
3822 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3823 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3824 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3825 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3826 ReturnValueSlot(), Args);
3827 }
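// Illustrative sketch (not part of CGBuiltin.cpp): hypothetical calls showing
// the libcall described above. The __c11 form takes only a size, and a null
// pointer is emitted for it because _Atomic(T) is assumed suitably aligned.
bool lock_free_8(void *obj) {
  return __atomic_is_lock_free(8, obj);          // lowers to the libcall above
}
bool lock_free_atomic_int(void) {
  return __c11_atomic_is_lock_free(sizeof(int)); // ptr argument emitted as null
}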
3828
3829 case Builtin::BI__atomic_test_and_set: {
3830 // Look at the argument type to determine whether this is a volatile
3831 // operation. The parameter type is always volatile.
3832 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3833 bool Volatile =
3834 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3835
3836 Value *Ptr = EmitScalarExpr(E->getArg(0));
3837 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3838 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3839 Value *NewVal = Builder.getInt8(1);
3840 Value *Order = EmitScalarExpr(E->getArg(1));
3841 if (isa<llvm::ConstantInt>(Order)) {
3842 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3843 AtomicRMWInst *Result = nullptr;
3844 switch (ord) {
3845 case 0: // memory_order_relaxed
3846 default: // invalid order
3847 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3848 llvm::AtomicOrdering::Monotonic);
3849 break;
3850 case 1: // memory_order_consume
3851 case 2: // memory_order_acquire
3852 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3853 llvm::AtomicOrdering::Acquire);
3854 break;
3855 case 3: // memory_order_release
3856 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3857 llvm::AtomicOrdering::Release);
3858 break;
3859 case 4: // memory_order_acq_rel
3860
3861 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3862 llvm::AtomicOrdering::AcquireRelease);
3863 break;
3864 case 5: // memory_order_seq_cst
3865 Result = Builder.CreateAtomicRMW(
3866 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3867 llvm::AtomicOrdering::SequentiallyConsistent);
3868 break;
3869 }
3870 Result->setVolatile(Volatile);
3871 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3872 }
3873
3874 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3875
3876 llvm::BasicBlock *BBs[5] = {
3877 createBasicBlock("monotonic", CurFn),
3878 createBasicBlock("acquire", CurFn),
3879 createBasicBlock("release", CurFn),
3880 createBasicBlock("acqrel", CurFn),
3881 createBasicBlock("seqcst", CurFn)
3882 };
3883 llvm::AtomicOrdering Orders[5] = {
3884 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3885 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3886 llvm::AtomicOrdering::SequentiallyConsistent};
3887
3888 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3889 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3890
3891 Builder.SetInsertPoint(ContBB);
3892 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3893
3894 for (unsigned i = 0; i < 5; ++i) {
3895 Builder.SetInsertPoint(BBs[i]);
3896 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3897 Ptr, NewVal, Orders[i]);
3898 RMW->setVolatile(Volatile);
3899 Result->addIncoming(RMW, BBs[i]);
3900 Builder.CreateBr(ContBB);
3901 }
3902
3903 SI->addCase(Builder.getInt32(0), BBs[0]);
3904 SI->addCase(Builder.getInt32(1), BBs[1]);
3905 SI->addCase(Builder.getInt32(2), BBs[1]);
3906 SI->addCase(Builder.getInt32(3), BBs[2]);
3907 SI->addCase(Builder.getInt32(4), BBs[3]);
3908 SI->addCase(Builder.getInt32(5), BBs[4]);
3909
3910 Builder.SetInsertPoint(ContBB);
3911 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3912 }
3913
3914 case Builtin::BI__atomic_clear: {
3915 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3916 bool Volatile =
3917 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3918
3919 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3920 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3921 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3922 Value *NewVal = Builder.getInt8(0);
3923 Value *Order = EmitScalarExpr(E->getArg(1));
3924 if (isa<llvm::ConstantInt>(Order)) {
3925 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3926 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3927 switch (ord) {
3928 case 0: // memory_order_relaxed
3929 default: // invalid order
3930 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3931 break;
3932 case 3: // memory_order_release
3933 Store->setOrdering(llvm::AtomicOrdering::Release);
3934 break;
3935 case 5: // memory_order_seq_cst
3936 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3937 break;
3938 }
3939 return RValue::get(nullptr);
3940 }
3941
3942 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3943
3944 llvm::BasicBlock *BBs[3] = {
3945 createBasicBlock("monotonic", CurFn),
3946 createBasicBlock("release", CurFn),
3947 createBasicBlock("seqcst", CurFn)
3948 };
3949 llvm::AtomicOrdering Orders[3] = {
3950 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3951 llvm::AtomicOrdering::SequentiallyConsistent};
3952
3953 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3954 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3955
3956 for (unsigned i = 0; i < 3; ++i) {
3957 Builder.SetInsertPoint(BBs[i]);
3958 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3959 Store->setOrdering(Orders[i]);
3960 Builder.CreateBr(ContBB);
3961 }
3962
3963 SI->addCase(Builder.getInt32(0), BBs[0]);
3964 SI->addCase(Builder.getInt32(3), BBs[1]);
3965 SI->addCase(Builder.getInt32(5), BBs[2]);
3966
3967 Builder.SetInsertPoint(ContBB);
3968 return RValue::get(nullptr);
3969 }
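// Illustrative sketch (not part of CGBuiltin.cpp): a hypothetical spinlock over
// the two builtins handled above. With the constant orders shown, the front end
// takes the fast paths and emits a single xchg / release store; a runtime order
// would instead go through the switch-based dispatch.
void spin_lock(volatile char *flag) {
  while (__atomic_test_and_set(flag, __ATOMIC_ACQUIRE))
    ;                                        // spin until we observe 0
}
void spin_unlock(volatile char *flag) {
  __atomic_clear(flag, __ATOMIC_RELEASE);
}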
3970
3971 case Builtin::BI__atomic_thread_fence:
3972 case Builtin::BI__atomic_signal_fence:
3973 case Builtin::BI__c11_atomic_thread_fence:
3974 case Builtin::BI__c11_atomic_signal_fence: {
3975 llvm::SyncScope::ID SSID;
3976 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3977 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3978 SSID = llvm::SyncScope::SingleThread;
3979 else
3980 SSID = llvm::SyncScope::System;
3981 Value *Order = EmitScalarExpr(E->getArg(0));
3982 if (isa<llvm::ConstantInt>(Order)) {
3983 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3984 switch (ord) {
3985 case 0: // memory_order_relaxed
3986 default: // invalid order
3987 break;
3988 case 1: // memory_order_consume
3989 case 2: // memory_order_acquire
3990 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3991 break;
3992 case 3: // memory_order_release
3993 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3994 break;
3995 case 4: // memory_order_acq_rel
3996 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3997 break;
3998 case 5: // memory_order_seq_cst
3999 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4000 break;
4001 }
4002 return RValue::get(nullptr);
4003 }
4004
4005 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
4006 AcquireBB = createBasicBlock("acquire", CurFn);
4007 ReleaseBB = createBasicBlock("release", CurFn);
4008 AcqRelBB = createBasicBlock("acqrel", CurFn);
4009 SeqCstBB = createBasicBlock("seqcst", CurFn);
4010 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4011
4012 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4013 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
4014
4015 Builder.SetInsertPoint(AcquireBB);
4016 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4017 Builder.CreateBr(ContBB);
4018 SI->addCase(Builder.getInt32(1), AcquireBB);
4019 SI->addCase(Builder.getInt32(2), AcquireBB);
4020
4021 Builder.SetInsertPoint(ReleaseBB);
4022 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4023 Builder.CreateBr(ContBB);
4024 SI->addCase(Builder.getInt32(3), ReleaseBB);
4025
4026 Builder.SetInsertPoint(AcqRelBB);
4027 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4028 Builder.CreateBr(ContBB);
4029 SI->addCase(Builder.getInt32(4), AcqRelBB);
4030
4031 Builder.SetInsertPoint(SeqCstBB);
4032 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4033 Builder.CreateBr(ContBB);
4034 SI->addCase(Builder.getInt32(5), SeqCstBB);
4035
4036 Builder.SetInsertPoint(ContBB);
4037 return RValue::get(nullptr);
4038 }
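// Illustrative sketch (not part of CGBuiltin.cpp): constant-order calls take
// the fast path above and fold to a single fence instruction.
void fences(void) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);   // -> fence seq_cst (system scope)
  __atomic_signal_fence(__ATOMIC_ACQUIRE);   // -> fence syncscope("singlethread") acquire
}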
4039
4040 case Builtin::BI__builtin_signbit:
4041 case Builtin::BI__builtin_signbitf:
4042 case Builtin::BI__builtin_signbitl: {
4043 return RValue::get(
4044 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
4045 ConvertType(E->getType())));
4046 }
4047 case Builtin::BI__warn_memset_zero_len:
4048 return RValue::getIgnored();
4049 case Builtin::BI__annotation: {
4050 // Re-encode each wide string to UTF8 and make an MDString.
4051 SmallVector<Metadata *, 1> Strings;
4052 for (const Expr *Arg : E->arguments()) {
4053 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
4054 assert(Str->getCharByteWidth() == 2);
4055 StringRef WideBytes = Str->getBytes();
4056 std::string StrUtf8;
4057 if (!convertUTF16ToUTF8String(
4058 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
4059 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
4060 continue;
4061 }
4062 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
4063 }
4064
4065 // Build an MDTuple of MDStrings and emit the intrinsic call.
4066 llvm::Function *F =
4067 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
4068 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
4069 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
4070 return RValue::getIgnored();
4071 }
4072 case Builtin::BI__builtin_annotation: {
4073 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
4074 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
4075 AnnVal->getType());
4076
4077 // Get the annotation string, go through casts. Sema requires this to be a
4078 // non-wide string literal, potentially cast, so the cast<> is safe.
4079 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
4080 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
4081 return RValue::get(
4082 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
4083 }
4084 case Builtin::BI__builtin_addcb:
4085 case Builtin::BI__builtin_addcs:
4086 case Builtin::BI__builtin_addc:
4087 case Builtin::BI__builtin_addcl:
4088 case Builtin::BI__builtin_addcll:
4089 case Builtin::BI__builtin_subcb:
4090 case Builtin::BI__builtin_subcs:
4091 case Builtin::BI__builtin_subc:
4092 case Builtin::BI__builtin_subcl:
4093 case Builtin::BI__builtin_subcll: {
4094
4095 // We translate all of these builtins from expressions of the form:
4096 // int x = ..., y = ..., carryin = ..., carryout, result;
4097 // result = __builtin_addc(x, y, carryin, &carryout);
4098 //
4099 // to LLVM IR of the form:
4100 //
4101 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
4102 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
4103 // %carry1 = extractvalue {i32, i1} %tmp1, 1
4104 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
4105 // i32 %carryin)
4106 // %result = extractvalue {i32, i1} %tmp2, 0
4107 // %carry2 = extractvalue {i32, i1} %tmp2, 1
4108 // %tmp3 = or i1 %carry1, %carry2
4109 // %tmp4 = zext i1 %tmp3 to i32
4110 // store i32 %tmp4, i32* %carryout
4111
4112 // Scalarize our inputs.
4113 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4114 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4115 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
4116 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
4117
4118 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
4119 llvm::Intrinsic::ID IntrinsicId;
4120 switch (BuiltinID) {
4121 default: llvm_unreachable("Unknown multiprecision builtin id.");
4122 case Builtin::BI__builtin_addcb:
4123 case Builtin::BI__builtin_addcs:
4124 case Builtin::BI__builtin_addc:
4125 case Builtin::BI__builtin_addcl:
4126 case Builtin::BI__builtin_addcll:
4127 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4128 break;
4129 case Builtin::BI__builtin_subcb:
4130 case Builtin::BI__builtin_subcs:
4131 case Builtin::BI__builtin_subc:
4132 case Builtin::BI__builtin_subcl:
4133 case Builtin::BI__builtin_subcll:
4134 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4135 break;
4136 }
4137
4138 // Construct our resulting LLVM IR expression.
4139 llvm::Value *Carry1;
4140 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
4141 X, Y, Carry1);
4142 llvm::Value *Carry2;
4143 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
4144 Sum1, Carryin, Carry2);
4145 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
4146 X->getType());
4147 Builder.CreateStore(CarryOut, CarryOutPtr);
4148 return RValue::get(Sum2);
4149 }
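// Illustrative sketch (not part of CGBuiltin.cpp): hypothetical user code for
// the carry chaining described in the comment above, adding a 64-bit value as
// two 32-bit limbs on a target where unsigned is 32 bits.
void add64(unsigned alo, unsigned ahi, unsigned blo, unsigned bhi,
           unsigned *lo, unsigned *hi) {
  unsigned carry, ignored;
  *lo = __builtin_addc(alo, blo, 0u, &carry);       // carry out of the low limb
  *hi = __builtin_addc(ahi, bhi, carry, &ignored);  // fold the carry back in
}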
4150
4151 case Builtin::BI__builtin_add_overflow:
4152 case Builtin::BI__builtin_sub_overflow:
4153 case Builtin::BI__builtin_mul_overflow: {
4154 const clang::Expr *LeftArg = E->getArg(0);
4155 const clang::Expr *RightArg = E->getArg(1);
4156 const clang::Expr *ResultArg = E->getArg(2);
4157
4158 clang::QualType ResultQTy =
4159 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
4160
4161 WidthAndSignedness LeftInfo =
4162 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
4163 WidthAndSignedness RightInfo =
4164 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
4165 WidthAndSignedness ResultInfo =
4166 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
4167
4168 // Handle mixed-sign multiplication as a special case, because adding
4169 // runtime or backend support for our generic irgen would be too expensive.
4170 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
4171 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
4172 RightInfo, ResultArg, ResultQTy,
4173 ResultInfo);
4174
4175 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
4176 ResultInfo))
4177 return EmitCheckedUnsignedMultiplySignedResult(
4178 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
4179 ResultInfo);
4180
4181 WidthAndSignedness EncompassingInfo =
4182 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
4183
4184 llvm::Type *EncompassingLLVMTy =
4185 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
4186
4187 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
4188
4189 llvm::Intrinsic::ID IntrinsicId;
4190 switch (BuiltinID) {
4191 default:
4192 llvm_unreachable("Unknown overflow builtin id.");
4193 case Builtin::BI__builtin_add_overflow:
4194 IntrinsicId = EncompassingInfo.Signed
4195 ? llvm::Intrinsic::sadd_with_overflow
4196 : llvm::Intrinsic::uadd_with_overflow;
4197 break;
4198 case Builtin::BI__builtin_sub_overflow:
4199 IntrinsicId = EncompassingInfo.Signed
4200 ? llvm::Intrinsic::ssub_with_overflow
4201 : llvm::Intrinsic::usub_with_overflow;
4202 break;
4203 case Builtin::BI__builtin_mul_overflow:
4204 IntrinsicId = EncompassingInfo.Signed
4205 ? llvm::Intrinsic::smul_with_overflow
4206 : llvm::Intrinsic::umul_with_overflow;
4207 break;
4208 }
4209
4210 llvm::Value *Left = EmitScalarExpr(LeftArg);
4211 llvm::Value *Right = EmitScalarExpr(RightArg);
4212 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
4213
4214 // Extend each operand to the encompassing type.
4215 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
4216 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
4217
4218 // Perform the operation on the extended values.
4219 llvm::Value *Overflow, *Result;
4220 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
4221
4222 if (EncompassingInfo.Width > ResultInfo.Width) {
4223 // The encompassing type is wider than the result type, so we need to
4224 // truncate it.
4225 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
4226
4227 // To see if the truncation caused an overflow, we will extend
4228 // the result and then compare it to the original result.
4229 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
4230 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
4231 llvm::Value *TruncationOverflow =
4232 Builder.CreateICmpNE(Result, ResultTruncExt);
4233
4234 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
4235 Result = ResultTrunc;
4236 }
4237
4238 // Finally, store the result using the pointer.
4239 bool isVolatile =
4240 ResultArg->getType()->getPointeeType().isVolatileQualified();
4241 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
4242
4243 return RValue::get(Overflow);
4244 }
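// Illustrative sketch (not part of CGBuiltin.cpp): hypothetical use of the
// truncation-overflow path above. 100 + 100 fits the 32-bit encompassing type,
// but truncating 200 to signed char and sign-extending it back gives -56, so
// the comparison flags overflow and the wrapped value -56 is stored.
bool add_into_schar(int a, int b, signed char *out) {
  return __builtin_add_overflow(a, b, out);  // true when the result is truncated
}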
4245
4246 case Builtin::BI__builtin_uadd_overflow:
4247 case Builtin::BI__builtin_uaddl_overflow:
4248 case Builtin::BI__builtin_uaddll_overflow:
4249 case Builtin::BI__builtin_usub_overflow:
4250 case Builtin::BI__builtin_usubl_overflow:
4251 case Builtin::BI__builtin_usubll_overflow:
4252 case Builtin::BI__builtin_umul_overflow:
4253 case Builtin::BI__builtin_umull_overflow:
4254 case Builtin::BI__builtin_umulll_overflow:
4255 case Builtin::BI__builtin_sadd_overflow:
4256 case Builtin::BI__builtin_saddl_overflow:
4257 case Builtin::BI__builtin_saddll_overflow:
4258 case Builtin::BI__builtin_ssub_overflow:
4259 case Builtin::BI__builtin_ssubl_overflow:
4260 case Builtin::BI__builtin_ssubll_overflow:
4261 case Builtin::BI__builtin_smul_overflow:
4262 case Builtin::BI__builtin_smull_overflow:
4263 case Builtin::BI__builtin_smulll_overflow: {
4264
4265 // We translate all of these builtins directly to the relevant llvm IR node.
4266
4267 // Scalarize our inputs.
4268 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4269 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4270 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
4271
4272 // Decide which of the overflow intrinsics we are lowering to:
4273 llvm::Intrinsic::ID IntrinsicId;
4274 switch (BuiltinID) {
4275 default: llvm_unreachable("Unknown overflow builtin id.");
4276 case Builtin::BI__builtin_uadd_overflow:
4277 case Builtin::BI__builtin_uaddl_overflow:
4278 case Builtin::BI__builtin_uaddll_overflow:
4279 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4280 break;
4281 case Builtin::BI__builtin_usub_overflow:
4282 case Builtin::BI__builtin_usubl_overflow:
4283 case Builtin::BI__builtin_usubll_overflow:
4284 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4285 break;
4286 case Builtin::BI__builtin_umul_overflow:
4287 case Builtin::BI__builtin_umull_overflow:
4288 case Builtin::BI__builtin_umulll_overflow:
4289 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
4290 break;
4291 case Builtin::BI__builtin_sadd_overflow:
4292 case Builtin::BI__builtin_saddl_overflow:
4293 case Builtin::BI__builtin_saddll_overflow:
4294 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
4295 break;
4296 case Builtin::BI__builtin_ssub_overflow:
4297 case Builtin::BI__builtin_ssubl_overflow:
4298 case Builtin::BI__builtin_ssubll_overflow:
4299 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
4300 break;
4301 case Builtin::BI__builtin_smul_overflow:
4302 case Builtin::BI__builtin_smull_overflow:
4303 case Builtin::BI__builtin_smulll_overflow:
4304 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
4305 break;
4306 }
4307
4308
4309 llvm::Value *Carry;
4310 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
4311 Builder.CreateStore(Sum, SumOutPtr);
4312
4313 return RValue::get(Carry);
4314 }
4315 case Builtin::BI__builtin_addressof:
4316 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4317 case Builtin::BI__builtin_operator_new:
4318 return EmitBuiltinNewDeleteCall(
4319 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
4320 case Builtin::BI__builtin_operator_delete:
4321 return EmitBuiltinNewDeleteCall(
4322 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
4323
4324 case Builtin::BI__builtin_is_aligned:
4325 return EmitBuiltinIsAligned(E);
4326 case Builtin::BI__builtin_align_up:
4327 return EmitBuiltinAlignTo(E, true);
4328 case Builtin::BI__builtin_align_down:
4329 return EmitBuiltinAlignTo(E, false);
4330
4331 case Builtin::BI__noop:
4332 // __noop always evaluates to an integer literal zero.
4333 return RValue::get(ConstantInt::get(IntTy, 0));
4334 case Builtin::BI__builtin_call_with_static_chain: {
4335 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
4336 const Expr *Chain = E->getArg(1);
4337 return EmitCall(Call->getCallee()->getType(),
4338 EmitCallee(Call->getCallee()), Call, ReturnValue,
4339 EmitScalarExpr(Chain));
4340 }
4341 case Builtin::BI_InterlockedExchange8:
4342 case Builtin::BI_InterlockedExchange16:
4343 case Builtin::BI_InterlockedExchange:
4344 case Builtin::BI_InterlockedExchangePointer:
4345 return RValue::get(
4346 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
4347 case Builtin::BI_InterlockedCompareExchangePointer:
4348 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
4349 llvm::Type *RTy;
4350 llvm::IntegerType *IntType =
4351 IntegerType::get(getLLVMContext(),
4352 getContext().getTypeSize(E->getType()));
4353 llvm::Type *IntPtrType = IntType->getPointerTo();
4354
4355 llvm::Value *Destination =
4356 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
4357
4358 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
4359 RTy = Exchange->getType();
4360 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
4361
4362 llvm::Value *Comparand =
4363 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
4364
4365 auto Ordering =
4366 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
4367 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
4368
4369 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
4370 Ordering, Ordering);
4371 Result->setVolatile(true);
4372
4373 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
4374 0),
4375 RTy));
4376 }
4377 case Builtin::BI_InterlockedCompareExchange8:
4378 case Builtin::BI_InterlockedCompareExchange16:
4379 case Builtin::BI_InterlockedCompareExchange:
4380 case Builtin::BI_InterlockedCompareExchange64:
4381 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
4382 case Builtin::BI_InterlockedIncrement16:
4383 case Builtin::BI_InterlockedIncrement:
4384 return RValue::get(
4385 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
4386 case Builtin::BI_InterlockedDecrement16:
4387 case Builtin::BI_InterlockedDecrement:
4388 return RValue::get(
4389 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
4390 case Builtin::BI_InterlockedAnd8:
4391 case Builtin::BI_InterlockedAnd16:
4392 case Builtin::BI_InterlockedAnd:
4393 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
4394 case Builtin::BI_InterlockedExchangeAdd8:
4395 case Builtin::BI_InterlockedExchangeAdd16:
4396 case Builtin::BI_InterlockedExchangeAdd:
4397 return RValue::get(
4398 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
4399 case Builtin::BI_InterlockedExchangeSub8:
4400 case Builtin::BI_InterlockedExchangeSub16:
4401 case Builtin::BI_InterlockedExchangeSub:
4402 return RValue::get(
4403 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
4404 case Builtin::BI_InterlockedOr8:
4405 case Builtin::BI_InterlockedOr16:
4406 case Builtin::BI_InterlockedOr:
4407 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
4408 case Builtin::BI_InterlockedXor8:
4409 case Builtin::BI_InterlockedXor16:
4410 case Builtin::BI_InterlockedXor:
4411 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
4412
4413 case Builtin::BI_bittest64:
4414 case Builtin::BI_bittest:
4415 case Builtin::BI_bittestandcomplement64:
4416 case Builtin::BI_bittestandcomplement:
4417 case Builtin::BI_bittestandreset64:
4418 case Builtin::BI_bittestandreset:
4419 case Builtin::BI_bittestandset64:
4420 case Builtin::BI_bittestandset:
4421 case Builtin::BI_interlockedbittestandreset:
4422 case Builtin::BI_interlockedbittestandreset64:
4423 case Builtin::BI_interlockedbittestandset64:
4424 case Builtin::BI_interlockedbittestandset:
4425 case Builtin::BI_interlockedbittestandset_acq:
4426 case Builtin::BI_interlockedbittestandset_rel:
4427 case Builtin::BI_interlockedbittestandset_nf:
4428 case Builtin::BI_interlockedbittestandreset_acq:
4429 case Builtin::BI_interlockedbittestandreset_rel:
4430 case Builtin::BI_interlockedbittestandreset_nf:
4431 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
4432
4433 // These builtins exist to emit regular volatile loads and stores not
4434 // affected by the -fms-volatile setting.
4435 case Builtin::BI__iso_volatile_load8:
4436 case Builtin::BI__iso_volatile_load16:
4437 case Builtin::BI__iso_volatile_load32:
4438 case Builtin::BI__iso_volatile_load64:
4439 return RValue::get(EmitISOVolatileLoad(*this, E));
4440 case Builtin::BI__iso_volatile_store8:
4441 case Builtin::BI__iso_volatile_store16:
4442 case Builtin::BI__iso_volatile_store32:
4443 case Builtin::BI__iso_volatile_store64:
4444 return RValue::get(EmitISOVolatileStore(*this, E));
4445
4446 case Builtin::BI__exception_code:
4447 case Builtin::BI_exception_code:
4448 return RValue::get(EmitSEHExceptionCode());
4449 case Builtin::BI__exception_info:
4450 case Builtin::BI_exception_info:
4451 return RValue::get(EmitSEHExceptionInfo());
4452 case Builtin::BI__abnormal_termination:
4453 case Builtin::BI_abnormal_termination:
4454 return RValue::get(EmitSEHAbnormalTermination());
4455 case Builtin::BI_setjmpex:
4456 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4457 E->getArg(0)->getType()->isPointerType())
4458 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4459 break;
4460 case Builtin::BI_setjmp:
4461 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4462 E->getArg(0)->getType()->isPointerType()) {
4463 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
4464 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
4465 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
4466 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4467 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
4468 }
4469 break;
4470
4471 case Builtin::BI__GetExceptionInfo: {
4472 if (llvm::GlobalVariable *GV =
4473 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
4474 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
4475 break;
4476 }
4477
4478 case Builtin::BI__fastfail:
4479 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
4480
4481 case Builtin::BI__builtin_coro_size: {
4482 auto & Context = getContext();
4483 auto SizeTy = Context.getSizeType();
4484 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4485 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
4486 return RValue::get(Builder.CreateCall(F));
4487 }
4488
4489 case Builtin::BI__builtin_coro_id:
4490 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
4491 case Builtin::BI__builtin_coro_promise:
4492 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
4493 case Builtin::BI__builtin_coro_resume:
4494 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
4495 case Builtin::BI__builtin_coro_frame:
4496 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
4497 case Builtin::BI__builtin_coro_noop:
4498 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
4499 case Builtin::BI__builtin_coro_free:
4500 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
4501 case Builtin::BI__builtin_coro_destroy:
4502 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
4503 case Builtin::BI__builtin_coro_done:
4504 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
4505 case Builtin::BI__builtin_coro_alloc:
4506 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
4507 case Builtin::BI__builtin_coro_begin:
4508 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
4509 case Builtin::BI__builtin_coro_end:
4510 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
4511 case Builtin::BI__builtin_coro_suspend:
4512 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
4513 case Builtin::BI__builtin_coro_param:
4514 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
4515
4516 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
4517 case Builtin::BIread_pipe:
4518 case Builtin::BIwrite_pipe: {
4519 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4520 *Arg1 = EmitScalarExpr(E->getArg(1));
4521 CGOpenCLRuntime OpenCLRT(CGM);
4522 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4523 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4524
4525 // Type of the generic packet parameter.
4526 unsigned GenericAS =
4527 getContext().getTargetAddressSpace(LangAS::opencl_generic);
4528 llvm::Type *I8PTy = llvm::PointerType::get(
4529 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
4530
4531 // Determine which overloaded version we should generate the call for.
4532 if (2U == E->getNumArgs()) {
4533 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
4534 : "__write_pipe_2";
4535 // Creating a generic function type to be able to call with any builtin or
4536 // user defined type.
4537 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
4538 llvm::FunctionType *FTy = llvm::FunctionType::get(
4539 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4540 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
4541 return RValue::get(
4542 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4543 {Arg0, BCast, PacketSize, PacketAlign}));
4544 } else {
4545 assert(4 == E->getNumArgs() &&
4546 "Illegal number of parameters to pipe function");
4547 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
4548 : "__write_pipe_4";
4549
4550 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
4551 Int32Ty, Int32Ty};
4552 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
4553 *Arg3 = EmitScalarExpr(E->getArg(3));
4554 llvm::FunctionType *FTy = llvm::FunctionType::get(
4555 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4556 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
4557 // We know the third argument is an integer type, but we may need to cast
4558 // it to i32.
4559 if (Arg2->getType() != Int32Ty)
4560 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
4561 return RValue::get(
4562 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4563 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
4564 }
4565 }
4566 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
4567 // functions
4568 case Builtin::BIreserve_read_pipe:
4569 case Builtin::BIreserve_write_pipe:
4570 case Builtin::BIwork_group_reserve_read_pipe:
4571 case Builtin::BIwork_group_reserve_write_pipe:
4572 case Builtin::BIsub_group_reserve_read_pipe:
4573 case Builtin::BIsub_group_reserve_write_pipe: {
4574 // Composing the mangled name for the function.
4575 const char *Name;
4576 if (BuiltinID == Builtin::BIreserve_read_pipe)
4577 Name = "__reserve_read_pipe";
4578 else if (BuiltinID == Builtin::BIreserve_write_pipe)
4579 Name = "__reserve_write_pipe";
4580 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
4581 Name = "__work_group_reserve_read_pipe";
4582 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
4583 Name = "__work_group_reserve_write_pipe";
4584 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
4585 Name = "__sub_group_reserve_read_pipe";
4586 else
4587 Name = "__sub_group_reserve_write_pipe";
4588
4589 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4590 *Arg1 = EmitScalarExpr(E->getArg(1));
4591 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
4592 CGOpenCLRuntime OpenCLRT(CGM);
4593 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4594 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4595
4596 // Building the generic function prototype.
4597 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
4598 llvm::FunctionType *FTy = llvm::FunctionType::get(
4599 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4600 // We know the second argument is an integer type, but we may need to cast
4601 // it to i32.
4602 if (Arg1->getType() != Int32Ty)
4603 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
4604 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4605 {Arg0, Arg1, PacketSize, PacketAlign}));
4606 }
4607 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
4608 // functions
4609 case Builtin::BIcommit_read_pipe:
4610 case Builtin::BIcommit_write_pipe:
4611 case Builtin::BIwork_group_commit_read_pipe:
4612 case Builtin::BIwork_group_commit_write_pipe:
4613 case Builtin::BIsub_group_commit_read_pipe:
4614 case Builtin::BIsub_group_commit_write_pipe: {
4615 const char *Name;
4616 if (BuiltinID == Builtin::BIcommit_read_pipe)
4617 Name = "__commit_read_pipe";
4618 else if (BuiltinID == Builtin::BIcommit_write_pipe)
4619 Name = "__commit_write_pipe";
4620 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
4621 Name = "__work_group_commit_read_pipe";
4622 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
4623 Name = "__work_group_commit_write_pipe";
4624 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
4625 Name = "__sub_group_commit_read_pipe";
4626 else
4627 Name = "__sub_group_commit_write_pipe";
4628
4629 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4630 *Arg1 = EmitScalarExpr(E->getArg(1));
4631 CGOpenCLRuntime OpenCLRT(CGM);
4632 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4633 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4634
4635 // Building the generic function prototype.
4636 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
4637 llvm::FunctionType *FTy =
4638 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
4639 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4640
4641 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4642 {Arg0, Arg1, PacketSize, PacketAlign}));
4643 }
4644 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
4645 case Builtin::BIget_pipe_num_packets:
4646 case Builtin::BIget_pipe_max_packets: {
4647 const char *BaseName;
4648 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
4649 if (BuiltinID == Builtin::BIget_pipe_num_packets)
4650 BaseName = "__get_pipe_num_packets";
4651 else
4652 BaseName = "__get_pipe_max_packets";
4653 std::string Name = std::string(BaseName) +
4654 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
4655
4656 // Building the generic function prototype.
4657 Value *Arg0 = EmitScalarExpr(E->getArg(0));
4658 CGOpenCLRuntime OpenCLRT(CGM);
4659 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4660 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4661 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
4662 llvm::FunctionType *FTy = llvm::FunctionType::get(
4663 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4664
4665 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4666 {Arg0, PacketSize, PacketAlign}));
4667 }
4668
4669 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
4670 case Builtin::BIto_global:
4671 case Builtin::BIto_local:
4672 case Builtin::BIto_private: {
4673 auto Arg0 = EmitScalarExpr(E->getArg(0));
4674 auto NewArgT = llvm::PointerType::get(Int8Ty,
4675 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4676 auto NewRetT = llvm::PointerType::get(Int8Ty,
4677 CGM.getContext().getTargetAddressSpace(
4678 E->getType()->getPointeeType().getAddressSpace()));
4679 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
4680 llvm::Value *NewArg;
4681 if (Arg0->getType()->getPointerAddressSpace() !=
4682 NewArgT->getPointerAddressSpace())
4683 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
4684 else
4685 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
4686 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
4687 auto NewCall =
4688 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
4689 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
4690 ConvertType(E->getType())));
4691 }
4692
4693 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
4694 // It contains four different overload formats specified in Table 6.13.17.1.
4695 case Builtin::BIenqueue_kernel: {
4696 StringRef Name; // Generated function call name
4697 unsigned NumArgs = E->getNumArgs();
4698
4699 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4700 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4701 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4702
4703 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4704 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4705 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4706 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4707 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4708
4709 if (NumArgs == 4) {
4710 // The most basic form of the call with parameters:
4711 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4712 Name = "__enqueue_kernel_basic";
4713 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4714 GenericVoidPtrTy};
4715 llvm::FunctionType *FTy = llvm::FunctionType::get(
4716 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4717
4718 auto Info =
4719 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4720 llvm::Value *Kernel =
4721 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4722 llvm::Value *Block =
4723 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4724
4725 AttrBuilder B;
4726 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4727 llvm::AttributeList ByValAttrSet =
4728 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4729
4730 auto RTCall =
4731 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4732 {Queue, Flags, Range, Kernel, Block});
4733 RTCall->setAttributes(ByValAttrSet);
4734 return RValue::get(RTCall);
4735 }
4736 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4737
4738 // Create a temporary array to hold the sizes of local pointer arguments
4739 // for the block. \p First is the position of the first size argument.
4740 auto CreateArrayForSizeVar = [=](unsigned First)
4741 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4742 llvm::APInt ArraySize(32, NumArgs - First);
4743 QualType SizeArrayTy = getContext().getConstantArrayType(
4744 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4745 /*IndexTypeQuals=*/0);
4746 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4747 llvm::Value *TmpPtr = Tmp.getPointer();
4748 llvm::Value *TmpSize = EmitLifetimeStart(
4749 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4750 llvm::Value *ElemPtr;
4751 // Each of the following arguments specifies the size of the corresponding
4752 // argument passed to the enqueued block.
4753 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4754 for (unsigned I = First; I < NumArgs; ++I) {
4755 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4756 auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
4757 {Zero, Index});
4758 if (I == First)
4759 ElemPtr = GEP;
4760 auto *V =
4761 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4762 Builder.CreateAlignedStore(
4763 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4764 }
4765 return std::tie(ElemPtr, TmpSize, TmpPtr);
4766 };
4767
4768 // Could have events and/or varargs.
4769 if (E->getArg(3)->getType()->isBlockPointerType()) {
4770 // No events passed, but has variadic arguments.
4771 Name = "__enqueue_kernel_varargs";
4772 auto Info =
4773 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4774 llvm::Value *Kernel =
4775 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4776 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4777 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4778 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4779
4780 // Create a vector of the arguments, as well as a constant value to
4781 // express to the runtime the number of variadic arguments.
4782 llvm::Value *const Args[] = {Queue, Flags,
4783 Range, Kernel,
4784 Block, ConstantInt::get(IntTy, NumArgs - 4),
4785 ElemPtr};
4786 llvm::Type *const ArgTys[] = {
4787 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4788 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4789
4790 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4791 auto Call = RValue::get(
4792 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4793 if (TmpSize)
4794 EmitLifetimeEnd(TmpSize, TmpPtr);
4795 return Call;
4796 }
4797 // Any calls now have event arguments passed.
4798 if (NumArgs >= 7) {
4799 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4800 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4801 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4802
4803 llvm::Value *NumEvents =
4804 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4805
4806 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
4807 // to be a null pointer constant (including `0` literal), we can take it
4808 // into account and emit null pointer directly.
4809 llvm::Value *EventWaitList = nullptr;
4810 if (E->getArg(4)->isNullPointerConstant(
4811 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4812 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4813 } else {
4814 EventWaitList = E->getArg(4)->getType()->isArrayType()
4815 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4816 : EmitScalarExpr(E->getArg(4));
4817 // Convert to generic address space.
4818 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4819 }
4820 llvm::Value *EventRet = nullptr;
4821 if (E->getArg(5)->isNullPointerConstant(
4822 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4823 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4824 } else {
4825 EventRet =
4826 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4827 }
4828
4829 auto Info =
4830 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4831 llvm::Value *Kernel =
4832 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4833 llvm::Value *Block =
4834 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4835
4836 std::vector<llvm::Type *> ArgTys = {
4837 QueueTy, Int32Ty, RangeTy, Int32Ty,
4838 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4839
4840 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4841 NumEvents, EventWaitList, EventRet,
4842 Kernel, Block};
4843
4844 if (NumArgs == 7) {
4845 // Has events but no variadics.
4846 Name = "__enqueue_kernel_basic_events";
4847 llvm::FunctionType *FTy = llvm::FunctionType::get(
4848 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4849 return RValue::get(
4850 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4851 llvm::ArrayRef<llvm::Value *>(Args)));
4852 }
4853 // Has event info and variadics
4854 // Pass the number of variadics to the runtime function too.
4855 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4856 ArgTys.push_back(Int32Ty);
4857 Name = "__enqueue_kernel_events_varargs";
4858
4859 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4860 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4861 Args.push_back(ElemPtr);
4862 ArgTys.push_back(ElemPtr->getType());
4863
4864 llvm::FunctionType *FTy = llvm::FunctionType::get(
4865 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4866 auto Call =
4867 RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4868 llvm::ArrayRef<llvm::Value *>(Args)));
4869 if (TmpSize)
4870 EmitLifetimeEnd(TmpSize, TmpPtr);
4871 return Call;
4872 }
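// [Editorial note, not part of the original source] A sketch of a call that
// reaches the events-plus-varargs path above, assuming the usual OpenCL 2.0
// signature:
//   enqueue_kernel(q, flags, range, 2, wait_list, &out_event,
//                  ^(local int *p){ /* ... */ }, 64u);
// NumArgs is 8, so the emitted call is __enqueue_kernel_events_varargs with an
// extra ConstantInt of 1 (NumArgs - 7) and the pointer to the size array.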
4873 LLVM_FALLTHROUGH;
4874 }
4875 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
4876 // parameter.
4877 case Builtin::BIget_kernel_work_group_size: {
4878 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4879 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4880 auto Info =
4881 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4882 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4883 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4884 return RValue::get(EmitRuntimeCall(
4885 CGM.CreateRuntimeFunction(
4886 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4887 false),
4888 "__get_kernel_work_group_size_impl"),
4889 {Kernel, Arg}));
4890 }
4891 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4892 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4893 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4894 auto Info =
4895 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4896 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4897 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4898 return RValue::get(EmitRuntimeCall(
4899 CGM.CreateRuntimeFunction(
4900 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4901 false),
4902 "__get_kernel_preferred_work_group_size_multiple_impl"),
4903 {Kernel, Arg}));
4904 }
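// [Editorial note, not part of the original source] Both query builtins take
// only the enqueued block; e.g. get_kernel_work_group_size(^{ /* ... */ })
// lowers to __get_kernel_work_group_size_impl(kernel, block) after the invoke
// function and the block literal are cast to generic i8*.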
4905 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4906 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4907 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4908 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4909 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4910 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4911 auto Info =
4912 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4913 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4914 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4915 const char *Name =
4916 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4917 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4918 : "__get_kernel_sub_group_count_for_ndrange_impl";
4919 return RValue::get(EmitRuntimeCall(
4920 CGM.CreateRuntimeFunction(
4921 llvm::FunctionType::get(
4922 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4923 false),
4924 Name),
4925 {NDRange, Kernel, Block}));
4926 }
4927
4928 case Builtin::BI__builtin_store_half:
4929 case Builtin::BI__builtin_store_halff: {
4930 Value *Val = EmitScalarExpr(E->getArg(0));
4931 Address Address = EmitPointerWithAlignment(E->getArg(1));
4932 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4933 return RValue::get(Builder.CreateStore(HalfVal, Address));
4934 }
4935 case Builtin::BI__builtin_load_half: {
4936 Address Address = EmitPointerWithAlignment(E->getArg(0));
4937 Value *HalfVal = Builder.CreateLoad(Address);
4938 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4939 }
4940 case Builtin::BI__builtin_load_halff: {
4941 Address Address = EmitPointerWithAlignment(E->getArg(0));
4942 Value *HalfVal = Builder.CreateLoad(Address);
4943 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4944 }
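// [Editorial note, not part of the original source] Illustrative usage of the
// half-precision load/store builtins handled above:
//   __builtin_store_halff(1.5f, p);       // fptrunc float -> half, then store
//   float f = __builtin_load_halff(p);    // load half, fpext back to float
// The f-less variants do the same but convert to/from double.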
4945 case Builtin::BIprintf:
4946 if (getTarget().getTriple().isNVPTX())
4947 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4948 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
4949 getLangOpts().HIP)
4950 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
4951 break;
4952 case Builtin::BI__builtin_canonicalize:
4953 case Builtin::BI__builtin_canonicalizef:
4954 case Builtin::BI__builtin_canonicalizef16:
4955 case Builtin::BI__builtin_canonicalizel:
4956 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4957
4958 case Builtin::BI__builtin_thread_pointer: {
4959 if (!getContext().getTargetInfo().isTLSSupported())
4960 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4961 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4962 break;
4963 }
4964 case Builtin::BI__builtin_os_log_format:
4965 return emitBuiltinOSLogFormat(*E);
4966
4967 case Builtin::BI__xray_customevent: {
4968 if (!ShouldXRayInstrumentFunction())
4969 return RValue::getIgnored();
4970
4971 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4972 XRayInstrKind::Custom))
4973 return RValue::getIgnored();
4974
4975 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4976 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4977 return RValue::getIgnored();
4978
4979 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4980 auto FTy = F->getFunctionType();
4981 auto Arg0 = E->getArg(0);
4982 auto Arg0Val = EmitScalarExpr(Arg0);
4983 auto Arg0Ty = Arg0->getType();
4984 auto PTy0 = FTy->getParamType(0);
4985 if (PTy0 != Arg0Val->getType()) {
4986 if (Arg0Ty->isArrayType())
4987 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4988 else
4989 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4990 }
4991 auto Arg1 = EmitScalarExpr(E->getArg(1));
4992 auto PTy1 = FTy->getParamType(1);
4993 if (PTy1 != Arg1->getType())
4994 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4995 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4996 }
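// [Editorial note, not part of the original source] Illustrative usage:
//   __xray_customevent(buf, len);
// becomes a call to the llvm.xray.customevent intrinsic with the buffer pointer
// and a truncated length, unless the enclosing function opted out of XRay
// instrumentation, in which case the call is dropped entirely.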
4997
4998 case Builtin::BI__xray_typedevent: {
4999 // TODO: There should be a way to always emit events even if the current
5000 // function is not instrumented. Losing events in a stream can cripple
5001 // a trace.
5002 if (!ShouldXRayInstrumentFunction())
5003 return RValue::getIgnored();
5004
5005 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5006 XRayInstrKind::Typed))
5007 return RValue::getIgnored();
5008
5009 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5010 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
5011 return RValue::getIgnored();
5012
5013 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
5014 auto FTy = F->getFunctionType();
5015 auto Arg0 = EmitScalarExpr(E->getArg(0));
5016 auto PTy0 = FTy->getParamType(0);
5017 if (PTy0 != Arg0->getType())
5018 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
5019 auto Arg1 = E->getArg(1);
5020 auto Arg1Val = EmitScalarExpr(Arg1);
5021 auto Arg1Ty = Arg1->getType();
5022 auto PTy1 = FTy->getParamType(1);
5023 if (PTy1 != Arg1Val->getType()) {
5024 if (Arg1Ty->isArrayType())
5025 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
5026 else
5027 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
5028 }
5029 auto Arg2 = EmitScalarExpr(E->getArg(2));
5030 auto PTy2 = FTy->getParamType(2);
5031 if (PTy2 != Arg2->getType())
5032 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
5033 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
5034 }
5035
5036 case Builtin::BI__builtin_ms_va_start:
5037 case Builtin::BI__builtin_ms_va_end:
5038 return RValue::get(
5039 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
5040 BuiltinID == Builtin::BI__builtin_ms_va_start));
5041
5042 case Builtin::BI__builtin_ms_va_copy: {
5043 // Lower this manually. We can't reliably determine whether or not any
5044 // given va_copy() is for a Win64 va_list from the calling convention
5045 // alone, because it's legal to do this from a System V ABI function.
5046 // With opaque pointer types, we won't have enough information in LLVM
5047 // IR to determine this from the argument types, either. Best to do it
5048 // now, while we have enough information.
5049 Address DestAddr = EmitMSVAListRef(E->getArg(0));
5050 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
5051
5052 llvm::Type *BPP = Int8PtrPtrTy;
5053
5054 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
5055 DestAddr.getAlignment());
5056 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
5057 SrcAddr.getAlignment());
5058
5059 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
5060 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
5061 }
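// [Editorial note, not part of the original source] In effect the lowering above
// treats a Win64 va_list as a single pointer:
//   __builtin_ms_va_copy(dst, src);
// simply loads the i8* held in src and stores it into dst, independent of the
// current target's default va_list ABI.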
5062
5063 case Builtin::BI__builtin_get_device_side_mangled_name: {
5064 auto Name = CGM.getCUDARuntime().getDeviceSideName(
5065 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
5066 auto Str = CGM.GetAddrOfConstantCString(Name, "");
5067 llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
5068 llvm::ConstantInt::get(SizeTy, 0)};
5069 auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
5070 Str.getPointer(), Zeros);
5071 return RValue::get(Ptr);
5072 }
5073 }
5074
5075 // If this is an alias for a lib function (e.g. __builtin_sin), emit
5076 // the call using the normal call path, but using the unmangled
5077 // version of the function name.
5078 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
5079 return emitLibraryCall(*this, FD, E,
5080 CGM.getBuiltinLibFunction(FD, BuiltinID));
5081
5082 // If this is a predefined lib function (e.g. malloc), emit the call
5083 // using exactly the normal call path.
5084 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
5085 return emitLibraryCall(*this, FD, E,
5086 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
5087
5088 // Check that a call to a target specific builtin has the correct target
5089 // features.
5090 // This is down here to avoid checking non-target-specific builtins; however,
5091 // if generic builtins start to require generic target features then we
5092 // can move this up to the beginning of the function.
5093 checkTargetFeatures(E, FD);
5094
5095 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
5096 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
5097
5098 // See if we have a target specific intrinsic.
5099 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
5100 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
5101 StringRef Prefix =
5102 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
5103 if (!Prefix.empty()) {
5104 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
5105 // NOTE: we don't need to perform a compatibility flag check here, since the
5106 // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
5107 // MS builtins via ALL_MS_LANGUAGES, and those are filtered out earlier.
5108 if (IntrinsicID == Intrinsic::not_intrinsic)
5109 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
5110 }
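// [Editorial note, not part of the original source] For example, on x86 the
// prefix is "x86", so a builtin that carries a GCCBuiltin/MSBuiltin mapping in
// the intrinsic tables (such as, presumably, __builtin_ia32_crc32qi) resolves to
// its intrinsic here; builtins without such a mapping fall through to the
// target-specific emitters below.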
5111
5112 if (IntrinsicID != Intrinsic::not_intrinsic) {
5113 SmallVector<Value*, 16> Args;
5114
5115 // Find out if any arguments are required to be integer constant
5116 // expressions.
5117 unsigned ICEArguments = 0;
5118 ASTContext::GetBuiltinTypeError Error;
5119 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5120 assert(Error == ASTContext::GE_None && "Should not codegen an error");
5121
5122 Function *F = CGM.getIntrinsic(IntrinsicID);
5123 llvm::FunctionType *FTy = F->getFunctionType();
5124
5125 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
5126 Value *ArgValue;
5127 // If this is a normal argument, just emit it as a scalar.
5128 if ((ICEArguments & (1 << i)) == 0) {
5129 ArgValue = EmitScalarExpr(E->getArg(i));
5130 } else {
5131 // If this is required to be a constant, constant fold it so that we
5132 // know that the generated intrinsic gets a ConstantInt.
5133 ArgValue = llvm::ConstantInt::get(
5134 getLLVMContext(),
5135 *E->getArg(i)->getIntegerConstantExpr(getContext()));
5136 }
5137
5138 // If the intrinsic arg type is different from the builtin arg type
5139 // we need to do a bit cast.
5140 llvm::Type *PTy = FTy->getParamType(i);
5141 if (PTy != ArgValue->getType()) {
5142 // XXX - vector of pointers?
5143 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
5144 if (PtrTy->getAddressSpace() !=
5145 ArgValue->getType()->getPointerAddressSpace()) {
5146 ArgValue = Builder.CreateAddrSpaceCast(
5147 ArgValue,
5148 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
5149 }
5150 }
5151
5152 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
5153 "Must be able to losslessly bit cast to param");
5154 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
5155 }
5156
5157 Args.push_back(ArgValue);
5158 }
5159
5160 Value *V = Builder.CreateCall(F, Args);
5161 QualType BuiltinRetType = E->getType();
5162
5163 llvm::Type *RetTy = VoidTy;
5164 if (!BuiltinRetType->isVoidType())
5165 RetTy = ConvertType(BuiltinRetType);
5166
5167 if (RetTy != V->getType()) {
5168 // XXX - vector of pointers?
5169 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
5170 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
5171 V = Builder.CreateAddrSpaceCast(
5172 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
5173 }
5174 }
5175
5176 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5177 "Must be able to losslessly bit cast result type");
5178 V = Builder.CreateBitCast(V, RetTy);
5179 }
5180
5181 return RValue::get(V);
5182 }
5183
5184 // Some target-specific builtins can have aggregate return values, e.g.
5185 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5186 // ReturnValue to be non-null, so that the target-specific emission code can
5187 // always just emit into it.
5188 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
5189 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
5190 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
5191 ReturnValue = ReturnValueSlot(DestPtr, false);
5192 }
5193
5194 // Now see if we can emit a target-specific builtin.
5195 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
5196 switch (EvalKind) {
5197 case TEK_Scalar:
5198 return RValue::get(V);
5199 case TEK_Aggregate:
5200 return RValue::getAggregate(ReturnValue.getValue(),
5201 ReturnValue.isVolatile());
5202 case TEK_Complex:
5203 llvm_unreachable("No current target builtin returns complex")::llvm::llvm_unreachable_internal("No current target builtin returns complex"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 5203)
;
5204 }
5205 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr")::llvm::llvm_unreachable_internal("Bad evaluation kind in EmitBuiltinExpr"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 5205)
;
5206 }
5207
5208 ErrorUnsupported(E, "builtin function");
5209
5210 // Unknown builtin, for now just dump it out and return undef.
5211 return GetUndefRValue(E->getType());
5212}
5213
5214static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
5215 unsigned BuiltinID, const CallExpr *E,
5216 ReturnValueSlot ReturnValue,
5217 llvm::Triple::ArchType Arch) {
5218 switch (Arch) {
5219 case llvm::Triple::arm:
5220 case llvm::Triple::armeb:
5221 case llvm::Triple::thumb:
5222 case llvm::Triple::thumbeb:
5223 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
5224 case llvm::Triple::aarch64:
5225 case llvm::Triple::aarch64_32:
5226 case llvm::Triple::aarch64_be:
5227 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
5228 case llvm::Triple::bpfeb:
5229 case llvm::Triple::bpfel:
5230 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
5231 case llvm::Triple::x86:
5232 case llvm::Triple::x86_64:
5233 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
5234 case llvm::Triple::ppc:
5235 case llvm::Triple::ppcle:
5236 case llvm::Triple::ppc64:
5237 case llvm::Triple::ppc64le:
5238 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5239 case llvm::Triple::r600:
5240 case llvm::Triple::amdgcn:
5241 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5242 case llvm::Triple::systemz:
5243 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5244 case llvm::Triple::nvptx:
5245 case llvm::Triple::nvptx64:
5246 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5247 case llvm::Triple::wasm32:
5248 case llvm::Triple::wasm64:
5249 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5250 case llvm::Triple::hexagon:
5251 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5252 case llvm::Triple::riscv32:
5253 case llvm::Triple::riscv64:
5254 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
5255 default:
5256 return nullptr;
5257 }
5258}
5259
5260Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5261 const CallExpr *E,
5262 ReturnValueSlot ReturnValue) {
5263 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5264 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5265 return EmitTargetArchBuiltinExpr(
5266 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5267 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5268 }
5269
5270 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5271 getTarget().getTriple().getArch());
5272}
5273
5274static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5275 NeonTypeFlags TypeFlags,
5276 bool HasLegalHalfType = true,
5277 bool V1Ty = false,
5278 bool AllowBFloatArgsAndRet = true) {
5279 int IsQuad = TypeFlags.isQuad();
5280 switch (TypeFlags.getEltType()) {
5281 case NeonTypeFlags::Int8:
5282 case NeonTypeFlags::Poly8:
5283 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5284 case NeonTypeFlags::Int16:
5285 case NeonTypeFlags::Poly16:
5286 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5287 case NeonTypeFlags::BFloat16:
5288 if (AllowBFloatArgsAndRet)
5289 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5290 else
5291 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5292 case NeonTypeFlags::Float16:
5293 if (HasLegalHalfType)
5294 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5295 else
5296 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5297 case NeonTypeFlags::Int32:
5298 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5299 case NeonTypeFlags::Int64:
5300 case NeonTypeFlags::Poly64:
5301 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5302 case NeonTypeFlags::Poly128:
5303 // FIXME: i128 and f128 don't get full support in Clang and LLVM.
5304 // A lot of the i128 and f128 API is missing, so we use v16i8 to represent
5305 // poly128 and let it get pattern matched.
5306 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5307 case NeonTypeFlags::Float32:
5308 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5309 case NeonTypeFlags::Float64:
5310 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5311 }
5312 llvm_unreachable("Unknown vector element type!")::llvm::llvm_unreachable_internal("Unknown vector element type!"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 5312)
;
5313}
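// [Editorial note, not part of the original source] Illustrative mappings from
// GetNeonType above: Int32 with isQuad() yields <4 x i32> and <2 x i32> without
// it; Float16 falls back to the equivalent i16 vector when the target has no
// legal half type, and Poly128 is always modeled as <16 x i8>.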
5314
5315static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5316 NeonTypeFlags IntTypeFlags) {
5317 int IsQuad = IntTypeFlags.isQuad();
5318 switch (IntTypeFlags.getEltType()) {
5319 case NeonTypeFlags::Int16:
5320 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5321 case NeonTypeFlags::Int32:
5322 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5323 case NeonTypeFlags::Int64:
5324 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5325 default:
5326 llvm_unreachable("Type can't be converted to floating-point!")::llvm::llvm_unreachable_internal("Type can't be converted to floating-point!"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 5326)
;
5327 }
5328}
5329
5330Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5331 const ElementCount &Count) {
5332 Value *SV = llvm::ConstantVector::getSplat(Count, C);
5333 return Builder.CreateShuffleVector(V, V, SV, "lane");
5334}
5335
5336Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5337 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5338 return EmitNeonSplat(V, C, EC);
5339}
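// [Editorial note, not part of the original source] The splat above is a
// shufflevector whose mask is the lane index broadcast across all elements;
// e.g. splatting lane 1 of a <4 x i32> value uses the constant mask <1, 1, 1, 1>.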
5340
5341Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5342 const char *name,
5343 unsigned shift, bool rightshift) {
5344 unsigned j = 0;
5345 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5346 ai != ae; ++ai, ++j) {
5347 if (F->isConstrainedFPIntrinsic())
5348 if (ai->getType()->isMetadataTy())
5349 continue;
5350 if (shift > 0 && shift == j)
5351 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5352 else
5353 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5354 }
5355
5356 if (F->isConstrainedFPIntrinsic())
5357 return Builder.CreateConstrainedFPCall(F, Ops, name);
5358 else
5359 return Builder.CreateCall(F, Ops, name);
5360}
5361
5362Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5363 bool neg) {
5364 int SV = cast<ConstantInt>(V)->getSExtValue();
5365 return ConstantInt::get(Ty, neg ? -SV : SV);
5366}
5367
5368// Right-shift a vector by a constant.
5369Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5370 llvm::Type *Ty, bool usgn,
5371 const char *name) {
5372 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5373
5374 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5375 int EltSize = VTy->getScalarSizeInBits();
5376
5377 Vec = Builder.CreateBitCast(Vec, Ty);
5378
5379 // lshr/ashr are undefined when the shift amount is equal to the vector
5380 // element size.
5381 if (ShiftAmt == EltSize) {
5382 if (usgn) {
5383 // Right-shifting an unsigned value by its size yields 0.
5384 return llvm::ConstantAggregateZero::get(VTy);
5385 } else {
5386 // Right-shifting a signed value by its size is equivalent
5387 // to a shift of size-1.
5388 --ShiftAmt;
5389 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
5390 }
5391 }
5392
5393 Shift = EmitNeonShiftVector(Shift, Ty, false);
5394 if (usgn)
5395 return Builder.CreateLShr(Vec, Shift, name);
5396 else
5397 return Builder.CreateAShr(Vec, Shift, name);
5398}
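// [Editorial note, not part of the original source] Worked example for the
// clamping above: a signed right shift of a <4 x i32> vector by 32 equals the
// element size, so the amount is reduced to 31 to keep the ashr defined, while
// an unsigned shift by 32 simply returns the all-zero vector.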
5399
5400enum {
5401 AddRetType = (1 << 0),
5402 Add1ArgType = (1 << 1),
5403 Add2ArgTypes = (1 << 2),
5404
5405 VectorizeRetType = (1 << 3),
5406 VectorizeArgTypes = (1 << 4),
5407
5408 InventFloatType = (1 << 5),
5409 UnsignedAlts = (1 << 6),
5410
5411 Use64BitVectors = (1 << 7),
5412 Use128BitVectors = (1 << 8),
5413
5414 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
5415 VectorRet = AddRetType | VectorizeRetType,
5416 VectorRetGetArgs01 =
5417 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
5418 FpCmpzModifiers =
5419 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
5420};
5421
5422namespace {
5423struct ARMVectorIntrinsicInfo {
5424 const char *NameHint;
5425 unsigned BuiltinID;
5426 unsigned LLVMIntrinsic;
5427 unsigned AltLLVMIntrinsic;
5428 uint64_t TypeModifier;
5429
5430 bool operator<(unsigned RHSBuiltinID) const {
5431 return BuiltinID < RHSBuiltinID;
5432 }
5433 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
5434 return BuiltinID < TE.BuiltinID;
5435 }
5436};
5437} // end anonymous namespace
5438
5439#define NEONMAP0(NameBase) \
5440 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
5441
5442#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5443 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5444 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
5445
5446#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
5447 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5448 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
5449 TypeModifier }
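// [Editorial note, not part of the original source] For reference, the macro
//   NEONMAP1(vabs_v, arm_neon_vabs, 0)
// expands to the initializer
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 },
// i.e. a NameHint, the builtin ID, the LLVM intrinsic, no alternate intrinsic,
// and no type-modifier flags.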
5450
5451static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
5452 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
5453 NEONMAP0(splat_lane_v),
5454 NEONMAP0(splat_laneq_v),
5455 NEONMAP0(splatq_lane_v),
5456 NEONMAP0(splatq_laneq_v),
5457 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5458 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5459 NEONMAP1(vabs_v, arm_neon_vabs, 0),
5460 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
5461 NEONMAP0(vadd_v),
5462 NEONMAP0(vaddhn_v),
5463 NEONMAP0(vaddq_v),
5464 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
5465 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
5466 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
5467 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
5468 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
5469 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
5470 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
5471 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
5472 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
5473 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
5474 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
5475 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5476 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5477 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5478 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5479 NEONMAP1(vcage_v, arm_neon_vacge, 0),
5480 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
5481 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
5482 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
5483 NEONMAP1(vcale_v, arm_neon_vacge, 0),
5484 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
5485 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
5486 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
5487 NEONMAP0(vceqz_v),
5488 NEONMAP0(vceqzq_v),
5489 NEONMAP0(vcgez_v),
5490 NEONMAP0(vcgezq_v),
5491 NEONMAP0(vcgtz_v),
5492 NEONMAP0(vcgtzq_v),
5493 NEONMAP0(vclez_v),
5494 NEONMAP0(vclezq_v),
5495 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
5496 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
5497 NEONMAP0(vcltz_v),
5498 NEONMAP0(vcltzq_v),
5499 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5500 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5501 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5502 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5503 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
5504 NEONMAP0(vcvt_f16_v),
5505 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
5506 NEONMAP0(vcvt_f32_v),
5507 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5508 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5509 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5510 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5511 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5512 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5513 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5514 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5515 NEONMAP0(vcvt_s16_v),
5516 NEONMAP0(vcvt_s32_v),
5517 NEONMAP0(vcvt_s64_v),
5518 NEONMAP0(vcvt_u16_v),
5519 NEONMAP0(vcvt_u32_v),
5520 NEONMAP0(vcvt_u64_v),
5521 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
5522 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
5523 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
5524 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
5525 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
5526 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
5527 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
5528 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
5529 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
5530 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
5531 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
5532 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
5533 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
5534 NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
5535 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
5536 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
5537 NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
5538 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
5539 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
5540 NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
5541 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
5542 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
5543 NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
5544 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
5545 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
5546 NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
5547 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
5548 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
5549 NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
5550 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
5551 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
5552 NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
5553 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
5554 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
5555 NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
5556 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
5557 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
5558 NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
5559 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
5560 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
5561 NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
5562 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
5563 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
5564 NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
5565 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
5566 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
5567 NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
5568 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
5569 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
5570 NEONMAP0(vcvtq_f16_v),
5571 NEONMAP0(vcvtq_f32_v),
5572 NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5573 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5574 NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5575 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5576 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5577 NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5578 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5579 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5580 NEONMAP0(vcvtq_s16_v),
5581 NEONMAP0(vcvtq_s32_v),
5582 NEONMAP0(vcvtq_s64_v),
5583 NEONMAP0(vcvtq_u16_v),
5584 NEONMAP0(vcvtq_u32_v),
5585 NEONMAP0(vcvtq_u64_v),
5586 NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
5587 NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
5588 NEONMAP0(vext_v),
5589 NEONMAP0(vextq_v),
5590 NEONMAP0(vfma_v),
5591 NEONMAP0(vfmaq_v),
5592 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5593 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5594 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5595 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5596 NEONMAP0(vld1_dup_v),
5597 NEONMAP1(vld1_v, arm_neon_vld1, 0),
5598 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
5599 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
5600 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
5601 NEONMAP0(vld1q_dup_v),
5602 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
5603 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
5604 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
5605 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
5606 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
5607 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
5608 NEONMAP1(vld2_v, arm_neon_vld2, 0),
5609 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
5610 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
5611 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
5612 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
5613 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
5614 NEONMAP1(vld3_v, arm_neon_vld3, 0),
5615 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
5616 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
5617 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
5618 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
5619 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
5620 NEONMAP1(vld4_v, arm_neon_vld4, 0),
5621 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
5622 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
5623 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
5624 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5625 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
5626 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
5627 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5628 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5629 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
5630 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
5631 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5632 NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
5633 NEONMAP0(vmovl_v),
5634 NEONMAP0(vmovn_v),
5635 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
5636 NEONMAP0(vmull_v),
5637 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
5638 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5639 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5640 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
5641 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5642 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5643 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
5644 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
5645 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
5646 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
5647 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
5648 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5649 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5650 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
5651 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
5652 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
5653 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
5654 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
5655 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
5656 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
5657 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
5658 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
5659 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
5660 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
5661 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5662 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5663 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5664 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5665 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5666 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5667 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
5668 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
5669 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5670 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5671 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
5672 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5673 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5674 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
5675 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
5676 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5677 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5678 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
5679 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
5680 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
5681 NEONMAP0(vrndi_v),
5682 NEONMAP0(vrndiq_v),
5683 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
5684 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
5685 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
5686 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
5687 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
5688 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
5689 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
5690 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
5691 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
5692 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5693 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5694 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5695 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5696 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5697 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5698 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
5699 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
5700 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
5701 NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
5702 NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
5703 NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
5704 NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
5705 NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
5706 NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
5707 NEONMAP0(vshl_n_v),
5708 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5709 NEONMAP0(vshll_n_v),
5710 NEONMAP0(vshlq_n_v),
5711 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5712 NEONMAP0(vshr_n_v),
5713 NEONMAP0(vshrn_n_v),
5714 NEONMAP0(vshrq_n_v),
5715 NEONMAP1(vst1_v, arm_neon_vst1, 0),
5716 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5717 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5718 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5719 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5720 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5721 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5722 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5723 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5724 NEONMAP1(vst2_v, arm_neon_vst2, 0),
5725 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5726 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5727 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5728 NEONMAP1(vst3_v, arm_neon_vst3, 0),
5729 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5730 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5731 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5732 NEONMAP1(vst4_v, arm_neon_vst4, 0),
5733 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5734 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5735 NEONMAP0(vsubhn_v),
5736 NEONMAP0(vtrn_v),
5737 NEONMAP0(vtrnq_v),
5738 NEONMAP0(vtst_v),
5739 NEONMAP0(vtstq_v),
5740 NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5741 NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5742 NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5743 NEONMAP0(vuzp_v),
5744 NEONMAP0(vuzpq_v),
5745 NEONMAP0(vzip_v),
5746 NEONMAP0(vzipq_v)
5747};
5748
5749static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5750 NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5751 NEONMAP0(splat_lane_v),
5752 NEONMAP0(splat_laneq_v),
5753 NEONMAP0(splatq_lane_v),
5754 NEONMAP0(splatq_laneq_v),
5755 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5756 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5757 NEONMAP0(vadd_v),
5758 NEONMAP0(vaddhn_v),
5759 NEONMAP0(vaddq_p128),
5760 NEONMAP0(vaddq_v),
5761 NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5762 NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5763 NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5764 NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5765 NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
5766 NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5767 NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5768 NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5769 NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5770 NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5771 NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5772 NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5773 NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5774 NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5775 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5776 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5777 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5778 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5779 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5780 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5781 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5782 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5783 NEONMAP0(vceqz_v),
5784 NEONMAP0(vceqzq_v),
5785 NEONMAP0(vcgez_v),
5786 NEONMAP0(vcgezq_v),
5787 NEONMAP0(vcgtz_v),
5788 NEONMAP0(vcgtzq_v),
5789 NEONMAP0(vclez_v),
5790 NEONMAP0(vclezq_v),
5791 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5792 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5793 NEONMAP0(vcltz_v),
5794 NEONMAP0(vcltzq_v),
5795 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5796 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5797 NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
5798 NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
5799 NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
5800 NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
5801 NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
5802 NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
5803 NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
5804 NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
5805 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5806 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5807 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5808 NEONMAP0(vcvt_f16_v),
5809 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5810 NEONMAP0(vcvt_f32_v),
5811 NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5812 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5813 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5814 NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5815 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5816 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5817 NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5818 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5819 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5820 NEONMAP0(vcvtq_f16_v),
5821 NEONMAP0(vcvtq_f32_v),
5822 NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5823 NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5824 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5825 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5826 NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5827 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5828 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5829 NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5830 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5831 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5832 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
5833 NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5834 NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
5835 NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
5836 NEONMAP0(vext_v),
5837 NEONMAP0(vextq_v),
5838 NEONMAP0(vfma_v),
5839 NEONMAP0(vfmaq_v),
5840 NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
5841 NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
5842 NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
5843 NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
5844 NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
5845 NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
5846 NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
5847 NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
5848 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5849 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
5850 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5851 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
5852 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
5853 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
5854 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
5855 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
5856 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
5857 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
5858 NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
5859 NEONMAP0(vmovl_v),
5860 NEONMAP0(vmovn_v),
5861 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
5862 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
5863 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
5864 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5865 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
5866 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
5867 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
5868 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
5869 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5870 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
5871 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
5872 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
5873 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
5874 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5875 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
5876 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
5877 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
5878 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
5879 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
5880 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
5881 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
5882 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
5883 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
5884 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5885 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5886 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
5887 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
5888 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
5889 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
5890 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5891 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
5892 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5893 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5894 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5895 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
5896 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
5897 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
5898 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5899 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
5900 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
5901 NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0),
5902 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5903 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
5904 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
5905 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
5906 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5907 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
5908 NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
5909 NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
5910 NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
5911 NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
5912 NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
5913 NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
5914 NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
5915 NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
5916 NEONMAP0(vrndi_v),
5917 NEONMAP0(vrndiq_v),
5918 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5919 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
5920 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5921 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
5922 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5923 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
5924 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
5925 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
5926 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
5927 NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
5928 NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
5929 NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
5930 NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
5931 NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
5932 NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
5933 NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0),
5934 NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0),
5935 NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0),
5936 NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0),
5937 NEONMAP0(vshl_n_v),
5938 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5939 NEONMAP0(vshll_n_v),
5940 NEONMAP0(vshlq_n_v),
5941 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
5942 NEONMAP0(vshr_n_v),
5943 NEONMAP0(vshrn_n_v),
5944 NEONMAP0(vshrq_n_v),
5945 NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0),
5946 NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0),
5947 NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0),
5948 NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0),
5949 NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0),
5950 NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0),
5951 NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0),
5952 NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0),
5953 NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0),
5954 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
5955 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
5956 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
5957 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
5958 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
5959 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
5960 NEONMAP0(vsubhn_v),
5961 NEONMAP0(vtst_v),
5962 NEONMAP0(vtstq_v),
5963 NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
5964 NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
5965 NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
5966 NEONMAP1(vxarq_v, aarch64_crypto_xar, 0),
5967};
5968
5969static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
5970 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
5971 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
5972 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
5973 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5974 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5975 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
5976 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
5977 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5978 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5979 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5980 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
5981 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
5982 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
5983 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
5984 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5985 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
5986 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5987 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5988 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5989 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5990 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
5991 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
5992 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
5993 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
5994 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5995 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5996 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5997 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5998 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5999 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6000 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6001 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6002 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6003 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6004 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
6005 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6006 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6007 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6008 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6009 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6010 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6011 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6012 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6013 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6014 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6015 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6016 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6017 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6018 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6019 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6020 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6021 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6022 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6023 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
6024 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6025 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6026 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6027 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6028 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6029 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6030 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6031 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6032 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6033 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6034 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6035 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6036 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6037 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6038 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6039 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6040 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6041 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6042 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6043 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6044 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
6045 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
6046 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
6047 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6048 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6049 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6050 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6051 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6052 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6053 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6054 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6055 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6056 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6057 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6058 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
6059 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6060 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
6061 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6062 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6063 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
6064 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
6065 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6066 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6067 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
6068 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
6069 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
6070 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
6071 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
6072 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
6073 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
6074 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
6075 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6076 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6077 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6078 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6079 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
6080 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6081 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6082 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6083 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
6084 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6085 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
6086 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
6087 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
6088 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6089 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6090 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
6091 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
6092 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6093 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6094 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
6095 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
6096 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
6097 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
6098 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6099 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6100 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6101 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6102 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
6103 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6104 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6105 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6106 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6107 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6108 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6109 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
6110 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
6111 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6112 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6113 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6114 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6115 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
6116 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
6117 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
6118 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
6119 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6120 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6121 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
6122 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
6123 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
6124 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6125 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6126 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6127 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6128 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
6129 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6130 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6131 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6132 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6133 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
6134 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
6135 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6136 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6137 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
6138 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
6139 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
6140 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
6141 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
6142 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
6143 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
6144 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
6145 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
6146 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
6147 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
6148 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
6149 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
6150 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
6151 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
6152 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
6153 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
6154 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
6155 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
6156 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
6157 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6158 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
6159 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6160 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
6161 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
6162 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
6163 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6164 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
6165 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6166 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
6167 // FP16 scalar intrinsics go here.
6168 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
6169 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6170 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6171 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6172 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6173 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6174 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6175 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6176 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6177 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6178 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6179 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6180 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6181 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6182 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6183 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6184 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6185 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6186 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6187 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6188 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6189 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6190 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6191 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6192 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6193 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6194 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6195 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6196 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6197 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
6198 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
6199 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
6200 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
6201 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
6202};
6203
6204#undef NEONMAP0
6205#undef NEONMAP1
6206#undef NEONMAP2
6207
6208#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
6209 { \
6210 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
6211 TypeModifier \
6212 }
6213
6214#define SVEMAP2(NameBase, TypeModifier) \
6215 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
6216static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
6217#define GET_SVE_LLVM_INTRINSIC_MAP
6218#include "clang/Basic/arm_sve_builtin_cg.inc"
6219#undef GET_SVE_LLVM_INTRINSIC_MAP
6220};
6221
6222#undef SVEMAP1
6223#undef SVEMAP2
6224
6225static bool NEONSIMDIntrinsicsProvenSorted = false;
6226
6227static bool AArch64SIMDIntrinsicsProvenSorted = false;
6228static bool AArch64SISDIntrinsicsProvenSorted = false;
6229static bool AArch64SVEIntrinsicsProvenSorted = false;
6230
6231static const ARMVectorIntrinsicInfo *
6232findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
6233 unsigned BuiltinID, bool &MapProvenSorted) {
6234
6235#ifndef NDEBUG
6236 if (!MapProvenSorted) {
6237 assert(llvm::is_sorted(IntrinsicMap));
6238 MapProvenSorted = true;
6239 }
6240#endif
6241
6242 const ARMVectorIntrinsicInfo *Builtin =
6243 llvm::lower_bound(IntrinsicMap, BuiltinID);
6244
6245 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
6246 return Builtin;
6247
6248 return nullptr;
6249}
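A minimal usage sketch of the lookup above, assuming the AArch64 SISD map and its ProvenSorted flag declared earlier (the wrapper name is hypothetical and not part of this file):

// Hypothetical convenience wrapper, for illustration only.
static const ARMVectorIntrinsicInfo *lookupAArch64SISD(unsigned BuiltinID) {
  // Returns nullptr when the builtin has no scalar (SISD) mapping, in which
  // case the caller falls back to the SIMD table or to generic handling.
  return findARMVectorIntrinsicInMap(AArch64SISDIntrinsicMap, BuiltinID,
                                     AArch64SISDIntrinsicsProvenSorted);
}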
6250
6251Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
6252 unsigned Modifier,
6253 llvm::Type *ArgType,
6254 const CallExpr *E) {
6255 int VectorSize = 0;
6256 if (Modifier & Use64BitVectors)
6257 VectorSize = 64;
6258 else if (Modifier & Use128BitVectors)
6259 VectorSize = 128;
6260
6261 // Return type.
6262 SmallVector<llvm::Type *, 3> Tys;
6263 if (Modifier & AddRetType) {
6264 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
6265 if (Modifier & VectorizeRetType)
6266 Ty = llvm::FixedVectorType::get(
6267 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
6268
6269 Tys.push_back(Ty);
6270 }
6271
6272 // Arguments.
6273 if (Modifier & VectorizeArgTypes) {
6274 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
6275 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
6276 }
6277
6278 if (Modifier & (Add1ArgType | Add2ArgTypes))
6279 Tys.push_back(ArgType);
6280
6281 if (Modifier & Add2ArgTypes)
6282 Tys.push_back(ArgType);
6283
6284 if (Modifier & InventFloatType)
6285 Tys.push_back(FloatTy);
6286
6287 return CGM.getIntrinsic(IntrinsicID, Tys);
6288}
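To make the modifier flags concrete: an entry such as vaddlv_s32 in the SISD map above carries AddRetType | Add1ArgType, so the overload type list becomes {return type, argument type}. A hedged sketch of what that expands to, written as it would look inside a CodeGenFunction member (the i64 / <2 x i32> shapes are assumptions based on the vaddlv_s32 signature):

SmallVector<llvm::Type *, 3> Tys;
Tys.push_back(Int64Ty);                                // AddRetType: i64 result
Tys.push_back(llvm::FixedVectorType::get(Int32Ty, 2)); // Add1ArgType: <2 x i32> input
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_saddlv, Tys);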
6289
6290static Value *EmitCommonNeonSISDBuiltinExpr(
6291 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
6292 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
6293 unsigned BuiltinID = SISDInfo.BuiltinID;
6294 unsigned int Int = SISDInfo.LLVMIntrinsic;
6295 unsigned Modifier = SISDInfo.TypeModifier;
6296 const char *s = SISDInfo.NameHint;
6297
6298 switch (BuiltinID) {
6299 case NEON::BI__builtin_neon_vcled_s64:
6300 case NEON::BI__builtin_neon_vcled_u64:
6301 case NEON::BI__builtin_neon_vcles_f32:
6302 case NEON::BI__builtin_neon_vcled_f64:
6303 case NEON::BI__builtin_neon_vcltd_s64:
6304 case NEON::BI__builtin_neon_vcltd_u64:
6305 case NEON::BI__builtin_neon_vclts_f32:
6306 case NEON::BI__builtin_neon_vcltd_f64:
6307 case NEON::BI__builtin_neon_vcales_f32:
6308 case NEON::BI__builtin_neon_vcaled_f64:
6309 case NEON::BI__builtin_neon_vcalts_f32:
6310 case NEON::BI__builtin_neon_vcaltd_f64:
6311 // Only one direction of comparisons actually exists; cmle is really a cmge
6312 // with swapped operands. The table gives us the right intrinsic, but we
6313 // still need to do the swap.
6314 std::swap(Ops[0], Ops[1]);
6315 break;
6316 }
6317
6318 assert(Int && "Generic code assumes a valid intrinsic");
6319
6320 // Determine the type(s) of this overloaded AArch64 intrinsic.
6321 const Expr *Arg = E->getArg(0);
6322 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
6323 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
6324
6325 int j = 0;
6326 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
6327 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
6328 ai != ae; ++ai, ++j) {
6329 llvm::Type *ArgTy = ai->getType();
6330 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
6331 ArgTy->getPrimitiveSizeInBits())
6332 continue;
6333
6334 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6335 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
6336 // it before inserting.
6337 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
6338 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
6339 Ops[j] =
6340 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
6341 }
6342
6343 Value *Result = CGF.EmitNeonCall(F, Ops, s);
6344 llvm::Type *ResultType = CGF.ConvertType(E->getType());
6345 if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
6346 Result->getType()->getPrimitiveSizeInBits().getFixedSize())
6347 return CGF.Builder.CreateExtractElement(Result, C0);
6348
6349 return CGF.Builder.CreateBitCast(Result, ResultType, s);
6350}
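For entries marked Vectorize1ArgType | Use64BitVectors (vqaddb_s8, for example), the loop above inserts each scalar operand into lane 0 of a 64-bit vector, calls the vector intrinsic, and then extracts lane 0 of the result. A hedged illustration of the resulting IR (value names and the exact intrinsic overload are assumptions):

//   %v0 = insertelement <8 x i8> undef, i8 %a, i64 0
//   %v1 = insertelement <8 x i8> undef, i8 %b, i64 0
//   %r  = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %v0, <8 x i8> %v1)
//   %s  = extractelement <8 x i8> %r, i64 0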
6351
6352Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
6353 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
6354 const char *NameHint, unsigned Modifier, const CallExpr *E,
6355 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
6356 llvm::Triple::ArchType Arch) {
6357 // Get the last argument, which specifies the vector type.
6358 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
6359 Optional<llvm::APSInt> NeonTypeConst =
6360 Arg->getIntegerConstantExpr(getContext());
6361 if (!NeonTypeConst)
6362 return nullptr;
6363
6364 // Determine the type of this overloaded NEON intrinsic.
6365 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
6366 bool Usgn = Type.isUnsigned();
6367 bool Quad = Type.isQuad();
6368 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
6369 const bool AllowBFloatArgsAndRet =
6370 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
6371
6372 llvm::FixedVectorType *VTy =
6373 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
6374 llvm::Type *Ty = VTy;
6375 if (!Ty)
6376 return nullptr;
6377
6378 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6379 return Builder.getInt32(addr.getAlignment().getQuantity());
6380 };
6381
6382 unsigned Int = LLVMIntrinsic;
6383 if ((Modifier & UnsignedAlts) && !Usgn)
6384 Int = AltLLVMIntrinsic;
6385
6386 switch (BuiltinID) {
6387 default: break;
6388 case NEON::BI__builtin_neon_splat_lane_v:
6389 case NEON::BI__builtin_neon_splat_laneq_v:
6390 case NEON::BI__builtin_neon_splatq_lane_v:
6391 case NEON::BI__builtin_neon_splatq_laneq_v: {
6392 auto NumElements = VTy->getElementCount();
6393 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
6394 NumElements = NumElements * 2;
6395 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
6396 NumElements = NumElements.divideCoefficientBy(2);
6397
6398 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6399 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
6400 }
6401 case NEON::BI__builtin_neon_vpadd_v:
6402 case NEON::BI__builtin_neon_vpaddq_v:
6403 // We don't allow fp/int overloading of intrinsics.
6404 if (VTy->getElementType()->isFloatingPointTy() &&
6405 Int == Intrinsic::aarch64_neon_addp)
6406 Int = Intrinsic::aarch64_neon_faddp;
6407 break;
6408 case NEON::BI__builtin_neon_vabs_v:
6409 case NEON::BI__builtin_neon_vabsq_v:
6410 if (VTy->getElementType()->isFloatingPointTy())
6411 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
6412 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
6413 case NEON::BI__builtin_neon_vadd_v:
6414 case NEON::BI__builtin_neon_vaddq_v: {
6415 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
6416 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6417 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
6418 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
6419 return Builder.CreateBitCast(Ops[0], Ty);
6420 }
6421 case NEON::BI__builtin_neon_vaddhn_v: {
6422 llvm::FixedVectorType *SrcTy =
6423 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6424
6425 // %sum = add <4 x i32> %lhs, %rhs
6426 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6427 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6428 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
6429
6430 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6431 Constant *ShiftAmt =
6432 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6433 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
6434
6435 // %res = trunc <4 x i32> %high to <4 x i16>
6436 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
6437 }
6438 case NEON::BI__builtin_neon_vcale_v:
6439 case NEON::BI__builtin_neon_vcaleq_v:
6440 case NEON::BI__builtin_neon_vcalt_v:
6441 case NEON::BI__builtin_neon_vcaltq_v:
6442 std::swap(Ops[0], Ops[1]);
6443 LLVM_FALLTHROUGH;
6444 case NEON::BI__builtin_neon_vcage_v:
6445 case NEON::BI__builtin_neon_vcageq_v:
6446 case NEON::BI__builtin_neon_vcagt_v:
6447 case NEON::BI__builtin_neon_vcagtq_v: {
6448 llvm::Type *Ty;
6449 switch (VTy->getScalarSizeInBits()) {
6450 default: llvm_unreachable("unexpected type");
6451 case 32:
6452 Ty = FloatTy;
6453 break;
6454 case 64:
6455 Ty = DoubleTy;
6456 break;
6457 case 16:
6458 Ty = HalfTy;
6459 break;
6460 }
6461 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
6462 llvm::Type *Tys[] = { VTy, VecFlt };
6463 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6464 return EmitNeonCall(F, Ops, NameHint);
6465 }
6466 case NEON::BI__builtin_neon_vceqz_v:
6467 case NEON::BI__builtin_neon_vceqzq_v:
6468 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
6469 ICmpInst::ICMP_EQ, "vceqz");
6470 case NEON::BI__builtin_neon_vcgez_v:
6471 case NEON::BI__builtin_neon_vcgezq_v:
6472 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
6473 ICmpInst::ICMP_SGE, "vcgez");
6474 case NEON::BI__builtin_neon_vclez_v:
6475 case NEON::BI__builtin_neon_vclezq_v:
6476 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
6477 ICmpInst::ICMP_SLE, "vclez");
6478 case NEON::BI__builtin_neon_vcgtz_v:
6479 case NEON::BI__builtin_neon_vcgtzq_v:
6480 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
6481 ICmpInst::ICMP_SGT, "vcgtz");
6482 case NEON::BI__builtin_neon_vcltz_v:
6483 case NEON::BI__builtin_neon_vcltzq_v:
6484 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
6485 ICmpInst::ICMP_SLT, "vcltz");
6486 case NEON::BI__builtin_neon_vclz_v:
6487 case NEON::BI__builtin_neon_vclzq_v:
6488 // We generate a target-independent intrinsic, which needs a second argument
6489 // specifying whether or not clz of zero is undefined; on ARM it isn't.
6490 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
6491 break;
6492 case NEON::BI__builtin_neon_vcvt_f32_v:
6493 case NEON::BI__builtin_neon_vcvtq_f32_v:
6494 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6495 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
6496 HasLegalHalfType);
6497 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6498 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6499 case NEON::BI__builtin_neon_vcvt_f16_v:
6500 case NEON::BI__builtin_neon_vcvtq_f16_v:
6501 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6502 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
6503 HasLegalHalfType);
6504 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6505 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6506 case NEON::BI__builtin_neon_vcvt_n_f16_v:
6507 case NEON::BI__builtin_neon_vcvt_n_f32_v:
6508 case NEON::BI__builtin_neon_vcvt_n_f64_v:
6509 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
6510 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
6511 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
6512 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
6513 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6514 Function *F = CGM.getIntrinsic(Int, Tys);
6515 return EmitNeonCall(F, Ops, "vcvt_n");
6516 }
6517 case NEON::BI__builtin_neon_vcvt_n_s16_v:
6518 case NEON::BI__builtin_neon_vcvt_n_s32_v:
6519 case NEON::BI__builtin_neon_vcvt_n_u16_v:
6520 case NEON::BI__builtin_neon_vcvt_n_u32_v:
6521 case NEON::BI__builtin_neon_vcvt_n_s64_v:
6522 case NEON::BI__builtin_neon_vcvt_n_u64_v:
6523 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
6524 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
6525 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
6526 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
6527 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
6528 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
6529 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6530 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6531 return EmitNeonCall(F, Ops, "vcvt_n");
6532 }
6533 case NEON::BI__builtin_neon_vcvt_s32_v:
6534 case NEON::BI__builtin_neon_vcvt_u32_v:
6535 case NEON::BI__builtin_neon_vcvt_s64_v:
6536 case NEON::BI__builtin_neon_vcvt_u64_v:
6537 case NEON::BI__builtin_neon_vcvt_s16_v:
6538 case NEON::BI__builtin_neon_vcvt_u16_v:
6539 case NEON::BI__builtin_neon_vcvtq_s32_v:
6540 case NEON::BI__builtin_neon_vcvtq_u32_v:
6541 case NEON::BI__builtin_neon_vcvtq_s64_v:
6542 case NEON::BI__builtin_neon_vcvtq_u64_v:
6543 case NEON::BI__builtin_neon_vcvtq_s16_v:
6544 case NEON::BI__builtin_neon_vcvtq_u16_v: {
6545 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
6546 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
6547 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
6548 }
6549 case NEON::BI__builtin_neon_vcvta_s16_v:
6550 case NEON::BI__builtin_neon_vcvta_s32_v:
6551 case NEON::BI__builtin_neon_vcvta_s64_v:
6552 case NEON::BI__builtin_neon_vcvta_u16_v:
6553 case NEON::BI__builtin_neon_vcvta_u32_v:
6554 case NEON::BI__builtin_neon_vcvta_u64_v:
6555 case NEON::BI__builtin_neon_vcvtaq_s16_v:
6556 case NEON::BI__builtin_neon_vcvtaq_s32_v:
6557 case NEON::BI__builtin_neon_vcvtaq_s64_v:
6558 case NEON::BI__builtin_neon_vcvtaq_u16_v:
6559 case NEON::BI__builtin_neon_vcvtaq_u32_v:
6560 case NEON::BI__builtin_neon_vcvtaq_u64_v:
6561 case NEON::BI__builtin_neon_vcvtn_s16_v:
6562 case NEON::BI__builtin_neon_vcvtn_s32_v:
6563 case NEON::BI__builtin_neon_vcvtn_s64_v:
6564 case NEON::BI__builtin_neon_vcvtn_u16_v:
6565 case NEON::BI__builtin_neon_vcvtn_u32_v:
6566 case NEON::BI__builtin_neon_vcvtn_u64_v:
6567 case NEON::BI__builtin_neon_vcvtnq_s16_v:
6568 case NEON::BI__builtin_neon_vcvtnq_s32_v:
6569 case NEON::BI__builtin_neon_vcvtnq_s64_v:
6570 case NEON::BI__builtin_neon_vcvtnq_u16_v:
6571 case NEON::BI__builtin_neon_vcvtnq_u32_v:
6572 case NEON::BI__builtin_neon_vcvtnq_u64_v:
6573 case NEON::BI__builtin_neon_vcvtp_s16_v:
6574 case NEON::BI__builtin_neon_vcvtp_s32_v:
6575 case NEON::BI__builtin_neon_vcvtp_s64_v:
6576 case NEON::BI__builtin_neon_vcvtp_u16_v:
6577 case NEON::BI__builtin_neon_vcvtp_u32_v:
6578 case NEON::BI__builtin_neon_vcvtp_u64_v:
6579 case NEON::BI__builtin_neon_vcvtpq_s16_v:
6580 case NEON::BI__builtin_neon_vcvtpq_s32_v:
6581 case NEON::BI__builtin_neon_vcvtpq_s64_v:
6582 case NEON::BI__builtin_neon_vcvtpq_u16_v:
6583 case NEON::BI__builtin_neon_vcvtpq_u32_v:
6584 case NEON::BI__builtin_neon_vcvtpq_u64_v:
6585 case NEON::BI__builtin_neon_vcvtm_s16_v:
6586 case NEON::BI__builtin_neon_vcvtm_s32_v:
6587 case NEON::BI__builtin_neon_vcvtm_s64_v:
6588 case NEON::BI__builtin_neon_vcvtm_u16_v:
6589 case NEON::BI__builtin_neon_vcvtm_u32_v:
6590 case NEON::BI__builtin_neon_vcvtm_u64_v:
6591 case NEON::BI__builtin_neon_vcvtmq_s16_v:
6592 case NEON::BI__builtin_neon_vcvtmq_s32_v:
6593 case NEON::BI__builtin_neon_vcvtmq_s64_v:
6594 case NEON::BI__builtin_neon_vcvtmq_u16_v:
6595 case NEON::BI__builtin_neon_vcvtmq_u32_v:
6596 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
6597 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6598 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6599 }
6600 case NEON::BI__builtin_neon_vcvtx_f32_v: {
6601 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
6602 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6603
6604 }
6605 case NEON::BI__builtin_neon_vext_v:
6606 case NEON::BI__builtin_neon_vextq_v: {
6607 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
6608 SmallVector<int, 16> Indices;
6609 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6610 Indices.push_back(i+CV);
6611
6612 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6613 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6614 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
6615 }
6616 case NEON::BI__builtin_neon_vfma_v:
6617 case NEON::BI__builtin_neon_vfmaq_v: {
6618 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6619 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6620 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6621
6622 // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
6623 return emitCallMaybeConstrainedFPBuiltin(
6624 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
6625 {Ops[1], Ops[2], Ops[0]});
6626 }
6627 case NEON::BI__builtin_neon_vld1_v:
6628 case NEON::BI__builtin_neon_vld1q_v: {
6629 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6630 Ops.push_back(getAlignmentValue32(PtrOp0));
6631 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
6632 }
6633 case NEON::BI__builtin_neon_vld1_x2_v:
6634 case NEON::BI__builtin_neon_vld1q_x2_v:
6635 case NEON::BI__builtin_neon_vld1_x3_v:
6636 case NEON::BI__builtin_neon_vld1q_x3_v:
6637 case NEON::BI__builtin_neon_vld1_x4_v:
6638 case NEON::BI__builtin_neon_vld1q_x4_v: {
6639 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6640 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
6641 llvm::Type *Tys[2] = { VTy, PTy };
6642 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6643 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
6644 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6645 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6646 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6647 }
6648 case NEON::BI__builtin_neon_vld2_v:
6649 case NEON::BI__builtin_neon_vld2q_v:
6650 case NEON::BI__builtin_neon_vld3_v:
6651 case NEON::BI__builtin_neon_vld3q_v:
6652 case NEON::BI__builtin_neon_vld4_v:
6653 case NEON::BI__builtin_neon_vld4q_v:
6654 case NEON::BI__builtin_neon_vld2_dup_v:
6655 case NEON::BI__builtin_neon_vld2q_dup_v:
6656 case NEON::BI__builtin_neon_vld3_dup_v:
6657 case NEON::BI__builtin_neon_vld3q_dup_v:
6658 case NEON::BI__builtin_neon_vld4_dup_v:
6659 case NEON::BI__builtin_neon_vld4q_dup_v: {
6660 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6661 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6662 Value *Align = getAlignmentValue32(PtrOp1);
6663 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
6664 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6665 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6666 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6667 }
6668 case NEON::BI__builtin_neon_vld1_dup_v:
6669 case NEON::BI__builtin_neon_vld1q_dup_v: {
6670 Value *V = UndefValue::get(Ty);
6671 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
6672 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
6673 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
6674 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
6675 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
6676 return EmitNeonSplat(Ops[0], CI);
6677 }
6678 case NEON::BI__builtin_neon_vld2_lane_v:
6679 case NEON::BI__builtin_neon_vld2q_lane_v:
6680 case NEON::BI__builtin_neon_vld3_lane_v:
6681 case NEON::BI__builtin_neon_vld3q_lane_v:
6682 case NEON::BI__builtin_neon_vld4_lane_v:
6683 case NEON::BI__builtin_neon_vld4q_lane_v: {
6684 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6685 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6686 for (unsigned I = 2; I < Ops.size() - 1; ++I)
6687 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
6688 Ops.push_back(getAlignmentValue32(PtrOp1));
6689 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
6690 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6691 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6692 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6693 }
6694 case NEON::BI__builtin_neon_vmovl_v: {
6695 llvm::FixedVectorType *DTy =
6696 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6697 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
6698 if (Usgn)
6699 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
6700 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
6701 }
6702 case NEON::BI__builtin_neon_vmovn_v: {
6703 llvm::FixedVectorType *QTy =
6704 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6705 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
6706 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
6707 }
6708 case NEON::BI__builtin_neon_vmull_v:
6709 // FIXME: the integer vmull operations could be emitted in terms of pure
6710 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
6711 // hoisting the exts outside loops. Until global ISel comes along that can
6712 // see through such movement, this leads to bad CodeGen. So we need an
6713 // intrinsic for now.
6714 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
6715 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
6716 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
6717 case NEON::BI__builtin_neon_vpadal_v:
6718 case NEON::BI__builtin_neon_vpadalq_v: {
6719 // The source operand type has twice as many elements, each half the size.
6720 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6721 llvm::Type *EltTy =
6722 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6723 auto *NarrowTy =
6724 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6725 llvm::Type *Tys[2] = { Ty, NarrowTy };
6726 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6727 }
6728 case NEON::BI__builtin_neon_vpaddl_v:
6729 case NEON::BI__builtin_neon_vpaddlq_v: {
6730 // The source operand type has twice as many elements, each half the size.
6731 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6732 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6733 auto *NarrowTy =
6734 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6735 llvm::Type *Tys[2] = { Ty, NarrowTy };
6736 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
6737 }
6738 case NEON::BI__builtin_neon_vqdmlal_v:
6739 case NEON::BI__builtin_neon_vqdmlsl_v: {
6740 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
6741 Ops[1] =
6742 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
6743 Ops.resize(2);
6744 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
6745 }
6746 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
6747 case NEON::BI__builtin_neon_vqdmulh_lane_v:
6748 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
6749 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
6750 auto *RTy = cast<llvm::FixedVectorType>(Ty);
6751 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
6752 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
6753 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
6754 RTy->getNumElements() * 2);
6755 llvm::Type *Tys[2] = {
6756 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6757 /*isQuad*/ false))};
6758 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6759 }
6760 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6761 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6762 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6763 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6764 llvm::Type *Tys[2] = {
6765 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6766 /*isQuad*/ true))};
6767 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6768 }
6769 case NEON::BI__builtin_neon_vqshl_n_v:
6770 case NEON::BI__builtin_neon_vqshlq_n_v:
6771 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6772 1, false);
6773 case NEON::BI__builtin_neon_vqshlu_n_v:
6774 case NEON::BI__builtin_neon_vqshluq_n_v:
6775 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6776 1, false);
6777 case NEON::BI__builtin_neon_vrecpe_v:
6778 case NEON::BI__builtin_neon_vrecpeq_v:
6779 case NEON::BI__builtin_neon_vrsqrte_v:
6780 case NEON::BI__builtin_neon_vrsqrteq_v:
6781 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6782 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6783 case NEON::BI__builtin_neon_vrndi_v:
6784 case NEON::BI__builtin_neon_vrndiq_v:
6785 Int = Builder.getIsFPConstrained()
6786 ? Intrinsic::experimental_constrained_nearbyint
6787 : Intrinsic::nearbyint;
6788 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6789 case NEON::BI__builtin_neon_vrshr_n_v:
6790 case NEON::BI__builtin_neon_vrshrq_n_v:
6791 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6792 1, true);
6793 case NEON::BI__builtin_neon_vsha512hq_v:
6794 case NEON::BI__builtin_neon_vsha512h2q_v:
6795 case NEON::BI__builtin_neon_vsha512su0q_v:
6796 case NEON::BI__builtin_neon_vsha512su1q_v: {
6797 Function *F = CGM.getIntrinsic(Int);
6798 return EmitNeonCall(F, Ops, "");
6799 }
6800 case NEON::BI__builtin_neon_vshl_n_v:
6801 case NEON::BI__builtin_neon_vshlq_n_v:
6802 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6803 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6804 "vshl_n");
6805 case NEON::BI__builtin_neon_vshll_n_v: {
6806 llvm::FixedVectorType *SrcTy =
6807 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6808 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6809 if (Usgn)
6810 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6811 else
6812 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6813 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6814 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6815 }
6816 case NEON::BI__builtin_neon_vshrn_n_v: {
6817 llvm::FixedVectorType *SrcTy =
6818 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6819 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6820 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6821 if (Usgn)
6822 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6823 else
6824 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6825 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6826 }
6827 case NEON::BI__builtin_neon_vshr_n_v:
6828 case NEON::BI__builtin_neon_vshrq_n_v:
6829 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6830 case NEON::BI__builtin_neon_vst1_v:
6831 case NEON::BI__builtin_neon_vst1q_v:
6832 case NEON::BI__builtin_neon_vst2_v:
6833 case NEON::BI__builtin_neon_vst2q_v:
6834 case NEON::BI__builtin_neon_vst3_v:
6835 case NEON::BI__builtin_neon_vst3q_v:
6836 case NEON::BI__builtin_neon_vst4_v:
6837 case NEON::BI__builtin_neon_vst4q_v:
6838 case NEON::BI__builtin_neon_vst2_lane_v:
6839 case NEON::BI__builtin_neon_vst2q_lane_v:
6840 case NEON::BI__builtin_neon_vst3_lane_v:
6841 case NEON::BI__builtin_neon_vst3q_lane_v:
6842 case NEON::BI__builtin_neon_vst4_lane_v:
6843 case NEON::BI__builtin_neon_vst4q_lane_v: {
6844 llvm::Type *Tys[] = {Int8PtrTy, Ty};
6845 Ops.push_back(getAlignmentValue32(PtrOp0));
6846 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
6847 }
6848 case NEON::BI__builtin_neon_vsm3partw1q_v:
6849 case NEON::BI__builtin_neon_vsm3partw2q_v:
6850 case NEON::BI__builtin_neon_vsm3ss1q_v:
6851 case NEON::BI__builtin_neon_vsm4ekeyq_v:
6852 case NEON::BI__builtin_neon_vsm4eq_v: {
6853 Function *F = CGM.getIntrinsic(Int);
6854 return EmitNeonCall(F, Ops, "");
6855 }
6856 case NEON::BI__builtin_neon_vsm3tt1aq_v:
6857 case NEON::BI__builtin_neon_vsm3tt1bq_v:
6858 case NEON::BI__builtin_neon_vsm3tt2aq_v:
6859 case NEON::BI__builtin_neon_vsm3tt2bq_v: {
6860 Function *F = CGM.getIntrinsic(Int);
6861 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
6862 return EmitNeonCall(F, Ops, "");
6863 }
6864 case NEON::BI__builtin_neon_vst1_x2_v:
6865 case NEON::BI__builtin_neon_vst1q_x2_v:
6866 case NEON::BI__builtin_neon_vst1_x3_v:
6867 case NEON::BI__builtin_neon_vst1q_x3_v:
6868 case NEON::BI__builtin_neon_vst1_x4_v:
6869 case NEON::BI__builtin_neon_vst1q_x4_v: {
6870 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6871 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6872 // in AArch64 it comes last. We may want to stick to one or the other.
6873 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
6874 Arch == llvm::Triple::aarch64_32) {
6875 llvm::Type *Tys[2] = { VTy, PTy };
6876 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
6877 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6878 }
6879 llvm::Type *Tys[2] = { PTy, VTy };
6880 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6881 }
6882 case NEON::BI__builtin_neon_vsubhn_v: {
6883 llvm::FixedVectorType *SrcTy =
6884 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6885
6886 // %diff = sub <4 x i32> %lhs, %rhs
6887 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6888 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6889 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
6890
6891 // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
6892 Constant *ShiftAmt =
6893 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6894 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
6895
6896 // %res = trunc <4 x i32> %high to <4 x i16>
6897 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
6898 }
6899 case NEON::BI__builtin_neon_vtrn_v:
6900 case NEON::BI__builtin_neon_vtrnq_v: {
6901 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6902 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6903 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6904 Value *SV = nullptr;
6905
6906 for (unsigned vi = 0; vi != 2; ++vi) {
6907 SmallVector<int, 16> Indices;
6908 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6909 Indices.push_back(i+vi);
6910 Indices.push_back(i+e+vi);
6911 }
6912 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6913 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
6914 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6915 }
6916 return SV;
6917 }
6918 case NEON::BI__builtin_neon_vtst_v:
6919 case NEON::BI__builtin_neon_vtstq_v: {
6920 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6921 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6922 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
6923 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
6924 ConstantAggregateZero::get(Ty));
6925 return Builder.CreateSExt(Ops[0], Ty, "vtst");
6926 }
6927 case NEON::BI__builtin_neon_vuzp_v:
6928 case NEON::BI__builtin_neon_vuzpq_v: {
6929 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6930 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6931 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6932 Value *SV = nullptr;
6933
6934 for (unsigned vi = 0; vi != 2; ++vi) {
6935 SmallVector<int, 16> Indices;
6936 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6937 Indices.push_back(2*i+vi);
6938
6939 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6940 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
6941 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6942 }
6943 return SV;
6944 }
6945 case NEON::BI__builtin_neon_vxarq_v: {
6946 Function *F = CGM.getIntrinsic(Int);
6947 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
6948 return EmitNeonCall(F, Ops, "");
6949 }
6950 case NEON::BI__builtin_neon_vzip_v:
6951 case NEON::BI__builtin_neon_vzipq_v: {
6952 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6953 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6954 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6955 Value *SV = nullptr;
6956
6957 for (unsigned vi = 0; vi != 2; ++vi) {
6958 SmallVector<int, 16> Indices;
6959 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6960 Indices.push_back((i + vi*e) >> 1);
6961 Indices.push_back(((i + vi*e) >> 1)+e);
6962 }
6963 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6964 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6965 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6966 }
6967 return SV;
6968 }
6969 case NEON::BI__builtin_neon_vdot_v:
6970 case NEON::BI__builtin_neon_vdotq_v: {
6971 auto *InputTy =
6972 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6973 llvm::Type *Tys[2] = { Ty, InputTy };
6974 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6975 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6976 }
6977 case NEON::BI__builtin_neon_vfmlal_low_v:
6978 case NEON::BI__builtin_neon_vfmlalq_low_v: {
6979 auto *InputTy =
6980 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6981 llvm::Type *Tys[2] = { Ty, InputTy };
6982 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6983 }
6984 case NEON::BI__builtin_neon_vfmlsl_low_v:
6985 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6986 auto *InputTy =
6987 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6988 llvm::Type *Tys[2] = { Ty, InputTy };
6989 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6990 }
6991 case NEON::BI__builtin_neon_vfmlal_high_v:
6992 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6993 auto *InputTy =
6994 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6995 llvm::Type *Tys[2] = { Ty, InputTy };
6996 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6997 }
6998 case NEON::BI__builtin_neon_vfmlsl_high_v:
6999 case NEON::BI__builtin_neon_vfmlslq_high_v: {
7000 auto *InputTy =
7001 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7002 llvm::Type *Tys[2] = { Ty, InputTy };
7003 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
7004 }
7005 case NEON::BI__builtin_neon_vmmlaq_v: {
7006 auto *InputTy =
7007 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7008 llvm::Type *Tys[2] = { Ty, InputTy };
7009 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7010 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
7011 }
7012 case NEON::BI__builtin_neon_vusmmlaq_v: {
7013 auto *InputTy =
7014 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7015 llvm::Type *Tys[2] = { Ty, InputTy };
7016 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
7017 }
7018 case NEON::BI__builtin_neon_vusdot_v:
7019 case NEON::BI__builtin_neon_vusdotq_v: {
7020 auto *InputTy =
7021 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7022 llvm::Type *Tys[2] = { Ty, InputTy };
7023 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
7024 }
7025 case NEON::BI__builtin_neon_vbfdot_v:
7026 case NEON::BI__builtin_neon_vbfdotq_v: {
7027 llvm::Type *InputTy =
7028 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
7029 llvm::Type *Tys[2] = { Ty, InputTy };
7030 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
7031 }
7032 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
7033 llvm::Type *Tys[1] = { Ty };
7034 Function *F = CGM.getIntrinsic(Int, Tys);
7035 return EmitNeonCall(F, Ops, "vcvtfp2bf");
7036 }
7037
7038 }
7039
7040 assert(Int && "Expected valid intrinsic number");
7041
7042 // Determine the type(s) of this overloaded AArch64 intrinsic.
7043 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
7044
7045 Value *Result = EmitNeonCall(F, Ops, NameHint);
7046 llvm::Type *ResultType = ConvertType(E->getType());
7047 // Cast the AArch64 intrinsic's one-element vector result to the
7048 // scalar type expected by the builtin.
7049 return Builder.CreateBitCast(Result, ResultType, NameHint);
7050}
7051
7052Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
7053 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
7054 const CmpInst::Predicate Ip, const Twine &Name) {
7055 llvm::Type *OTy = Op->getType();
7056
7057 // FIXME: this is utterly horrific. We should not be looking at previous
7058 // codegen context to find out what needs doing. Unfortunately TableGen
7059 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
7060 // (etc).
7061 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
7062 OTy = BI->getOperand(0)->getType();
7063
7064 Op = Builder.CreateBitCast(Op, OTy);
7065 if (OTy->getScalarType()->isFloatingPointTy()) {
7066 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
7067 } else {
7068 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
7069 }
7070 return Builder.CreateSExt(Op, Ty, Name);
7071}
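So a builtin such as vceqz_f32 lowers to an ordered floating-point compare against zero followed by a sign-extension of the i1 mask back to the integer vector type, roughly (illustrative IR only):

//   %cmp  = fcmp oeq <2 x float> %a, zeroinitializer
//   %mask = sext <2 x i1> %cmp to <2 x i32>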
7072
7073static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
7074 Value *ExtOp, Value *IndexOp,
7075 llvm::Type *ResTy, unsigned IntID,
7076 const char *Name) {
7077 SmallVector<Value *, 2> TblOps;
7078 if (ExtOp)
7079 TblOps.push_back(ExtOp);
7080
7081 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
7082 SmallVector<int, 16> Indices;
7083 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
7084 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
7085 Indices.push_back(2*i);
7086 Indices.push_back(2*i+1);
7087 }
7088
7089 int PairPos = 0, End = Ops.size() - 1;
7090 while (PairPos < End) {
7091 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7092 Ops[PairPos+1], Indices,
7093 Name));
7094 PairPos += 2;
7095 }
7096
7097 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
7098 // of the 128-bit lookup table with zero.
7099 if (PairPos == End) {
7100 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
7101 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7102 ZeroTbl, Indices, Name));
7103 }
7104
7105 Function *TblF;
7106 TblOps.push_back(IndexOp);
7107 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
7108
7109 return CGF.EmitNeonCall(TblF, TblOps, Name);
7110}
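In short, pairs of 64-bit table registers are concatenated into 128-bit tables (zero-filled on top when the count is odd), the optional extension operand leads the argument list, the index vector comes last, and a single tbl/tbx intrinsic call is emitted. The packing shuffle for a two-register lookup is roughly (illustrative IR; the mask is simply 0..15):

//   %tbl = shufflevector <8 x i8> %t0, <8 x i8> %t1,
//                        <16 x i32> <i32 0, i32 1, ..., i32 15>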
7111
7112Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
7113 unsigned Value;
7114 switch (BuiltinID) {
7115 default:
7116 return nullptr;
7117 case ARM::BI__builtin_arm_nop:
7118 Value = 0;
7119 break;
7120 case ARM::BI__builtin_arm_yield:
7121 case ARM::BI__yield:
7122 Value = 1;
7123 break;
7124 case ARM::BI__builtin_arm_wfe:
7125 case ARM::BI__wfe:
7126 Value = 2;
7127 break;
7128 case ARM::BI__builtin_arm_wfi:
7129 case ARM::BI__wfi:
7130 Value = 3;
7131 break;
7132 case ARM::BI__builtin_arm_sev:
7133 case ARM::BI__sev:
7134 Value = 4;
7135 break;
7136 case ARM::BI__builtin_arm_sevl:
7137 case ARM::BI__sevl:
7138 Value = 5;
7139 break;
7140 }
7141
7142 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
7143 llvm::ConstantInt::get(Int32Ty, Value));
7144}
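For example, __builtin_arm_wfi (or __wfi) maps to hint value 3, so the emitted IR is a single intrinsic call, roughly:

//   call void @llvm.arm.hint(i32 3)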
7145
7146enum SpecialRegisterAccessKind {
7147 NormalRead,
7148 VolatileRead,
7149 Write,
7150};
7151
7152 // Generates the IR for the read/write special register builtin.
7153 // ValueType is the type of the value that is to be written or read;
7154 // RegisterType is the type of the register being written to or read from.
7155static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
7156 const CallExpr *E,
7157 llvm::Type *RegisterType,
7158 llvm::Type *ValueType,
7159 SpecialRegisterAccessKind AccessKind,
7160 StringRef SysReg = "") {
7161 // Read and write register intrinsics only support 32- and 64-bit operations.
7162 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
7163 && "Unsupported size for register.");
7164
7165 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7166 CodeGen::CodeGenModule &CGM = CGF.CGM;
7167 LLVMContext &Context = CGM.getLLVMContext();
7168
7169 if (SysReg.empty()) {
7170 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
7171 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
7172 }
7173
7174 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
7175 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7176 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7177
7178 llvm::Type *Types[] = { RegisterType };
7179
7180 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
7181 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7182 && "Can't fit 64-bit value in 32-bit register");
7183
7184 if (AccessKind != Write) {
7185 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7186 llvm::Function *F = CGM.getIntrinsic(
7187 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
7188 : llvm::Intrinsic::read_register,
7189 Types);
7190 llvm::Value *Call = Builder.CreateCall(F, Metadata);
7191
7192 if (MixedTypes)
7193 // Read into 64 bit register and then truncate result to 32 bit.
7194 return Builder.CreateTrunc(Call, ValueType);
7195
7196 if (ValueType->isPointerTy())
7197 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
7198 return Builder.CreateIntToPtr(Call, ValueType);
7199
7200 return Call;
7201 }
7202
7203 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7204 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
7205 if (MixedTypes) {
7206 // Extend 32 bit write value to 64 bit to pass to write.
7207 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
7208 return Builder.CreateCall(F, { Metadata, ArgValue });
7209 }
7210
7211 if (ValueType->isPointerTy()) {
7212 // Have VoidPtrTy ArgValue but want to return an i32/i64.
7213 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
7214 return Builder.CreateCall(F, { Metadata, ArgValue });
7215 }
7216
7217 return Builder.CreateCall(F, { Metadata, ArgValue });
7218}
7219
7220/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
7221/// argument that specifies the vector type.
7222static bool HasExtraNeonArgument(unsigned BuiltinID) {
7223 switch (BuiltinID) {
7224 default: break;
7225 case NEON::BI__builtin_neon_vget_lane_i8:
7226 case NEON::BI__builtin_neon_vget_lane_i16:
7227 case NEON::BI__builtin_neon_vget_lane_bf16:
7228 case NEON::BI__builtin_neon_vget_lane_i32:
7229 case NEON::BI__builtin_neon_vget_lane_i64:
7230 case NEON::BI__builtin_neon_vget_lane_f32:
7231 case NEON::BI__builtin_neon_vgetq_lane_i8:
7232 case NEON::BI__builtin_neon_vgetq_lane_i16:
7233 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7234 case NEON::BI__builtin_neon_vgetq_lane_i32:
7235 case NEON::BI__builtin_neon_vgetq_lane_i64:
7236 case NEON::BI__builtin_neon_vgetq_lane_f32:
7237 case NEON::BI__builtin_neon_vduph_lane_bf16:
7238 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7239 case NEON::BI__builtin_neon_vset_lane_i8:
7240 case NEON::BI__builtin_neon_vset_lane_i16:
7241 case NEON::BI__builtin_neon_vset_lane_bf16:
7242 case NEON::BI__builtin_neon_vset_lane_i32:
7243 case NEON::BI__builtin_neon_vset_lane_i64:
7244 case NEON::BI__builtin_neon_vset_lane_f32:
7245 case NEON::BI__builtin_neon_vsetq_lane_i8:
7246 case NEON::BI__builtin_neon_vsetq_lane_i16:
7247 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7248 case NEON::BI__builtin_neon_vsetq_lane_i32:
7249 case NEON::BI__builtin_neon_vsetq_lane_i64:
7250 case NEON::BI__builtin_neon_vsetq_lane_f32:
7251 case NEON::BI__builtin_neon_vsha1h_u32:
7252 case NEON::BI__builtin_neon_vsha1cq_u32:
7253 case NEON::BI__builtin_neon_vsha1pq_u32:
7254 case NEON::BI__builtin_neon_vsha1mq_u32:
7255 case NEON::BI__builtin_neon_vcvth_bf16_f32:
7256 case clang::ARM::BI_MoveToCoprocessor:
7257 case clang::ARM::BI_MoveToCoprocessor2:
7258 return false;
7259 }
7260 return true;
7261}
7262
7263Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
7264 const CallExpr *E,
7265 ReturnValueSlot ReturnValue,
7266 llvm::Triple::ArchType Arch) {
7267 if (auto Hint = GetValueForARMHint(BuiltinID))
7268 return Hint;
7269
7270 if (BuiltinID == ARM::BI__emit) {
7271 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
7272 llvm::FunctionType *FTy =
7273 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
7274
7275 Expr::EvalResult Result;
7276 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7277 llvm_unreachable("Sema will ensure that the parameter is constant")::llvm::llvm_unreachable_internal("Sema will ensure that the parameter is constant"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 7277)
;
7278
7279 llvm::APSInt Value = Result.Val.getInt();
7280 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
7281
7282 llvm::InlineAsm *Emit =
7283 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
7284 /*hasSideEffects=*/true)
7285 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
7286 /*hasSideEffects=*/true);
7287
7288 return Builder.CreateCall(Emit);
7289 }
7290
7291 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
7292 Value *Option = EmitScalarExpr(E->getArg(0));
7293 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
7294 }
7295
7296 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
7297 Value *Address = EmitScalarExpr(E->getArg(0));
7298 Value *RW = EmitScalarExpr(E->getArg(1));
7299 Value *IsData = EmitScalarExpr(E->getArg(2));
7300
7301 // Locality is not supported on the ARM target.
7302 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
7303
7304 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
7305 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7306 }
7307
7308 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
7309 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7310 return Builder.CreateCall(
7311 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7312 }
7313
7314 if (BuiltinID == ARM::BI__builtin_arm_cls) {
7315 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7316 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
7317 }
7318 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
7319 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7320 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
7321 "cls");
7322 }
7323
7324 if (BuiltinID == ARM::BI__clear_cache) {
7325 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7326 const FunctionDecl *FD = E->getDirectCallee();
7327 Value *Ops[2];
7328 for (unsigned i = 0; i < 2; i++)
7329 Ops[i] = EmitScalarExpr(E->getArg(i));
7330 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7331 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7332 StringRef Name = FD->getName();
7333 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7334 }
7335
7336 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
7337 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
7338 Function *F;
7339
7340 switch (BuiltinID) {
7341 default: llvm_unreachable("unexpected builtin");
7342 case ARM::BI__builtin_arm_mcrr:
7343 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
7344 break;
7345 case ARM::BI__builtin_arm_mcrr2:
7346 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
7347 break;
7348 }
7349
7350 // MCRR{2} instruction has 5 operands but
7351 // the intrinsic has 4 because Rt and Rt2
7352 // are represented as a single unsigned 64
7353 // bit integer in the intrinsic definition
7354 // but internally it's represented as 2 32
7355 // bit integers.
7356
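 // Illustrative example: if RtAndRt2 == 0x1122334455667788, the code below
 // produces Rt == 0x55667788 (low half) and Rt2 == 0x11223344 (high half).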
7357 Value *Coproc = EmitScalarExpr(E->getArg(0));
7358 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7359 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
7360 Value *CRm = EmitScalarExpr(E->getArg(3));
7361
7362 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7363 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
7364 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
7365 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
7366
7367 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
7368 }
7369
7370 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
7371 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
7372 Function *F;
7373
7374 switch (BuiltinID) {
7375 default: llvm_unreachable("unexpected builtin");
7376 case ARM::BI__builtin_arm_mrrc:
7377 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
7378 break;
7379 case ARM::BI__builtin_arm_mrrc2:
7380 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
7381 break;
7382 }
7383
7384 Value *Coproc = EmitScalarExpr(E->getArg(0));
7385 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7386 Value *CRm = EmitScalarExpr(E->getArg(2));
7387 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
7388
7389 // Returns an unsigned 64 bit integer, represented
7390 // as two 32 bit integers.
7391
7392 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
7393 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
7394 Rt = Builder.CreateZExt(Rt, Int64Ty);
7395 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
7396
7397 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
7398 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
7399 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
7400
7401 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
7402 }
7403
7404 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
7405 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
7406 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
7407 getContext().getTypeSize(E->getType()) == 64) ||
7408 BuiltinID == ARM::BI__ldrexd) {
7409 Function *F;
7410
7411 switch (BuiltinID) {
7412 default: llvm_unreachable("unexpected builtin");
7413 case ARM::BI__builtin_arm_ldaex:
7414 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
7415 break;
7416 case ARM::BI__builtin_arm_ldrexd:
7417 case ARM::BI__builtin_arm_ldrex:
7418 case ARM::BI__ldrexd:
7419 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
7420 break;
7421 }
7422
7423 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7424 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7425 "ldrexd");
7426
7427 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7428 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7429 Val0 = Builder.CreateZExt(Val0, Int64Ty);
7430 Val1 = Builder.CreateZExt(Val1, Int64Ty);
7431
7432 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
7433 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7434 Val = Builder.CreateOr(Val, Val1);
7435 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7436 }
7437
7438 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
7439 BuiltinID == ARM::BI__builtin_arm_ldaex) {
7440 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7441
7442 QualType Ty = E->getType();
7443 llvm::Type *RealResTy = ConvertType(Ty);
7444 llvm::Type *PtrTy = llvm::IntegerType::get(
7445 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7446 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7447
7448 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
7449 ? Intrinsic::arm_ldaex
7450 : Intrinsic::arm_ldrex,
7451 PtrTy);
7452 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
7453
7454 if (RealResTy->isPointerTy())
7455 return Builder.CreateIntToPtr(Val, RealResTy);
7456 else {
7457 llvm::Type *IntResTy = llvm::IntegerType::get(
7458 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7459 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7460 return Builder.CreateBitCast(Val, RealResTy);
7461 }
7462 }
7463
7464 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
7465 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
7466 BuiltinID == ARM::BI__builtin_arm_strex) &&
7467 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
7468 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7469 ? Intrinsic::arm_stlexd
7470 : Intrinsic::arm_strexd);
7471 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
7472
7473 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7474 Value *Val = EmitScalarExpr(E->getArg(0));
7475 Builder.CreateStore(Val, Tmp);
7476
7477 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
7478 Val = Builder.CreateLoad(LdPtr);
7479
7480 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7481 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7482 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
7483 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
7484 }
7485
7486 if (BuiltinID == ARM::BI__builtin_arm_strex ||
7487 BuiltinID == ARM::BI__builtin_arm_stlex) {
7488 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7489 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7490
7491 QualType Ty = E->getArg(0)->getType();
7492 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7493 getContext().getTypeSize(Ty));
7494 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7495
7496 if (StoreVal->getType()->isPointerTy())
7497 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
7498 else {
7499 llvm::Type *IntTy = llvm::IntegerType::get(
7500 getLLVMContext(),
7501 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7502 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7503 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
7504 }
7505
7506 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7507 ? Intrinsic::arm_stlex
7508 : Intrinsic::arm_strex,
7509 StoreAddr->getType());
7510 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
7511 }
7512
7513 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
7514 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
7515 return Builder.CreateCall(F);
7516 }
7517
7518 // CRC32
7519 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7520 switch (BuiltinID) {
7521 case ARM::BI__builtin_arm_crc32b:
7522 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
7523 case ARM::BI__builtin_arm_crc32cb:
7524 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
7525 case ARM::BI__builtin_arm_crc32h:
7526 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
7527 case ARM::BI__builtin_arm_crc32ch:
7528 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
7529 case ARM::BI__builtin_arm_crc32w:
7530 case ARM::BI__builtin_arm_crc32d:
7531 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
7532 case ARM::BI__builtin_arm_crc32cw:
7533 case ARM::BI__builtin_arm_crc32cd:
7534 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
7535 }
7536
7537 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7538 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7539 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7540
7541 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
7542 // intrinsics, hence we need different codegen for these cases.
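 // E.g. __builtin_arm_crc32d(crc, data) is emitted as two chained crc32w
 // calls: first over the low 32 bits of data, then over the high 32 bits.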
7543 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
7544 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
7545 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7546 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
7547 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
7548 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
7549
7550 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7551 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
7552 return Builder.CreateCall(F, {Res, Arg1b});
7553 } else {
7554 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
7555
7556 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7557 return Builder.CreateCall(F, {Arg0, Arg1});
7558 }
7559 }
7560
7561 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7562 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7563 BuiltinID == ARM::BI__builtin_arm_rsrp ||
7564 BuiltinID == ARM::BI__builtin_arm_wsr ||
7565 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
7566 BuiltinID == ARM::BI__builtin_arm_wsrp) {
7567
7568 SpecialRegisterAccessKind AccessKind = Write;
7569 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7570 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7571 BuiltinID == ARM::BI__builtin_arm_rsrp)
7572 AccessKind = VolatileRead;
7573
7574 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
7575 BuiltinID == ARM::BI__builtin_arm_wsrp;
7576
7577 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7578 BuiltinID == ARM::BI__builtin_arm_wsr64;
7579
7580 llvm::Type *ValueType;
7581 llvm::Type *RegisterType;
7582 if (IsPointerBuiltin) {
7583 ValueType = VoidPtrTy;
7584 RegisterType = Int32Ty;
7585 } else if (Is64Bit) {
7586 ValueType = RegisterType = Int64Ty;
7587 } else {
7588 ValueType = RegisterType = Int32Ty;
7589 }
7590
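 // E.g. __builtin_arm_rsr with a register name string such as "cpsr"
 // (illustrative) takes the VolatileRead/Int32Ty path here and is lowered by
 // the helper to roughly: call i32 @llvm.read_volatile_register.i32(metadata).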
7591 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
7592 AccessKind);
7593 }
7594
7595 // Handle MSVC intrinsics before argument evaluation to prevent double
7596 // evaluation.
7597 if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
7598 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
7599
7600 // Deal with MVE builtins
7601 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7602 return Result;
7603 // Handle CDE builtins
7604 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7605 return Result;
7606
7607 // Find out if any arguments are required to be integer constant
7608 // expressions.
7609 unsigned ICEArguments = 0;
7610 ASTContext::GetBuiltinTypeError Error;
7611 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7612 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7613
7614 auto getAlignmentValue32 = [&](Address addr) -> Value* {
7615 return Builder.getInt32(addr.getAlignment().getQuantity());
7616 };
7617
7618 Address PtrOp0 = Address::invalid();
7619 Address PtrOp1 = Address::invalid();
7620 SmallVector<Value*, 4> Ops;
7621 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
7622 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
7623 for (unsigned i = 0, e = NumArgs; i != e; i++) {
7624 if (i == 0) {
7625 switch (BuiltinID) {
7626 case NEON::BI__builtin_neon_vld1_v:
7627 case NEON::BI__builtin_neon_vld1q_v:
7628 case NEON::BI__builtin_neon_vld1q_lane_v:
7629 case NEON::BI__builtin_neon_vld1_lane_v:
7630 case NEON::BI__builtin_neon_vld1_dup_v:
7631 case NEON::BI__builtin_neon_vld1q_dup_v:
7632 case NEON::BI__builtin_neon_vst1_v:
7633 case NEON::BI__builtin_neon_vst1q_v:
7634 case NEON::BI__builtin_neon_vst1q_lane_v:
7635 case NEON::BI__builtin_neon_vst1_lane_v:
7636 case NEON::BI__builtin_neon_vst2_v:
7637 case NEON::BI__builtin_neon_vst2q_v:
7638 case NEON::BI__builtin_neon_vst2_lane_v:
7639 case NEON::BI__builtin_neon_vst2q_lane_v:
7640 case NEON::BI__builtin_neon_vst3_v:
7641 case NEON::BI__builtin_neon_vst3q_v:
7642 case NEON::BI__builtin_neon_vst3_lane_v:
7643 case NEON::BI__builtin_neon_vst3q_lane_v:
7644 case NEON::BI__builtin_neon_vst4_v:
7645 case NEON::BI__builtin_neon_vst4q_v:
7646 case NEON::BI__builtin_neon_vst4_lane_v:
7647 case NEON::BI__builtin_neon_vst4q_lane_v:
7648 // Get the alignment for the argument in addition to the value;
7649 // we'll use it later.
7650 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
7651 Ops.push_back(PtrOp0.getPointer());
7652 continue;
7653 }
7654 }
7655 if (i == 1) {
7656 switch (BuiltinID) {
7657 case NEON::BI__builtin_neon_vld2_v:
7658 case NEON::BI__builtin_neon_vld2q_v:
7659 case NEON::BI__builtin_neon_vld3_v:
7660 case NEON::BI__builtin_neon_vld3q_v:
7661 case NEON::BI__builtin_neon_vld4_v:
7662 case NEON::BI__builtin_neon_vld4q_v:
7663 case NEON::BI__builtin_neon_vld2_lane_v:
7664 case NEON::BI__builtin_neon_vld2q_lane_v:
7665 case NEON::BI__builtin_neon_vld3_lane_v:
7666 case NEON::BI__builtin_neon_vld3q_lane_v:
7667 case NEON::BI__builtin_neon_vld4_lane_v:
7668 case NEON::BI__builtin_neon_vld4q_lane_v:
7669 case NEON::BI__builtin_neon_vld2_dup_v:
7670 case NEON::BI__builtin_neon_vld2q_dup_v:
7671 case NEON::BI__builtin_neon_vld3_dup_v:
7672 case NEON::BI__builtin_neon_vld3q_dup_v:
7673 case NEON::BI__builtin_neon_vld4_dup_v:
7674 case NEON::BI__builtin_neon_vld4q_dup_v:
7675 // Get the alignment for the argument in addition to the value;
7676 // we'll use it later.
7677 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
7678 Ops.push_back(PtrOp1.getPointer());
7679 continue;
7680 }
7681 }
7682
7683 if ((ICEArguments & (1 << i)) == 0) {
7684 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7685 } else {
7686 // If this is required to be a constant, constant fold it so that we know
7687 // that the generated intrinsic gets a ConstantInt.
7688 Ops.push_back(llvm::ConstantInt::get(
7689 getLLVMContext(),
7690 *E->getArg(i)->getIntegerConstantExpr(getContext())));
7691 }
7692 }
7693
7694 switch (BuiltinID) {
7695 default: break;
7696
7697 case NEON::BI__builtin_neon_vget_lane_i8:
7698 case NEON::BI__builtin_neon_vget_lane_i16:
7699 case NEON::BI__builtin_neon_vget_lane_i32:
7700 case NEON::BI__builtin_neon_vget_lane_i64:
7701 case NEON::BI__builtin_neon_vget_lane_bf16:
7702 case NEON::BI__builtin_neon_vget_lane_f32:
7703 case NEON::BI__builtin_neon_vgetq_lane_i8:
7704 case NEON::BI__builtin_neon_vgetq_lane_i16:
7705 case NEON::BI__builtin_neon_vgetq_lane_i32:
7706 case NEON::BI__builtin_neon_vgetq_lane_i64:
7707 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7708 case NEON::BI__builtin_neon_vgetq_lane_f32:
7709 case NEON::BI__builtin_neon_vduph_lane_bf16:
7710 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7711 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
7712
7713 case NEON::BI__builtin_neon_vrndns_f32: {
7714 Value *Arg = EmitScalarExpr(E->getArg(0));
7715 llvm::Type *Tys[] = {Arg->getType()};
7716 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
7717 return Builder.CreateCall(F, {Arg}, "vrndn"); }
7718
7719 case NEON::BI__builtin_neon_vset_lane_i8:
7720 case NEON::BI__builtin_neon_vset_lane_i16:
7721 case NEON::BI__builtin_neon_vset_lane_i32:
7722 case NEON::BI__builtin_neon_vset_lane_i64:
7723 case NEON::BI__builtin_neon_vset_lane_bf16:
7724 case NEON::BI__builtin_neon_vset_lane_f32:
7725 case NEON::BI__builtin_neon_vsetq_lane_i8:
7726 case NEON::BI__builtin_neon_vsetq_lane_i16:
7727 case NEON::BI__builtin_neon_vsetq_lane_i32:
7728 case NEON::BI__builtin_neon_vsetq_lane_i64:
7729 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7730 case NEON::BI__builtin_neon_vsetq_lane_f32:
7731 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7732
7733 case NEON::BI__builtin_neon_vsha1h_u32:
7734 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
7735 "vsha1h");
7736 case NEON::BI__builtin_neon_vsha1cq_u32:
7737 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
7738 "vsha1h");
7739 case NEON::BI__builtin_neon_vsha1pq_u32:
7740 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
7741 "vsha1h");
7742 case NEON::BI__builtin_neon_vsha1mq_u32:
7743 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
7744 "vsha1h");
7745
7746 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
7747 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
7748 "vcvtbfp2bf");
7749 }
7750
7751 // The ARM _MoveToCoprocessor builtins put the input register value as
7752 // the first argument, but the LLVM intrinsic expects it as the third one.
7753 case ARM::BI_MoveToCoprocessor:
7754 case ARM::BI_MoveToCoprocessor2: {
7755 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
7756 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
7757 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
7758 Ops[3], Ops[4], Ops[5]});
7759 }
7760 }
7761
7762 // Get the last argument, which specifies the vector type.
7763 assert(HasExtraArg);
7764 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7765 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7766 if (!Result)
7767 return nullptr;
7768
7769 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7770 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7771 // Determine the overloaded type of this builtin.
7772 llvm::Type *Ty;
7773 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7774 Ty = FloatTy;
7775 else
7776 Ty = DoubleTy;
7777
7778 // Determine whether this is an unsigned conversion or not.
7779 bool usgn = Result->getZExtValue() == 1;
7780 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7781
7782 // Call the appropriate intrinsic.
7783 Function *F = CGM.getIntrinsic(Int, Ty);
7784 return Builder.CreateCall(F, Ops, "vcvtr");
7785 }
7786
7787 // Determine the type of this overloaded NEON intrinsic.
7788 NeonTypeFlags Type = Result->getZExtValue();
7789 bool usgn = Type.isUnsigned();
7790 bool rightShift = false;
7791
7792 llvm::FixedVectorType *VTy =
7793 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7794 getTarget().hasBFloat16Type());
7795 llvm::Type *Ty = VTy;
7796 if (!Ty)
7797 return nullptr;
7798
7799 // Many NEON builtins have identical semantics and uses in ARM and
7800 // AArch64. Emit these in a single function.
7801 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7802 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7803 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7804 if (Builtin)
7805 return EmitCommonNeonBuiltinExpr(
7806 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7807 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7808
7809 unsigned Int;
7810 switch (BuiltinID) {
7811 default: return nullptr;
7812 case NEON::BI__builtin_neon_vld1q_lane_v:
7813 // Handle 64-bit integer elements as a special case. Use shuffles of
7814 // one-element vectors to avoid poor code for i64 in the backend.
7815 if (VTy->getElementType()->isIntegerTy(64)) {
7816 // Extract the other lane.
7817 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7818 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7819 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7820 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7821 // Load the value as a one-element vector.
7822 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7823 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7824 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7825 Value *Align = getAlignmentValue32(PtrOp0);
7826 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7827 // Combine them.
7828 int Indices[] = {1 - Lane, Lane};
7829 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7830 }
7831 LLVM_FALLTHROUGH;
7832 case NEON::BI__builtin_neon_vld1_lane_v: {
7833 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7834 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7835 Value *Ld = Builder.CreateLoad(PtrOp0);
7836 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
7837 }
7838 case NEON::BI__builtin_neon_vqrshrn_n_v:
7839 Int =
7840 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
7841 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
7842 1, true);
7843 case NEON::BI__builtin_neon_vqrshrun_n_v:
7844 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
7845 Ops, "vqrshrun_n", 1, true);
7846 case NEON::BI__builtin_neon_vqshrn_n_v:
7847 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
7848 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
7849 1, true);
7850 case NEON::BI__builtin_neon_vqshrun_n_v:
7851 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
7852 Ops, "vqshrun_n", 1, true);
7853 case NEON::BI__builtin_neon_vrecpe_v:
7854 case NEON::BI__builtin_neon_vrecpeq_v:
7855 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
7856 Ops, "vrecpe");
7857 case NEON::BI__builtin_neon_vrshrn_n_v:
7858 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
7859 Ops, "vrshrn_n", 1, true);
7860 case NEON::BI__builtin_neon_vrsra_n_v:
7861 case NEON::BI__builtin_neon_vrsraq_n_v:
7862 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7863 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7864 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
7865 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
7866 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
7867 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
7868 case NEON::BI__builtin_neon_vsri_n_v:
7869 case NEON::BI__builtin_neon_vsriq_n_v:
7870 rightShift = true;
7871 LLVM_FALLTHROUGH;
7872 case NEON::BI__builtin_neon_vsli_n_v:
7873 case NEON::BI__builtin_neon_vsliq_n_v:
7874 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
7875 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
7876 Ops, "vsli_n");
7877 case NEON::BI__builtin_neon_vsra_n_v:
7878 case NEON::BI__builtin_neon_vsraq_n_v:
7879 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7880 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
7881 return Builder.CreateAdd(Ops[0], Ops[1]);
7882 case NEON::BI__builtin_neon_vst1q_lane_v:
7883 // Handle 64-bit integer elements as a special case. Use a shuffle to get
7884 // a one-element vector and avoid poor code for i64 in the backend.
7885 if (VTy->getElementType()->isIntegerTy(64)) {
7886 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7887 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
7888 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7889 Ops[2] = getAlignmentValue32(PtrOp0);
7890 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
7891 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
7892 Tys), Ops);
7893 }
7894 LLVM_FALLTHROUGH;
7895 case NEON::BI__builtin_neon_vst1_lane_v: {
7896 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7897 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
7898 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7899 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
7900 return St;
7901 }
7902 case NEON::BI__builtin_neon_vtbl1_v:
7903 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7904 Ops, "vtbl1");
7905 case NEON::BI__builtin_neon_vtbl2_v:
7906 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7907 Ops, "vtbl2");
7908 case NEON::BI__builtin_neon_vtbl3_v:
7909 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7910 Ops, "vtbl3");
7911 case NEON::BI__builtin_neon_vtbl4_v:
7912 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7913 Ops, "vtbl4");
7914 case NEON::BI__builtin_neon_vtbx1_v:
7915 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7916 Ops, "vtbx1");
7917 case NEON::BI__builtin_neon_vtbx2_v:
7918 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7919 Ops, "vtbx2");
7920 case NEON::BI__builtin_neon_vtbx3_v:
7921 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7922 Ops, "vtbx3");
7923 case NEON::BI__builtin_neon_vtbx4_v:
7924 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7925 Ops, "vtbx4");
7926 }
7927}
7928
7929template<typename Integer>
7930static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
7931 return E->getIntegerConstantExpr(Context)->getExtValue();
7932}
7933
7934static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
7935 llvm::Type *T, bool Unsigned) {
7936 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
7937 // which finds it convenient to specify signed/unsigned as a boolean flag.
7938 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
7939}
7940
7941static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
7942 uint32_t Shift, bool Unsigned) {
7943 // MVE helper function for integer shift right. This must handle signed vs
7944 // unsigned, and also deal specially with the case where the shift count is
7945 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
7946 // undefined behavior, but in MVE it's legal, so we must convert it to code
7947 // that is not undefined in IR.
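 // For example, an unsigned shift by 16 on 16-bit lanes folds to a zero
 // vector, while a signed shift by 16 is emitted as an arithmetic shift by
 // 15, which still broadcasts the sign bit into every lane.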
7948 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
7949 ->getElementType()
7950 ->getPrimitiveSizeInBits();
7951 if (Shift == LaneBits) {
7952 // An unsigned shift of the full lane size always generates zero, so we can
7953 // simply emit a zero vector. A signed shift of the full lane size does the
7954 // same thing as shifting by one bit fewer.
7955 if (Unsigned)
7956 return llvm::Constant::getNullValue(V->getType());
7957 else
7958 --Shift;
7959 }
7960 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
7961}
7962
7963static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
7964 // MVE-specific helper function for a vector splat, which infers the element
7965 // count of the output vector by knowing that MVE vectors are all 128 bits
7966 // wide.
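 // E.g. an i16 scalar is splatted into an <8 x i16> vector (128 / 16 lanes).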
7967 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
7968 return Builder.CreateVectorSplat(Elements, V);
7969}
7970
7971static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
7972 CodeGenFunction *CGF,
7973 llvm::Value *V,
7974 llvm::Type *DestType) {
7975 // Convert one MVE vector type into another by reinterpreting its in-register
7976 // format.
7977 //
7978 // Little-endian, this is identical to a bitcast (which reinterprets the
7979 // memory format). But big-endian, they're not necessarily the same, because
7980 // the register and memory formats map to each other differently depending on
7981 // the lane size.
7982 //
7983 // We generate a bitcast whenever we can (if we're little-endian, or if the
7984 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
7985 // that performs the different kind of reinterpretation.
7986 if (CGF->getTarget().isBigEndian() &&
7987 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
7988 return Builder.CreateCall(
7989 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
7990 {DestType, V->getType()}),
7991 V);
7992 } else {
7993 return Builder.CreateBitCast(V, DestType);
7994 }
7995}
7996
7997static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
7998 // Make a shufflevector that extracts every other element of a vector (evens
7999 // or odds, as desired).
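 // E.g. for an 8-element input with Odd == false, the shuffle mask below is
 // {0, 2, 4, 6}; with Odd == true it is {1, 3, 5, 7}.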
8000 SmallVector<int, 16> Indices;
8001 unsigned InputElements =
8002 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
8003 for (unsigned i = 0; i < InputElements; i += 2)
8004 Indices.push_back(i + Odd);
8005 return Builder.CreateShuffleVector(V, Indices);
8006}
8007
8008static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
8009 llvm::Value *V1) {
8010 // Make a shufflevector that interleaves two vectors element by element.
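 // E.g. zipping <a0,a1,a2,a3> with <b0,b1,b2,b3> uses the mask
 // {0, 4, 1, 5, 2, 6, 3, 7}, producing <a0,b0,a1,b1,a2,b2,a3,b3>.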
8011 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8012 SmallVector<int, 16> Indices;
8013 unsigned InputElements =
8014 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
8015 for (unsigned i = 0; i < InputElements; i++) {
8016 Indices.push_back(i);
8017 Indices.push_back(i + InputElements);
8018 }
8019 return Builder.CreateShuffleVector(V0, V1, Indices);
8020}
8021
8022template<unsigned HighBit, unsigned OtherBits>
8023static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
8024 // MVE-specific helper function to make a vector splat of a constant such as
8025 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
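 // E.g. with 32-bit lanes, <HighBit=1, OtherBits=0> splats 0x80000000
 // (INT_MIN), <0, 1> splats 0x7fffffff (INT_MAX), and <1, 1> splats
 // 0xffffffff (UINT_MAX).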
8026 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
8027 unsigned LaneBits = T->getPrimitiveSizeInBits();
8028 uint32_t Value = HighBit << (LaneBits - 1);
8029 if (OtherBits)
8030 Value |= (1UL << (LaneBits - 1)) - 1;
8031 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
8032 return ARMMVEVectorSplat(Builder, Lane);
8033}
8034
8035static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
8036 llvm::Value *V,
8037 unsigned ReverseWidth) {
8038 // MVE-specific helper function which reverses the elements of a
8039 // vector within every (ReverseWidth)-bit collection of lanes.
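 // E.g. with 8-bit lanes and ReverseWidth == 32, Mask below is 3 and the
 // shuffle mask starts {3, 2, 1, 0, 7, 6, 5, 4, ...}.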
8040 SmallVector<int, 16> Indices;
8041 unsigned LaneSize = V->getType()->getScalarSizeInBits();
8042 unsigned Elements = 128 / LaneSize;
8043 unsigned Mask = ReverseWidth / LaneSize - 1;
8044 for (unsigned i = 0; i < Elements; i++)
8045 Indices.push_back(i ^ Mask);
8046 return Builder.CreateShuffleVector(V, Indices);
8047}
8048
8049Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
8050 const CallExpr *E,
8051 ReturnValueSlot ReturnValue,
8052 llvm::Triple::ArchType Arch) {
8053 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
8054 Intrinsic::ID IRIntr;
8055 unsigned NumVectors;
8056
8057 // Code autogenerated by Tablegen will handle all the simple builtins.
8058 switch (BuiltinID) {
8059 #include "clang/Basic/arm_mve_builtin_cg.inc"
8060
8061 // If we didn't match an MVE builtin id at all, go back to the
8062 // main EmitARMBuiltinExpr.
8063 default:
8064 return nullptr;
8065 }
8066
8067 // Anything that breaks from that switch is an MVE builtin that
8068 // needs handwritten code to generate.
8069
8070 switch (CustomCodeGenType) {
8071
8072 case CustomCodeGen::VLD24: {
8073 llvm::SmallVector<Value *, 4> Ops;
8074 llvm::SmallVector<llvm::Type *, 4> Tys;
8075
8076 auto MvecCType = E->getType();
8077 auto MvecLType = ConvertType(MvecCType);
8078 assert(MvecLType->isStructTy() &&
8079 "Return type for vld[24]q should be a struct");
8080 assert(MvecLType->getStructNumElements() == 1 &&
8081 "Return-type struct for vld[24]q should have one element");
8082 auto MvecLTypeInner = MvecLType->getStructElementType(0);
8083 assert(MvecLTypeInner->isArrayTy() &&
8084 "Return-type struct for vld[24]q should contain an array");
8085 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8086 "Array member of return-type struct vld[24]q has wrong length");
8087 auto VecLType = MvecLTypeInner->getArrayElementType();
8088
8089 Tys.push_back(VecLType);
8090
8091 auto Addr = E->getArg(0);
8092 Ops.push_back(EmitScalarExpr(Addr));
8093 Tys.push_back(ConvertType(Addr->getType()));
8094
8095 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
8096 Value *LoadResult = Builder.CreateCall(F, Ops);
8097 Value *MvecOut = UndefValue::get(MvecLType);
8098 for (unsigned i = 0; i < NumVectors; ++i) {
8099 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
8100 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
8101 }
8102
8103 if (ReturnValue.isNull())
8104 return MvecOut;
8105 else
8106 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
8107 }
8108
8109 case CustomCodeGen::VST24: {
8110 llvm::SmallVector<Value *, 4> Ops;
8111 llvm::SmallVector<llvm::Type *, 4> Tys;
8112
8113 auto Addr = E->getArg(0);
8114 Ops.push_back(EmitScalarExpr(Addr));
8115 Tys.push_back(ConvertType(Addr->getType()));
8116
8117 auto MvecCType = E->getArg(1)->getType();
8118 auto MvecLType = ConvertType(MvecCType);
8119 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
8120 assert(MvecLType->getStructNumElements() == 1 &&
8121 "Data-type struct for vst2q should have one element");
8122 auto MvecLTypeInner = MvecLType->getStructElementType(0);
8123 assert(MvecLTypeInner->isArrayTy() &&
8124 "Data-type struct for vst2q should contain an array");
8125 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8126 "Array member of return-type struct vld[24]q has wrong length");
8127 auto VecLType = MvecLTypeInner->getArrayElementType();
8128
8129 Tys.push_back(VecLType);
8130
8131 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
8132 EmitAggExpr(E->getArg(1), MvecSlot);
8133 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
8134 for (unsigned i = 0; i < NumVectors; i++)
8135 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
8136
8137 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
8138 Value *ToReturn = nullptr;
8139 for (unsigned i = 0; i < NumVectors; i++) {
8140 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
8141 ToReturn = Builder.CreateCall(F, Ops);
8142 Ops.pop_back();
8143 }
8144 return ToReturn;
8145 }
8146 }
8147 llvm_unreachable("unknown custom codegen type.")::llvm::llvm_unreachable_internal("unknown custom codegen type."
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8147)
;
8148}
8149
8150Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
8151 const CallExpr *E,
8152 ReturnValueSlot ReturnValue,
8153 llvm::Triple::ArchType Arch) {
8154 switch (BuiltinID) {
8155 default:
8156 return nullptr;
8157#include "clang/Basic/arm_cde_builtin_cg.inc"
8158 }
8159}
8160
8161static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
8162 const CallExpr *E,
8163 SmallVectorImpl<Value *> &Ops,
8164 llvm::Triple::ArchType Arch) {
8165 unsigned int Int = 0;
8166 const char *s = nullptr;
8167
8168 switch (BuiltinID) {
8169 default:
8170 return nullptr;
8171 case NEON::BI__builtin_neon_vtbl1_v:
8172 case NEON::BI__builtin_neon_vqtbl1_v:
8173 case NEON::BI__builtin_neon_vqtbl1q_v:
8174 case NEON::BI__builtin_neon_vtbl2_v:
8175 case NEON::BI__builtin_neon_vqtbl2_v:
8176 case NEON::BI__builtin_neon_vqtbl2q_v:
8177 case NEON::BI__builtin_neon_vtbl3_v:
8178 case NEON::BI__builtin_neon_vqtbl3_v:
8179 case NEON::BI__builtin_neon_vqtbl3q_v:
8180 case NEON::BI__builtin_neon_vtbl4_v:
8181 case NEON::BI__builtin_neon_vqtbl4_v:
8182 case NEON::BI__builtin_neon_vqtbl4q_v:
8183 break;
8184 case NEON::BI__builtin_neon_vtbx1_v:
8185 case NEON::BI__builtin_neon_vqtbx1_v:
8186 case NEON::BI__builtin_neon_vqtbx1q_v:
8187 case NEON::BI__builtin_neon_vtbx2_v:
8188 case NEON::BI__builtin_neon_vqtbx2_v:
8189 case NEON::BI__builtin_neon_vqtbx2q_v:
8190 case NEON::BI__builtin_neon_vtbx3_v:
8191 case NEON::BI__builtin_neon_vqtbx3_v:
8192 case NEON::BI__builtin_neon_vqtbx3q_v:
8193 case NEON::BI__builtin_neon_vtbx4_v:
8194 case NEON::BI__builtin_neon_vqtbx4_v:
8195 case NEON::BI__builtin_neon_vqtbx4q_v:
8196 break;
8197 }
8198
8199 assert(E->getNumArgs() >= 3);
8200
8201 // Get the last argument, which specifies the vector type.
8202 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
8203 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
8204 if (!Result)
8205 return nullptr;
8206
8207 // Determine the type of this overloaded NEON intrinsic.
8208 NeonTypeFlags Type = Result->getZExtValue();
8209 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
8210 if (!Ty)
8211 return nullptr;
8212
8213 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8214
8215 // AArch64 scalar builtins are not overloaded; they do not have an extra
8216 // argument that specifies the vector type, so we need to handle each case.
8217 switch (BuiltinID) {
8218 case NEON::BI__builtin_neon_vtbl1_v: {
8219 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
8220 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
8221 "vtbl1");
8222 }
8223 case NEON::BI__builtin_neon_vtbl2_v: {
8224 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
8225 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
8226 "vtbl1");
8227 }
8228 case NEON::BI__builtin_neon_vtbl3_v: {
8229 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
8230 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
8231 "vtbl2");
8232 }
8233 case NEON::BI__builtin_neon_vtbl4_v: {
8234 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
8235 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
8236 "vtbl2");
8237 }
8238 case NEON::BI__builtin_neon_vtbx1_v: {
8239 Value *TblRes =
8240 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
8241 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
8242
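 // Indices >= 8 are out of range for a single 8-byte table, so vtbx1 must
 // keep the original lane: compare the indices against 8, sign-extend the
 // i1 results into a lane mask, and blend the table lookup with Ops[0].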
8243 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
8244 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
8245 CmpRes = Builder.CreateSExt(CmpRes, Ty);
8246
8247 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8248 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8249 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8250 }
8251 case NEON::BI__builtin_neon_vtbx2_v: {
8252 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
8253 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
8254 "vtbx1");
8255 }
8256 case NEON::BI__builtin_neon_vtbx3_v: {
8257 Value *TblRes =
8258 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
8259 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
8260
8261 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
8262 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
8263 TwentyFourV);
8264 CmpRes = Builder.CreateSExt(CmpRes, Ty);
8265
8266 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8267 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8268 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8269 }
8270 case NEON::BI__builtin_neon_vtbx4_v: {
8271 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
8272 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
8273 "vtbx2");
8274 }
8275 case NEON::BI__builtin_neon_vqtbl1_v:
8276 case NEON::BI__builtin_neon_vqtbl1q_v:
8277 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
8278 case NEON::BI__builtin_neon_vqtbl2_v:
8279 case NEON::BI__builtin_neon_vqtbl2q_v: {
8280 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
8281 case NEON::BI__builtin_neon_vqtbl3_v:
8282 case NEON::BI__builtin_neon_vqtbl3q_v:
8283 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
8284 case NEON::BI__builtin_neon_vqtbl4_v:
8285 case NEON::BI__builtin_neon_vqtbl4q_v:
8286 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
8287 case NEON::BI__builtin_neon_vqtbx1_v:
8288 case NEON::BI__builtin_neon_vqtbx1q_v:
8289 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
8290 case NEON::BI__builtin_neon_vqtbx2_v:
8291 case NEON::BI__builtin_neon_vqtbx2q_v:
8292 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
8293 case NEON::BI__builtin_neon_vqtbx3_v:
8294 case NEON::BI__builtin_neon_vqtbx3q_v:
8295 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
8296 case NEON::BI__builtin_neon_vqtbx4_v:
8297 case NEON::BI__builtin_neon_vqtbx4q_v:
8298 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
8299 }
8300 }
8301
8302 if (!Int)
8303 return nullptr;
8304
8305 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
8306 return CGF.EmitNeonCall(F, Ops, s);
8307}
8308
8309Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
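 // Wrap a scalar in lane 0 of an undef <4 x i16> vector: bitcast the operand
 // to i16, then insert it at element index 0.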
8310 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
8311 Op = Builder.CreateBitCast(Op, Int16Ty);
8312 Value *V = UndefValue::get(VTy);
8313 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
8314 Op = Builder.CreateInsertElement(V, Op, CI);
8315 return Op;
8316}
8317
8318/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
8319/// access builtin. Only required if it can't be inferred from the base pointer
8320/// operand.
8321llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
8322 switch (TypeFlags.getMemEltType()) {
8323 case SVETypeFlags::MemEltTyDefault:
8324 return getEltType(TypeFlags);
8325 case SVETypeFlags::MemEltTyInt8:
8326 return Builder.getInt8Ty();
8327 case SVETypeFlags::MemEltTyInt16:
8328 return Builder.getInt16Ty();
8329 case SVETypeFlags::MemEltTyInt32:
8330 return Builder.getInt32Ty();
8331 case SVETypeFlags::MemEltTyInt64:
8332 return Builder.getInt64Ty();
8333 }
8334 llvm_unreachable("Unknown MemEltType")::llvm::llvm_unreachable_internal("Unknown MemEltType", "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8334)
;
8335}
8336
8337llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
8338 switch (TypeFlags.getEltType()) {
8339 default:
8340 llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 8340)
;
8341
8342 case SVETypeFlags::EltTyInt8:
8343 return Builder.getInt8Ty();
8344 case SVETypeFlags::EltTyInt16:
8345 return Builder.getInt16Ty();
8346 case SVETypeFlags::EltTyInt32:
8347 return Builder.getInt32Ty();
8348 case SVETypeFlags::EltTyInt64:
8349 return Builder.getInt64Ty();
8350
8351 case SVETypeFlags::EltTyFloat16:
8352 return Builder.getHalfTy();
8353 case SVETypeFlags::EltTyFloat32:
8354 return Builder.getFloatTy();
8355 case SVETypeFlags::EltTyFloat64:
8356 return Builder.getDoubleTy();
8357
8358 case SVETypeFlags::EltTyBFloat16:
8359 return Builder.getBFloatTy();
8360
8361 case SVETypeFlags::EltTyBool8:
8362 case SVETypeFlags::EltTyBool16:
8363 case SVETypeFlags::EltTyBool32:
8364 case SVETypeFlags::EltTyBool64:
8365 return Builder.getInt1Ty();
8366 }
8367}
8368
8369// Return the llvm predicate vector type corresponding to the specified element
8370// TypeFlags.
8371llvm::ScalableVectorType *
8372CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
8373 switch (TypeFlags.getEltType()) {
8374 default: llvm_unreachable("Unhandled SVETypeFlag!");
8375
8376 case SVETypeFlags::EltTyInt8:
8377 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8378 case SVETypeFlags::EltTyInt16:
8379 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8380 case SVETypeFlags::EltTyInt32:
8381 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8382 case SVETypeFlags::EltTyInt64:
8383 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8384
8385 case SVETypeFlags::EltTyBFloat16:
8386 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8387 case SVETypeFlags::EltTyFloat16:
8388 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8389 case SVETypeFlags::EltTyFloat32:
8390 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8391 case SVETypeFlags::EltTyFloat64:
8392 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8393
8394 case SVETypeFlags::EltTyBool8:
8395 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8396 case SVETypeFlags::EltTyBool16:
8397 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8398 case SVETypeFlags::EltTyBool32:
8399 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8400 case SVETypeFlags::EltTyBool64:
8401 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8402 }
8403}
8404
8405// Return the llvm vector type corresponding to the specified element TypeFlags.
8406llvm::ScalableVectorType *
8407CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
8408 switch (TypeFlags.getEltType()) {
8409 default:
8410 llvm_unreachable("Invalid SVETypeFlag!");
8411
8412 case SVETypeFlags::EltTyInt8:
8413 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
8414 case SVETypeFlags::EltTyInt16:
8415 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
8416 case SVETypeFlags::EltTyInt32:
8417 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
8418 case SVETypeFlags::EltTyInt64:
8419 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
8420
8421 case SVETypeFlags::EltTyFloat16:
8422 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
8423 case SVETypeFlags::EltTyBFloat16:
8424 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
8425 case SVETypeFlags::EltTyFloat32:
8426 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
8427 case SVETypeFlags::EltTyFloat64:
8428 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
8429
8430 case SVETypeFlags::EltTyBool8:
8431 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8432 case SVETypeFlags::EltTyBool16:
8433 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8434 case SVETypeFlags::EltTyBool32:
8435 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8436 case SVETypeFlags::EltTyBool64:
8437 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8438 }
8439}
8440
8441llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
8442 Function *Ptrue =
8443 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
8444 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
8445}
8446
8447constexpr unsigned SVEBitsPerBlock = 128;
8448
8449static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
8450 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
8451 return llvm::ScalableVectorType::get(EltTy, NumElts);
8452}
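// Illustrative note (not part of CGBuiltin.cpp): SVEBitsPerBlock fixes the
// minimum SVE register width at 128 bits, so the helper above yields
//   128 / 8  == 16 lanes for i8  -> <vscale x 16 x i8>
//   128 / 64 ==  2 lanes for i64 -> <vscale x 2 x i64>
// and so on; the hardware may implement any multiple of that 128-bit block.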
8453
8454// Reinterpret the input predicate so that it can be used to correctly isolate
8455// the elements of the specified datatype.
8456Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
8457 llvm::ScalableVectorType *VTy) {
8458 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
8459 if (Pred->getType() == RTy)
8460 return Pred;
8461
8462 unsigned IntID;
8463 llvm::Type *IntrinsicTy;
8464 switch (VTy->getMinNumElements()) {
8465 default:
8466 llvm_unreachable("unsupported element count!");
8467 case 2:
8468 case 4:
8469 case 8:
8470 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
8471 IntrinsicTy = RTy;
8472 break;
8473 case 16:
8474 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
8475 IntrinsicTy = Pred->getType();
8476 break;
8477 }
8478
8479 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
8480 Value *C = Builder.CreateCall(F, Pred);
8481 assert(C->getType() == RTy && "Unexpected return type!");
8482 return C;
8483}
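// Illustrative sketch (an assumption about the emitted IR, not taken from this
// file): casting an svbool_t predicate for use with 64-bit data lowers to the
// narrowing conversion intrinsic, e.g.
//   %pg.d = call <vscale x 2 x i1>
//           @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// whereas producing the full 16-lane svbool_t from a narrower predicate uses
// the @llvm.aarch64.sve.convert.to.svbool.* direction instead.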
8484
8485Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
8486 SmallVectorImpl<Value *> &Ops,
8487 unsigned IntID) {
8488 auto *ResultTy = getSVEType(TypeFlags);
8489 auto *OverloadedTy =
8490 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
8491
8492 // At the ACLE level there's only one predicate type, svbool_t, which is
8493 // mapped to <n x 16 x i1>. However, this might be incompatible with the
8494 // actual type being loaded. For example, when loading doubles (i64) the
8495 // predicate should be <n x 2 x i1> instead. At the IR level the type of
8496 // the predicate and the data being loaded must match. Cast accordingly.
8497 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8498
8499 Function *F = nullptr;
8500 if (Ops[1]->getType()->isVectorTy())
8501 // This is the "vector base, scalar offset" case. In order to uniquely
8502 // map this built-in to an LLVM IR intrinsic, we need both the return type
8503 // and the type of the vector base.
8504 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
8505 else
8506 // This is the "scalar base, vector offset" case. The type of the offset
8507 // is encoded in the name of the intrinsic. We only need to specify the
8508 // return type in order to uniquely map this built-in to an LLVM IR
8509 // intrinsic.
8510 F = CGM.getIntrinsic(IntID, OverloadedTy);
8511
8512 // Pass 0 when the offset is missing. This can only be applied when using
8513 // the "vector base" addressing mode for which ACLE allows no offset. The
8514 // corresponding LLVM IR always requires an offset.
8515 if (Ops.size() == 2) {
8516 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8517 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8518 }
8519
8520 // For "vector base, scalar index" scale the index so that it becomes a
8521 // scalar offset.
8522 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
8523 unsigned BytesPerElt =
8524 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8525 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8526 Ops[2] = Builder.CreateMul(Ops[2], Scale);
8527 }
8528
8529 Value *Call = Builder.CreateCall(F, Ops);
8530
8531 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
8532 // other cases it's folded into a nop.
8533 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
8534 : Builder.CreateSExt(Call, ResultTy);
8535}
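// Worked example (illustrative, not from this file): for an index-form gather
// load of 64-bit elements, BytesPerElt is 64 / 8 == 8, so an index vector
// {0, 1, 2, ...} is rewritten as the byte-offset vector {0, 8, 16, ...} before
// the intrinsic call, i.e.
//   Ops[2] = Builder.CreateMul(Ops[2], ConstantInt::get(Int64Ty, 8));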
8536
8537Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
8538 SmallVectorImpl<Value *> &Ops,
8539 unsigned IntID) {
8540 auto *SrcDataTy = getSVEType(TypeFlags);
8541 auto *OverloadedTy =
8542 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
8543
8544 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
8545 // it's the first argument. Move it accordingly.
8546 Ops.insert(Ops.begin(), Ops.pop_back_val());
8547
8548 Function *F = nullptr;
8549 if (Ops[2]->getType()->isVectorTy())
8550 // This is the "vector base, scalar offset" case. In order to uniquely
8551 // map this built-in to an LLVM IR intrinsic, we need both the return type
8552 // and the type of the vector base.
8553 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
8554 else
8555 // This is the "scalar base, vector offset" case. The type of the offset
8556 // is encoded in the name of the intrinsic. We only need to specify the
8557 // return type in order to uniquely map this built-in to an LLVM IR
8558 // intrinsic.
8559 F = CGM.getIntrinsic(IntID, OverloadedTy);
8560
8561 // Pass 0 when the offset is missing. This can only be applied when using
8562 // the "vector base" addressing mode for which ACLE allows no offset. The
8563 // corresponding LLVM IR always requires an offset.
8564 if (Ops.size() == 3) {
8565 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8566 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8567 }
8568
8569 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
8570 // folded into a nop.
8571 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
8572
8573 // At the ACLE level there's only one predicate type, svbool_t, which is
8574 // mapped to <n x 16 x i1>. However, this might be incompatible with the
8575 // actual type being stored. For example, when storing doubles (i64) the
8576 // predicate should be <n x 2 x i1> instead. At the IR level the type of
8577 // the predicate and the data being stored must match. Cast accordingly.
8578 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
8579
8580 // For "vector base, scalar index" scale the index so that it becomes a
8581 // scalar offset.
8582 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
8583 unsigned BytesPerElt =
8584 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8585 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8586 Ops[3] = Builder.CreateMul(Ops[3], Scale);
8587 }
8588
8589 return Builder.CreateCall(F, Ops);
8590}
8591
8592Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
8593 SmallVectorImpl<Value *> &Ops,
8594 unsigned IntID) {
8595 // The gather prefetches are overloaded on the vector input - this can either
8596 // be the vector of base addresses or vector of offsets.
8597 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
8598 if (!OverloadedTy)
8599 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
8600
8601 // Cast the predicate from svbool_t to the right number of elements.
8602 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8603
8604 // vector + imm addressing modes
8605 if (Ops[1]->getType()->isVectorTy()) {
8606 if (Ops.size() == 3) {
8607 // Pass 0 for 'vector+imm' when the index is omitted.
8608 Ops.push_back(ConstantInt::get(Int64Ty, 0));
8609
8610 // The sv_prfop is the last operand in the builtin and IR intrinsic.
8611 std::swap(Ops[2], Ops[3]);
8612 } else {
8613 // Index needs to be passed as scaled offset.
8614 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8615 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
8616 Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8617 Ops[2] = Builder.CreateMul(Ops[2], Scale);
8618 }
8619 }
8620
8621 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
8622 return Builder.CreateCall(F, Ops);
8623}
8624
8625Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
8626 SmallVectorImpl<Value*> &Ops,
8627 unsigned IntID) {
8628 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8629 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8630 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8631
8632 unsigned N;
8633 switch (IntID) {
8634 case Intrinsic::aarch64_sve_ld2:
8635 N = 2;
8636 break;
8637 case Intrinsic::aarch64_sve_ld3:
8638 N = 3;
8639 break;
8640 case Intrinsic::aarch64_sve_ld4:
8641 N = 4;
8642 break;
8643 default:
8644 llvm_unreachable("unknown intrinsic!");
8645 }
8646 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
8647 VTy->getElementCount() * N);
8648
8649 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8650 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8651 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8652 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8653 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8654
8655 Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8656 return Builder.CreateCall(F, { Predicate, BasePtr });
8657}
8658
8659Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
8660 SmallVectorImpl<Value*> &Ops,
8661 unsigned IntID) {
8662 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8663 auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8664 auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8665
8666 unsigned N;
8667 switch (IntID) {
8668 case Intrinsic::aarch64_sve_st2:
8669 N = 2;
8670 break;
8671 case Intrinsic::aarch64_sve_st3:
8672 N = 3;
8673 break;
8674 case Intrinsic::aarch64_sve_st4:
8675 N = 4;
8676 break;
8677 default:
8678 llvm_unreachable("unknown intrinsic!");
8679 }
8680 auto TupleTy =
8681 llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8682
8683 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8684 Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8685 Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8686 Value *Val = Ops.back();
8687 BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8688 BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8689
8690 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8691 // need to break up the tuple vector.
8692 SmallVector<llvm::Value*, 5> Operands;
8693 Function *FExtr =
8694 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8695 for (unsigned I = 0; I < N; ++I)
8696 Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8697 Operands.append({Predicate, BasePtr});
8698
8699 Function *F = CGM.getIntrinsic(IntID, { VTy });
8700 return Builder.CreateCall(F, Operands);
8701}
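// Illustrative sketch (assumed IR, not taken from this file): for svst2 of
// 8-bit data the 2-vector tuple is split with two tuple_get calls before the
// store, e.g.
//   %p0 = call <vscale x 16 x i8>
//         @llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8(<vscale x 32 x i8> %tuple, i32 0)
//   %p1 = call <vscale x 16 x i8>
//         @llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8(<vscale x 32 x i8> %tuple, i32 1)
// and the parts are then passed to the st2 intrinsic together with the
// predicate and the base pointer.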
8702
8703// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8704// svpmullt_pair intrinsics, with the exception that their results are bitcast
8705// to a wider type.
8706Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
8707 SmallVectorImpl<Value *> &Ops,
8708 unsigned BuiltinID) {
8709 // Splat scalar operand to vector (intrinsics with _n infix)
8710 if (TypeFlags.hasSplatOperand()) {
8711 unsigned OpNo = TypeFlags.getSplatOperand();
8712 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8713 }
8714
8715 // The pair-wise function has a narrower overloaded type.
8716 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8717 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8718
8719 // Now bitcast to the wider result type.
8720 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8721 return EmitSVEReinterpret(Call, Ty);
8722}
8723
8724Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
8725 ArrayRef<Value *> Ops, unsigned BuiltinID) {
8726 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8727 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8728 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8729}
8730
8731Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
8732 SmallVectorImpl<Value *> &Ops,
8733 unsigned BuiltinID) {
8734 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8735 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8736 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8737
8738 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8739 Value *BasePtr = Ops[1];
8740
8741 // Implement the index operand if not omitted.
8742 if (Ops.size() > 3) {
8743 BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8744 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8745 }
8746
8747 // Prefetch intrinsics always expect an i8*
8748 BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8749 Value *PrfOp = Ops.back();
8750
8751 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8752 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8753}
8754
8755Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8756 llvm::Type *ReturnTy,
8757 SmallVectorImpl<Value *> &Ops,
8758 unsigned BuiltinID,
8759 bool IsZExtReturn) {
8760 QualType LangPTy = E->getArg(1)->getType();
8761 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8762 LangPTy->castAs<PointerType>()->getPointeeType());
8763
8764 // The vector type that is returned may be different from the
8765 // eventual type loaded from memory.
8766 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8767 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8768
8769 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8770 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8771 Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8772 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8773
8774 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8775 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8776 Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8777
8778 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
8779 : Builder.CreateSExt(Load, VectorTy);
8780}
8781
8782Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8783 SmallVectorImpl<Value *> &Ops,
8784 unsigned BuiltinID) {
8785 QualType LangPTy = E->getArg(1)->getType();
8786 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8787 LangPTy->castAs<PointerType>()->getPointeeType());
8788
8789 // The vector type that is stored may be different from the
8790 // eventual type stored to memory.
8791 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8792 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8793
8794 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8795 Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8796 Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8797 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8798
8799 // Last value is always the data
8800 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8801
8802 BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8803 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8804 return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8805}
8806
8807// Limit the usage of scalable llvm IR generated by the ACLE by using the
8808// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
8809Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8810 auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8811 return Builder.CreateCall(F, Scalar);
8812}
8813
8814Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
8815 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8816}
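// Illustrative note (assumption, not from this file): splatting a 32-bit
// scalar this way produces a call such as
//   %splat = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %s)
// rather than the generic insertelement/shufflevector splat sequence that
// IRBuilder::CreateVectorSplat would emit for a scalable vector.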
8817
8818Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8819 // FIXME: For big endian this needs an additional REV, or needs a separate
8820 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
8821 // instruction is defined as 'bitwise' equivalent from memory point of
8822 // view (when storing/reloading), whereas the svreinterpret builtin
8823 // implements bitwise equivalent cast from register point of view.
8824 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8825 return Builder.CreateBitCast(Val, Ty);
8826}
8827
8828static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8829 SmallVectorImpl<Value *> &Ops) {
8830 auto *SplatZero = Constant::getNullValue(Ty);
8831 Ops.insert(Ops.begin(), SplatZero);
8832}
8833
8834static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8835 SmallVectorImpl<Value *> &Ops) {
8836 auto *SplatUndef = UndefValue::get(Ty);
8837 Ops.insert(Ops.begin(), SplatUndef);
8838}
8839
8840SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
8841 SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
8842 if (TypeFlags.isOverloadNone())
8843 return {};
8844
8845 llvm::Type *DefaultType = getSVEType(TypeFlags);
8846
8847 if (TypeFlags.isOverloadWhile())
8848 return {DefaultType, Ops[1]->getType()};
8849
8850 if (TypeFlags.isOverloadWhileRW())
8851 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
8852
8853 if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
8854 return {Ops[0]->getType(), Ops.back()->getType()};
8855
8856 if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
8857 return {ResultType, Ops[0]->getType()};
8858
8859 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
8860 return {DefaultType};
8861}
8862
8863Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
8864 const CallExpr *E) {
8865 // Find out if any arguments are required to be integer constant expressions.
8866 unsigned ICEArguments = 0;
8867 ASTContext::GetBuiltinTypeError Error;
8868 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8869 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8870
8871 llvm::Type *Ty = ConvertType(E->getType());
8872 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
8873 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
8874 Value *Val = EmitScalarExpr(E->getArg(0));
8875 return EmitSVEReinterpret(Val, Ty);
8876 }
8877
8878 llvm::SmallVector<Value *, 4> Ops;
8879 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
8880 if ((ICEArguments & (1 << i)) == 0)
8881 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8882 else {
8883 // If this is required to be a constant, constant fold it so that we know
8884 // that the generated intrinsic gets a ConstantInt.
8885 Optional<llvm::APSInt> Result =
8886 E->getArg(i)->getIntegerConstantExpr(getContext());
8887 assert(Result && "Expected argument to be a constant");
8888
8889 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
8890 // truncate because the immediate has been range checked and no valid
8891 // immediate requires more than a handful of bits.
8892 *Result = Result->extOrTrunc(32);
8893 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
8894 }
8895 }
8896
8897 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
8898 AArch64SVEIntrinsicsProvenSorted);
8899 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8900 if (TypeFlags.isLoad())
8901 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
8902 TypeFlags.isZExtReturn());
8903 else if (TypeFlags.isStore())
8904 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
8905 else if (TypeFlags.isGatherLoad())
8906 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8907 else if (TypeFlags.isScatterStore())
8908 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8909 else if (TypeFlags.isPrefetch())
8910 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8911 else if (TypeFlags.isGatherPrefetch())
8912 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8913 else if (TypeFlags.isStructLoad())
8914 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8915 else if (TypeFlags.isStructStore())
8916 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
8917 else if (TypeFlags.isUndef())
8918 return UndefValue::get(Ty);
8919 else if (Builtin->LLVMIntrinsic != 0) {
8920 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
8921 InsertExplicitZeroOperand(Builder, Ty, Ops);
8922
8923 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
8924 InsertExplicitUndefOperand(Builder, Ty, Ops);
8925
8926 // Some ACLE builtins leave out the argument to specify the predicate
8927 // pattern, which is expected to be expanded to an SV_ALL pattern.
8928 if (TypeFlags.isAppendSVALL())
8929 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
8930 if (TypeFlags.isInsertOp1SVALL())
8931 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
8932
8933 // Predicates must match the main datatype.
8934 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
8935 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
8936 if (PredTy->getElementType()->isIntegerTy(1))
8937 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
8938
8939 // Splat scalar operand to vector (intrinsics with _n infix)
8940 if (TypeFlags.hasSplatOperand()) {
8941 unsigned OpNo = TypeFlags.getSplatOperand();
8942 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8943 }
8944
8945 if (TypeFlags.isReverseCompare())
8946 std::swap(Ops[1], Ops[2]);
8947
8948 if (TypeFlags.isReverseUSDOT())
8949 std::swap(Ops[1], Ops[2]);
8950
8951 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
8952 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
8953 llvm::Type *OpndTy = Ops[1]->getType();
8954 auto *SplatZero = Constant::getNullValue(OpndTy);
8955 Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
8956 Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
8957 }
8958
8959 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
8960 getSVEOverloadTypes(TypeFlags, Ty, Ops));
8961 Value *Call = Builder.CreateCall(F, Ops);
8962
8963 // Predicate results must be converted to svbool_t.
8964 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
8965 if (PredTy->getScalarType()->isIntegerTy(1))
8966 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
8967
8968 return Call;
8969 }
8970
8971 switch (BuiltinID) {
8972 default:
8973 return nullptr;
8974
8975 case SVE::BI__builtin_sve_svmov_b_z: {
8976 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
8977 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8978 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8979 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
8980 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
8981 }
8982
8983 case SVE::BI__builtin_sve_svnot_b_z: {
8984 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
8985 SVETypeFlags TypeFlags(Builtin->TypeModifier);
8986 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
8987 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
8988 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
8989 }
8990
8991 case SVE::BI__builtin_sve_svmovlb_u16:
8992 case SVE::BI__builtin_sve_svmovlb_u32:
8993 case SVE::BI__builtin_sve_svmovlb_u64:
8994 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
8995
8996 case SVE::BI__builtin_sve_svmovlb_s16:
8997 case SVE::BI__builtin_sve_svmovlb_s32:
8998 case SVE::BI__builtin_sve_svmovlb_s64:
8999 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
9000
9001 case SVE::BI__builtin_sve_svmovlt_u16:
9002 case SVE::BI__builtin_sve_svmovlt_u32:
9003 case SVE::BI__builtin_sve_svmovlt_u64:
9004 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
9005
9006 case SVE::BI__builtin_sve_svmovlt_s16:
9007 case SVE::BI__builtin_sve_svmovlt_s32:
9008 case SVE::BI__builtin_sve_svmovlt_s64:
9009 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
9010
9011 case SVE::BI__builtin_sve_svpmullt_u16:
9012 case SVE::BI__builtin_sve_svpmullt_u64:
9013 case SVE::BI__builtin_sve_svpmullt_n_u16:
9014 case SVE::BI__builtin_sve_svpmullt_n_u64:
9015 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
9016
9017 case SVE::BI__builtin_sve_svpmullb_u16:
9018 case SVE::BI__builtin_sve_svpmullb_u64:
9019 case SVE::BI__builtin_sve_svpmullb_n_u16:
9020 case SVE::BI__builtin_sve_svpmullb_n_u64:
9021 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
9022
9023 case SVE::BI__builtin_sve_svdup_n_b8:
9024 case SVE::BI__builtin_sve_svdup_n_b16:
9025 case SVE::BI__builtin_sve_svdup_n_b32:
9026 case SVE::BI__builtin_sve_svdup_n_b64: {
9027 Value *CmpNE =
9028 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
9029 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
9030 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
9031 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
9032 }
9033
9034 case SVE::BI__builtin_sve_svdupq_n_b8:
9035 case SVE::BI__builtin_sve_svdupq_n_b16:
9036 case SVE::BI__builtin_sve_svdupq_n_b32:
9037 case SVE::BI__builtin_sve_svdupq_n_b64:
9038 case SVE::BI__builtin_sve_svdupq_n_u8:
9039 case SVE::BI__builtin_sve_svdupq_n_s8:
9040 case SVE::BI__builtin_sve_svdupq_n_u64:
9041 case SVE::BI__builtin_sve_svdupq_n_f64:
9042 case SVE::BI__builtin_sve_svdupq_n_s64:
9043 case SVE::BI__builtin_sve_svdupq_n_u16:
9044 case SVE::BI__builtin_sve_svdupq_n_f16:
9045 case SVE::BI__builtin_sve_svdupq_n_bf16:
9046 case SVE::BI__builtin_sve_svdupq_n_s16:
9047 case SVE::BI__builtin_sve_svdupq_n_u32:
9048 case SVE::BI__builtin_sve_svdupq_n_f32:
9049 case SVE::BI__builtin_sve_svdupq_n_s32: {
9050 // These builtins are implemented by storing each element to an array and using
9051 // ld1rq to materialize a vector.
9052 unsigned NumOpnds = Ops.size();
9053
9054 bool IsBoolTy =
9055 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
9056
9057 // For svdupq_n_b* the element type is an integer of width 128/numelts bits,
9058 // so that the compare can use the width that is natural for the expected
9059 // number of predicate lanes.
9060 llvm::Type *EltTy = Ops[0]->getType();
9061 if (IsBoolTy)
9062 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
9063
9064 SmallVector<llvm::Value *, 16> VecOps;
9065 for (unsigned I = 0; I < NumOpnds; ++I)
9066 VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
9067 Value *Vec = BuildVector(VecOps);
9068
9069 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9070 Value *Pred = EmitSVEAllTruePred(TypeFlags);
9071
9072 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
9073 Value *InsertSubVec = Builder.CreateInsertVector(
9074 OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0));
9075
9076 Function *F =
9077 CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
9078 Value *DupQLane =
9079 Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
9080
9081 if (!IsBoolTy)
9082 return DupQLane;
9083
9084 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
9085 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
9086 : Intrinsic::aarch64_sve_cmpne_wide,
9087 OverloadedTy);
9088 Value *Call = Builder.CreateCall(
9089 F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
9090 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9091 }
9092
9093 case SVE::BI__builtin_sve_svpfalse_b:
9094 return ConstantInt::getFalse(Ty);
9095
9096 case SVE::BI__builtin_sve_svlen_bf16:
9097 case SVE::BI__builtin_sve_svlen_f16:
9098 case SVE::BI__builtin_sve_svlen_f32:
9099 case SVE::BI__builtin_sve_svlen_f64:
9100 case SVE::BI__builtin_sve_svlen_s8:
9101 case SVE::BI__builtin_sve_svlen_s16:
9102 case SVE::BI__builtin_sve_svlen_s32:
9103 case SVE::BI__builtin_sve_svlen_s64:
9104 case SVE::BI__builtin_sve_svlen_u8:
9105 case SVE::BI__builtin_sve_svlen_u16:
9106 case SVE::BI__builtin_sve_svlen_u32:
9107 case SVE::BI__builtin_sve_svlen_u64: {
9108 SVETypeFlags TF(Builtin->TypeModifier);
9109 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9110 auto *NumEls =
9111 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
9112
9113 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
9114 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
9115 }
9116
9117 case SVE::BI__builtin_sve_svtbl2_u8:
9118 case SVE::BI__builtin_sve_svtbl2_s8:
9119 case SVE::BI__builtin_sve_svtbl2_u16:
9120 case SVE::BI__builtin_sve_svtbl2_s16:
9121 case SVE::BI__builtin_sve_svtbl2_u32:
9122 case SVE::BI__builtin_sve_svtbl2_s32:
9123 case SVE::BI__builtin_sve_svtbl2_u64:
9124 case SVE::BI__builtin_sve_svtbl2_s64:
9125 case SVE::BI__builtin_sve_svtbl2_f16:
9126 case SVE::BI__builtin_sve_svtbl2_bf16:
9127 case SVE::BI__builtin_sve_svtbl2_f32:
9128 case SVE::BI__builtin_sve_svtbl2_f64: {
9129 SVETypeFlags TF(Builtin->TypeModifier);
9130 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9131 auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
9132 Function *FExtr =
9133 CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
9134 Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
9135 Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
9136 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
9137 return Builder.CreateCall(F, {V0, V1, Ops[1]});
9138 }
9139 }
9140
9141 // Should not happen.
9142 return nullptr;
9143}
9144
9145Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
9146 const CallExpr *E,
9147 llvm::Triple::ArchType Arch) {
9148 if (BuiltinID >= AArch64::FirstSVEBuiltin &&
9149 BuiltinID <= AArch64::LastSVEBuiltin)
9150 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
9151
9152 unsigned HintID = static_cast<unsigned>(-1);
9153 switch (BuiltinID) {
9154 default: break;
9155 case AArch64::BI__builtin_arm_nop:
9156 HintID = 0;
9157 break;
9158 case AArch64::BI__builtin_arm_yield:
9159 case AArch64::BI__yield:
9160 HintID = 1;
9161 break;
9162 case AArch64::BI__builtin_arm_wfe:
9163 case AArch64::BI__wfe:
9164 HintID = 2;
9165 break;
9166 case AArch64::BI__builtin_arm_wfi:
9167 case AArch64::BI__wfi:
9168 HintID = 3;
9169 break;
9170 case AArch64::BI__builtin_arm_sev:
9171 case AArch64::BI__sev:
9172 HintID = 4;
9173 break;
9174 case AArch64::BI__builtin_arm_sevl:
9175 case AArch64::BI__sevl:
9176 HintID = 5;
9177 break;
9178 }
9179
9180 if (HintID != static_cast<unsigned>(-1)) {
9181 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
9182 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
9183 }
9184
9185 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
9186 Value *Address = EmitScalarExpr(E->getArg(0));
9187 Value *RW = EmitScalarExpr(E->getArg(1));
9188 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
9189 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
9190 Value *IsData = EmitScalarExpr(E->getArg(4));
9191
9192 Value *Locality = nullptr;
9193 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
9194 // Temporal fetch, needs to convert cache level to locality.
9195 Locality = llvm::ConstantInt::get(Int32Ty,
9196 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
9197 } else {
9198 // Streaming fetch.
9199 Locality = llvm::ConstantInt::get(Int32Ty, 0);
9200 }
9201
9202 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
9203 // PLDL3STRM or PLDL2STRM.
9204 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
9205 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
9206 }
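// Worked example (illustrative): for a temporal fetch the ACLE cache level is
// converted to LLVM prefetch locality as Locality = 3 - CacheLevel, so
// level 0 (L1) -> locality 3, level 1 (L2) -> locality 2, and level 2 (L3) ->
// locality 1, while streaming fetches always use locality 0.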
9207
9208 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
9209 assert((getContext().getTypeSize(E->getType()) == 32) &&
9210 "rbit of unusual size!");
9211 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9212 return Builder.CreateCall(
9213 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
9214 }
9215 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
9216 assert((getContext().getTypeSize(E->getType()) == 64) &&
9217 "rbit of unusual size!");
9218 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9219 return Builder.CreateCall(
9220 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
9221 }
9222
9223 if (BuiltinID == AArch64::BI__builtin_arm_cls) {
9224 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9225 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
9226 "cls");
9227 }
9228 if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
9229 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9230 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
9231 "cls");
9232 }
9233
9234 if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
9235 BuiltinID == AArch64::BI__builtin_arm_frint32z) {
9236 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9237 llvm::Type *Ty = Arg->getType();
9238 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
9239 Arg, "frint32z");
9240 }
9241
9242 if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
9243 BuiltinID == AArch64::BI__builtin_arm_frint64z) {
9244 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9245 llvm::Type *Ty = Arg->getType();
9246 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
9247 Arg, "frint64z");
9248 }
9249
9250 if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
9251 BuiltinID == AArch64::BI__builtin_arm_frint32x) {
9252 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9253 llvm::Type *Ty = Arg->getType();
9254 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
9255 Arg, "frint32x");
9256 }
9257
9258 if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
9259 BuiltinID == AArch64::BI__builtin_arm_frint64x) {
9260 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9261 llvm::Type *Ty = Arg->getType();
9262 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
9263 Arg, "frint64x");
9264 }
9265
9266 if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
9267 assert((getContext().getTypeSize(E->getType()) == 32) &&
9268 "__jcvt of unusual size!");
9269 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9270 return Builder.CreateCall(
9271 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
9272 }
9273
9274 if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
9275 BuiltinID == AArch64::BI__builtin_arm_st64b ||
9276 BuiltinID == AArch64::BI__builtin_arm_st64bv ||
9277 BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
9278 llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
9279 llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
9280
9281 if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
9282 // Load from the address via an LLVM intrinsic, receiving a
9283 // tuple of 8 i64 words, and store each one to ValPtr.
9284 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
9285 llvm::Value *Val = Builder.CreateCall(F, MemAddr);
9286 llvm::Value *ToRet;
9287 for (size_t i = 0; i < 8; i++) {
9288 llvm::Value *ValOffsetPtr = Builder.CreateGEP(ValPtr, Builder.getInt32(i));
9289 Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
9290 ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
9291 }
9292 return ToRet;
9293 } else {
9294 // Load 8 i64 words from ValPtr, and store them to the address
9295 // via an LLVM intrinsic.
9296 SmallVector<llvm::Value *, 9> Args;
9297 Args.push_back(MemAddr);
9298 for (size_t i = 0; i < 8; i++) {
9299 llvm::Value *ValOffsetPtr = Builder.CreateGEP(ValPtr, Builder.getInt32(i));
9300 Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
9301 Args.push_back(Builder.CreateLoad(Addr));
9302 }
9303
9304 auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
9305 ? Intrinsic::aarch64_st64b
9306 : BuiltinID == AArch64::BI__builtin_arm_st64bv
9307 ? Intrinsic::aarch64_st64bv
9308 : Intrinsic::aarch64_st64bv0);
9309 Function *F = CGM.getIntrinsic(Intr);
9310 return Builder.CreateCall(F, Args);
9311 }
9312 }
9313
9314 if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
9315 BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
9316
9317 auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
9318 ? Intrinsic::aarch64_rndr
9319 : Intrinsic::aarch64_rndrrs);
9320 Function *F = CGM.getIntrinsic(Intr);
9321 llvm::Value *Val = Builder.CreateCall(F);
9322 Value *RandomValue = Builder.CreateExtractValue(Val, 0);
9323 Value *Status = Builder.CreateExtractValue(Val, 1);
9324
9325 Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
9326 Builder.CreateStore(RandomValue, MemAddress);
9327 Status = Builder.CreateZExt(Status, Int32Ty);
9328 return Status;
9329 }
9330
9331 if (BuiltinID == AArch64::BI__clear_cache) {
9332 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
9333 const FunctionDecl *FD = E->getDirectCallee();
9334 Value *Ops[2];
9335 for (unsigned i = 0; i < 2; i++)
9336 Ops[i] = EmitScalarExpr(E->getArg(i));
9337 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
9338 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
9339 StringRef Name = FD->getName();
9340 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
9341 }
9342
9343 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
9344 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
9345 getContext().getTypeSize(E->getType()) == 128) {
9346 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
9347 ? Intrinsic::aarch64_ldaxp
9348 : Intrinsic::aarch64_ldxp);
9349
9350 Value *LdPtr = EmitScalarExpr(E->getArg(0));
9351 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
9352 "ldxp");
9353
9354 Value *Val0 = Builder.CreateExtractValue(Val, 1);
9355 Value *Val1 = Builder.CreateExtractValue(Val, 0);
9356 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
9357 Val0 = Builder.CreateZExt(Val0, Int128Ty);
9358 Val1 = Builder.CreateZExt(Val1, Int128Ty);
9359
9360 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
9361 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
9362 Val = Builder.CreateOr(Val, Val1);
9363 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
9364 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
9365 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
9366 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
9367
9368 QualType Ty = E->getType();
9369 llvm::Type *RealResTy = ConvertType(Ty);
9370 llvm::Type *PtrTy = llvm::IntegerType::get(
9371 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
9372 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
9373
9374 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
9375 ? Intrinsic::aarch64_ldaxr
9376 : Intrinsic::aarch64_ldxr,
9377 PtrTy);
9378 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
9379
9380 if (RealResTy->isPointerTy())
9381 return Builder.CreateIntToPtr(Val, RealResTy);
9382
9383 llvm::Type *IntResTy = llvm::IntegerType::get(
9384 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
9385 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
9386 return Builder.CreateBitCast(Val, RealResTy);
9387 }
9388
9389 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
9390 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
9391 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
9392 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9393 ? Intrinsic::aarch64_stlxp
9394 : Intrinsic::aarch64_stxp);
9395 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
9396
9397 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
9398 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
9399
9400 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
9401 llvm::Value *Val = Builder.CreateLoad(Tmp);
9402
9403 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
9404 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
9405 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
9406 Int8PtrTy);
9407 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
9408 }
9409
9410 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
9411 BuiltinID == AArch64::BI__builtin_arm_stlex) {
9412 Value *StoreVal = EmitScalarExpr(E->getArg(0));
9413 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
9414
9415 QualType Ty = E->getArg(0)->getType();
9416 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
9417 getContext().getTypeSize(Ty));
9418 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
9419
9420 if (StoreVal->getType()->isPointerTy())
9421 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
9422 else {
9423 llvm::Type *IntTy = llvm::IntegerType::get(
9424 getLLVMContext(),
9425 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
9426 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
9427 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
9428 }
9429
9430 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9431 ? Intrinsic::aarch64_stlxr
9432 : Intrinsic::aarch64_stxr,
9433 StoreAddr->getType());
9434 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
9435 }
9436
9437 if (BuiltinID == AArch64::BI__getReg) {
9438 Expr::EvalResult Result;
9439 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
9440 llvm_unreachable("Sema will ensure that the parameter is constant");
9441
9442 llvm::APSInt Value = Result.Val.getInt();
9443 LLVMContext &Context = CGM.getLLVMContext();
9444 std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
9445
9446 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
9447 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9448 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9449
9450 llvm::Function *F =
9451 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
9452 return Builder.CreateCall(F, Metadata);
9453 }
9454
9455 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
9456 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
9457 return Builder.CreateCall(F);
9458 }
9459
9460 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
9461 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
9462 llvm::SyncScope::SingleThread);
9463
9464 // CRC32
9465 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
9466 switch (BuiltinID) {
9467 case AArch64::BI__builtin_arm_crc32b:
9468 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
9469 case AArch64::BI__builtin_arm_crc32cb:
9470 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
9471 case AArch64::BI__builtin_arm_crc32h:
9472 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
9473 case AArch64::BI__builtin_arm_crc32ch:
9474 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
9475 case AArch64::BI__builtin_arm_crc32w:
9476 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
9477 case AArch64::BI__builtin_arm_crc32cw:
9478 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
9479 case AArch64::BI__builtin_arm_crc32d:
9480 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
9481 case AArch64::BI__builtin_arm_crc32cd:
9482 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
9483 }
9484
9485 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
9486 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9487 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9488 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
9489
9490 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
9491 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
9492
9493 return Builder.CreateCall(F, {Arg0, Arg1});
9494 }
9495
9496 // Memory Tagging Extensions (MTE) Intrinsics
9497 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
9498 switch (BuiltinID) {
9499 case AArch64::BI__builtin_arm_irg:
9500 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
9501 case AArch64::BI__builtin_arm_addg:
9502 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
9503 case AArch64::BI__builtin_arm_gmi:
9504 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
9505 case AArch64::BI__builtin_arm_ldg:
9506 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
9507 case AArch64::BI__builtin_arm_stg:
9508 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
9509 case AArch64::BI__builtin_arm_subp:
9510 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
9511 }
9512
9513 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
9514 llvm::Type *T = ConvertType(E->getType());
9515
9516 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
9517 Value *Pointer = EmitScalarExpr(E->getArg(0));
9518 Value *Mask = EmitScalarExpr(E->getArg(1));
9519
9520 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9521 Mask = Builder.CreateZExt(Mask, Int64Ty);
9522 Value *RV = Builder.CreateCall(
9523 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
9524 return Builder.CreatePointerCast(RV, T);
9525 }
9526 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
9527 Value *Pointer = EmitScalarExpr(E->getArg(0));
9528 Value *TagOffset = EmitScalarExpr(E->getArg(1));
9529
9530 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9531 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
9532 Value *RV = Builder.CreateCall(
9533 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
9534 return Builder.CreatePointerCast(RV, T);
9535 }
9536 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
9537 Value *Pointer = EmitScalarExpr(E->getArg(0));
9538 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
9539
9540 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
9541 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9542 return Builder.CreateCall(
9543 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
9544 }
9545 // Although it is possible to supply a different return
9546 // address (first arg) to this intrinsic, for now we set
9547 // return address same as input address.
9548 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
9549 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9550 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9551 Value *RV = Builder.CreateCall(
9552 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9553 return Builder.CreatePointerCast(RV, T);
9554 }
9555 // Although it is possible to supply a different tag (to set)
9556 // to this intrinsic (as first arg), for now we supply
9557 // the tag that is in input address arg (common use case).
9558 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
9559 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9560 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9561 return Builder.CreateCall(
9562 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9563 }
9564 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
9565 Value *PointerA = EmitScalarExpr(E->getArg(0));
9566 Value *PointerB = EmitScalarExpr(E->getArg(1));
9567 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
9568 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
9569 return Builder.CreateCall(
9570 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
9571 }
9572 }
9573
9574 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9575 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9576 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9577 BuiltinID == AArch64::BI__builtin_arm_wsr ||
9578 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
9579 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
9580
9581 SpecialRegisterAccessKind AccessKind = Write;
9582 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9583 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9584 BuiltinID == AArch64::BI__builtin_arm_rsrp)
9585 AccessKind = VolatileRead;
9586
9587 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9588 BuiltinID == AArch64::BI__builtin_arm_wsrp;
9589
9590 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
9591 BuiltinID != AArch64::BI__builtin_arm_wsr;
9592
9593 llvm::Type *ValueType;
9594 llvm::Type *RegisterType = Int64Ty;
9595 if (IsPointerBuiltin) {
9596 ValueType = VoidPtrTy;
9597 } else if (Is64Bit) {
9598 ValueType = Int64Ty;
9599 } else {
9600 ValueType = Int32Ty;
9601 }
9602
9603 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
9604 AccessKind);
9605 }
9606
9607 if (BuiltinID == AArch64::BI_ReadStatusReg ||
9608 BuiltinID == AArch64::BI_WriteStatusReg) {
9609 LLVMContext &Context = CGM.getLLVMContext();
9610
9611 unsigned SysReg =
9612 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
9613
9614 std::string SysRegStr;
9615 llvm::raw_string_ostream(SysRegStr) <<
9616 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
9617 ((SysReg >> 11) & 7) << ":" <<
9618 ((SysReg >> 7) & 15) << ":" <<
9619 ((SysReg >> 3) & 15) << ":" <<
9620 ( SysReg & 7);
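      // The resulting string has the MSR/MRS encoding form "op0:op1:CRn:CRm:op2";
      // op0 is rebuilt as 2 | bit 14, so it is always 2 or 3.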
9621
9622 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
9623 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9624 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9625
9626 llvm::Type *RegisterType = Int64Ty;
9627 llvm::Type *Types[] = { RegisterType };
9628
9629 if (BuiltinID == AArch64::BI_ReadStatusReg) {
9630 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
9631
9632 return Builder.CreateCall(F, Metadata);
9633 }
9634
9635 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
9636 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
9637
9638 return Builder.CreateCall(F, { Metadata, ArgValue });
9639 }
9640
9641 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
9642 llvm::Function *F =
9643 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
9644 return Builder.CreateCall(F);
9645 }
9646
9647 if (BuiltinID == AArch64::BI__builtin_sponentry) {
9648 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
9649 return Builder.CreateCall(F);
9650 }
9651
9652 // Handle MSVC intrinsics before argument evaluation to prevent double
9653 // evaluation.
9654 if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
9655 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
9656
9657 // Find out if any arguments are required to be integer constant
9658 // expressions.
9659 unsigned ICEArguments = 0;
9660 ASTContext::GetBuiltinTypeError Error;
9661 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9662 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9663
9664 llvm::SmallVector<Value*, 4> Ops;
9665 Address PtrOp0 = Address::invalid();
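      // Emit all but the last argument up front. For the overloaded NEON builtins the
      // trailing argument is the type-flags immediate, which is handled separately below.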
9666 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
9667 if (i == 0) {
9668 switch (BuiltinID) {
9669 case NEON::BI__builtin_neon_vld1_v:
9670 case NEON::BI__builtin_neon_vld1q_v:
9671 case NEON::BI__builtin_neon_vld1_dup_v:
9672 case NEON::BI__builtin_neon_vld1q_dup_v:
9673 case NEON::BI__builtin_neon_vld1_lane_v:
9674 case NEON::BI__builtin_neon_vld1q_lane_v:
9675 case NEON::BI__builtin_neon_vst1_v:
9676 case NEON::BI__builtin_neon_vst1q_v:
9677 case NEON::BI__builtin_neon_vst1_lane_v:
9678 case NEON::BI__builtin_neon_vst1q_lane_v:
9679 // Get the alignment for the argument in addition to the value;
9680 // we'll use it later.
9681 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
9682 Ops.push_back(PtrOp0.getPointer());
9683 continue;
9684 }
9685 }
9686 if ((ICEArguments & (1 << i)) == 0) {
9687 Ops.push_back(EmitScalarExpr(E->getArg(i)));
9688 } else {
9689 // If this is required to be a constant, constant fold it so that we know
9690 // that the generated intrinsic gets a ConstantInt.
9691 Ops.push_back(llvm::ConstantInt::get(
9692 getLLVMContext(),
9693 *E->getArg(i)->getIntegerConstantExpr(getContext())));
9694 }
9695 }
9696
9697 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
9698 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
9699 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
9700
9701 if (Builtin) {
9702 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
9703 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
9704 assert(Result && "SISD intrinsic should have been handled");
9705 return Result;
9706 }
9707
9708 const Expr *Arg = E->getArg(E->getNumArgs()-1);
9709 NeonTypeFlags Type(0);
9710 if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
9711 // Determine the type of this overloaded NEON intrinsic.
9712 Type = NeonTypeFlags(Result->getZExtValue());
9713
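      // The flags word packs the element type together with the unsigned and quad
      // (128-bit) bits.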
9714 bool usgn = Type.isUnsigned();
9715 bool quad = Type.isQuad();
9716
9717 // Handle non-overloaded intrinsics first.
9718 switch (BuiltinID) {
9719 default: break;
9720 case NEON::BI__builtin_neon_vabsh_f16:
9721 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9722 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
9723 case NEON::BI__builtin_neon_vaddq_p128: {
9724 llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
9725 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9726 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9727 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
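      // Addition of 128-bit polynomials is carry-less, i.e. a plain XOR.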
9728 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
9729 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9730 return Builder.CreateBitCast(Ops[0], Int128Ty);
9731 }
9732 case NEON::BI__builtin_neon_vldrq_p128: {
9733 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9734 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
9735 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
9736 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
9737 CharUnits::fromQuantity(16));
9738 }
9739 case NEON::BI__builtin_neon_vstrq_p128: {
9740 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
9741 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9742 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9743 }
9744 case NEON::BI__builtin_neon_vcvts_f32_u32:
9745 case NEON::BI__builtin_neon_vcvtd_f64_u64:
9746 usgn = true;
9747 LLVM_FALLTHROUGH;
9748 case NEON::BI__builtin_neon_vcvts_f32_s32:
9749 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9750 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9751 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9752 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9753 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9754 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9755 if (usgn)
9756 return Builder.CreateUIToFP(Ops[0], FTy);
9757 return Builder.CreateSIToFP(Ops[0], FTy);
9758 }
9759 case NEON::BI__builtin_neon_vcvth_f16_u16:
9760 case NEON::BI__builtin_neon_vcvth_f16_u32:
9761 case NEON::BI__builtin_neon_vcvth_f16_u64:
9762 usgn = true;
9763 LLVM_FALLTHROUGH;
9764 case NEON::BI__builtin_neon_vcvth_f16_s16:
9765 case NEON::BI__builtin_neon_vcvth_f16_s32:
9766 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9767 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9768 llvm::Type *FTy = HalfTy;
9769 llvm::Type *InTy;
9770 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9771 InTy = Int64Ty;
9772 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9773 InTy = Int32Ty;
9774 else
9775 InTy = Int16Ty;
9776 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9777 if (usgn)
9778 return Builder.CreateUIToFP(Ops[0], FTy);
9779 return Builder.CreateSIToFP(Ops[0], FTy);
9780 }
9781 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9782 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9783 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9784 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9785 case NEON::BI__builtin_neon_vcvth_u16_f16:
9786 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9787 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9788 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9789 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9790 case NEON::BI__builtin_neon_vcvth_s16_f16: {
9791 unsigned Int;
9792 llvm::Type* InTy = Int32Ty;
9793 llvm::Type* FTy = HalfTy;
9794 llvm::Type *Tys[2] = {InTy, FTy};
9795 Ops.push_back(EmitScalarExpr(E->getArg(0)));
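      // Emit the conversion with an i32 result type and truncate back down to the
      // i16 the builtin returns.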
9796 switch (BuiltinID) {
9797 default: llvm_unreachable("missing builtin ID in switch!");
9798 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9799 Int = Intrinsic::aarch64_neon_fcvtau; break;
9800 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9801 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9802 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9803 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9804 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9805 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9806 case NEON::BI__builtin_neon_vcvth_u16_f16:
9807 Int = Intrinsic::aarch64_neon_fcvtzu; break;
9808 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9809 Int = Intrinsic::aarch64_neon_fcvtas; break;
9810 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9811 Int = Intrinsic::aarch64_neon_fcvtms; break;
9812 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9813 Int = Intrinsic::aarch64_neon_fcvtns; break;
9814 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9815 Int = Intrinsic::aarch64_neon_fcvtps; break;
9816 case NEON::BI__builtin_neon_vcvth_s16_f16:
9817 Int = Intrinsic::aarch64_neon_fcvtzs; break;
9818 }
9819 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
9820 return Builder.CreateTrunc(Ops[0], Int16Ty);
9821 }
9822 case NEON::BI__builtin_neon_vcaleh_f16:
9823 case NEON::BI__builtin_neon_vcalth_f16:
9824 case NEON::BI__builtin_neon_vcageh_f16:
9825 case NEON::BI__builtin_neon_vcagth_f16: {
9826 unsigned Int;
9827 llvm::Type* InTy = Int32Ty;
9828 llvm::Type* FTy = HalfTy;
9829 llvm::Type *Tys[2] = {InTy, FTy};
9830 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9831 switch (BuiltinID) {
9832 default: llvm_unreachable("missing builtin ID in switch!");
9833 case NEON::BI__builtin_neon_vcageh_f16:
9834 Int = Intrinsic::aarch64_neon_facge; break;
9835 case NEON::BI__builtin_neon_vcagth_f16:
9836 Int = Intrinsic::aarch64_neon_facgt; break;
9837 case NEON::BI__builtin_neon_vcaleh_f16:
9838 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
9839 case NEON::BI__builtin_neon_vcalth_f16:
9840 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
9841 }
9842 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
9843 return Builder.CreateTrunc(Ops[0], Int16Ty);
9844 }
9845 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9846 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
9847 unsigned Int;
9848 llvm::Type* InTy = Int32Ty;
9849 llvm::Type* FTy = HalfTy;
9850 llvm::Type *Tys[2] = {InTy, FTy};
9851 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9852 switch (BuiltinID) {
9853 default: llvm_unreachable("missing builtin ID in switch!");
9854 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9855 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
9856 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
9857 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
9858 }
9859 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9860 return Builder.CreateTrunc(Ops[0], Int16Ty);
9861 }
9862 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9863 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
9864 unsigned Int;
9865 llvm::Type* FTy = HalfTy;
9866 llvm::Type* InTy = Int32Ty;
9867 llvm::Type *Tys[2] = {FTy, InTy};
9868 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9869 switch (BuiltinID) {
9870 default: llvm_unreachable("missing builtin ID in switch!");
9871 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9872 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
9873 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
9874 break;
9875 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
9876 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
9877 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
9878 break;
9879 }
9880 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9881 }
9882 case NEON::BI__builtin_neon_vpaddd_s64: {
9883 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
9884 Value *Vec = EmitScalarExpr(E->getArg(0));
9886 // The vector is v2i64, so make sure it's bitcast to that.
9886 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
9887 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9888 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9889 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9890 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9891 // Pairwise addition of a v2i64 into a scalar i64.
9892 return Builder.CreateAdd(Op0, Op1, "vpaddd");
9893 }
9894 case NEON::BI__builtin_neon_vpaddd_f64: {
9895 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
9896 Value *Vec = EmitScalarExpr(E->getArg(0));
9897 // The vector is v2f64, so make sure it's bitcast to that.
9898 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
9899 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9900 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9901 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9902 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9903 // Pairwise addition of a v2f64 into a scalar f64.
9904 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9905 }
9906 case NEON::BI__builtin_neon_vpadds_f32: {
9907 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
9908 Value *Vec = EmitScalarExpr(E->getArg(0));
9909 // The vector is v2f32, so make sure it's bitcast to that.
9910 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
9911 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9912 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9913 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9914 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9915 // Pairwise addition of a v2f32 into a scalar f32.
9916 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9917 }
9918 case NEON::BI__builtin_neon_vceqzd_s64:
9919 case NEON::BI__builtin_neon_vceqzd_f64:
9920 case NEON::BI__builtin_neon_vceqzs_f32:
9921 case NEON::BI__builtin_neon_vceqzh_f16:
9922 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9923 return EmitAArch64CompareBuiltinExpr(
9924 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9925 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
9926 case NEON::BI__builtin_neon_vcgezd_s64:
9927 case NEON::BI__builtin_neon_vcgezd_f64:
9928 case NEON::BI__builtin_neon_vcgezs_f32:
9929 case NEON::BI__builtin_neon_vcgezh_f16:
9930 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9931 return EmitAArch64CompareBuiltinExpr(
9932 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9933 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
9934 case NEON::BI__builtin_neon_vclezd_s64:
9935 case NEON::BI__builtin_neon_vclezd_f64:
9936 case NEON::BI__builtin_neon_vclezs_f32:
9937 case NEON::BI__builtin_neon_vclezh_f16:
9938 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9939 return EmitAArch64CompareBuiltinExpr(
9940 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9941 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
9942 case NEON::BI__builtin_neon_vcgtzd_s64:
9943 case NEON::BI__builtin_neon_vcgtzd_f64:
9944 case NEON::BI__builtin_neon_vcgtzs_f32:
9945 case NEON::BI__builtin_neon_vcgtzh_f16:
9946 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9947 return EmitAArch64CompareBuiltinExpr(
9948 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9949 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
9950 case NEON::BI__builtin_neon_vcltzd_s64:
9951 case NEON::BI__builtin_neon_vcltzd_f64:
9952 case NEON::BI__builtin_neon_vcltzs_f32:
9953 case NEON::BI__builtin_neon_vcltzh_f16:
9954 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9955 return EmitAArch64CompareBuiltinExpr(
9956 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9957 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
9958
9959 case NEON::BI__builtin_neon_vceqzd_u64: {
9960 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9961 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9962 Ops[0] =
9963 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
9964 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
9965 }
9966 case NEON::BI__builtin_neon_vceqd_f64:
9967 case NEON::BI__builtin_neon_vcled_f64:
9968 case NEON::BI__builtin_neon_vcltd_f64:
9969 case NEON::BI__builtin_neon_vcged_f64:
9970 case NEON::BI__builtin_neon_vcgtd_f64: {
9971 llvm::CmpInst::Predicate P;
9972 switch (BuiltinID) {
9973 default: llvm_unreachable("missing builtin ID in switch!");
9974 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
9975 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
9976 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
9977 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
9978 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
9979 }
9980 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9981 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9982 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9983 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9984 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
9985 }
9986 case NEON::BI__builtin_neon_vceqs_f32:
9987 case NEON::BI__builtin_neon_vcles_f32:
9988 case NEON::BI__builtin_neon_vclts_f32:
9989 case NEON::BI__builtin_neon_vcges_f32:
9990 case NEON::BI__builtin_neon_vcgts_f32: {
9991 llvm::CmpInst::Predicate P;
9992 switch (BuiltinID) {
9993 default: llvm_unreachable("missing builtin ID in switch!");
9994 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
9995 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
9996 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
9997 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
9998 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
9999 }
10000 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10001 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
10002 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
10003 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10004 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
10005 }
10006 case NEON::BI__builtin_neon_vceqh_f16:
10007 case NEON::BI__builtin_neon_vcleh_f16:
10008 case NEON::BI__builtin_neon_vclth_f16:
10009 case NEON::BI__builtin_neon_vcgeh_f16:
10010 case NEON::BI__builtin_neon_vcgth_f16: {
10011 llvm::CmpInst::Predicate P;
10012 switch (BuiltinID) {
10013 default: llvm_unreachable("missing builtin ID in switch!");
10014 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
10015 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
10016 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
10017 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
10018 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
10019 }
10020 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10021 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
10022 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
10023 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10024 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
10025 }
10026 case NEON::BI__builtin_neon_vceqd_s64:
10027 case NEON::BI__builtin_neon_vceqd_u64:
10028 case NEON::BI__builtin_neon_vcgtd_s64:
10029 case NEON::BI__builtin_neon_vcgtd_u64:
10030 case NEON::BI__builtin_neon_vcltd_s64:
10031 case NEON::BI__builtin_neon_vcltd_u64:
10032 case NEON::BI__builtin_neon_vcged_u64:
10033 case NEON::BI__builtin_neon_vcged_s64:
10034 case NEON::BI__builtin_neon_vcled_u64:
10035 case NEON::BI__builtin_neon_vcled_s64: {
10036 llvm::CmpInst::Predicate P;
10037 switch (BuiltinID) {
10038 default: llvm_unreachable("missing builtin ID in switch!");
10039 case NEON::BI__builtin_neon_vceqd_s64:
10040 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
10041 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
10042 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
10043 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
10044 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
10045 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
10046 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
10047 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
10048 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
10049 }
10050 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10051 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10052 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10053 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
10054 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
10055 }
10056 case NEON::BI__builtin_neon_vtstd_s64:
10057 case NEON::BI__builtin_neon_vtstd_u64: {
10058 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10059 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10060 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10061 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
10062 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
10063 llvm::Constant::getNullValue(Int64Ty));
10064 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
10065 }
10066 case NEON::BI__builtin_neon_vset_lane_i8:
10067 case NEON::BI__builtin_neon_vset_lane_i16:
10068 case NEON::BI__builtin_neon_vset_lane_i32:
10069 case NEON::BI__builtin_neon_vset_lane_i64:
10070 case NEON::BI__builtin_neon_vset_lane_bf16:
10071 case NEON::BI__builtin_neon_vset_lane_f32:
10072 case NEON::BI__builtin_neon_vsetq_lane_i8:
10073 case NEON::BI__builtin_neon_vsetq_lane_i16:
10074 case NEON::BI__builtin_neon_vsetq_lane_i32:
10075 case NEON::BI__builtin_neon_vsetq_lane_i64:
10076 case NEON::BI__builtin_neon_vsetq_lane_bf16:
10077 case NEON::BI__builtin_neon_vsetq_lane_f32:
10078 Ops.push_back(EmitScalarExpr(E->getArg(2)));
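      // Ops now holds { scalar, vector, lane }; a single insertelement implements
      // all of these vset(q)_lane variants.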
10079 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10080 case NEON::BI__builtin_neon_vset_lane_f64:
10081 // The vector type needs a cast for the v1f64 variant.
10082 Ops[1] =
10083 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
10084 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10085 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10086 case NEON::BI__builtin_neon_vsetq_lane_f64:
10087 // The vector type needs a cast for the v2f64 variant.
10088 Ops[1] =
10089 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
10090 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10091 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10092
10093 case NEON::BI__builtin_neon_vget_lane_i8:
10094 case NEON::BI__builtin_neon_vdupb_lane_i8:
10095 Ops[0] =
10096 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
10097 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10098 "vget_lane");
10099 case NEON::BI__builtin_neon_vgetq_lane_i8:
10100 case NEON::BI__builtin_neon_vdupb_laneq_i8:
10101 Ops[0] =
10102 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
10103 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10104 "vgetq_lane");
10105 case NEON::BI__builtin_neon_vget_lane_i16:
10106 case NEON::BI__builtin_neon_vduph_lane_i16:
10107 Ops[0] =
10108 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
10109 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10110 "vget_lane");
10111 case NEON::BI__builtin_neon_vgetq_lane_i16:
10112 case NEON::BI__builtin_neon_vduph_laneq_i16:
10113 Ops[0] =
10114 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
10115 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10116 "vgetq_lane");
10117 case NEON::BI__builtin_neon_vget_lane_i32:
10118 case NEON::BI__builtin_neon_vdups_lane_i32:
10119 Ops[0] =
10120 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
10121 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10122 "vget_lane");
10123 case NEON::BI__builtin_neon_vdups_lane_f32:
10124 Ops[0] =
10125 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
10126 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10127 "vdups_lane");
10128 case NEON::BI__builtin_neon_vgetq_lane_i32:
10129 case NEON::BI__builtin_neon_vdups_laneq_i32:
10130 Ops[0] =
10131 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
10132 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10133 "vgetq_lane");
10134 case NEON::BI__builtin_neon_vget_lane_i64:
10135 case NEON::BI__builtin_neon_vdupd_lane_i64:
10136 Ops[0] =
10137 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
10138 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10139 "vget_lane");
10140 case NEON::BI__builtin_neon_vdupd_lane_f64:
10141 Ops[0] =
10142 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
10143 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10144 "vdupd_lane");
10145 case NEON::BI__builtin_neon_vgetq_lane_i64:
10146 case NEON::BI__builtin_neon_vdupd_laneq_i64:
10147 Ops[0] =
10148 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
10149 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10150 "vgetq_lane");
10151 case NEON::BI__builtin_neon_vget_lane_f32:
10152 Ops[0] =
10153 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
10154 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10155 "vget_lane");
10156 case NEON::BI__builtin_neon_vget_lane_f64:
10157 Ops[0] =
10158 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
10159 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10160 "vget_lane");
10161 case NEON::BI__builtin_neon_vgetq_lane_f32:
10162 case NEON::BI__builtin_neon_vdups_laneq_f32:
10163 Ops[0] =
10164 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
10165 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10166 "vgetq_lane");
10167 case NEON::BI__builtin_neon_vgetq_lane_f64:
10168 case NEON::BI__builtin_neon_vdupd_laneq_f64:
10169 Ops[0] =
10170 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
10171 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10172 "vgetq_lane");
10173 case NEON::BI__builtin_neon_vaddh_f16:
10174 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10175 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
10176 case NEON::BI__builtin_neon_vsubh_f16:
10177 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10178 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
10179 case NEON::BI__builtin_neon_vmulh_f16:
10180 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10181 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
10182 case NEON::BI__builtin_neon_vdivh_f16:
10183 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10184 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
10185 case NEON::BI__builtin_neon_vfmah_f16:
10186 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
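      // e.g. vfmah_f16(a, b, c) becomes fma(b, c, a), i.e. a + b * c.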
10187 return emitCallMaybeConstrainedFPBuiltin(
10188 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
10189 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
10190 case NEON::BI__builtin_neon_vfmsh_f16: {
10191 // FIXME: This should be an fneg instruction:
10192 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
10193 Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
10194
10195 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
10196 return emitCallMaybeConstrainedFPBuiltin(
10197 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
10198 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
10199 }
10200 case NEON::BI__builtin_neon_vaddd_s64:
10201 case NEON::BI__builtin_neon_vaddd_u64:
10202 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
10203 case NEON::BI__builtin_neon_vsubd_s64:
10204 case NEON::BI__builtin_neon_vsubd_u64:
10205 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
10206 case NEON::BI__builtin_neon_vqdmlalh_s16:
10207 case NEON::BI__builtin_neon_vqdmlslh_s16: {
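      // Widen the i16 operands into v4i16 vectors, perform the saturating doubling
      // multiply as a vector op, then pull lane 0 of the v4i32 result back out.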
10208 SmallVector<Value *, 2> ProductOps;
10209 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10210 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
10211 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10212 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10213 ProductOps, "vqdmlXl");
10214 Constant *CI = ConstantInt::get(SizeTy, 0);
10215 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10216
10217 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
10218 ? Intrinsic::aarch64_neon_sqadd
10219 : Intrinsic::aarch64_neon_sqsub;
10220 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
10221 }
10222 case NEON::BI__builtin_neon_vqshlud_n_s64: {
10223 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10224 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
10225 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
10226 Ops, "vqshlu_n");
10227 }
10228 case NEON::BI__builtin_neon_vqshld_n_u64:
10229 case NEON::BI__builtin_neon_vqshld_n_s64: {
10230 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
10231 ? Intrinsic::aarch64_neon_uqshl
10232 : Intrinsic::aarch64_neon_sqshl;
10233 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10234 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
10235 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
10236 }
10237 case NEON::BI__builtin_neon_vrshrd_n_u64:
10238 case NEON::BI__builtin_neon_vrshrd_n_s64: {
10239 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
10240 ? Intrinsic::aarch64_neon_urshl
10241 : Intrinsic::aarch64_neon_srshl;
10242 Ops.push_back(EmitScalarExpr(E->getArg(1)));
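      // The rounding right shift is emitted as a rounding left shift (urshl/srshl)
      // by the negated amount.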
10243 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
10244 Ops[1] = ConstantInt::get(Int64Ty, -SV);
10245 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
10246 }
10247 case NEON::BI__builtin_neon_vrsrad_n_u64:
10248 case NEON::BI__builtin_neon_vrsrad_n_s64: {
10249 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
10250 ? Intrinsic::aarch64_neon_urshl
10251 : Intrinsic::aarch64_neon_srshl;
10252 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10253 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
10254 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
10255 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
10256 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
10257 }
10258 case NEON::BI__builtin_neon_vshld_n_s64:
10259 case NEON::BI__builtin_neon_vshld_n_u64: {
10260 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10261 return Builder.CreateShl(
10262 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
10263 }
10264 case NEON::BI__builtin_neon_vshrd_n_s64: {
10265 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
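      // Clamp the shift amount to 63: an i64 ashr by 64 would be poison, and
      // shifting by 63 already yields all copies of the sign bit.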
10266 return Builder.CreateAShr(
10267 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10268 Amt->getZExtValue())),
10269 "shrd_n");
10270 }
10271 case NEON::BI__builtin_neon_vshrd_n_u64: {
10272 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10273 uint64_t ShiftAmt = Amt->getZExtValue();
10274 // Right-shifting an unsigned value by its size yields 0.
10275 if (ShiftAmt == 64)
10276 return ConstantInt::get(Int64Ty, 0);
10277 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
10278 "shrd_n");
10279 }
10280 case NEON::BI__builtin_neon_vsrad_n_s64: {
10281 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10282 Ops[1] = Builder.CreateAShr(
10283 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10284 Amt->getZExtValue())),
10285 "shrd_n");
10286 return Builder.CreateAdd(Ops[0], Ops[1]);
10287 }
10288 case NEON::BI__builtin_neon_vsrad_n_u64: {
10289 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10290 uint64_t ShiftAmt = Amt->getZExtValue();
10291 // Right-shifting an unsigned value by its size yields 0.
10292 // As Op + 0 = Op, return Ops[0] directly.
10293 if (ShiftAmt == 64)
10294 return Ops[0];
10295 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
10296 "shrd_n");
10297 return Builder.CreateAdd(Ops[0], Ops[1]);
10298 }
10299 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
10300 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
10301 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
10302 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
10303 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10304 "lane");
10305 SmallVector<Value *, 2> ProductOps;
10306 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10307 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
10308 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10309 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10310 ProductOps, "vqdmlXl");
10311 Constant *CI = ConstantInt::get(SizeTy, 0);
10312 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10313 Ops.pop_back();
10314
10315 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
10316 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
10317 ? Intrinsic::aarch64_neon_sqadd
10318 : Intrinsic::aarch64_neon_sqsub;
10319 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
10320 }
10321 case NEON::BI__builtin_neon_vqdmlals_s32:
10322 case NEON::BI__builtin_neon_vqdmlsls_s32: {
10323 SmallVector<Value *, 2> ProductOps;
10324 ProductOps.push_back(Ops[1]);
10325 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
10326 Ops[1] =
10327 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10328 ProductOps, "vqdmlXl");
10329
10330 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
10331 ? Intrinsic::aarch64_neon_sqadd
10332 : Intrinsic::aarch64_neon_sqsub;
10333 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
10334 }
10335 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
10336 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
10337 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
10338 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
10339 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10340 "lane");
10341 SmallVector<Value *, 2> ProductOps;
10342 ProductOps.push_back(Ops[1]);
10343 ProductOps.push_back(Ops[2]);
10344 Ops[1] =
10345 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10346 ProductOps, "vqdmlXl");
10347 Ops.pop_back();
10348
10349 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
10350 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
10351 ? Intrinsic::aarch64_neon_sqadd
10352 : Intrinsic::aarch64_neon_sqsub;
10353 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
10354 }
10355 case NEON::BI__builtin_neon_vget_lane_bf16:
10356 case NEON::BI__builtin_neon_vduph_lane_bf16:
10357 case NEON::BI__builtin_neon_vduph_lane_f16: {
10358 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10359 "vget_lane");
10360 }
10361 case NEON::BI__builtin_neon_vgetq_lane_bf16:
10362 case NEON::BI__builtin_neon_vduph_laneq_bf16:
10363 case NEON::BI__builtin_neon_vduph_laneq_f16: {
10364 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10365 "vgetq_lane");
10366 }
10367
10368 case AArch64::BI_InterlockedAdd: {
10369 Value *Arg0 = EmitScalarExpr(E->getArg(0));
10370 Value *Arg1 = EmitScalarExpr(E->getArg(1));
10371 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
10372 AtomicRMWInst::Add, Arg0, Arg1,
10373 llvm::AtomicOrdering::SequentiallyConsistent);
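      // atomicrmw returns the value before the addition, so add Arg1 again to yield
      // the updated value that _InterlockedAdd returns.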
10374 return Builder.CreateAdd(RMWI, Arg1);
10375 }
10376 }
10377
10378 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
10379 llvm::Type *Ty = VTy;
10380 if (!Ty)
10381 return nullptr;
10382
10383 // Not all intrinsics handled by the common case work for AArch64 yet, so only
10384 // defer to common code if it's been added to our special map.
10385 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
10386 AArch64SIMDIntrinsicsProvenSorted);
10387
10388 if (Builtin)
10389 return EmitCommonNeonBuiltinExpr(
10390 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
10391 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
10392 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
10393
10394 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
10395 return V;
10396
10397 unsigned Int;
10398 switch (BuiltinID) {
10399 default: return nullptr;
10400 case NEON::BI__builtin_neon_vbsl_v:
10401 case NEON::BI__builtin_neon_vbslq_v: {
10402 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
10403 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
10404 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
10405 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
10406
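      // Open-coded bitwise select: (mask & b) | (~mask & c).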
10407 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
10408 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
10409 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
10410 return Builder.CreateBitCast(Ops[0], Ty);
10411 }
10412 case NEON::BI__builtin_neon_vfma_lane_v:
10413 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
10414 // The ARM builtins (and instructions) have the addend as the first
10415 // operand, but the 'fma' intrinsics have it last. Swap it around here.
10416 Value *Addend = Ops[0];
10417 Value *Multiplicand = Ops[1];
10418 Value *LaneSource = Ops[2];
10419 Ops[0] = Multiplicand;
10420 Ops[1] = LaneSource;
10421 Ops[2] = Addend;
10422
10423 // Now adjust things to handle the lane access.
10424 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
10425 ? llvm::FixedVectorType::get(VTy->getElementType(),
10426 VTy->getNumElements() / 2)
10427 : VTy;
10428 llvm::Constant *cst = cast<Constant>(Ops[3]);
10429 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
10430 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
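      // Splat the selected lane of the multiplicand across the whole vector before
      // calling fma.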
10431 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
10432
10433 Ops.pop_back();
10434 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
10435 : Intrinsic::fma;
10436 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
10437 }
10438 case NEON::BI__builtin_neon_vfma_laneq_v: {
10439 auto *VTy = cast<llvm::FixedVectorType>(Ty);
10440 // v1f64 fma should be mapped to Neon scalar f64 fma
10441 if (VTy && VTy->getElementType() == DoubleTy) {
10442 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10443 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10444 llvm::FixedVectorType *VTy =
10445 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
10446 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
10447 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10448 Value *Result;
10449 Result = emitCallMaybeConstrainedFPBuiltin(
10450 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
10451 DoubleTy, {Ops[1], Ops[2], Ops[0]});
10452 return Builder.CreateBitCast(Result, Ty);
10453 }
10454 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10455 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10456
10457 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
10458 VTy->getNumElements() * 2);
10459 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
10460 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
10461 cast<ConstantInt>(Ops[3]));
10462 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
10463
10464 return emitCallMaybeConstrainedFPBuiltin(
10465 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10466 {Ops[2], Ops[1], Ops[0]});
10467 }
10468 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
10469 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10470 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10471
10472 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10473 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
10474 return emitCallMaybeConstrainedFPBuiltin(
10475 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10476 {Ops[2], Ops[1], Ops[0]});
10477 }
10478 case NEON::BI__builtin_neon_vfmah_lane_f16:
10479 case NEON::BI__builtin_neon_vfmas_lane_f32:
10480 case NEON::BI__builtin_neon_vfmah_laneq_f16:
10481 case NEON::BI__builtin_neon_vfmas_laneq_f32:
10482 case NEON::BI__builtin_neon_vfmad_lane_f64:
10483 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
10484 Ops.push_back(EmitScalarExpr(E->getArg(3)));
10485 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
10486 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10487 return emitCallMaybeConstrainedFPBuiltin(
10488 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10489 {Ops[1], Ops[2], Ops[0]});
10490 }
10491 case NEON::BI__builtin_neon_vmull_v:
10492 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10493 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
10494 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
10495 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
10496 case NEON::BI__builtin_neon_vmax_v:
10497 case NEON::BI__builtin_neon_vmaxq_v:
10498 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10499 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
10500 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
10501 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
10502 case NEON::BI__builtin_neon_vmaxh_f16: {
10503 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10504 Int = Intrinsic::aarch64_neon_fmax;
10505 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
10506 }
10507 case NEON::BI__builtin_neon_vmin_v:
10508 case NEON::BI__builtin_neon_vminq_v:
10509 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10510 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
10511 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
10512 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
10513 case NEON::BI__builtin_neon_vminh_f16: {
10514 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10515 Int = Intrinsic::aarch64_neon_fmin;
10516 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
10517 }
10518 case NEON::BI__builtin_neon_vabd_v:
10519 case NEON::BI__builtin_neon_vabdq_v:
10520 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10521 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
10522 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
10523 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
10524 case NEON::BI__builtin_neon_vpadal_v:
10525 case NEON::BI__builtin_neon_vpadalq_v: {
10526 unsigned ArgElts = VTy->getNumElements();
10527 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
10528 unsigned BitWidth = EltTy->getBitWidth();
10529 auto *ArgTy = llvm::FixedVectorType::get(
10530 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
10531 llvm::Type* Tys[2] = { VTy, ArgTy };
10532 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
10533 SmallVector<llvm::Value*, 1> TmpOps;
10534 TmpOps.push_back(Ops[1]);
10535 Function *F = CGM.getIntrinsic(Int, Tys);
10536 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
10537 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
10538 return Builder.CreateAdd(tmp, addend);
10539 }
10540 case NEON::BI__builtin_neon_vpmin_v:
10541 case NEON::BI__builtin_neon_vpminq_v:
10542 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10543 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
10544 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
10545 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
10546 case NEON::BI__builtin_neon_vpmax_v:
10547 case NEON::BI__builtin_neon_vpmaxq_v:
10548 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10549 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
10550 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
10551 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
10552 case NEON::BI__builtin_neon_vminnm_v:
10553 case NEON::BI__builtin_neon_vminnmq_v:
10554 Int = Intrinsic::aarch64_neon_fminnm;
10555 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
10556 case NEON::BI__builtin_neon_vminnmh_f16:
10557 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10558 Int = Intrinsic::aarch64_neon_fminnm;
10559 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
10560 case NEON::BI__builtin_neon_vmaxnm_v:
10561 case NEON::BI__builtin_neon_vmaxnmq_v:
10562 Int = Intrinsic::aarch64_neon_fmaxnm;
10563 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
10564 case NEON::BI__builtin_neon_vmaxnmh_f16:
10565 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10566 Int = Intrinsic::aarch64_neon_fmaxnm;
10567 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
10568 case NEON::BI__builtin_neon_vrecpss_f32: {
10569 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10570 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
10571 Ops, "vrecps");
10572 }
10573 case NEON::BI__builtin_neon_vrecpsd_f64:
10574 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10575 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
10576 Ops, "vrecps");
10577 case NEON::BI__builtin_neon_vrecpsh_f16:
10578 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10579 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
10580 Ops, "vrecps");
10581 case NEON::BI__builtin_neon_vqshrun_n_v:
10582 Int = Intrinsic::aarch64_neon_sqshrun;
10583 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
10584 case NEON::BI__builtin_neon_vqrshrun_n_v:
10585 Int = Intrinsic::aarch64_neon_sqrshrun;
10586 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
10587 case NEON::BI__builtin_neon_vqshrn_n_v:
10588 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
10589 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
10590 case NEON::BI__builtin_neon_vrshrn_n_v:
10591 Int = Intrinsic::aarch64_neon_rshrn;
10592 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
10593 case NEON::BI__builtin_neon_vqrshrn_n_v:
10594 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
10595 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
10596 case NEON::BI__builtin_neon_vrndah_f16: {
10597 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10598 Int = Builder.getIsFPConstrained()
10599 ? Intrinsic::experimental_constrained_round
10600 : Intrinsic::round;
10601 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10602 }
10603 case NEON::BI__builtin_neon_vrnda_v:
10604 case NEON::BI__builtin_neon_vrndaq_v: {
10605 Int = Builder.getIsFPConstrained()
10606 ? Intrinsic::experimental_constrained_round
10607 : Intrinsic::round;
10608 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10609 }
10610 case NEON::BI__builtin_neon_vrndih_f16: {
10611 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10612 Int = Builder.getIsFPConstrained()
10613 ? Intrinsic::experimental_constrained_nearbyint
10614 : Intrinsic::nearbyint;
10615 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10616 }
10617 case NEON::BI__builtin_neon_vrndmh_f16: {
10618 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10619 Int = Builder.getIsFPConstrained()
10620 ? Intrinsic::experimental_constrained_floor
10621 : Intrinsic::floor;
10622 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10623 }
10624 case NEON::BI__builtin_neon_vrndm_v:
10625 case NEON::BI__builtin_neon_vrndmq_v: {
10626 Int = Builder.getIsFPConstrained()
10627 ? Intrinsic::experimental_constrained_floor
10628 : Intrinsic::floor;
10629 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10630 }
10631 case NEON::BI__builtin_neon_vrndnh_f16: {
10632 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10633 Int = Builder.getIsFPConstrained()
10634 ? Intrinsic::experimental_constrained_roundeven
10635 : Intrinsic::roundeven;
10636 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10637 }
10638 case NEON::BI__builtin_neon_vrndn_v:
10639 case NEON::BI__builtin_neon_vrndnq_v: {
10640 Int = Builder.getIsFPConstrained()
10641 ? Intrinsic::experimental_constrained_roundeven
10642 : Intrinsic::roundeven;
10643 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10644 }
10645 case NEON::BI__builtin_neon_vrndns_f32: {
10646 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10647 Int = Builder.getIsFPConstrained()
10648 ? Intrinsic::experimental_constrained_roundeven
10649 : Intrinsic::roundeven;
10650 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10651 }
10652 case NEON::BI__builtin_neon_vrndph_f16: {
10653 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10654 Int = Builder.getIsFPConstrained()
10655 ? Intrinsic::experimental_constrained_ceil
10656 : Intrinsic::ceil;
10657 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10658 }
10659 case NEON::BI__builtin_neon_vrndp_v:
10660 case NEON::BI__builtin_neon_vrndpq_v: {
10661 Int = Builder.getIsFPConstrained()
10662 ? Intrinsic::experimental_constrained_ceil
10663 : Intrinsic::ceil;
10664 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10665 }
10666 case NEON::BI__builtin_neon_vrndxh_f16: {
10667 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10668 Int = Builder.getIsFPConstrained()
10669 ? Intrinsic::experimental_constrained_rint
10670 : Intrinsic::rint;
10671 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10672 }
10673 case NEON::BI__builtin_neon_vrndx_v:
10674 case NEON::BI__builtin_neon_vrndxq_v: {
10675 Int = Builder.getIsFPConstrained()
10676 ? Intrinsic::experimental_constrained_rint
10677 : Intrinsic::rint;
10678 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10679 }
10680 case NEON::BI__builtin_neon_vrndh_f16: {
10681 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10682 Int = Builder.getIsFPConstrained()
10683 ? Intrinsic::experimental_constrained_trunc
10684 : Intrinsic::trunc;
10685 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10686 }
10687 case NEON::BI__builtin_neon_vrnd32x_v:
10688 case NEON::BI__builtin_neon_vrnd32xq_v: {
10689 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10690 Int = Intrinsic::aarch64_neon_frint32x;
10691 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
10692 }
10693 case NEON::BI__builtin_neon_vrnd32z_v:
10694 case NEON::BI__builtin_neon_vrnd32zq_v: {
10695 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10696 Int = Intrinsic::aarch64_neon_frint32z;
10697 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
10698 }
10699 case NEON::BI__builtin_neon_vrnd64x_v:
10700 case NEON::BI__builtin_neon_vrnd64xq_v: {
10701 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10702 Int = Intrinsic::aarch64_neon_frint64x;
10703 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
10704 }
10705 case NEON::BI__builtin_neon_vrnd64z_v:
10706 case NEON::BI__builtin_neon_vrnd64zq_v: {
10707 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10708 Int = Intrinsic::aarch64_neon_frint64z;
10709 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
10710 }
10711 case NEON::BI__builtin_neon_vrnd_v:
10712 case NEON::BI__builtin_neon_vrndq_v: {
10713 Int = Builder.getIsFPConstrained()
10714 ? Intrinsic::experimental_constrained_trunc
10715 : Intrinsic::trunc;
10716 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10717 }
10718 case NEON::BI__builtin_neon_vcvt_f64_v:
10719 case NEON::BI__builtin_neon_vcvtq_f64_v:
10720 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10721 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10722 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10723 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10724 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10725     assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10726            "unexpected vcvt_f64_f32 builtin");
10727 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10728 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10729
10730 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10731 }
10732 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10733     assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10734            "unexpected vcvt_f32_f64 builtin");
10735 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10736 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10737
10738 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10739 }
10740 case NEON::BI__builtin_neon_vcvt_s32_v:
10741 case NEON::BI__builtin_neon_vcvt_u32_v:
10742 case NEON::BI__builtin_neon_vcvt_s64_v:
10743 case NEON::BI__builtin_neon_vcvt_u64_v:
10744 case NEON::BI__builtin_neon_vcvt_s16_v:
10745 case NEON::BI__builtin_neon_vcvt_u16_v:
10746 case NEON::BI__builtin_neon_vcvtq_s32_v:
10747 case NEON::BI__builtin_neon_vcvtq_u32_v:
10748 case NEON::BI__builtin_neon_vcvtq_s64_v:
10749 case NEON::BI__builtin_neon_vcvtq_u64_v:
10750 case NEON::BI__builtin_neon_vcvtq_s16_v:
10751 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10752 Int =
10753 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10754 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10755 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10756 }
10757 case NEON::BI__builtin_neon_vcvta_s16_v:
10758 case NEON::BI__builtin_neon_vcvta_u16_v:
10759 case NEON::BI__builtin_neon_vcvta_s32_v:
10760 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10761 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10762 case NEON::BI__builtin_neon_vcvta_u32_v:
10763 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10764 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10765 case NEON::BI__builtin_neon_vcvta_s64_v:
10766 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10767 case NEON::BI__builtin_neon_vcvta_u64_v:
10768 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10769 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10770 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10771 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10772 }
10773 case NEON::BI__builtin_neon_vcvtm_s16_v:
10774 case NEON::BI__builtin_neon_vcvtm_s32_v:
10775 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10776 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10777 case NEON::BI__builtin_neon_vcvtm_u16_v:
10778 case NEON::BI__builtin_neon_vcvtm_u32_v:
10779 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10780 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10781 case NEON::BI__builtin_neon_vcvtm_s64_v:
10782 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10783 case NEON::BI__builtin_neon_vcvtm_u64_v:
10784 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10785 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10786 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10787 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10788 }
10789 case NEON::BI__builtin_neon_vcvtn_s16_v:
10790 case NEON::BI__builtin_neon_vcvtn_s32_v:
10791 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10792 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10793 case NEON::BI__builtin_neon_vcvtn_u16_v:
10794 case NEON::BI__builtin_neon_vcvtn_u32_v:
10795 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10796 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10797 case NEON::BI__builtin_neon_vcvtn_s64_v:
10798 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10799 case NEON::BI__builtin_neon_vcvtn_u64_v:
10800 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10801 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10802 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10803 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10804 }
10805 case NEON::BI__builtin_neon_vcvtp_s16_v:
10806 case NEON::BI__builtin_neon_vcvtp_s32_v:
10807 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10808 case NEON::BI__builtin_neon_vcvtpq_s32_v:
10809 case NEON::BI__builtin_neon_vcvtp_u16_v:
10810 case NEON::BI__builtin_neon_vcvtp_u32_v:
10811 case NEON::BI__builtin_neon_vcvtpq_u16_v:
10812 case NEON::BI__builtin_neon_vcvtpq_u32_v:
10813 case NEON::BI__builtin_neon_vcvtp_s64_v:
10814 case NEON::BI__builtin_neon_vcvtpq_s64_v:
10815 case NEON::BI__builtin_neon_vcvtp_u64_v:
10816 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
10817 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
10818 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10819 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
10820 }
10821 case NEON::BI__builtin_neon_vmulx_v:
10822 case NEON::BI__builtin_neon_vmulxq_v: {
10823 Int = Intrinsic::aarch64_neon_fmulx;
10824 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
10825 }
10826 case NEON::BI__builtin_neon_vmulxh_lane_f16:
10827 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
10828 // vmulx_lane should be mapped to Neon scalar mulx after
10829 // extracting the scalar element
10830 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10831 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10832 Ops.pop_back();
10833 Int = Intrinsic::aarch64_neon_fmulx;
10834 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
10835 }
10836 case NEON::BI__builtin_neon_vmul_lane_v:
10837 case NEON::BI__builtin_neon_vmul_laneq_v: {
10838 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
10839 bool Quad = false;
10840 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
10841 Quad = true;
10842 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10843 llvm::FixedVectorType *VTy =
10844 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
10845 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10846 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10847 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
10848 return Builder.CreateBitCast(Result, Ty);
10849 }
10850 case NEON::BI__builtin_neon_vnegd_s64:
10851 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
10852 case NEON::BI__builtin_neon_vnegh_f16:
10853 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
10854 case NEON::BI__builtin_neon_vpmaxnm_v:
10855 case NEON::BI__builtin_neon_vpmaxnmq_v: {
10856 Int = Intrinsic::aarch64_neon_fmaxnmp;
10857 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
10858 }
10859 case NEON::BI__builtin_neon_vpminnm_v:
10860 case NEON::BI__builtin_neon_vpminnmq_v: {
10861 Int = Intrinsic::aarch64_neon_fminnmp;
10862 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
10863 }
10864 case NEON::BI__builtin_neon_vsqrth_f16: {
10865 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10866 Int = Builder.getIsFPConstrained()
10867 ? Intrinsic::experimental_constrained_sqrt
10868 : Intrinsic::sqrt;
10869 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
10870 }
10871 case NEON::BI__builtin_neon_vsqrt_v:
10872 case NEON::BI__builtin_neon_vsqrtq_v: {
10873 Int = Builder.getIsFPConstrained()
10874 ? Intrinsic::experimental_constrained_sqrt
10875 : Intrinsic::sqrt;
10876 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10877 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
10878 }
10879 case NEON::BI__builtin_neon_vrbit_v:
10880 case NEON::BI__builtin_neon_vrbitq_v: {
10881 Int = Intrinsic::bitreverse;
10882 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
10883 }
10884 case NEON::BI__builtin_neon_vaddv_u8:
10885 // FIXME: These are handled by the AArch64 scalar code.
10886 usgn = true;
10887 LLVM_FALLTHROUGH;
10888 case NEON::BI__builtin_neon_vaddv_s8: {
10889 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10890 Ty = Int32Ty;
10891 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10892 llvm::Type *Tys[2] = { Ty, VTy };
10893 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10894 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10895 return Builder.CreateTrunc(Ops[0], Int8Ty);
10896 }
10897 case NEON::BI__builtin_neon_vaddv_u16:
10898 usgn = true;
10899 LLVM_FALLTHROUGH;
10900 case NEON::BI__builtin_neon_vaddv_s16: {
10901 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10902 Ty = Int32Ty;
10903 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10904 llvm::Type *Tys[2] = { Ty, VTy };
10905 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10906 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10907 return Builder.CreateTrunc(Ops[0], Int16Ty);
10908 }
10909 case NEON::BI__builtin_neon_vaddvq_u8:
10910 usgn = true;
10911 LLVM_FALLTHROUGH;
10912 case NEON::BI__builtin_neon_vaddvq_s8: {
10913 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10914 Ty = Int32Ty;
10915 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10916 llvm::Type *Tys[2] = { Ty, VTy };
10917 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10918 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10919 return Builder.CreateTrunc(Ops[0], Int8Ty);
10920 }
10921 case NEON::BI__builtin_neon_vaddvq_u16:
10922 usgn = true;
10923 LLVM_FALLTHROUGH;
10924 case NEON::BI__builtin_neon_vaddvq_s16: {
10925 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10926 Ty = Int32Ty;
10927 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10928 llvm::Type *Tys[2] = { Ty, VTy };
10929 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10930 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10931 return Builder.CreateTrunc(Ops[0], Int16Ty);
10932 }
10933 case NEON::BI__builtin_neon_vmaxv_u8: {
10934 Int = Intrinsic::aarch64_neon_umaxv;
10935 Ty = Int32Ty;
10936 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10937 llvm::Type *Tys[2] = { Ty, VTy };
10938 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10939 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10940 return Builder.CreateTrunc(Ops[0], Int8Ty);
10941 }
10942 case NEON::BI__builtin_neon_vmaxv_u16: {
10943 Int = Intrinsic::aarch64_neon_umaxv;
10944 Ty = Int32Ty;
10945 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10946 llvm::Type *Tys[2] = { Ty, VTy };
10947 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10948 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10949 return Builder.CreateTrunc(Ops[0], Int16Ty);
10950 }
10951 case NEON::BI__builtin_neon_vmaxvq_u8: {
10952 Int = Intrinsic::aarch64_neon_umaxv;
10953 Ty = Int32Ty;
10954 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10955 llvm::Type *Tys[2] = { Ty, VTy };
10956 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10957 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10958 return Builder.CreateTrunc(Ops[0], Int8Ty);
10959 }
10960 case NEON::BI__builtin_neon_vmaxvq_u16: {
10961 Int = Intrinsic::aarch64_neon_umaxv;
10962 Ty = Int32Ty;
10963 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10964 llvm::Type *Tys[2] = { Ty, VTy };
10965 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10966 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10967 return Builder.CreateTrunc(Ops[0], Int16Ty);
10968 }
10969 case NEON::BI__builtin_neon_vmaxv_s8: {
10970 Int = Intrinsic::aarch64_neon_smaxv;
10971 Ty = Int32Ty;
10972 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10973 llvm::Type *Tys[2] = { Ty, VTy };
10974 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10975 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10976 return Builder.CreateTrunc(Ops[0], Int8Ty);
10977 }
10978 case NEON::BI__builtin_neon_vmaxv_s16: {
10979 Int = Intrinsic::aarch64_neon_smaxv;
10980 Ty = Int32Ty;
10981 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10982 llvm::Type *Tys[2] = { Ty, VTy };
10983 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10984 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10985 return Builder.CreateTrunc(Ops[0], Int16Ty);
10986 }
10987 case NEON::BI__builtin_neon_vmaxvq_s8: {
10988 Int = Intrinsic::aarch64_neon_smaxv;
10989 Ty = Int32Ty;
10990 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10991 llvm::Type *Tys[2] = { Ty, VTy };
10992 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10993 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10994 return Builder.CreateTrunc(Ops[0], Int8Ty);
10995 }
10996 case NEON::BI__builtin_neon_vmaxvq_s16: {
10997 Int = Intrinsic::aarch64_neon_smaxv;
10998 Ty = Int32Ty;
10999 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11000 llvm::Type *Tys[2] = { Ty, VTy };
11001 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11002 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11003 return Builder.CreateTrunc(Ops[0], Int16Ty);
11004 }
11005 case NEON::BI__builtin_neon_vmaxv_f16: {
11006 Int = Intrinsic::aarch64_neon_fmaxv;
11007 Ty = HalfTy;
11008 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11009 llvm::Type *Tys[2] = { Ty, VTy };
11010 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11011 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11012 return Builder.CreateTrunc(Ops[0], HalfTy);
11013 }
11014 case NEON::BI__builtin_neon_vmaxvq_f16: {
11015 Int = Intrinsic::aarch64_neon_fmaxv;
11016 Ty = HalfTy;
11017 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11018 llvm::Type *Tys[2] = { Ty, VTy };
11019 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11020 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11021 return Builder.CreateTrunc(Ops[0], HalfTy);
11022 }
11023 case NEON::BI__builtin_neon_vminv_u8: {
11024 Int = Intrinsic::aarch64_neon_uminv;
11025 Ty = Int32Ty;
11026 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11027 llvm::Type *Tys[2] = { Ty, VTy };
11028 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11029 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11030 return Builder.CreateTrunc(Ops[0], Int8Ty);
11031 }
11032 case NEON::BI__builtin_neon_vminv_u16: {
11033 Int = Intrinsic::aarch64_neon_uminv;
11034 Ty = Int32Ty;
11035 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11036 llvm::Type *Tys[2] = { Ty, VTy };
11037 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11038 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11039 return Builder.CreateTrunc(Ops[0], Int16Ty);
11040 }
11041 case NEON::BI__builtin_neon_vminvq_u8: {
11042 Int = Intrinsic::aarch64_neon_uminv;
11043 Ty = Int32Ty;
11044 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11045 llvm::Type *Tys[2] = { Ty, VTy };
11046 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11047 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11048 return Builder.CreateTrunc(Ops[0], Int8Ty);
11049 }
11050 case NEON::BI__builtin_neon_vminvq_u16: {
11051 Int = Intrinsic::aarch64_neon_uminv;
11052 Ty = Int32Ty;
11053 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11054 llvm::Type *Tys[2] = { Ty, VTy };
11055 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11056 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11057 return Builder.CreateTrunc(Ops[0], Int16Ty);
11058 }
11059 case NEON::BI__builtin_neon_vminv_s8: {
11060 Int = Intrinsic::aarch64_neon_sminv;
11061 Ty = Int32Ty;
11062 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11063 llvm::Type *Tys[2] = { Ty, VTy };
11064 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11065 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11066 return Builder.CreateTrunc(Ops[0], Int8Ty);
11067 }
11068 case NEON::BI__builtin_neon_vminv_s16: {
11069 Int = Intrinsic::aarch64_neon_sminv;
11070 Ty = Int32Ty;
11071 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11072 llvm::Type *Tys[2] = { Ty, VTy };
11073 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11074 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11075 return Builder.CreateTrunc(Ops[0], Int16Ty);
11076 }
11077 case NEON::BI__builtin_neon_vminvq_s8: {
11078 Int = Intrinsic::aarch64_neon_sminv;
11079 Ty = Int32Ty;
11080 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11081 llvm::Type *Tys[2] = { Ty, VTy };
11082 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11083 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11084 return Builder.CreateTrunc(Ops[0], Int8Ty);
11085 }
11086 case NEON::BI__builtin_neon_vminvq_s16: {
11087 Int = Intrinsic::aarch64_neon_sminv;
11088 Ty = Int32Ty;
11089 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11090 llvm::Type *Tys[2] = { Ty, VTy };
11091 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11092 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11093 return Builder.CreateTrunc(Ops[0], Int16Ty);
11094 }
11095 case NEON::BI__builtin_neon_vminv_f16: {
11096 Int = Intrinsic::aarch64_neon_fminv;
11097 Ty = HalfTy;
11098 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11099 llvm::Type *Tys[2] = { Ty, VTy };
11100 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11101 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11102 return Builder.CreateTrunc(Ops[0], HalfTy);
11103 }
11104 case NEON::BI__builtin_neon_vminvq_f16: {
11105 Int = Intrinsic::aarch64_neon_fminv;
11106 Ty = HalfTy;
11107 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11108 llvm::Type *Tys[2] = { Ty, VTy };
11109 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11110 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11111 return Builder.CreateTrunc(Ops[0], HalfTy);
11112 }
11113 case NEON::BI__builtin_neon_vmaxnmv_f16: {
11114 Int = Intrinsic::aarch64_neon_fmaxnmv;
11115 Ty = HalfTy;
11116 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11117 llvm::Type *Tys[2] = { Ty, VTy };
11118 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11119 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
11120 return Builder.CreateTrunc(Ops[0], HalfTy);
11121 }
11122 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
11123 Int = Intrinsic::aarch64_neon_fmaxnmv;
11124 Ty = HalfTy;
11125 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11126 llvm::Type *Tys[2] = { Ty, VTy };
11127 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11128 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
11129 return Builder.CreateTrunc(Ops[0], HalfTy);
11130 }
11131 case NEON::BI__builtin_neon_vminnmv_f16: {
11132 Int = Intrinsic::aarch64_neon_fminnmv;
11133 Ty = HalfTy;
11134 VTy = llvm::FixedVectorType::get(HalfTy, 4);
11135 llvm::Type *Tys[2] = { Ty, VTy };
11136 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11137 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
11138 return Builder.CreateTrunc(Ops[0], HalfTy);
11139 }
11140 case NEON::BI__builtin_neon_vminnmvq_f16: {
11141 Int = Intrinsic::aarch64_neon_fminnmv;
11142 Ty = HalfTy;
11143 VTy = llvm::FixedVectorType::get(HalfTy, 8);
11144 llvm::Type *Tys[2] = { Ty, VTy };
11145 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11146 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
11147 return Builder.CreateTrunc(Ops[0], HalfTy);
11148 }
11149 case NEON::BI__builtin_neon_vmul_n_f64: {
11150 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11151 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
11152 return Builder.CreateFMul(Ops[0], RHS);
11153 }
11154 case NEON::BI__builtin_neon_vaddlv_u8: {
11155 Int = Intrinsic::aarch64_neon_uaddlv;
11156 Ty = Int32Ty;
11157 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11158 llvm::Type *Tys[2] = { Ty, VTy };
11159 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11160 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11161 return Builder.CreateTrunc(Ops[0], Int16Ty);
11162 }
11163 case NEON::BI__builtin_neon_vaddlv_u16: {
11164 Int = Intrinsic::aarch64_neon_uaddlv;
11165 Ty = Int32Ty;
11166 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11167 llvm::Type *Tys[2] = { Ty, VTy };
11168 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11169 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11170 }
11171 case NEON::BI__builtin_neon_vaddlvq_u8: {
11172 Int = Intrinsic::aarch64_neon_uaddlv;
11173 Ty = Int32Ty;
11174 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11175 llvm::Type *Tys[2] = { Ty, VTy };
11176 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11177 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11178 return Builder.CreateTrunc(Ops[0], Int16Ty);
11179 }
11180 case NEON::BI__builtin_neon_vaddlvq_u16: {
11181 Int = Intrinsic::aarch64_neon_uaddlv;
11182 Ty = Int32Ty;
11183 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11184 llvm::Type *Tys[2] = { Ty, VTy };
11185 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11186 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11187 }
11188 case NEON::BI__builtin_neon_vaddlv_s8: {
11189 Int = Intrinsic::aarch64_neon_saddlv;
11190 Ty = Int32Ty;
11191 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11192 llvm::Type *Tys[2] = { Ty, VTy };
11193 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11194 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11195 return Builder.CreateTrunc(Ops[0], Int16Ty);
11196 }
11197 case NEON::BI__builtin_neon_vaddlv_s16: {
11198 Int = Intrinsic::aarch64_neon_saddlv;
11199 Ty = Int32Ty;
11200 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11201 llvm::Type *Tys[2] = { Ty, VTy };
11202 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11203 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11204 }
11205 case NEON::BI__builtin_neon_vaddlvq_s8: {
11206 Int = Intrinsic::aarch64_neon_saddlv;
11207 Ty = Int32Ty;
11208 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11209 llvm::Type *Tys[2] = { Ty, VTy };
11210 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11211 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11212 return Builder.CreateTrunc(Ops[0], Int16Ty);
11213 }
11214 case NEON::BI__builtin_neon_vaddlvq_s16: {
11215 Int = Intrinsic::aarch64_neon_saddlv;
11216 Ty = Int32Ty;
11217 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11218 llvm::Type *Tys[2] = { Ty, VTy };
11219 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11220 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11221 }
11222 case NEON::BI__builtin_neon_vsri_n_v:
11223 case NEON::BI__builtin_neon_vsriq_n_v: {
11224 Int = Intrinsic::aarch64_neon_vsri;
11225 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
11226 return EmitNeonCall(Intrin, Ops, "vsri_n");
11227 }
11228 case NEON::BI__builtin_neon_vsli_n_v:
11229 case NEON::BI__builtin_neon_vsliq_n_v: {
11230 Int = Intrinsic::aarch64_neon_vsli;
11231 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
11232 return EmitNeonCall(Intrin, Ops, "vsli_n");
11233 }
11234 case NEON::BI__builtin_neon_vsra_n_v:
11235 case NEON::BI__builtin_neon_vsraq_n_v:
11236 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11237 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
11238 return Builder.CreateAdd(Ops[0], Ops[1]);
11239 case NEON::BI__builtin_neon_vrsra_n_v:
11240 case NEON::BI__builtin_neon_vrsraq_n_v: {
11241 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
11242 SmallVector<llvm::Value*,2> TmpOps;
11243 TmpOps.push_back(Ops[1]);
11244 TmpOps.push_back(Ops[2]);
11245 Function* F = CGM.getIntrinsic(Int, Ty);
11246 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
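// Passing shift index 1 and rightshift=true makes EmitNeonCall negate the
// shift immediate, so the (u|s)rshl intrinsic performs a rounding shift
// right; the result is then accumulated into Ops[0] below.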
11247 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
11248 return Builder.CreateAdd(Ops[0], tmp);
11249 }
11250 case NEON::BI__builtin_neon_vld1_v:
11251 case NEON::BI__builtin_neon_vld1q_v: {
11252 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
11253 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
11254 }
11255 case NEON::BI__builtin_neon_vst1_v:
11256 case NEON::BI__builtin_neon_vst1q_v:
11257 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
11258 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
11259 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
11260 case NEON::BI__builtin_neon_vld1_lane_v:
11261 case NEON::BI__builtin_neon_vld1q_lane_v: {
11262 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11263 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11264 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11265 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11266 PtrOp0.getAlignment());
11267 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
11268 }
11269 case NEON::BI__builtin_neon_vld1_dup_v:
11270 case NEON::BI__builtin_neon_vld1q_dup_v: {
11271 Value *V = UndefValue::get(Ty);
11272 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11273 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11274 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11275 PtrOp0.getAlignment());
11276 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
11277 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
11278 return EmitNeonSplat(Ops[0], CI);
11279 }
11280 case NEON::BI__builtin_neon_vst1_lane_v:
11281 case NEON::BI__builtin_neon_vst1q_lane_v:
11282 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11283 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
11284 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11285 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
11286 PtrOp0.getAlignment());
11287 case NEON::BI__builtin_neon_vld2_v:
11288 case NEON::BI__builtin_neon_vld2q_v: {
11289 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11290 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11291 llvm::Type *Tys[2] = { VTy, PTy };
11292 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
11293 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11294 Ops[0] = Builder.CreateBitCast(Ops[0],
11295 llvm::PointerType::getUnqual(Ops[1]->getType()));
11296 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11297 }
11298 case NEON::BI__builtin_neon_vld3_v:
11299 case NEON::BI__builtin_neon_vld3q_v: {
11300 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11301 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11302 llvm::Type *Tys[2] = { VTy, PTy };
11303 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
11304 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11305 Ops[0] = Builder.CreateBitCast(Ops[0],
11306 llvm::PointerType::getUnqual(Ops[1]->getType()));
11307 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11308 }
11309 case NEON::BI__builtin_neon_vld4_v:
11310 case NEON::BI__builtin_neon_vld4q_v: {
11311 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11312 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11313 llvm::Type *Tys[2] = { VTy, PTy };
11314 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
11315 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11316 Ops[0] = Builder.CreateBitCast(Ops[0],
11317 llvm::PointerType::getUnqual(Ops[1]->getType()));
11318 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11319 }
11320 case NEON::BI__builtin_neon_vld2_dup_v:
11321 case NEON::BI__builtin_neon_vld2q_dup_v: {
11322 llvm::Type *PTy =
11323 llvm::PointerType::getUnqual(VTy->getElementType());
11324 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11325 llvm::Type *Tys[2] = { VTy, PTy };
11326 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
11327 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11328 Ops[0] = Builder.CreateBitCast(Ops[0],
11329 llvm::PointerType::getUnqual(Ops[1]->getType()));
11330 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11331 }
11332 case NEON::BI__builtin_neon_vld3_dup_v:
11333 case NEON::BI__builtin_neon_vld3q_dup_v: {
11334 llvm::Type *PTy =
11335 llvm::PointerType::getUnqual(VTy->getElementType());
11336 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11337 llvm::Type *Tys[2] = { VTy, PTy };
11338 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
11339 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11340 Ops[0] = Builder.CreateBitCast(Ops[0],
11341 llvm::PointerType::getUnqual(Ops[1]->getType()));
11342 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11343 }
11344 case NEON::BI__builtin_neon_vld4_dup_v:
11345 case NEON::BI__builtin_neon_vld4q_dup_v: {
11346 llvm::Type *PTy =
11347 llvm::PointerType::getUnqual(VTy->getElementType());
11348 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11349 llvm::Type *Tys[2] = { VTy, PTy };
11350 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
11351 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11352 Ops[0] = Builder.CreateBitCast(Ops[0],
11353 llvm::PointerType::getUnqual(Ops[1]->getType()));
11354 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11355 }
11356 case NEON::BI__builtin_neon_vld2_lane_v:
11357 case NEON::BI__builtin_neon_vld2q_lane_v: {
11358 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11359 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
11360 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11361 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11362 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11363 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11364 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
11365 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11366 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11367 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11368 }
11369 case NEON::BI__builtin_neon_vld3_lane_v:
11370 case NEON::BI__builtin_neon_vld3q_lane_v: {
11371 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11372 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
11373 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11374 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11375 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11376 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11377 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11378 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
11379 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11380 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11381 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11382 }
11383 case NEON::BI__builtin_neon_vld4_lane_v:
11384 case NEON::BI__builtin_neon_vld4q_lane_v: {
11385 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11386 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
11387 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11388 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11389 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11390 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11391 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
11392 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
11393 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
11394 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11395 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11396 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11397 }
11398 case NEON::BI__builtin_neon_vst2_v:
11399 case NEON::BI__builtin_neon_vst2q_v: {
11400 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11401 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
11402 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
11403 Ops, "");
11404 }
11405 case NEON::BI__builtin_neon_vst2_lane_v:
11406 case NEON::BI__builtin_neon_vst2q_lane_v: {
11407 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11408 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
11409 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11410 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
11411 Ops, "");
11412 }
11413 case NEON::BI__builtin_neon_vst3_v:
11414 case NEON::BI__builtin_neon_vst3q_v: {
11415 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11416 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11417 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
11418 Ops, "");
11419 }
11420 case NEON::BI__builtin_neon_vst3_lane_v:
11421 case NEON::BI__builtin_neon_vst3q_lane_v: {
11422 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11423 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11424 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11425 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
11426 Ops, "");
11427 }
11428 case NEON::BI__builtin_neon_vst4_v:
11429 case NEON::BI__builtin_neon_vst4q_v: {
11430 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11431 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11432 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
11433 Ops, "");
11434 }
11435 case NEON::BI__builtin_neon_vst4_lane_v:
11436 case NEON::BI__builtin_neon_vst4q_lane_v: {
11437 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11438 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11439 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
11440 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
11441 Ops, "");
11442 }
11443 case NEON::BI__builtin_neon_vtrn_v:
11444 case NEON::BI__builtin_neon_vtrnq_v: {
11445 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11446 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11447 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11448 Value *SV = nullptr;
11449
11450 for (unsigned vi = 0; vi != 2; ++vi) {
11451 SmallVector<int, 16> Indices;
11452 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11453 Indices.push_back(i+vi);
11454 Indices.push_back(i+e+vi);
11455 }
11456 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11457 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
11458 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11459 }
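// For a 4-element vector the two iterations produce shuffle masks
// {0,4,2,6} and {1,5,3,7} (the TRN1/TRN2 lane patterns), stored to
// consecutive vectors at the result pointer.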
11460 return SV;
11461 }
11462 case NEON::BI__builtin_neon_vuzp_v:
11463 case NEON::BI__builtin_neon_vuzpq_v: {
11464 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11465 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11466 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11467 Value *SV = nullptr;
11468
11469 for (unsigned vi = 0; vi != 2; ++vi) {
11470 SmallVector<int, 16> Indices;
11471 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
11472 Indices.push_back(2*i+vi);
11473
11474 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11475 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
11476 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11477 }
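// For a 4-element vector the two iterations produce shuffle masks
// {0,2,4,6} and {1,3,5,7} (the UZP1/UZP2 lane patterns).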
11478 return SV;
11479 }
11480 case NEON::BI__builtin_neon_vzip_v:
11481 case NEON::BI__builtin_neon_vzipq_v: {
11482 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11483 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11484 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11485 Value *SV = nullptr;
11486
11487 for (unsigned vi = 0; vi != 2; ++vi) {
11488 SmallVector<int, 16> Indices;
11489 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11490 Indices.push_back((i + vi*e) >> 1);
11491 Indices.push_back(((i + vi*e) >> 1)+e);
11492 }
11493 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11494 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
11495 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11496 }
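// For a 4-element vector the two iterations produce shuffle masks
// {0,4,1,5} and {2,6,3,7} (the ZIP1/ZIP2 lane patterns).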
11497 return SV;
11498 }
11499 case NEON::BI__builtin_neon_vqtbl1q_v: {
11500 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
11501 Ops, "vtbl1");
11502 }
11503 case NEON::BI__builtin_neon_vqtbl2q_v: {
11504 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
11505 Ops, "vtbl2");
11506 }
11507 case NEON::BI__builtin_neon_vqtbl3q_v: {
11508 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
11509 Ops, "vtbl3");
11510 }
11511 case NEON::BI__builtin_neon_vqtbl4q_v: {
11512 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
11513 Ops, "vtbl4");
11514 }
11515 case NEON::BI__builtin_neon_vqtbx1q_v: {
11516 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
11517 Ops, "vtbx1");
11518 }
11519 case NEON::BI__builtin_neon_vqtbx2q_v: {
11520 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
11521 Ops, "vtbx2");
11522 }
11523 case NEON::BI__builtin_neon_vqtbx3q_v: {
11524 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
11525 Ops, "vtbx3");
11526 }
11527 case NEON::BI__builtin_neon_vqtbx4q_v: {
11528 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
11529 Ops, "vtbx4");
11530 }
11531 case NEON::BI__builtin_neon_vsqadd_v:
11532 case NEON::BI__builtin_neon_vsqaddq_v: {
11533 Int = Intrinsic::aarch64_neon_usqadd;
11534 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
11535 }
11536 case NEON::BI__builtin_neon_vuqadd_v:
11537 case NEON::BI__builtin_neon_vuqaddq_v: {
11538 Int = Intrinsic::aarch64_neon_suqadd;
11539 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
11540 }
11541 }
11542}
11543
11544Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
11545 const CallExpr *E) {
11546 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
11547 BuiltinID == BPF::BI__builtin_btf_type_id ||
11548 BuiltinID == BPF::BI__builtin_preserve_type_info ||
11549 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
11550 "unexpected BPF builtin");
11551
11552 // A sequence number, injected into IR builtin functions, to
11553 // prevent CSE when the only difference between the functions
11554 // may be the debuginfo metadata.
11555 static uint32_t BuiltinSeqNum;
11556
11557 switch (BuiltinID) {
11558 default:
11559 llvm_unreachable("Unexpected BPF builtin");
11560 case BPF::BI__builtin_preserve_field_info: {
11561 const Expr *Arg = E->getArg(0);
11562 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
11563
11564 if (!getDebugInfo()) {
11565 CGM.Error(E->getExprLoc(),
11566 "using __builtin_preserve_field_info() without -g");
11567 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11568 : EmitLValue(Arg).getPointer(*this);
11569 }
11570
11571 // Enable underlying preserve_*_access_index() generation.
11572 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
11573 IsInPreservedAIRegion = true;
11574 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11575 : EmitLValue(Arg).getPointer(*this);
11576 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
11577
11578 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11579 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
11580
11581 // Build the IR for the preserve_field_info intrinsic.
11582 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
11583 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
11584 {FieldAddr->getType()});
11585 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
11586 }
11587 case BPF::BI__builtin_btf_type_id:
11588 case BPF::BI__builtin_preserve_type_info: {
11589 if (!getDebugInfo()) {
11590 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11591 return nullptr;
11592 }
11593
11594 const Expr *Arg0 = E->getArg(0);
11595 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11596 Arg0->getType(), Arg0->getExprLoc());
11597
11598 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11599 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11600 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11601
11602 llvm::Function *FnDecl;
11603 if (BuiltinID == BPF::BI__builtin_btf_type_id)
11604 FnDecl = llvm::Intrinsic::getDeclaration(
11605 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
11606 else
11607 FnDecl = llvm::Intrinsic::getDeclaration(
11608 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
11609 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
11610 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11611 return Fn;
11612 }
11613 case BPF::BI__builtin_preserve_enum_value: {
11614 if (!getDebugInfo()) {
11615 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11616 return nullptr;
11617 }
11618
11619 const Expr *Arg0 = E->getArg(0);
11620 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11621 Arg0->getType(), Arg0->getExprLoc());
11622
11623 // Find enumerator
11624 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
11625 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
11626 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
11627 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
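// The casts above require the argument to have the form
// *(enum T *)ENUM_CONSTANT, i.e. a dereference of a C-style cast of an
// enum constant; any other shape would fail the cast<> calls.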
11628
11629 auto &InitVal = Enumerator->getInitVal();
11630 std::string InitValStr;
11631 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
11632 InitValStr = std::to_string(InitVal.getSExtValue());
11633 else
11634 InitValStr = std::to_string(InitVal.getZExtValue());
11635 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
11636 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11637
11638 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11639 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11640 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11641
11642 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11643 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11644 CallInst *Fn =
11645 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11646 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11647 return Fn;
11648 }
11649 }
11650}
11651
11652llvm::Value *CodeGenFunction::
11653BuildVector(ArrayRef<llvm::Value*> Ops) {
11654 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11655 "Not a power-of-two sized vector!");
11656 bool AllConstants = true;
11657 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11658 AllConstants &= isa<Constant>(Ops[i]);
11659
11660 // If this is a constant vector, create a ConstantVector.
11661 if (AllConstants) {
11662 SmallVector<llvm::Constant*, 16> CstOps;
11663 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11664 CstOps.push_back(cast<Constant>(Ops[i]));
11665 return llvm::ConstantVector::get(CstOps);
11666 }
11667
11668 // Otherwise, insertelement the values to build the vector.
11669 Value *Result = llvm::UndefValue::get(
11670 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11671
11672 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11673 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11674
11675 return Result;
11676}
11677
11678// Convert the mask from an integer type to a vector of i1.
11679static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11680 unsigned NumElts) {
11681
11682 auto *MaskTy = llvm::FixedVectorType::get(
11683 CGF.Builder.getInt1Ty(),
11684 cast<IntegerType>(Mask->getType())->getBitWidth());
11685 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11686
11687 // If we have fewer than 8 elements, then the starting mask was an i8 and
11688 // we need to extract down to the right number of elements.
11689 if (NumElts < 8) {
11690 int Indices[4];
11691 for (unsigned i = 0; i != NumElts; ++i)
11692 Indices[i] = i;
11693 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11694 makeArrayRef(Indices, NumElts),
11695 "extract");
11696 }
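// For example, with an i8 mask and NumElts == 4, the bitcast above yields
// <8 x i1> and this shuffle keeps only lanes 0-3.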
11697 return MaskVec;
11698}
11699
11700static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11701 Align Alignment) {
11702 // Cast the pointer to the right type.
11703 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11704 llvm::PointerType::getUnqual(Ops[1]->getType()));
11705
11706 Value *MaskVec = getMaskVecValue(
11707 CGF, Ops[2],
11708 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11709
11710 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11711}
11712
11713static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11714 Align Alignment) {
11715 // Cast the pointer to the right type.
11716 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11717 llvm::PointerType::getUnqual(Ops[1]->getType()));
11718
11719 Value *MaskVec = getMaskVecValue(
11720 CGF, Ops[2],
11721 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11722
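// Ops[1] also serves as the pass-through value: lanes whose mask bit is
// clear receive the corresponding element of Ops[1] instead of being
// loaded from memory.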
11723 return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
11724}
11725
11726static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11727 ArrayRef<Value *> Ops) {
11728 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11729 llvm::Type *PtrTy = ResultTy->getElementType();
11730
11731 // Cast the pointer to element type.
11732 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11733 llvm::PointerType::getUnqual(PtrTy));
11734
11735 Value *MaskVec = getMaskVecValue(
11736 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11737
11738 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11739 ResultTy);
11740 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11741}
11742
11743static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11744 ArrayRef<Value *> Ops,
11745 bool IsCompress) {
11746 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11747
11748 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11749
11750 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11751 : Intrinsic::x86_avx512_mask_expand;
11752 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11753 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11754}
11755
11756static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11757 ArrayRef<Value *> Ops) {
11758 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11759 llvm::Type *PtrTy = ResultTy->getElementType();
11760
11761 // Cast the pointer to element type.
11762 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11763 llvm::PointerType::getUnqual(PtrTy));
11764
11765 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11766
11767 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11768 ResultTy);
11769 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11770}
11771
11772static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11773 ArrayRef<Value *> Ops,
11774 bool InvertLHS = false) {
11775 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11776 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11777 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11778
11779 if (InvertLHS)
11780 LHS = CGF.Builder.CreateNot(LHS);
11781
11782 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11783 Ops[0]->getType());
11784}
11785
11786static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11787 Value *Amt, bool IsRight) {
11788 llvm::Type *Ty = Op0->getType();
11789
11790 // The amount may be a scalar immediate, in which case we create a splat vector.
11791 // Funnel shift amounts are taken modulo the element width and the types are all
11792 // powers of two, so we only care about the lowest log2 bits anyway.
11793 if (Amt->getType() != Ty) {
11794 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11795 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11796 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11797 }
11798
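// llvm.fshl(a, b, s) returns the top half of ((a:b) << (s mod BW)) and
// llvm.fshr(a, b, s) returns the bottom half of ((a:b) >> (s mod BW)).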
11799 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11800 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11801 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11802}
11803
11804static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11805 bool IsSigned) {
11806 Value *Op0 = Ops[0];
11807 Value *Op1 = Ops[1];
11808 llvm::Type *Ty = Op0->getType();
11809 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11810
11811 CmpInst::Predicate Pred;
11812 switch (Imm) {
11813 case 0x0:
11814 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11815 break;
11816 case 0x1:
11817 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
11818 break;
11819 case 0x2:
11820 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11821 break;
11822 case 0x3:
11823 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11824 break;
11825 case 0x4:
11826 Pred = ICmpInst::ICMP_EQ;
11827 break;
11828 case 0x5:
11829 Pred = ICmpInst::ICMP_NE;
11830 break;
11831 case 0x6:
11832 return llvm::Constant::getNullValue(Ty); // FALSE
11833 case 0x7:
11834 return llvm::Constant::getAllOnesValue(Ty); // TRUE
11835 default:
11836 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
11837 }
11838
11839 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
11840 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
11841 return Res;
11842}
11843
11844static Value *EmitX86Select(CodeGenFunction &CGF,
11845 Value *Mask, Value *Op0, Value *Op1) {
11846
11847 // If the mask is all ones, just return the first argument.
11848 if (const auto *C = dyn_cast<Constant>(Mask))
11849 if (C->isAllOnesValue())
11850 return Op0;
11851
11852 Mask = getMaskVecValue(
11853 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
11854
11855 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11856}
11857
11858static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
11859 Value *Mask, Value *Op0, Value *Op1) {
11860 // If the mask is all ones, just return the first argument.
11861 if (const auto *C = dyn_cast<Constant>(Mask))
11862 if (C->isAllOnesValue())
11863 return Op0;
11864
11865 auto *MaskTy = llvm::FixedVectorType::get(
11866 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
11867 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
11868 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
11869 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11870}
11871
11872static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
11873 unsigned NumElts, Value *MaskIn) {
11874 if (MaskIn) {
11875 const auto *C = dyn_cast<Constant>(MaskIn);
11876 if (!C || !C->isAllOnesValue())
11877 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
11878 }
11879
11880 if (NumElts < 8) {
11881 int Indices[8];
11882 for (unsigned i = 0; i != NumElts; ++i)
11883 Indices[i] = i;
11884 for (unsigned i = NumElts; i != 8; ++i)
11885 Indices[i] = i % NumElts + NumElts;
11886 Cmp = CGF.Builder.CreateShuffleVector(
11887 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
11888 }
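// E.g. with NumElts == 4 the shuffle widens the <4 x i1> compare result to
// <8 x i1>, with lanes 4-7 taken from the null vector (i.e. zero), so the
// bitcast below produces an i8 mask.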
11889
11890 return CGF.Builder.CreateBitCast(Cmp,
11891 IntegerType::get(CGF.getLLVMContext(),
11892 std::max(NumElts, 8U)));
11893}
11894
11895static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
11896 bool Signed, ArrayRef<Value *> Ops) {
11897 assert((Ops.size() == 2 || Ops.size() == 4) &&
11898 "Unexpected number of arguments");
11899 unsigned NumElts =
11900 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11901 Value *Cmp;
11902
11903 if (CC == 3) {
11904 Cmp = Constant::getNullValue(
11905 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11906 } else if (CC == 7) {
11907 Cmp = Constant::getAllOnesValue(
11908 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11909 } else {
11910 ICmpInst::Predicate Pred;
11911 switch (CC) {
11912 default: llvm_unreachable("Unknown condition code");
11913 case 0: Pred = ICmpInst::ICMP_EQ; break;
11914 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
11915 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
11916 case 4: Pred = ICmpInst::ICMP_NE; break;
11917 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
11918 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
11919 }
11920 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11921 }
11922
11923 Value *MaskIn = nullptr;
11924 if (Ops.size() == 4)
11925 MaskIn = Ops[3];
11926
11927 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
11928}
11929
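// EmitX86ConvertToMask compares each element against zero with a signed
// less-than (CC == 1, Signed == true), so each resulting mask bit is simply
// the sign bit of the corresponding element.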
11930static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
11931 Value *Zero = Constant::getNullValue(In->getType());
11932 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
11933}
11934
11935static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
11936 ArrayRef<Value *> Ops, bool IsSigned) {
11937 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
11938 llvm::Type *Ty = Ops[1]->getType();
11939
11940 Value *Res;
11941 if (Rnd != 4) {
11942 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
11943 : Intrinsic::x86_avx512_uitofp_round;
11944 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
11945 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
11946 } else {
11947 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
11948 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
11949 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
11950 }
11951
11952 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11953}
11954
11955// Lowers X86 FMA intrinsics to IR.
11956static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
11957 ArrayRef<Value *> Ops, unsigned BuiltinID,
11958 bool IsAddSub) {
11959
11960 bool Subtract = false;
11961 Intrinsic::ID IID = Intrinsic::not_intrinsic;
11962 switch (BuiltinID) {
11963 default: break;
11964 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11965 Subtract = true;
11966 LLVM_FALLTHROUGH;
11967 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11968 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11969 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11970 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
11971 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11972 Subtract = true;
11973     LLVM_FALLTHROUGH;
11974 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11975 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11976 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11977 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
11978 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11979 Subtract = true;
11980     LLVM_FALLTHROUGH;
11981 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11982 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11983 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11984 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
11985 break;
11986 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11987 Subtract = true;
11988     LLVM_FALLTHROUGH;
11989 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11990 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11991 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11992 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
11993 break;
11994 }
11995
11996 Value *A = Ops[0];
11997 Value *B = Ops[1];
11998 Value *C = Ops[2];
11999
12000 if (Subtract)
12001 C = CGF.Builder.CreateFNeg(C);
12002
12003 Value *Res;
12004
12005   // Use the plain fma path below only for _MM_FROUND_CUR_DIRECTION/4 (no rounding).
12006 if (IID != Intrinsic::not_intrinsic &&
12007 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
12008 IsAddSub)) {
12009 Function *Intr = CGF.CGM.getIntrinsic(IID);
12010 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
12011 } else {
12012 llvm::Type *Ty = A->getType();
12013 Function *FMA;
12014 if (CGF.Builder.getIsFPConstrained()) {
12015 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12016 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
12017 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
12018 } else {
12019 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
12020 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
12021 }
12022 }
12023
12024 // Handle any required masking.
12025 Value *MaskFalseVal = nullptr;
12026 switch (BuiltinID) {
12027 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
12028 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
12029 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
12030 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12031 MaskFalseVal = Ops[0];
12032 break;
12033 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
12034 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
12035 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12036 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12037 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
12038 break;
12039 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
12040 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
12041 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
12042 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
12043 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12044 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12045 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12046 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12047 MaskFalseVal = Ops[2];
12048 break;
12049 }
12050
12051 if (MaskFalseVal)
12052 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
12053
12054 return Res;
12055}
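The masking step above is a per-lane select between the FMA result and MaskFalseVal (the first operand for _mask, zero for _maskz, the accumulator for _mask3). A minimal per-lane sketch, with a hypothetical helper name:

#include <cmath>

// Sketch: one double lane of a masked FMA.
static inline double MaskedFmaLane(bool MaskBit, double A, double B, double C,
                                   double MaskFalseVal) {
  return MaskBit ? std::fma(A, B, C) : MaskFalseVal;
}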
12056
12057static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
12058 MutableArrayRef<Value *> Ops, Value *Upper,
12059 bool ZeroMask = false, unsigned PTIdx = 0,
12060 bool NegAcc = false) {
12061 unsigned Rnd = 4;
12062 if (Ops.size() > 4)
12063 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
12064
12065 if (NegAcc)
12066 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
12067
12068 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
12069 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
12070 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
12071 Value *Res;
12072 if (Rnd != 4) {
12073 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
12074 Intrinsic::x86_avx512_vfmadd_f32 :
12075 Intrinsic::x86_avx512_vfmadd_f64;
12076 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
12077 {Ops[0], Ops[1], Ops[2], Ops[4]});
12078 } else if (CGF.Builder.getIsFPConstrained()) {
12079 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12080 Function *FMA = CGF.CGM.getIntrinsic(
12081 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
12082 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
12083 } else {
12084 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
12085 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
12086 }
12087 // If we have more than 3 arguments, we need to do masking.
12088 if (Ops.size() > 3) {
12089 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
12090 : Ops[PTIdx];
12091
12092     // If we negated the accumulator and it's the PassThru value, we need to
12093     // bypass the negation. Conveniently, Upper should be the same thing in
12094     // this case.
12095 if (NegAcc && PTIdx == 2)
12096 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
12097
12098 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
12099 }
12100 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
12101}
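Only element 0 participates in the scalar FMA; the remaining lanes come from Upper via the final insertelement. A minimal sketch of that shape for 2-element double vectors, with a hypothetical helper and plain arrays standing in for the IR vectors:

#include <cmath>

// Sketch: lane 0 is fma + mask-select, lane 1 is passed through from Upper.
static inline void ScalarFmaModel(const double A[2], const double B[2],
                                  const double C[2], const double Upper[2],
                                  bool MaskBit, double PassThru0,
                                  double Out[2]) {
  double R0 = std::fma(A[0], B[0], C[0]);
  Out[0] = MaskBit ? R0 : PassThru0;
  Out[1] = Upper[1];
}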
12102
12103static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
12104 ArrayRef<Value *> Ops) {
12105 llvm::Type *Ty = Ops[0]->getType();
12106 // Arguments have a vXi32 type so cast to vXi64.
12107 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
12108 Ty->getPrimitiveSizeInBits() / 64);
12109 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
12110 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
12111
12112 if (IsSigned) {
12113 // Shift left then arithmetic shift right.
12114 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
12115 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
12116 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
12117 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
12118 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
12119 } else {
12120 // Clear the upper bits.
12121 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
12122 LHS = CGF.Builder.CreateAnd(LHS, Mask);
12123 RHS = CGF.Builder.CreateAnd(RHS, Mask);
12124 }
12125
12126 return CGF.Builder.CreateMul(LHS, RHS);
12127}
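Per 64-bit lane, the pmuldq/pmuludq lowering above multiplies only the low 32 bits of each input, sign- or zero-extended. A scalar sketch with a hypothetical helper:

#include <cstdint>

// Sketch: one 64-bit lane of (v)pmuldq / (v)pmuludq.
static inline int64_t MuldqLane(int64_t LHS, int64_t RHS, bool IsSigned) {
  if (IsSigned) {
    // Low 32 bits, sign-extended (the shl/ashr pair in the IR above).
    int64_t A = static_cast<int32_t>(LHS);
    int64_t B = static_cast<int32_t>(RHS);
    return A * B;                       // |A|, |B| < 2^31, so this fits
  }
  // Low 32 bits, zero-extended; multiply in unsigned to match the wrapping
  // 64-bit IR multiply.
  uint64_t A = static_cast<uint64_t>(LHS) & 0xffffffffULL;
  uint64_t B = static_cast<uint64_t>(RHS) & 0xffffffffULL;
  return static_cast<int64_t>(A * B);
}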
12128
12129// Emit a masked pternlog intrinsic. This only exists because the header has to
12130// use a macro and we aren't able to pass the input argument to a pternlog
12131// builtin and a select builtin without evaluating it twice.
12132static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
12133 ArrayRef<Value *> Ops) {
12134 llvm::Type *Ty = Ops[0]->getType();
12135
12136 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
12137 unsigned EltWidth = Ty->getScalarSizeInBits();
12138 Intrinsic::ID IID;
12139 if (VecWidth == 128 && EltWidth == 32)
12140 IID = Intrinsic::x86_avx512_pternlog_d_128;
12141 else if (VecWidth == 256 && EltWidth == 32)
12142 IID = Intrinsic::x86_avx512_pternlog_d_256;
12143 else if (VecWidth == 512 && EltWidth == 32)
12144 IID = Intrinsic::x86_avx512_pternlog_d_512;
12145 else if (VecWidth == 128 && EltWidth == 64)
12146 IID = Intrinsic::x86_avx512_pternlog_q_128;
12147 else if (VecWidth == 256 && EltWidth == 64)
12148 IID = Intrinsic::x86_avx512_pternlog_q_256;
12149 else if (VecWidth == 512 && EltWidth == 64)
12150 IID = Intrinsic::x86_avx512_pternlog_q_512;
12151 else
12152     llvm_unreachable("Unexpected intrinsic");
12153
12154 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
12155 Ops.drop_back());
12156 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
12157 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
12158}
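The pternlog immediate is a truth table: at every bit position, the three source bits select one bit of the 8-bit immediate. A bitwise sketch over one 64-bit element (hypothetical helper, assuming the first source supplies the high index bit as in the Intel definition):

#include <cstdint>

// Sketch: vpternlog semantics for a single 64-bit element.
static inline uint64_t TernlogWord(uint64_t A, uint64_t B, uint64_t C,
                                   uint8_t Imm) {
  uint64_t R = 0;
  for (unsigned Bit = 0; Bit != 64; ++Bit) {
    unsigned Idx = (((A >> Bit) & 1) << 2) | (((B >> Bit) & 1) << 1) |
                   ((C >> Bit) & 1);
    R |= static_cast<uint64_t>((Imm >> Idx) & 1) << Bit;
  }
  return R;
}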
12159
12160static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
12161 llvm::Type *DstTy) {
12162 unsigned NumberOfElements =
12163 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
12164 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
12165 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
12166}
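The sign-extension above turns each mask bit into an all-ones or all-zeros element. Per lane, that amounts to (hypothetical helper):

// Sketch: sext i1 -> i32 for one mask bit.
static inline int SextMaskLane(bool MaskBit) { return MaskBit ? -1 : 0; }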
12167
12168// Emit binary intrinsic with the same type used in result/args.
12169static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
12170 ArrayRef<Value *> Ops, Intrinsic::ID IID) {
12171 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
12172 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
12173}
12174
12175Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
12176 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
12177 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
12178 return EmitX86CpuIs(CPUStr);
12179}
12180
12181 // Convert F16 halves to floats.
12182static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
12183 ArrayRef<Value *> Ops,
12184 llvm::Type *DstTy) {
12185   assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
12186          "Unknown cvtph2ps intrinsic");
12187
12188 // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
12189 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
12190 Function *F =
12191 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
12192 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
12193 }
12194
12195 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
12196 Value *Src = Ops[0];
12197
12198 // Extract the subvector.
12199 if (NumDstElts !=
12200 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
12201     assert(NumDstElts == 4 && "Unexpected vector size");
12202 Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
12203 }
12204
12205 // Bitcast from vXi16 to vXf16.
12206 auto *HalfTy = llvm::FixedVectorType::get(
12207 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
12208 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
12209
12210 // Perform the fp-extension.
12211 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
12212
12213 if (Ops.size() >= 3)
12214 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
12215 return Res;
12216}
12217
12218// Convert a BF16 to a float.
12219static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
12220 const CallExpr *E,
12221 ArrayRef<Value *> Ops) {
12222 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
12223 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
12224 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
12225 llvm::Type *ResultType = CGF.ConvertType(E->getType());
12226 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
12227 return BitCast;
12228}
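The BF16 widening above simply places the 16 stored bits in the upper half of an IEEE single-precision float. A self-contained scalar sketch (hypothetical helper):

#include <cstdint>
#include <cstring>

// Sketch: zext to i32, shift left by 16, bitcast to float.
static inline float Bf16ToFloat(uint16_t B) {
  uint32_t Bits = static_cast<uint32_t>(B) << 16;
  float F;
  std::memcpy(&F, &Bits, sizeof(F));
  return F;
}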
12229
12230Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
12231
12232 llvm::Type *Int32Ty = Builder.getInt32Ty();
12233
12234 // Matching the struct layout from the compiler-rt/libgcc structure that is
12235 // filled in:
12236 // unsigned int __cpu_vendor;
12237 // unsigned int __cpu_type;
12238 // unsigned int __cpu_subtype;
12239 // unsigned int __cpu_features[1];
12240 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12241 llvm::ArrayType::get(Int32Ty, 1));
12242
12243 // Grab the global __cpu_model.
12244 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12245 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12246
12247 // Calculate the index needed to access the correct field based on the
12248 // range. Also adjust the expected value.
12249 unsigned Index;
12250 unsigned Value;
12251 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
12252#define X86_VENDOR(ENUM, STRING) \
12253 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
12254#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
12255 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
12256#define X86_CPU_TYPE(ENUM, STR) \
12257 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
12258#define X86_CPU_SUBTYPE(ENUM, STR) \
12259 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
12260#include "llvm/Support/X86TargetParser.def"
12261 .Default({0, 0});
12262   assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
12263
12264 // Grab the appropriate field from __cpu_model.
12265 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
12266 ConstantInt::get(Int32Ty, Index)};
12267 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
12268 CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
12269 CharUnits::fromQuantity(4));
12270
12271 // Check the value of the field against the requested value.
12272 return Builder.CreateICmpEQ(CpuValue,
12273 llvm::ConstantInt::get(Int32Ty, Value));
12274}
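The GEP and load above index into the libgcc/compiler-rt __cpu_model global whose layout matches the comment. Written out as a struct, the lowering of __builtin_cpu_is is roughly a field compare against a constant taken from X86TargetParser.def. This is an illustrative sketch, not the actual runtime-library declaration.

// Sketch of the assumed runtime layout and of what the emitted IR checks
// (field index 0/1/2 is selected by the StringSwitch above).
struct __processor_model {
  unsigned int __cpu_vendor;
  unsigned int __cpu_type;
  unsigned int __cpu_subtype;
  unsigned int __cpu_features[1];
};
extern "C" struct __processor_model __cpu_model;

// e.g. __builtin_cpu_is("<cpu>") becomes, in effect:
//   __cpu_model.__cpu_type == <value from X86TargetParser.def>
// (or __cpu_vendor / __cpu_subtype, depending on which table matched).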
12275
12276Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
12277 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
12278 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
12279 return EmitX86CpuSupports(FeatureStr);
12280}
12281
12282uint64_t
12283CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
12284 // Processor features and mapping to processor feature value.
12285 uint64_t FeaturesMask = 0;
12286 for (const StringRef &FeatureStr : FeatureStrs) {
12287 unsigned Feature =
12288 StringSwitch<unsigned>(FeatureStr)
12289#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
12290#include "llvm/Support/X86TargetParser.def"
12291 ;
12292 FeaturesMask |= (1ULL << Feature);
12293 }
12294 return FeaturesMask;
12295}
12296
12297Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
12298 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
12299}
12300
12301llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
12302 uint32_t Features1 = Lo_32(FeaturesMask);
12303 uint32_t Features2 = Hi_32(FeaturesMask);
12304
12305 Value *Result = Builder.getTrue();
12306
12307 if (Features1 != 0) {
12308 // Matching the struct layout from the compiler-rt/libgcc structure that is
12309 // filled in:
12310 // unsigned int __cpu_vendor;
12311 // unsigned int __cpu_type;
12312 // unsigned int __cpu_subtype;
12313 // unsigned int __cpu_features[1];
12314 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12315 llvm::ArrayType::get(Int32Ty, 1));
12316
12317 // Grab the global __cpu_model.
12318 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12319 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12320
12321 // Grab the first (0th) element from the field __cpu_features off of the
12322 // global in the struct STy.
12323 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
12324 Builder.getInt32(0)};
12325 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
12326 Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
12327 CharUnits::fromQuantity(4));
12328
12329 // Check the value of the bit corresponding to the feature requested.
12330 Value *Mask = Builder.getInt32(Features1);
12331 Value *Bitset = Builder.CreateAnd(Features, Mask);
12332 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12333 Result = Builder.CreateAnd(Result, Cmp);
12334 }
12335
12336 if (Features2 != 0) {
12337 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
12338 "__cpu_features2");
12339 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
12340
12341 Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2,
12342 CharUnits::fromQuantity(4));
12343
12344 // Check the value of the bit corresponding to the feature requested.
12345 Value *Mask = Builder.getInt32(Features2);
12346 Value *Bitset = Builder.CreateAnd(Features, Mask);
12347 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12348 Result = Builder.CreateAnd(Result, Cmp);
12349 }
12350
12351 return Result;
12352}
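The emitted check requires every requested feature bit to be set, testing the low 32 bits against __cpu_model.__cpu_features[0] and the high 32 bits against __cpu_features2. A plain C++ sketch of that predicate (hypothetical helper):

#include <cstdint>

// Sketch: (Features & Mask) == Mask for each half of the 64-bit mask.
static inline bool CpuSupportsModel(uint32_t FeaturesWord1,
                                    uint32_t FeaturesWord2,
                                    uint64_t FeaturesMask) {
  uint32_t Lo = static_cast<uint32_t>(FeaturesMask);
  uint32_t Hi = static_cast<uint32_t>(FeaturesMask >> 32);
  bool Result = true;
  if (Lo != 0)
    Result = Result && ((FeaturesWord1 & Lo) == Lo);
  if (Hi != 0)
    Result = Result && ((FeaturesWord2 & Hi) == Hi);
  return Result;
}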
12353
12354Value *CodeGenFunction::EmitX86CpuInit() {
12355 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
12356 /*Variadic*/ false);
12357 llvm::FunctionCallee Func =
12358 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
12359 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
12360 cast<llvm::GlobalValue>(Func.getCallee())
12361 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
12362 return Builder.CreateCall(Func);
12363}
12364
12365Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
12366 const CallExpr *E) {
12367 if (BuiltinID == X86::BI__builtin_cpu_is)
12368 return EmitX86CpuIs(E);
12369 if (BuiltinID == X86::BI__builtin_cpu_supports)
12370 return EmitX86CpuSupports(E);
12371 if (BuiltinID == X86::BI__builtin_cpu_init)
12372 return EmitX86CpuInit();
12373
12374 // Handle MSVC intrinsics before argument evaluation to prevent double
12375 // evaluation.
12376 if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
12377 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
12378
12379 SmallVector<Value*, 4> Ops;
12380 bool IsMaskFCmp = false;
12381
12382 // Find out if any arguments are required to be integer constant expressions.
12383 unsigned ICEArguments = 0;
12384 ASTContext::GetBuiltinTypeError Error;
12385 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
12386   assert(Error == ASTContext::GE_None && "Should not codegen an error");
12387
12388 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
12389 // If this is a normal argument, just emit it as a scalar.
12390 if ((ICEArguments & (1 << i)) == 0) {
12391 Ops.push_back(EmitScalarExpr(E->getArg(i)));
12392 continue;
12393 }
12394
12395 // If this is required to be a constant, constant fold it so that we know
12396 // that the generated intrinsic gets a ConstantInt.
12397 Ops.push_back(llvm::ConstantInt::get(
12398 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
12399 }
12400
12401 // These exist so that the builtin that takes an immediate can be bounds
12402 // checked by clang to avoid passing bad immediates to the backend. Since
12403 // AVX has a larger immediate than SSE we would need separate builtins to
12404   // do the different bounds checking. Rather than create a clang-specific
12405   // SSE-only builtin, this implements eight separate builtins to match the
12406   // gcc implementation.
12407 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
12408 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
12409 llvm::Function *F = CGM.getIntrinsic(ID);
12410 return Builder.CreateCall(F, Ops);
12411 };
12412
12413 // For the vector forms of FP comparisons, translate the builtins directly to
12414 // IR.
12415 // TODO: The builtins could be removed if the SSE header files used vector
12416 // extension comparisons directly (vector ordered/unordered may need
12417 // additional support via __builtin_isnan()).
12418 auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
12419 bool IsSignaling) {
12420 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
12421 Value *Cmp;
12422 if (IsSignaling)
12423 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
12424 else
12425 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
12426 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
12427 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
12428 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
12429 return Builder.CreateBitCast(Sext, FPVecTy);
12430 };
12431
12432 switch (BuiltinID) {
12433 default: return nullptr;
12434 case X86::BI_mm_prefetch: {
12435 Value *Address = Ops[0];
12436 ConstantInt *C = cast<ConstantInt>(Ops[1]);
12437 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
12438 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
12439 Value *Data = ConstantInt::get(Int32Ty, 1);
12440 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
12441 return Builder.CreateCall(F, {Address, RW, Locality, Data});
12442 }
12443 case X86::BI_mm_clflush: {
12444 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
12445 Ops[0]);
12446 }
12447 case X86::BI_mm_lfence: {
12448 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
12449 }
12450 case X86::BI_mm_mfence: {
12451 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
12452 }
12453 case X86::BI_mm_sfence: {
12454 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
12455 }
12456 case X86::BI_mm_pause: {
12457 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
12458 }
12459 case X86::BI__rdtsc: {
12460 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
12461 }
12462 case X86::BI__builtin_ia32_rdtscp: {
12463 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
12464 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
12465 Ops[0]);
12466 return Builder.CreateExtractValue(Call, 0);
12467 }
12468 case X86::BI__builtin_ia32_lzcnt_u16:
12469 case X86::BI__builtin_ia32_lzcnt_u32:
12470 case X86::BI__builtin_ia32_lzcnt_u64: {
12471 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
12472 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12473 }
12474 case X86::BI__builtin_ia32_tzcnt_u16:
12475 case X86::BI__builtin_ia32_tzcnt_u32:
12476 case X86::BI__builtin_ia32_tzcnt_u64: {
12477 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
12478 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12479 }
12480 case X86::BI__builtin_ia32_undef128:
12481 case X86::BI__builtin_ia32_undef256:
12482 case X86::BI__builtin_ia32_undef512:
12483 // The x86 definition of "undef" is not the same as the LLVM definition
12484 // (PR32176). We leave optimizing away an unnecessary zero constant to the
12485 // IR optimizer and backend.
12486 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
12487 // value, we should use that here instead of a zero.
12488 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12489 case X86::BI__builtin_ia32_vec_init_v8qi:
12490 case X86::BI__builtin_ia32_vec_init_v4hi:
12491 case X86::BI__builtin_ia32_vec_init_v2si:
12492 return Builder.CreateBitCast(BuildVector(Ops),
12493 llvm::Type::getX86_MMXTy(getLLVMContext()));
12494 case X86::BI__builtin_ia32_vec_ext_v2si:
12495 case X86::BI__builtin_ia32_vec_ext_v16qi:
12496 case X86::BI__builtin_ia32_vec_ext_v8hi:
12497 case X86::BI__builtin_ia32_vec_ext_v4si:
12498 case X86::BI__builtin_ia32_vec_ext_v4sf:
12499 case X86::BI__builtin_ia32_vec_ext_v2di:
12500 case X86::BI__builtin_ia32_vec_ext_v32qi:
12501 case X86::BI__builtin_ia32_vec_ext_v16hi:
12502 case X86::BI__builtin_ia32_vec_ext_v8si:
12503 case X86::BI__builtin_ia32_vec_ext_v4di: {
12504 unsigned NumElts =
12505 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12506 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12507 Index &= NumElts - 1;
12508 // These builtins exist so we can ensure the index is an ICE and in range.
12509 // Otherwise we could just do this in the header file.
12510 return Builder.CreateExtractElement(Ops[0], Index);
12511 }
12512 case X86::BI__builtin_ia32_vec_set_v16qi:
12513 case X86::BI__builtin_ia32_vec_set_v8hi:
12514 case X86::BI__builtin_ia32_vec_set_v4si:
12515 case X86::BI__builtin_ia32_vec_set_v2di:
12516 case X86::BI__builtin_ia32_vec_set_v32qi:
12517 case X86::BI__builtin_ia32_vec_set_v16hi:
12518 case X86::BI__builtin_ia32_vec_set_v8si:
12519 case X86::BI__builtin_ia32_vec_set_v4di: {
12520 unsigned NumElts =
12521 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12522 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12523 Index &= NumElts - 1;
12524 // These builtins exist so we can ensure the index is an ICE and in range.
12525 // Otherwise we could just do this in the header file.
12526 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
12527 }
12528 case X86::BI_mm_setcsr:
12529 case X86::BI__builtin_ia32_ldmxcsr: {
12530 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
12531 Builder.CreateStore(Ops[0], Tmp);
12532 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
12533 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12534 }
12535 case X86::BI_mm_getcsr:
12536 case X86::BI__builtin_ia32_stmxcsr: {
12537 Address Tmp = CreateMemTemp(E->getType());
12538 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
12539 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12540 return Builder.CreateLoad(Tmp, "stmxcsr");
12541 }
12542 case X86::BI__builtin_ia32_xsave:
12543 case X86::BI__builtin_ia32_xsave64:
12544 case X86::BI__builtin_ia32_xrstor:
12545 case X86::BI__builtin_ia32_xrstor64:
12546 case X86::BI__builtin_ia32_xsaveopt:
12547 case X86::BI__builtin_ia32_xsaveopt64:
12548 case X86::BI__builtin_ia32_xrstors:
12549 case X86::BI__builtin_ia32_xrstors64:
12550 case X86::BI__builtin_ia32_xsavec:
12551 case X86::BI__builtin_ia32_xsavec64:
12552 case X86::BI__builtin_ia32_xsaves:
12553 case X86::BI__builtin_ia32_xsaves64:
12554 case X86::BI__builtin_ia32_xsetbv:
12555 case X86::BI_xsetbv: {
12556 Intrinsic::ID ID;
12557#define INTRINSIC_X86_XSAVE_ID(NAME) \
12558 case X86::BI__builtin_ia32_##NAME: \
12559 ID = Intrinsic::x86_##NAME; \
12560 break
12561 switch (BuiltinID) {
12562     default: llvm_unreachable("Unsupported intrinsic!");
12563 INTRINSIC_X86_XSAVE_ID(xsave);
12564 INTRINSIC_X86_XSAVE_ID(xsave64);
12565 INTRINSIC_X86_XSAVE_ID(xrstor);
12566 INTRINSIC_X86_XSAVE_ID(xrstor64);
12567 INTRINSIC_X86_XSAVE_ID(xsaveopt);
12568 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
12569 INTRINSIC_X86_XSAVE_ID(xrstors);
12570 INTRINSIC_X86_XSAVE_ID(xrstors64);
12571 INTRINSIC_X86_XSAVE_ID(xsavec);
12572 INTRINSIC_X86_XSAVE_ID(xsavec64);
12573 INTRINSIC_X86_XSAVE_ID(xsaves);
12574 INTRINSIC_X86_XSAVE_ID(xsaves64);
12575 INTRINSIC_X86_XSAVE_ID(xsetbv);
12576 case X86::BI_xsetbv:
12577 ID = Intrinsic::x86_xsetbv;
12578 break;
12579 }
12580#undef INTRINSIC_X86_XSAVE_ID
12581 Value *Mhi = Builder.CreateTrunc(
12582 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
12583 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
12584 Ops[1] = Mhi;
12585 Ops.push_back(Mlo);
12586 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12587 }
12588 case X86::BI__builtin_ia32_xgetbv:
12589 case X86::BI_xgetbv:
12590 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
12591 case X86::BI__builtin_ia32_storedqudi128_mask:
12592 case X86::BI__builtin_ia32_storedqusi128_mask:
12593 case X86::BI__builtin_ia32_storedquhi128_mask:
12594 case X86::BI__builtin_ia32_storedquqi128_mask:
12595 case X86::BI__builtin_ia32_storeupd128_mask:
12596 case X86::BI__builtin_ia32_storeups128_mask:
12597 case X86::BI__builtin_ia32_storedqudi256_mask:
12598 case X86::BI__builtin_ia32_storedqusi256_mask:
12599 case X86::BI__builtin_ia32_storedquhi256_mask:
12600 case X86::BI__builtin_ia32_storedquqi256_mask:
12601 case X86::BI__builtin_ia32_storeupd256_mask:
12602 case X86::BI__builtin_ia32_storeups256_mask:
12603 case X86::BI__builtin_ia32_storedqudi512_mask:
12604 case X86::BI__builtin_ia32_storedqusi512_mask:
12605 case X86::BI__builtin_ia32_storedquhi512_mask:
12606 case X86::BI__builtin_ia32_storedquqi512_mask:
12607 case X86::BI__builtin_ia32_storeupd512_mask:
12608 case X86::BI__builtin_ia32_storeups512_mask:
12609 return EmitX86MaskedStore(*this, Ops, Align(1));
12610
12611 case X86::BI__builtin_ia32_storess128_mask:
12612 case X86::BI__builtin_ia32_storesd128_mask:
12613 return EmitX86MaskedStore(*this, Ops, Align(1));
12614
12615 case X86::BI__builtin_ia32_vpopcntb_128:
12616 case X86::BI__builtin_ia32_vpopcntd_128:
12617 case X86::BI__builtin_ia32_vpopcntq_128:
12618 case X86::BI__builtin_ia32_vpopcntw_128:
12619 case X86::BI__builtin_ia32_vpopcntb_256:
12620 case X86::BI__builtin_ia32_vpopcntd_256:
12621 case X86::BI__builtin_ia32_vpopcntq_256:
12622 case X86::BI__builtin_ia32_vpopcntw_256:
12623 case X86::BI__builtin_ia32_vpopcntb_512:
12624 case X86::BI__builtin_ia32_vpopcntd_512:
12625 case X86::BI__builtin_ia32_vpopcntq_512:
12626 case X86::BI__builtin_ia32_vpopcntw_512: {
12627 llvm::Type *ResultType = ConvertType(E->getType());
12628 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12629 return Builder.CreateCall(F, Ops);
12630 }
12631 case X86::BI__builtin_ia32_cvtmask2b128:
12632 case X86::BI__builtin_ia32_cvtmask2b256:
12633 case X86::BI__builtin_ia32_cvtmask2b512:
12634 case X86::BI__builtin_ia32_cvtmask2w128:
12635 case X86::BI__builtin_ia32_cvtmask2w256:
12636 case X86::BI__builtin_ia32_cvtmask2w512:
12637 case X86::BI__builtin_ia32_cvtmask2d128:
12638 case X86::BI__builtin_ia32_cvtmask2d256:
12639 case X86::BI__builtin_ia32_cvtmask2d512:
12640 case X86::BI__builtin_ia32_cvtmask2q128:
12641 case X86::BI__builtin_ia32_cvtmask2q256:
12642 case X86::BI__builtin_ia32_cvtmask2q512:
12643 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12644
12645 case X86::BI__builtin_ia32_cvtb2mask128:
12646 case X86::BI__builtin_ia32_cvtb2mask256:
12647 case X86::BI__builtin_ia32_cvtb2mask512:
12648 case X86::BI__builtin_ia32_cvtw2mask128:
12649 case X86::BI__builtin_ia32_cvtw2mask256:
12650 case X86::BI__builtin_ia32_cvtw2mask512:
12651 case X86::BI__builtin_ia32_cvtd2mask128:
12652 case X86::BI__builtin_ia32_cvtd2mask256:
12653 case X86::BI__builtin_ia32_cvtd2mask512:
12654 case X86::BI__builtin_ia32_cvtq2mask128:
12655 case X86::BI__builtin_ia32_cvtq2mask256:
12656 case X86::BI__builtin_ia32_cvtq2mask512:
12657 return EmitX86ConvertToMask(*this, Ops[0]);
12658
12659 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12660 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12661 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12662 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
12663 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12664 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12665 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12666 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
12667
12668 case X86::BI__builtin_ia32_vfmaddss3:
12669 case X86::BI__builtin_ia32_vfmaddsd3:
12670 case X86::BI__builtin_ia32_vfmaddss3_mask:
12671 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12672 return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
12673 case X86::BI__builtin_ia32_vfmaddss:
12674 case X86::BI__builtin_ia32_vfmaddsd:
12675 return EmitScalarFMAExpr(*this, E, Ops,
12676 Constant::getNullValue(Ops[0]->getType()));
12677 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12678 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12679 return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
12680 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12681 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12682 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
12683 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12684 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12685 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
12686 /*NegAcc*/ true);
12687 case X86::BI__builtin_ia32_vfmaddps:
12688 case X86::BI__builtin_ia32_vfmaddpd:
12689 case X86::BI__builtin_ia32_vfmaddps256:
12690 case X86::BI__builtin_ia32_vfmaddpd256:
12691 case X86::BI__builtin_ia32_vfmaddps512_mask:
12692 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12693 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12694 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12695 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12696 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12697 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12698 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12699 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
12700 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12701 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12702 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12703 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12704 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12705 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12706 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12707 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12708 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
12709
12710 case X86::BI__builtin_ia32_movdqa32store128_mask:
12711 case X86::BI__builtin_ia32_movdqa64store128_mask:
12712 case X86::BI__builtin_ia32_storeaps128_mask:
12713 case X86::BI__builtin_ia32_storeapd128_mask:
12714 case X86::BI__builtin_ia32_movdqa32store256_mask:
12715 case X86::BI__builtin_ia32_movdqa64store256_mask:
12716 case X86::BI__builtin_ia32_storeaps256_mask:
12717 case X86::BI__builtin_ia32_storeapd256_mask:
12718 case X86::BI__builtin_ia32_movdqa32store512_mask:
12719 case X86::BI__builtin_ia32_movdqa64store512_mask:
12720 case X86::BI__builtin_ia32_storeaps512_mask:
12721 case X86::BI__builtin_ia32_storeapd512_mask:
12722 return EmitX86MaskedStore(
12723 *this, Ops,
12724 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12725
12726 case X86::BI__builtin_ia32_loadups128_mask:
12727 case X86::BI__builtin_ia32_loadups256_mask:
12728 case X86::BI__builtin_ia32_loadups512_mask:
12729 case X86::BI__builtin_ia32_loadupd128_mask:
12730 case X86::BI__builtin_ia32_loadupd256_mask:
12731 case X86::BI__builtin_ia32_loadupd512_mask:
12732 case X86::BI__builtin_ia32_loaddquqi128_mask:
12733 case X86::BI__builtin_ia32_loaddquqi256_mask:
12734 case X86::BI__builtin_ia32_loaddquqi512_mask:
12735 case X86::BI__builtin_ia32_loaddquhi128_mask:
12736 case X86::BI__builtin_ia32_loaddquhi256_mask:
12737 case X86::BI__builtin_ia32_loaddquhi512_mask:
12738 case X86::BI__builtin_ia32_loaddqusi128_mask:
12739 case X86::BI__builtin_ia32_loaddqusi256_mask:
12740 case X86::BI__builtin_ia32_loaddqusi512_mask:
12741 case X86::BI__builtin_ia32_loaddqudi128_mask:
12742 case X86::BI__builtin_ia32_loaddqudi256_mask:
12743 case X86::BI__builtin_ia32_loaddqudi512_mask:
12744 return EmitX86MaskedLoad(*this, Ops, Align(1));
12745
12746 case X86::BI__builtin_ia32_loadss128_mask:
12747 case X86::BI__builtin_ia32_loadsd128_mask:
12748 return EmitX86MaskedLoad(*this, Ops, Align(1));
12749
12750 case X86::BI__builtin_ia32_loadaps128_mask:
12751 case X86::BI__builtin_ia32_loadaps256_mask:
12752 case X86::BI__builtin_ia32_loadaps512_mask:
12753 case X86::BI__builtin_ia32_loadapd128_mask:
12754 case X86::BI__builtin_ia32_loadapd256_mask:
12755 case X86::BI__builtin_ia32_loadapd512_mask:
12756 case X86::BI__builtin_ia32_movdqa32load128_mask:
12757 case X86::BI__builtin_ia32_movdqa32load256_mask:
12758 case X86::BI__builtin_ia32_movdqa32load512_mask:
12759 case X86::BI__builtin_ia32_movdqa64load128_mask:
12760 case X86::BI__builtin_ia32_movdqa64load256_mask:
12761 case X86::BI__builtin_ia32_movdqa64load512_mask:
12762 return EmitX86MaskedLoad(
12763 *this, Ops,
12764 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12765
12766 case X86::BI__builtin_ia32_expandloaddf128_mask:
12767 case X86::BI__builtin_ia32_expandloaddf256_mask:
12768 case X86::BI__builtin_ia32_expandloaddf512_mask:
12769 case X86::BI__builtin_ia32_expandloadsf128_mask:
12770 case X86::BI__builtin_ia32_expandloadsf256_mask:
12771 case X86::BI__builtin_ia32_expandloadsf512_mask:
12772 case X86::BI__builtin_ia32_expandloaddi128_mask:
12773 case X86::BI__builtin_ia32_expandloaddi256_mask:
12774 case X86::BI__builtin_ia32_expandloaddi512_mask:
12775 case X86::BI__builtin_ia32_expandloadsi128_mask:
12776 case X86::BI__builtin_ia32_expandloadsi256_mask:
12777 case X86::BI__builtin_ia32_expandloadsi512_mask:
12778 case X86::BI__builtin_ia32_expandloadhi128_mask:
12779 case X86::BI__builtin_ia32_expandloadhi256_mask:
12780 case X86::BI__builtin_ia32_expandloadhi512_mask:
12781 case X86::BI__builtin_ia32_expandloadqi128_mask:
12782 case X86::BI__builtin_ia32_expandloadqi256_mask:
12783 case X86::BI__builtin_ia32_expandloadqi512_mask:
12784 return EmitX86ExpandLoad(*this, Ops);
12785
12786 case X86::BI__builtin_ia32_compressstoredf128_mask:
12787 case X86::BI__builtin_ia32_compressstoredf256_mask:
12788 case X86::BI__builtin_ia32_compressstoredf512_mask:
12789 case X86::BI__builtin_ia32_compressstoresf128_mask:
12790 case X86::BI__builtin_ia32_compressstoresf256_mask:
12791 case X86::BI__builtin_ia32_compressstoresf512_mask:
12792 case X86::BI__builtin_ia32_compressstoredi128_mask:
12793 case X86::BI__builtin_ia32_compressstoredi256_mask:
12794 case X86::BI__builtin_ia32_compressstoredi512_mask:
12795 case X86::BI__builtin_ia32_compressstoresi128_mask:
12796 case X86::BI__builtin_ia32_compressstoresi256_mask:
12797 case X86::BI__builtin_ia32_compressstoresi512_mask:
12798 case X86::BI__builtin_ia32_compressstorehi128_mask:
12799 case X86::BI__builtin_ia32_compressstorehi256_mask:
12800 case X86::BI__builtin_ia32_compressstorehi512_mask:
12801 case X86::BI__builtin_ia32_compressstoreqi128_mask:
12802 case X86::BI__builtin_ia32_compressstoreqi256_mask:
12803 case X86::BI__builtin_ia32_compressstoreqi512_mask:
12804 return EmitX86CompressStore(*this, Ops);
12805
12806 case X86::BI__builtin_ia32_expanddf128_mask:
12807 case X86::BI__builtin_ia32_expanddf256_mask:
12808 case X86::BI__builtin_ia32_expanddf512_mask:
12809 case X86::BI__builtin_ia32_expandsf128_mask:
12810 case X86::BI__builtin_ia32_expandsf256_mask:
12811 case X86::BI__builtin_ia32_expandsf512_mask:
12812 case X86::BI__builtin_ia32_expanddi128_mask:
12813 case X86::BI__builtin_ia32_expanddi256_mask:
12814 case X86::BI__builtin_ia32_expanddi512_mask:
12815 case X86::BI__builtin_ia32_expandsi128_mask:
12816 case X86::BI__builtin_ia32_expandsi256_mask:
12817 case X86::BI__builtin_ia32_expandsi512_mask:
12818 case X86::BI__builtin_ia32_expandhi128_mask:
12819 case X86::BI__builtin_ia32_expandhi256_mask:
12820 case X86::BI__builtin_ia32_expandhi512_mask:
12821 case X86::BI__builtin_ia32_expandqi128_mask:
12822 case X86::BI__builtin_ia32_expandqi256_mask:
12823 case X86::BI__builtin_ia32_expandqi512_mask:
12824 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
12825
12826 case X86::BI__builtin_ia32_compressdf128_mask:
12827 case X86::BI__builtin_ia32_compressdf256_mask:
12828 case X86::BI__builtin_ia32_compressdf512_mask:
12829 case X86::BI__builtin_ia32_compresssf128_mask:
12830 case X86::BI__builtin_ia32_compresssf256_mask:
12831 case X86::BI__builtin_ia32_compresssf512_mask:
12832 case X86::BI__builtin_ia32_compressdi128_mask:
12833 case X86::BI__builtin_ia32_compressdi256_mask:
12834 case X86::BI__builtin_ia32_compressdi512_mask:
12835 case X86::BI__builtin_ia32_compresssi128_mask:
12836 case X86::BI__builtin_ia32_compresssi256_mask:
12837 case X86::BI__builtin_ia32_compresssi512_mask:
12838 case X86::BI__builtin_ia32_compresshi128_mask:
12839 case X86::BI__builtin_ia32_compresshi256_mask:
12840 case X86::BI__builtin_ia32_compresshi512_mask:
12841 case X86::BI__builtin_ia32_compressqi128_mask:
12842 case X86::BI__builtin_ia32_compressqi256_mask:
12843 case X86::BI__builtin_ia32_compressqi512_mask:
12844 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
12845
12846 case X86::BI__builtin_ia32_gather3div2df:
12847 case X86::BI__builtin_ia32_gather3div2di:
12848 case X86::BI__builtin_ia32_gather3div4df:
12849 case X86::BI__builtin_ia32_gather3div4di:
12850 case X86::BI__builtin_ia32_gather3div4sf:
12851 case X86::BI__builtin_ia32_gather3div4si:
12852 case X86::BI__builtin_ia32_gather3div8sf:
12853 case X86::BI__builtin_ia32_gather3div8si:
12854 case X86::BI__builtin_ia32_gather3siv2df:
12855 case X86::BI__builtin_ia32_gather3siv2di:
12856 case X86::BI__builtin_ia32_gather3siv4df:
12857 case X86::BI__builtin_ia32_gather3siv4di:
12858 case X86::BI__builtin_ia32_gather3siv4sf:
12859 case X86::BI__builtin_ia32_gather3siv4si:
12860 case X86::BI__builtin_ia32_gather3siv8sf:
12861 case X86::BI__builtin_ia32_gather3siv8si:
12862 case X86::BI__builtin_ia32_gathersiv8df:
12863 case X86::BI__builtin_ia32_gathersiv16sf:
12864 case X86::BI__builtin_ia32_gatherdiv8df:
12865 case X86::BI__builtin_ia32_gatherdiv16sf:
12866 case X86::BI__builtin_ia32_gathersiv8di:
12867 case X86::BI__builtin_ia32_gathersiv16si:
12868 case X86::BI__builtin_ia32_gatherdiv8di:
12869 case X86::BI__builtin_ia32_gatherdiv16si: {
12870 Intrinsic::ID IID;
12871 switch (BuiltinID) {
12872     default: llvm_unreachable("Unexpected builtin");
12873 case X86::BI__builtin_ia32_gather3div2df:
12874 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
12875 break;
12876 case X86::BI__builtin_ia32_gather3div2di:
12877 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
12878 break;
12879 case X86::BI__builtin_ia32_gather3div4df:
12880 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
12881 break;
12882 case X86::BI__builtin_ia32_gather3div4di:
12883 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
12884 break;
12885 case X86::BI__builtin_ia32_gather3div4sf:
12886 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
12887 break;
12888 case X86::BI__builtin_ia32_gather3div4si:
12889 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
12890 break;
12891 case X86::BI__builtin_ia32_gather3div8sf:
12892 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
12893 break;
12894 case X86::BI__builtin_ia32_gather3div8si:
12895 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
12896 break;
12897 case X86::BI__builtin_ia32_gather3siv2df:
12898 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
12899 break;
12900 case X86::BI__builtin_ia32_gather3siv2di:
12901 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
12902 break;
12903 case X86::BI__builtin_ia32_gather3siv4df:
12904 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
12905 break;
12906 case X86::BI__builtin_ia32_gather3siv4di:
12907 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
12908 break;
12909 case X86::BI__builtin_ia32_gather3siv4sf:
12910 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
12911 break;
12912 case X86::BI__builtin_ia32_gather3siv4si:
12913 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
12914 break;
12915 case X86::BI__builtin_ia32_gather3siv8sf:
12916 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
12917 break;
12918 case X86::BI__builtin_ia32_gather3siv8si:
12919 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
12920 break;
12921 case X86::BI__builtin_ia32_gathersiv8df:
12922 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
12923 break;
12924 case X86::BI__builtin_ia32_gathersiv16sf:
12925 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
12926 break;
12927 case X86::BI__builtin_ia32_gatherdiv8df:
12928 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
12929 break;
12930 case X86::BI__builtin_ia32_gatherdiv16sf:
12931 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
12932 break;
12933 case X86::BI__builtin_ia32_gathersiv8di:
12934 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
12935 break;
12936 case X86::BI__builtin_ia32_gathersiv16si:
12937 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
12938 break;
12939 case X86::BI__builtin_ia32_gatherdiv8di:
12940 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
12941 break;
12942 case X86::BI__builtin_ia32_gatherdiv16si:
12943 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
12944 break;
12945 }
12946
12947 unsigned MinElts = std::min(
12948 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
12949 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
12950 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
12951 Function *Intr = CGM.getIntrinsic(IID);
12952 return Builder.CreateCall(Intr, Ops);
12953 }
12954
12955 case X86::BI__builtin_ia32_scattersiv8df:
12956 case X86::BI__builtin_ia32_scattersiv16sf:
12957 case X86::BI__builtin_ia32_scatterdiv8df:
12958 case X86::BI__builtin_ia32_scatterdiv16sf:
12959 case X86::BI__builtin_ia32_scattersiv8di:
12960 case X86::BI__builtin_ia32_scattersiv16si:
12961 case X86::BI__builtin_ia32_scatterdiv8di:
12962 case X86::BI__builtin_ia32_scatterdiv16si:
12963 case X86::BI__builtin_ia32_scatterdiv2df:
12964 case X86::BI__builtin_ia32_scatterdiv2di:
12965 case X86::BI__builtin_ia32_scatterdiv4df:
12966 case X86::BI__builtin_ia32_scatterdiv4di:
12967 case X86::BI__builtin_ia32_scatterdiv4sf:
12968 case X86::BI__builtin_ia32_scatterdiv4si:
12969 case X86::BI__builtin_ia32_scatterdiv8sf:
12970 case X86::BI__builtin_ia32_scatterdiv8si:
12971 case X86::BI__builtin_ia32_scattersiv2df:
12972 case X86::BI__builtin_ia32_scattersiv2di:
12973 case X86::BI__builtin_ia32_scattersiv4df:
12974 case X86::BI__builtin_ia32_scattersiv4di:
12975 case X86::BI__builtin_ia32_scattersiv4sf:
12976 case X86::BI__builtin_ia32_scattersiv4si:
12977 case X86::BI__builtin_ia32_scattersiv8sf:
12978 case X86::BI__builtin_ia32_scattersiv8si: {
12979 Intrinsic::ID IID;
12980 switch (BuiltinID) {
12981     default: llvm_unreachable("Unexpected builtin");
12982 case X86::BI__builtin_ia32_scattersiv8df:
12983 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
12984 break;
12985 case X86::BI__builtin_ia32_scattersiv16sf:
12986 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
12987 break;
12988 case X86::BI__builtin_ia32_scatterdiv8df:
12989 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
12990 break;
12991 case X86::BI__builtin_ia32_scatterdiv16sf:
12992 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
12993 break;
12994 case X86::BI__builtin_ia32_scattersiv8di:
12995 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
12996 break;
12997 case X86::BI__builtin_ia32_scattersiv16si:
12998 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
12999 break;
13000 case X86::BI__builtin_ia32_scatterdiv8di:
13001 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
13002 break;
13003 case X86::BI__builtin_ia32_scatterdiv16si:
13004 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
13005 break;
13006 case X86::BI__builtin_ia32_scatterdiv2df:
13007 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
13008 break;
13009 case X86::BI__builtin_ia32_scatterdiv2di:
13010 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
13011 break;
13012 case X86::BI__builtin_ia32_scatterdiv4df:
13013 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
13014 break;
13015 case X86::BI__builtin_ia32_scatterdiv4di:
13016 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
13017 break;
13018 case X86::BI__builtin_ia32_scatterdiv4sf:
13019 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
13020 break;
13021 case X86::BI__builtin_ia32_scatterdiv4si:
13022 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
13023 break;
13024 case X86::BI__builtin_ia32_scatterdiv8sf:
13025 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
13026 break;
13027 case X86::BI__builtin_ia32_scatterdiv8si:
13028 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
13029 break;
13030 case X86::BI__builtin_ia32_scattersiv2df:
13031 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
13032 break;
13033 case X86::BI__builtin_ia32_scattersiv2di:
13034 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
13035 break;
13036 case X86::BI__builtin_ia32_scattersiv4df:
13037 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
13038 break;
13039 case X86::BI__builtin_ia32_scattersiv4di:
13040 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
13041 break;
13042 case X86::BI__builtin_ia32_scattersiv4sf:
13043 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
13044 break;
13045 case X86::BI__builtin_ia32_scattersiv4si:
13046 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
13047 break;
13048 case X86::BI__builtin_ia32_scattersiv8sf:
13049 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
13050 break;
13051 case X86::BI__builtin_ia32_scattersiv8si:
13052 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
13053 break;
13054 }
13055
13056 unsigned MinElts = std::min(
13057 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
13058 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
13059 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
13060 Function *Intr = CGM.getIntrinsic(IID);
13061 return Builder.CreateCall(Intr, Ops);
13062 }
13063
13064 case X86::BI__builtin_ia32_vextractf128_pd256:
13065 case X86::BI__builtin_ia32_vextractf128_ps256:
13066 case X86::BI__builtin_ia32_vextractf128_si256:
13067 case X86::BI__builtin_ia32_extract128i256:
13068 case X86::BI__builtin_ia32_extractf64x4_mask:
13069 case X86::BI__builtin_ia32_extractf32x4_mask:
13070 case X86::BI__builtin_ia32_extracti64x4_mask:
13071 case X86::BI__builtin_ia32_extracti32x4_mask:
13072 case X86::BI__builtin_ia32_extractf32x8_mask:
13073 case X86::BI__builtin_ia32_extracti32x8_mask:
13074 case X86::BI__builtin_ia32_extractf32x4_256_mask:
13075 case X86::BI__builtin_ia32_extracti32x4_256_mask:
13076 case X86::BI__builtin_ia32_extractf64x2_256_mask:
13077 case X86::BI__builtin_ia32_extracti64x2_256_mask:
13078 case X86::BI__builtin_ia32_extractf64x2_512_mask:
13079 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
13080 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
13081 unsigned NumElts = DstTy->getNumElements();
13082 unsigned SrcNumElts =
13083 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13084 unsigned SubVectors = SrcNumElts / NumElts;
13085 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
13086     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13087 Index &= SubVectors - 1; // Remove any extra bits.
13088 Index *= NumElts;
13089
13090 int Indices[16];
13091 for (unsigned i = 0; i != NumElts; ++i)
13092 Indices[i] = i + Index;
13093
13094 Value *Res = Builder.CreateShuffleVector(Ops[0],
13095 makeArrayRef(Indices, NumElts),
13096 "extract");
13097
13098 if (Ops.size() == 4)
13099 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
13100
13101 return Res;
13102 }
13103 case X86::BI__builtin_ia32_vinsertf128_pd256:
13104 case X86::BI__builtin_ia32_vinsertf128_ps256:
13105 case X86::BI__builtin_ia32_vinsertf128_si256:
13106 case X86::BI__builtin_ia32_insert128i256:
13107 case X86::BI__builtin_ia32_insertf64x4:
13108 case X86::BI__builtin_ia32_insertf32x4:
13109 case X86::BI__builtin_ia32_inserti64x4:
13110 case X86::BI__builtin_ia32_inserti32x4:
13111 case X86::BI__builtin_ia32_insertf32x8:
13112 case X86::BI__builtin_ia32_inserti32x8:
13113 case X86::BI__builtin_ia32_insertf32x4_256:
13114 case X86::BI__builtin_ia32_inserti32x4_256:
13115 case X86::BI__builtin_ia32_insertf64x2_256:
13116 case X86::BI__builtin_ia32_inserti64x2_256:
13117 case X86::BI__builtin_ia32_insertf64x2_512:
13118 case X86::BI__builtin_ia32_inserti64x2_512: {
13119 unsigned DstNumElts =
13120 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13121 unsigned SrcNumElts =
13122 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
13123 unsigned SubVectors = DstNumElts / SrcNumElts;
13124 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
13125     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13126 Index &= SubVectors - 1; // Remove any extra bits.
13127 Index *= SrcNumElts;
13128
13129 int Indices[16];
13130 for (unsigned i = 0; i != DstNumElts; ++i)
13131 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
13132
13133 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
13134 makeArrayRef(Indices, DstNumElts),
13135 "widen");
13136
13137 for (unsigned i = 0; i != DstNumElts; ++i) {
13138 if (i >= Index && i < (Index + SrcNumElts))
13139 Indices[i] = (i - Index) + DstNumElts;
13140 else
13141 Indices[i] = i;
13142 }
13143
13144 return Builder.CreateShuffleVector(Ops[0], Op1,
13145 makeArrayRef(Indices, DstNumElts),
13146 "insert");
13147 }
13148 case X86::BI__builtin_ia32_pmovqd512_mask:
13149 case X86::BI__builtin_ia32_pmovwb512_mask: {
13150 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
13151 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
13152 }
13153 case X86::BI__builtin_ia32_pmovdb512_mask:
13154 case X86::BI__builtin_ia32_pmovdw512_mask:
13155 case X86::BI__builtin_ia32_pmovqw512_mask: {
13156 if (const auto *C = dyn_cast<Constant>(Ops[2]))
13157 if (C->isAllOnesValue())
13158 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
13159
13160 Intrinsic::ID IID;
13161 switch (BuiltinID) {
13162     default: llvm_unreachable("Unsupported intrinsic!");
13163 case X86::BI__builtin_ia32_pmovdb512_mask:
13164 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
13165 break;
13166 case X86::BI__builtin_ia32_pmovdw512_mask:
13167 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
13168 break;
13169 case X86::BI__builtin_ia32_pmovqw512_mask:
13170 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
13171 break;
13172 }
13173
13174 Function *Intr = CGM.getIntrinsic(IID);
13175 return Builder.CreateCall(Intr, Ops);
13176 }
13177 case X86::BI__builtin_ia32_pblendw128:
13178 case X86::BI__builtin_ia32_blendpd:
13179 case X86::BI__builtin_ia32_blendps:
13180 case X86::BI__builtin_ia32_blendpd256:
13181 case X86::BI__builtin_ia32_blendps256:
13182 case X86::BI__builtin_ia32_pblendw256:
13183 case X86::BI__builtin_ia32_pblendd128:
13184 case X86::BI__builtin_ia32_pblendd256: {
13185 unsigned NumElts =
13186 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13187 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13188
13189 int Indices[16];
13190 // If there are more than 8 elements, the immediate is used twice so make
13191 // sure we handle that.
13192 for (unsigned i = 0; i != NumElts; ++i)
13193 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
13194
13195 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13196 makeArrayRef(Indices, NumElts),
13197 "blend");
13198 }
13199 case X86::BI__builtin_ia32_pshuflw:
13200 case X86::BI__builtin_ia32_pshuflw256:
13201 case X86::BI__builtin_ia32_pshuflw512: {
13202 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13203 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13204 unsigned NumElts = Ty->getNumElements();
13205
13206 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13207 Imm = (Imm & 0xff) * 0x01010101;
13208
13209 int Indices[32];
13210 for (unsigned l = 0; l != NumElts; l += 8) {
13211 for (unsigned i = 0; i != 4; ++i) {
13212 Indices[l + i] = l + (Imm & 3);
13213 Imm >>= 2;
13214 }
13215 for (unsigned i = 4; i != 8; ++i)
13216 Indices[l + i] = l + i;
13217 }
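// For example, Imm == 0x1B produces {l+3, l+2, l+1, l+0, l+4, l+5, l+6, l+7}
// in each 128-bit lane: the low four words are reversed and the high four pass
// through unchanged.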
13218
13219 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13220 "pshuflw");
13221 }
13222 case X86::BI__builtin_ia32_pshufhw:
13223 case X86::BI__builtin_ia32_pshufhw256:
13224 case X86::BI__builtin_ia32_pshufhw512: {
13225 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13226 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13227 unsigned NumElts = Ty->getNumElements();
13228
13229 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13230 Imm = (Imm & 0xff) * 0x01010101;
13231
13232 int Indices[32];
13233 for (unsigned l = 0; l != NumElts; l += 8) {
13234 for (unsigned i = 0; i != 4; ++i)
13235 Indices[l + i] = l + i;
13236 for (unsigned i = 4; i != 8; ++i) {
13237 Indices[l + i] = l + 4 + (Imm & 3);
13238 Imm >>= 2;
13239 }
13240 }
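// Mirror of pshuflw: e.g. Imm == 0x1B yields {l+0, l+1, l+2, l+3, l+7, l+6,
// l+5, l+4} per lane, reversing only the high four words.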
13241
13242 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13243 "pshufhw");
13244 }
13245 case X86::BI__builtin_ia32_pshufd:
13246 case X86::BI__builtin_ia32_pshufd256:
13247 case X86::BI__builtin_ia32_pshufd512:
13248 case X86::BI__builtin_ia32_vpermilpd:
13249 case X86::BI__builtin_ia32_vpermilps:
13250 case X86::BI__builtin_ia32_vpermilpd256:
13251 case X86::BI__builtin_ia32_vpermilps256:
13252 case X86::BI__builtin_ia32_vpermilpd512:
13253 case X86::BI__builtin_ia32_vpermilps512: {
13254 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13255 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13256 unsigned NumElts = Ty->getNumElements();
13257 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13258 unsigned NumLaneElts = NumElts / NumLanes;
13259
13260 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13261 Imm = (Imm & 0xff) * 0x01010101;
13262
13263 int Indices[16];
13264 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13265 for (unsigned i = 0; i != NumLaneElts; ++i) {
13266 Indices[i + l] = (Imm % NumLaneElts) + l;
13267 Imm /= NumLaneElts;
13268 }
13269 }
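// Illustration: for pshufd (NumLaneElts == 4), Imm == 0x1B decodes to
// {l+3, l+2, l+1, l+0} in every lane; for vpermilpd (NumLaneElts == 2) the
// immediate is consumed one bit per element instead of two.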
13270
13271 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13272 "permil");
13273 }
13274 case X86::BI__builtin_ia32_shufpd:
13275 case X86::BI__builtin_ia32_shufpd256:
13276 case X86::BI__builtin_ia32_shufpd512:
13277 case X86::BI__builtin_ia32_shufps:
13278 case X86::BI__builtin_ia32_shufps256:
13279 case X86::BI__builtin_ia32_shufps512: {
13280 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13281 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13282 unsigned NumElts = Ty->getNumElements();
13283 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13284 unsigned NumLaneElts = NumElts / NumLanes;
13285
13286 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13287 Imm = (Imm & 0xff) * 0x01010101;
13288
13289 int Indices[16];
13290 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13291 for (unsigned i = 0; i != NumLaneElts; ++i) {
13292 unsigned Index = Imm % NumLaneElts;
13293 Imm /= NumLaneElts;
13294 if (i >= (NumLaneElts / 2))
13295 Index += NumElts;
13296 Indices[l + i] = l + Index;
13297 }
13298 }
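// Sketch of the decode: for 128-bit shufps with Imm == 0x4E the loop yields
// Indices {2, 3, 4, 5} -- the low half of the result takes elements 2 and 3 of
// Ops[0], the high half takes elements 0 and 1 of Ops[1] (offset by NumElts).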
13299
13300 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13301 makeArrayRef(Indices, NumElts),
13302 "shufp");
13303 }
13304 case X86::BI__builtin_ia32_permdi256:
13305 case X86::BI__builtin_ia32_permdf256:
13306 case X86::BI__builtin_ia32_permdi512:
13307 case X86::BI__builtin_ia32_permdf512: {
13308 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13309 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13310 unsigned NumElts = Ty->getNumElements();
13311
13312 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
13313 int Indices[8];
13314 for (unsigned l = 0; l != NumElts; l += 4)
13315 for (unsigned i = 0; i != 4; ++i)
13316 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
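// E.g. Imm == 0x1B selects {l+3, l+2, l+1, l+0} within each group of four
// 64-bit elements (one group for the 256-bit forms, two for the 512-bit
// forms).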
13317
13318 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13319 "perm");
13320 }
13321 case X86::BI__builtin_ia32_palignr128:
13322 case X86::BI__builtin_ia32_palignr256:
13323 case X86::BI__builtin_ia32_palignr512: {
13324 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13325
13326 unsigned NumElts =
13327 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13328 assert(NumElts % 16 == 0);
13329
13330 // If palignr is shifting the pair of vectors by two lanes (32 bytes) or
13331 // more, emit zero.
13332 if (ShiftVal >= 32)
13333 return llvm::Constant::getNullValue(ConvertType(E->getType()));
13334
13335 // If palignr is shifting the pair of input vectors more than one lane,
13336 // but less than two lanes, convert to shifting in zeroes.
13337 if (ShiftVal > 16) {
13338 ShiftVal -= 16;
13339 Ops[1] = Ops[0];
13340 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
13341 }
13342
13343 int Indices[64];
13344 // 256/512-bit palignr operates on 128-bit lanes so we need to handle that
13345 for (unsigned l = 0; l != NumElts; l += 16) {
13346 for (unsigned i = 0; i != 16; ++i) {
13347 unsigned Idx = ShiftVal + i;
13348 if (Idx >= 16)
13349 Idx += NumElts - 16; // End of lane, switch operand.
13350 Indices[l + i] = Idx + l;
13351 }
13352 }
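// Worked example (128-bit, ShiftVal == 4): Indices becomes {4..15, 16..19}.
// With the shuffle operands below ordered (Ops[1], Ops[0]), that is bytes 4-15
// of Ops[1] followed by bytes 0-3 of Ops[0], i.e. the concatenation shifted
// right by four bytes.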
13353
13354 return Builder.CreateShuffleVector(Ops[1], Ops[0],
13355 makeArrayRef(Indices, NumElts),
13356 "palignr");
13357 }
13358 case X86::BI__builtin_ia32_alignd128:
13359 case X86::BI__builtin_ia32_alignd256:
13360 case X86::BI__builtin_ia32_alignd512:
13361 case X86::BI__builtin_ia32_alignq128:
13362 case X86::BI__builtin_ia32_alignq256:
13363 case X86::BI__builtin_ia32_alignq512: {
13364 unsigned NumElts =
13365 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13366 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13367
13368 // Mask the shift amount to width of two vectors.
13369 ShiftVal &= (2 * NumElts) - 1;
13370
13371 int Indices[16];
13372 for (unsigned i = 0; i != NumElts; ++i)
13373 Indices[i] = i + ShiftVal;
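// E.g. for a 4-element valignd with ShiftVal == 1, Indices is {1, 2, 3, 4}:
// with operands ordered (Ops[1], Ops[0]) below, the result is Ops[1] elements
// 1-3 followed by Ops[0] element 0.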
13374
13375 return Builder.CreateShuffleVector(Ops[1], Ops[0],
13376 makeArrayRef(Indices, NumElts),
13377 "valign");
13378 }
13379 case X86::BI__builtin_ia32_shuf_f32x4_256:
13380 case X86::BI__builtin_ia32_shuf_f64x2_256:
13381 case X86::BI__builtin_ia32_shuf_i32x4_256:
13382 case X86::BI__builtin_ia32_shuf_i64x2_256:
13383 case X86::BI__builtin_ia32_shuf_f32x4:
13384 case X86::BI__builtin_ia32_shuf_f64x2:
13385 case X86::BI__builtin_ia32_shuf_i32x4:
13386 case X86::BI__builtin_ia32_shuf_i64x2: {
13387 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13388 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13389 unsigned NumElts = Ty->getNumElements();
13390 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
13391 unsigned NumLaneElts = NumElts / NumLanes;
13392
13393 int Indices[16];
13394 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13395 unsigned Index = (Imm % NumLanes) * NumLaneElts;
13396 Imm /= NumLanes; // Discard the bits we just used.
13397 if (l >= (NumElts / 2))
13398 Index += NumElts; // Switch to other source.
13399 for (unsigned i = 0; i != NumLaneElts; ++i) {
13400 Indices[l + i] = Index + i;
13401 }
13402 }
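// E.g. for the 256-bit f64x2 form (NumLanes == 2), Imm == 0b10 gives
// {Ops[0] lane 0, Ops[1] lane 1}: the low output lane always reads from
// Ops[0], the high output lane from Ops[1], and each Imm field picks which
// source lane is used.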
13403
13404 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13405 makeArrayRef(Indices, NumElts),
13406 "shuf");
13407 }
13408
13409 case X86::BI__builtin_ia32_vperm2f128_pd256:
13410 case X86::BI__builtin_ia32_vperm2f128_ps256:
13411 case X86::BI__builtin_ia32_vperm2f128_si256:
13412 case X86::BI__builtin_ia32_permti256: {
13413 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13414 unsigned NumElts =
13415 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13416
13417 // This takes a very simple approach since there are two lanes and a
13418 // shuffle can have 2 inputs. So we reserve the first input for the first
13419 // lane and the second input for the second lane. This may result in
13420 // duplicate sources, but this can be dealt with in the backend.
13421
13422 Value *OutOps[2];
13423 int Indices[8];
13424 for (unsigned l = 0; l != 2; ++l) {
13425 // Determine the source for this lane.
13426 if (Imm & (1 << ((l * 4) + 3)))
13427 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
13428 else if (Imm & (1 << ((l * 4) + 1)))
13429 OutOps[l] = Ops[1];
13430 else
13431 OutOps[l] = Ops[0];
13432
13433 for (unsigned i = 0; i != NumElts/2; ++i) {
13434 // Start with ith element of the source for this lane.
13435 unsigned Idx = (l * NumElts) + i;
13436 // If bit 0 of the immediate half is set, switch to the high half of
13437 // the source.
13438 if (Imm & (1 << (l * 4)))
13439 Idx += NumElts/2;
13440 Indices[(l * (NumElts/2)) + i] = Idx;
13441 }
13442 }
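// E.g. Imm == 0x21 selects the high half of Ops[0] for the low result lane
// and the low half of Ops[1] for the high result lane; a set bit 3 in either
// nibble would zero that lane instead.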
13443
13444 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
13445 makeArrayRef(Indices, NumElts),
13446 "vperm");
13447 }
13448
13449 case X86::BI__builtin_ia32_pslldqi128_byteshift:
13450 case X86::BI__builtin_ia32_pslldqi256_byteshift:
13451 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
13452 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13453 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13454 // Builtin type is vXi64 so multiply by 8 to get bytes.
13455 unsigned NumElts = ResultType->getNumElements() * 8;
13456
13457 // If pslldq is shifting the vector more than 15 bytes, emit zero.
13458 if (ShiftVal >= 16)
13459 return llvm::Constant::getNullValue(ResultType);
13460
13461 int Indices[64];
13462 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
13463 for (unsigned l = 0; l != NumElts; l += 16) {
13464 for (unsigned i = 0; i != 16; ++i) {
13465 unsigned Idx = NumElts + i - ShiftVal;
13466 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
13467 Indices[l + i] = Idx + l;
13468 }
13469 }
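// E.g. ShiftVal == 3 on a 128-bit vector gives Indices {13, 14, 15, 16..28}:
// with operands ordered (Zero, Cast) below, the low three result bytes come
// from the zero vector and the rest are source bytes 0-12, i.e. a byte shift
// left by three within the lane.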
13470
13471 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13472 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13473 Value *Zero = llvm::Constant::getNullValue(VecTy);
13474 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
13475 makeArrayRef(Indices, NumElts),
13476 "pslldq");
13477 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
13478 }
13479 case X86::BI__builtin_ia32_psrldqi128_byteshift:
13480 case X86::BI__builtin_ia32_psrldqi256_byteshift:
13481 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
13482 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13483 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13484 // Builtin type is vXi64 so multiply by 8 to get bytes.
13485 unsigned NumElts = ResultType->getNumElements() * 8;
13486
13487 // If psrldq is shifting the vector more than 15 bytes, emit zero.
13488 if (ShiftVal >= 16)
13489 return llvm::Constant::getNullValue(ResultType);
13490
13491 int Indices[64];
13492 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
13493 for (unsigned l = 0; l != NumElts; l += 16) {
13494 for (unsigned i = 0; i != 16; ++i) {
13495 unsigned Idx = i + ShiftVal;
13496 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
13497 Indices[l + i] = Idx + l;
13498 }
13499 }
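// Mirror of pslldq: e.g. ShiftVal == 3 gives Indices {3..15, 16, 17, 18}, so
// the high three result bytes of each lane are taken from the zero operand.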
13500
13501 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13502 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13503 Value *Zero = llvm::Constant::getNullValue(VecTy);
13504 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
13505 makeArrayRef(Indices, NumElts),
13506 "psrldq");
13507 return Builder.CreateBitCast(SV, ResultType, "cast");
13508 }
13509 case X86::BI__builtin_ia32_kshiftliqi:
13510 case X86::BI__builtin_ia32_kshiftlihi:
13511 case X86::BI__builtin_ia32_kshiftlisi:
13512 case X86::BI__builtin_ia32_kshiftlidi: {
13513 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13514 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13515
13516 if (ShiftVal >= NumElts)
13517 return llvm::Constant::getNullValue(Ops[0]->getType());
13518
13519 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13520
13521 int Indices[64];
13522 for (unsigned i = 0; i != NumElts; ++i)
13523 Indices[i] = NumElts + i - ShiftVal;
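// E.g. for an 8-bit mask with ShiftVal == 2, Indices is {6, 7, 8..13}; with
// operands (Zero, In) the low two result bits are zero and bit i (i >= 2) is
// In's bit i-2, i.e. the mask shifted left by two.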
13524
13525 Value *Zero = llvm::Constant::getNullValue(In->getType());
13526 Value *SV = Builder.CreateShuffleVector(Zero, In,
13527 makeArrayRef(Indices, NumElts),
13528 "kshiftl");
13529 return Builder.CreateBitCast(SV, Ops[0]->getType());
13530 }
13531 case X86::BI__builtin_ia32_kshiftriqi:
13532 case X86::BI__builtin_ia32_kshiftrihi:
13533 case X86::BI__builtin_ia32_kshiftrisi:
13534 case X86::BI__builtin_ia32_kshiftridi: {
13535 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13536 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13537
13538 if (ShiftVal >= NumElts)
13539 return llvm::Constant::getNullValue(Ops[0]->getType());
13540
13541 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13542
13543 int Indices[64];
13544 for (unsigned i = 0; i != NumElts; ++i)
13545 Indices[i] = i + ShiftVal;
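// Mirror of kshiftl: bit i of the result is In's bit i+ShiftVal, with zeros
// shifted in at the top from the Zero operand.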
13546
13547 Value *Zero = llvm::Constant::getNullValue(In->getType());
13548 Value *SV = Builder.CreateShuffleVector(In, Zero,
13549 makeArrayRef(Indices, NumElts),
13550 "kshiftr");
13551 return Builder.CreateBitCast(SV, Ops[0]->getType());
13552 }
13553 case X86::BI__builtin_ia32_movnti:
13554 case X86::BI__builtin_ia32_movnti64:
13555 case X86::BI__builtin_ia32_movntsd:
13556 case X86::BI__builtin_ia32_movntss: {
13557 llvm::MDNode *Node = llvm::MDNode::get(
13558 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
13559
13560 Value *Ptr = Ops[0];
13561 Value *Src = Ops[1];
13562
13563 // Extract the 0'th element of the source vector.
13564 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
13565 BuiltinID == X86::BI__builtin_ia32_movntss)
13566 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
13567
13568 // Convert the type of the pointer to a pointer to the stored type.
13569 Value *BC = Builder.CreateBitCast(
13570 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
13571
13572 // Unaligned nontemporal store of the scalar value.
13573 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
13574 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
13575 SI->setAlignment(llvm::Align(1));
13576 return SI;
13577 }
13578 // Rotate is a special case of funnel shift - 1st 2 args are the same.
13579 case X86::BI__builtin_ia32_vprotb:
13580 case X86::BI__builtin_ia32_vprotw:
13581 case X86::BI__builtin_ia32_vprotd:
13582 case X86::BI__builtin_ia32_vprotq:
13583 case X86::BI__builtin_ia32_vprotbi:
13584 case X86::BI__builtin_ia32_vprotwi:
13585 case X86::BI__builtin_ia32_vprotdi:
13586 case X86::BI__builtin_ia32_vprotqi:
13587 case X86::BI__builtin_ia32_prold128:
13588 case X86::BI__builtin_ia32_prold256:
13589 case X86::BI__builtin_ia32_prold512:
13590 case X86::BI__builtin_ia32_prolq128:
13591 case X86::BI__builtin_ia32_prolq256:
13592 case X86::BI__builtin_ia32_prolq512:
13593 case X86::BI__builtin_ia32_prolvd128:
13594 case X86::BI__builtin_ia32_prolvd256:
13595 case X86::BI__builtin_ia32_prolvd512:
13596 case X86::BI__builtin_ia32_prolvq128:
13597 case X86::BI__builtin_ia32_prolvq256:
13598 case X86::BI__builtin_ia32_prolvq512:
13599 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
13600 case X86::BI__builtin_ia32_prord128:
13601 case X86::BI__builtin_ia32_prord256:
13602 case X86::BI__builtin_ia32_prord512:
13603 case X86::BI__builtin_ia32_prorq128:
13604 case X86::BI__builtin_ia32_prorq256:
13605 case X86::BI__builtin_ia32_prorq512:
13606 case X86::BI__builtin_ia32_prorvd128:
13607 case X86::BI__builtin_ia32_prorvd256:
13608 case X86::BI__builtin_ia32_prorvd512:
13609 case X86::BI__builtin_ia32_prorvq128:
13610 case X86::BI__builtin_ia32_prorvq256:
13611 case X86::BI__builtin_ia32_prorvq512:
13612 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
13613 case X86::BI__builtin_ia32_selectb_128:
13614 case X86::BI__builtin_ia32_selectb_256:
13615 case X86::BI__builtin_ia32_selectb_512:
13616 case X86::BI__builtin_ia32_selectw_128:
13617 case X86::BI__builtin_ia32_selectw_256:
13618 case X86::BI__builtin_ia32_selectw_512:
13619 case X86::BI__builtin_ia32_selectd_128:
13620 case X86::BI__builtin_ia32_selectd_256:
13621 case X86::BI__builtin_ia32_selectd_512:
13622 case X86::BI__builtin_ia32_selectq_128:
13623 case X86::BI__builtin_ia32_selectq_256:
13624 case X86::BI__builtin_ia32_selectq_512:
13625 case X86::BI__builtin_ia32_selectps_128:
13626 case X86::BI__builtin_ia32_selectps_256:
13627 case X86::BI__builtin_ia32_selectps_512:
13628 case X86::BI__builtin_ia32_selectpd_128:
13629 case X86::BI__builtin_ia32_selectpd_256:
13630 case X86::BI__builtin_ia32_selectpd_512:
13631 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13632 case X86::BI__builtin_ia32_selectss_128:
13633 case X86::BI__builtin_ia32_selectsd_128: {
13634 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13635 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13636 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13637 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13638 }
13639 case X86::BI__builtin_ia32_cmpb128_mask:
13640 case X86::BI__builtin_ia32_cmpb256_mask:
13641 case X86::BI__builtin_ia32_cmpb512_mask:
13642 case X86::BI__builtin_ia32_cmpw128_mask:
13643 case X86::BI__builtin_ia32_cmpw256_mask:
13644 case X86::BI__builtin_ia32_cmpw512_mask:
13645 case X86::BI__builtin_ia32_cmpd128_mask:
13646 case X86::BI__builtin_ia32_cmpd256_mask:
13647 case X86::BI__builtin_ia32_cmpd512_mask:
13648 case X86::BI__builtin_ia32_cmpq128_mask:
13649 case X86::BI__builtin_ia32_cmpq256_mask:
13650 case X86::BI__builtin_ia32_cmpq512_mask: {
13651 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13652 return EmitX86MaskedCompare(*this, CC, true, Ops);
13653 }
13654 case X86::BI__builtin_ia32_ucmpb128_mask:
13655 case X86::BI__builtin_ia32_ucmpb256_mask:
13656 case X86::BI__builtin_ia32_ucmpb512_mask:
13657 case X86::BI__builtin_ia32_ucmpw128_mask:
13658 case X86::BI__builtin_ia32_ucmpw256_mask:
13659 case X86::BI__builtin_ia32_ucmpw512_mask:
13660 case X86::BI__builtin_ia32_ucmpd128_mask:
13661 case X86::BI__builtin_ia32_ucmpd256_mask:
13662 case X86::BI__builtin_ia32_ucmpd512_mask:
13663 case X86::BI__builtin_ia32_ucmpq128_mask:
13664 case X86::BI__builtin_ia32_ucmpq256_mask:
13665 case X86::BI__builtin_ia32_ucmpq512_mask: {
13666 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13667 return EmitX86MaskedCompare(*this, CC, false, Ops);
13668 }
13669 case X86::BI__builtin_ia32_vpcomb:
13670 case X86::BI__builtin_ia32_vpcomw:
13671 case X86::BI__builtin_ia32_vpcomd:
13672 case X86::BI__builtin_ia32_vpcomq:
13673 return EmitX86vpcom(*this, Ops, true);
13674 case X86::BI__builtin_ia32_vpcomub:
13675 case X86::BI__builtin_ia32_vpcomuw:
13676 case X86::BI__builtin_ia32_vpcomud:
13677 case X86::BI__builtin_ia32_vpcomuq:
13678 return EmitX86vpcom(*this, Ops, false);
13679
13680 case X86::BI__builtin_ia32_kortestcqi:
13681 case X86::BI__builtin_ia32_kortestchi:
13682 case X86::BI__builtin_ia32_kortestcsi:
13683 case X86::BI__builtin_ia32_kortestcdi: {
13684 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13685 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13686 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13687 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13688 }
13689 case X86::BI__builtin_ia32_kortestzqi:
13690 case X86::BI__builtin_ia32_kortestzhi:
13691 case X86::BI__builtin_ia32_kortestzsi:
13692 case X86::BI__builtin_ia32_kortestzdi: {
13693 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13694 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13695 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13696 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13697 }
13698
13699 case X86::BI__builtin_ia32_ktestcqi:
13700 case X86::BI__builtin_ia32_ktestzqi:
13701 case X86::BI__builtin_ia32_ktestchi:
13702 case X86::BI__builtin_ia32_ktestzhi:
13703 case X86::BI__builtin_ia32_ktestcsi:
13704 case X86::BI__builtin_ia32_ktestzsi:
13705 case X86::BI__builtin_ia32_ktestcdi:
13706 case X86::BI__builtin_ia32_ktestzdi: {
13707 Intrinsic::ID IID;
13708 switch (BuiltinID) {
13709 default: llvm_unreachable("Unsupported intrinsic!");
13710 case X86::BI__builtin_ia32_ktestcqi:
13711 IID = Intrinsic::x86_avx512_ktestc_b;
13712 break;
13713 case X86::BI__builtin_ia32_ktestzqi:
13714 IID = Intrinsic::x86_avx512_ktestz_b;
13715 break;
13716 case X86::BI__builtin_ia32_ktestchi:
13717 IID = Intrinsic::x86_avx512_ktestc_w;
13718 break;
13719 case X86::BI__builtin_ia32_ktestzhi:
13720 IID = Intrinsic::x86_avx512_ktestz_w;
13721 break;
13722 case X86::BI__builtin_ia32_ktestcsi:
13723 IID = Intrinsic::x86_avx512_ktestc_d;
13724 break;
13725 case X86::BI__builtin_ia32_ktestzsi:
13726 IID = Intrinsic::x86_avx512_ktestz_d;
13727 break;
13728 case X86::BI__builtin_ia32_ktestcdi:
13729 IID = Intrinsic::x86_avx512_ktestc_q;
13730 break;
13731 case X86::BI__builtin_ia32_ktestzdi:
13732 IID = Intrinsic::x86_avx512_ktestz_q;
13733 break;
13734 }
13735
13736 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13737 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13738 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13739 Function *Intr = CGM.getIntrinsic(IID);
13740 return Builder.CreateCall(Intr, {LHS, RHS});
13741 }
13742
13743 case X86::BI__builtin_ia32_kaddqi:
13744 case X86::BI__builtin_ia32_kaddhi:
13745 case X86::BI__builtin_ia32_kaddsi:
13746 case X86::BI__builtin_ia32_kadddi: {
13747 Intrinsic::ID IID;
13748 switch (BuiltinID) {
13749 default: llvm_unreachable("Unsupported intrinsic!");
13750 case X86::BI__builtin_ia32_kaddqi:
13751 IID = Intrinsic::x86_avx512_kadd_b;
13752 break;
13753 case X86::BI__builtin_ia32_kaddhi:
13754 IID = Intrinsic::x86_avx512_kadd_w;
13755 break;
13756 case X86::BI__builtin_ia32_kaddsi:
13757 IID = Intrinsic::x86_avx512_kadd_d;
13758 break;
13759 case X86::BI__builtin_ia32_kadddi:
13760 IID = Intrinsic::x86_avx512_kadd_q;
13761 break;
13762 }
13763
13764 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13765 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13766 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13767 Function *Intr = CGM.getIntrinsic(IID);
13768 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
13769 return Builder.CreateBitCast(Res, Ops[0]->getType());
13770 }
13771 case X86::BI__builtin_ia32_kandqi:
13772 case X86::BI__builtin_ia32_kandhi:
13773 case X86::BI__builtin_ia32_kandsi:
13774 case X86::BI__builtin_ia32_kanddi:
13775 return EmitX86MaskLogic(*this, Instruction::And, Ops);
13776 case X86::BI__builtin_ia32_kandnqi:
13777 case X86::BI__builtin_ia32_kandnhi:
13778 case X86::BI__builtin_ia32_kandnsi:
13779 case X86::BI__builtin_ia32_kandndi:
13780 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
13781 case X86::BI__builtin_ia32_korqi:
13782 case X86::BI__builtin_ia32_korhi:
13783 case X86::BI__builtin_ia32_korsi:
13784 case X86::BI__builtin_ia32_kordi:
13785 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
13786 case X86::BI__builtin_ia32_kxnorqi:
13787 case X86::BI__builtin_ia32_kxnorhi:
13788 case X86::BI__builtin_ia32_kxnorsi:
13789 case X86::BI__builtin_ia32_kxnordi:
13790 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
13791 case X86::BI__builtin_ia32_kxorqi:
13792 case X86::BI__builtin_ia32_kxorhi:
13793 case X86::BI__builtin_ia32_kxorsi:
13794 case X86::BI__builtin_ia32_kxordi:
13795 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
13796 case X86::BI__builtin_ia32_knotqi:
13797 case X86::BI__builtin_ia32_knothi:
13798 case X86::BI__builtin_ia32_knotsi:
13799 case X86::BI__builtin_ia32_knotdi: {
13800 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13801 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13802 return Builder.CreateBitCast(Builder.CreateNot(Res),
13803 Ops[0]->getType());
13804 }
13805 case X86::BI__builtin_ia32_kmovb:
13806 case X86::BI__builtin_ia32_kmovw:
13807 case X86::BI__builtin_ia32_kmovd:
13808 case X86::BI__builtin_ia32_kmovq: {
13809 // Bitcast to vXi1 type and then back to integer. This gets the mask
13810 // register type into the IR, but might be optimized out depending on
13811 // what's around it.
13812 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13813 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13814 return Builder.CreateBitCast(Res, Ops[0]->getType());
13815 }
13816
13817 case X86::BI__builtin_ia32_kunpckdi:
13818 case X86::BI__builtin_ia32_kunpcksi:
13819 case X86::BI__builtin_ia32_kunpckhi: {
13820 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13821 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13822 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13823 int Indices[64];
13824 for (unsigned i = 0; i != NumElts; ++i)
13825 Indices[i] = i;
13826
13827 // First extract the low half of each vector. This gives better codegen
13828 // than doing it in a single shuffle.
13829 LHS = Builder.CreateShuffleVector(LHS, LHS,
13830 makeArrayRef(Indices, NumElts / 2));
13831 RHS = Builder.CreateShuffleVector(RHS, RHS,
13832 makeArrayRef(Indices, NumElts / 2));
13833 // Concat the vectors.
13834 // NOTE: Operands are swapped to match the intrinsic definition.
13835 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
13836 makeArrayRef(Indices, NumElts));
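// E.g. for the 16-bit form, the low 8 bits of Ops[1] end up in bits 0-7 of
// the result and the low 8 bits of Ops[0] in bits 8-15, matching the kunpck
// semantics of concatenating the two half-width masks.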
13837 return Builder.CreateBitCast(Res, Ops[0]->getType());
13838 }
13839
13840 case X86::BI__builtin_ia32_vplzcntd_128:
13841 case X86::BI__builtin_ia32_vplzcntd_256:
13842 case X86::BI__builtin_ia32_vplzcntd_512:
13843 case X86::BI__builtin_ia32_vplzcntq_128:
13844 case X86::BI__builtin_ia32_vplzcntq_256:
13845 case X86::BI__builtin_ia32_vplzcntq_512: {
13846 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13847 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
13848 }
13849 case X86::BI__builtin_ia32_sqrtss:
13850 case X86::BI__builtin_ia32_sqrtsd: {
13851 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13852 Function *F;
13853 if (Builder.getIsFPConstrained()) {
13854 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
13855 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13856 A->getType());
13857 A = Builder.CreateConstrainedFPCall(F, {A});
13858 } else {
13859 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13860 A = Builder.CreateCall(F, {A});
13861 }
13862 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13863 }
13864 case X86::BI__builtin_ia32_sqrtsd_round_mask:
13865 case X86::BI__builtin_ia32_sqrtss_round_mask: {
13866 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13867 // Only lower to a generic sqrt if the rounding mode is 4 (AKA
13868 // CUR_DIRECTION); otherwise keep the target-specific intrinsic.
13869 if (CC != 4) {
13870 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
13871 Intrinsic::x86_avx512_mask_sqrt_sd :
13872 Intrinsic::x86_avx512_mask_sqrt_ss;
13873 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13874 }
13875 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13876 Function *F;
13877 if (Builder.getIsFPConstrained()) {
13878 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
13879 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13880 A->getType());
13881 A = Builder.CreateConstrainedFPCall(F, A);
13882 } else {
13883 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13884 A = Builder.CreateCall(F, A);
13885 }
13886 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13887 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
13888 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13889 }
13890 case X86::BI__builtin_ia32_sqrtpd256:
13891 case X86::BI__builtin_ia32_sqrtpd:
13892 case X86::BI__builtin_ia32_sqrtps256:
13893 case X86::BI__builtin_ia32_sqrtps:
13894 case X86::BI__builtin_ia32_sqrtps512:
13895 case X86::BI__builtin_ia32_sqrtpd512: {
13896 if (Ops.size() == 2) {
13897 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13898 // Only lower to a generic sqrt if the rounding mode is 4 (AKA
13899 // CUR_DIRECTION); otherwise keep the target-specific intrinsic.
13900 if (CC != 4) {
13901 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
13902 Intrinsic::x86_avx512_sqrt_ps_512 :
13903 Intrinsic::x86_avx512_sqrt_pd_512;
13904 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13905 }
13906 }
13907 if (Builder.getIsFPConstrained()) {
13908 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
13909 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13910 Ops[0]->getType());
13911 return Builder.CreateConstrainedFPCall(F, Ops[0]);
13912 } else {
13913 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
13914 return Builder.CreateCall(F, Ops[0]);
13915 }
13916 }
13917 case X86::BI__builtin_ia32_pabsb128:
13918 case X86::BI__builtin_ia32_pabsw128:
13919 case X86::BI__builtin_ia32_pabsd128:
13920 case X86::BI__builtin_ia32_pabsb256:
13921 case X86::BI__builtin_ia32_pabsw256:
13922 case X86::BI__builtin_ia32_pabsd256:
13923 case X86::BI__builtin_ia32_pabsq128:
13924 case X86::BI__builtin_ia32_pabsq256:
13925 case X86::BI__builtin_ia32_pabsb512:
13926 case X86::BI__builtin_ia32_pabsw512:
13927 case X86::BI__builtin_ia32_pabsd512:
13928 case X86::BI__builtin_ia32_pabsq512: {
13929 Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
13930 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13931 }
13932 case X86::BI__builtin_ia32_pmaxsb128:
13933 case X86::BI__builtin_ia32_pmaxsw128:
13934 case X86::BI__builtin_ia32_pmaxsd128:
13935 case X86::BI__builtin_ia32_pmaxsq128:
13936 case X86::BI__builtin_ia32_pmaxsb256:
13937 case X86::BI__builtin_ia32_pmaxsw256:
13938 case X86::BI__builtin_ia32_pmaxsd256:
13939 case X86::BI__builtin_ia32_pmaxsq256:
13940 case X86::BI__builtin_ia32_pmaxsb512:
13941 case X86::BI__builtin_ia32_pmaxsw512:
13942 case X86::BI__builtin_ia32_pmaxsd512:
13943 case X86::BI__builtin_ia32_pmaxsq512:
13944 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
13945 case X86::BI__builtin_ia32_pmaxub128:
13946 case X86::BI__builtin_ia32_pmaxuw128:
13947 case X86::BI__builtin_ia32_pmaxud128:
13948 case X86::BI__builtin_ia32_pmaxuq128:
13949 case X86::BI__builtin_ia32_pmaxub256:
13950 case X86::BI__builtin_ia32_pmaxuw256:
13951 case X86::BI__builtin_ia32_pmaxud256:
13952 case X86::BI__builtin_ia32_pmaxuq256:
13953 case X86::BI__builtin_ia32_pmaxub512:
13954 case X86::BI__builtin_ia32_pmaxuw512:
13955 case X86::BI__builtin_ia32_pmaxud512:
13956 case X86::BI__builtin_ia32_pmaxuq512:
13957 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
13958 case X86::BI__builtin_ia32_pminsb128:
13959 case X86::BI__builtin_ia32_pminsw128:
13960 case X86::BI__builtin_ia32_pminsd128:
13961 case X86::BI__builtin_ia32_pminsq128:
13962 case X86::BI__builtin_ia32_pminsb256:
13963 case X86::BI__builtin_ia32_pminsw256:
13964 case X86::BI__builtin_ia32_pminsd256:
13965 case X86::BI__builtin_ia32_pminsq256:
13966 case X86::BI__builtin_ia32_pminsb512:
13967 case X86::BI__builtin_ia32_pminsw512:
13968 case X86::BI__builtin_ia32_pminsd512:
13969 case X86::BI__builtin_ia32_pminsq512:
13970 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
13971 case X86::BI__builtin_ia32_pminub128:
13972 case X86::BI__builtin_ia32_pminuw128:
13973 case X86::BI__builtin_ia32_pminud128:
13974 case X86::BI__builtin_ia32_pminuq128:
13975 case X86::BI__builtin_ia32_pminub256:
13976 case X86::BI__builtin_ia32_pminuw256:
13977 case X86::BI__builtin_ia32_pminud256:
13978 case X86::BI__builtin_ia32_pminuq256:
13979 case X86::BI__builtin_ia32_pminub512:
13980 case X86::BI__builtin_ia32_pminuw512:
13981 case X86::BI__builtin_ia32_pminud512:
13982 case X86::BI__builtin_ia32_pminuq512:
13983 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
13984
13985 case X86::BI__builtin_ia32_pmuludq128:
13986 case X86::BI__builtin_ia32_pmuludq256:
13987 case X86::BI__builtin_ia32_pmuludq512:
13988 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
13989
13990 case X86::BI__builtin_ia32_pmuldq128:
13991 case X86::BI__builtin_ia32_pmuldq256:
13992 case X86::BI__builtin_ia32_pmuldq512:
13993 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
13994
13995 case X86::BI__builtin_ia32_pternlogd512_mask:
13996 case X86::BI__builtin_ia32_pternlogq512_mask:
13997 case X86::BI__builtin_ia32_pternlogd128_mask:
13998 case X86::BI__builtin_ia32_pternlogd256_mask:
13999 case X86::BI__builtin_ia32_pternlogq128_mask:
14000 case X86::BI__builtin_ia32_pternlogq256_mask:
14001 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
14002
14003 case X86::BI__builtin_ia32_pternlogd512_maskz:
14004 case X86::BI__builtin_ia32_pternlogq512_maskz:
14005 case X86::BI__builtin_ia32_pternlogd128_maskz:
14006 case X86::BI__builtin_ia32_pternlogd256_maskz:
14007 case X86::BI__builtin_ia32_pternlogq128_maskz:
14008 case X86::BI__builtin_ia32_pternlogq256_maskz:
14009 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
14010
14011 case X86::BI__builtin_ia32_vpshldd128:
14012 case X86::BI__builtin_ia32_vpshldd256:
14013 case X86::BI__builtin_ia32_vpshldd512:
14014 case X86::BI__builtin_ia32_vpshldq128:
14015 case X86::BI__builtin_ia32_vpshldq256:
14016 case X86::BI__builtin_ia32_vpshldq512:
14017 case X86::BI__builtin_ia32_vpshldw128:
14018 case X86::BI__builtin_ia32_vpshldw256:
14019 case X86::BI__builtin_ia32_vpshldw512:
14020 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
14021
14022 case X86::BI__builtin_ia32_vpshrdd128:
14023 case X86::BI__builtin_ia32_vpshrdd256:
14024 case X86::BI__builtin_ia32_vpshrdd512:
14025 case X86::BI__builtin_ia32_vpshrdq128:
14026 case X86::BI__builtin_ia32_vpshrdq256:
14027 case X86::BI__builtin_ia32_vpshrdq512:
14028 case X86::BI__builtin_ia32_vpshrdw128:
14029 case X86::BI__builtin_ia32_vpshrdw256:
14030 case X86::BI__builtin_ia32_vpshrdw512:
14031 // Ops 0 and 1 are swapped.
14032 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
14033
14034 case X86::BI__builtin_ia32_vpshldvd128:
14035 case X86::BI__builtin_ia32_vpshldvd256:
14036 case X86::BI__builtin_ia32_vpshldvd512:
14037 case X86::BI__builtin_ia32_vpshldvq128:
14038 case X86::BI__builtin_ia32_vpshldvq256:
14039 case X86::BI__builtin_ia32_vpshldvq512:
14040 case X86::BI__builtin_ia32_vpshldvw128:
14041 case X86::BI__builtin_ia32_vpshldvw256:
14042 case X86::BI__builtin_ia32_vpshldvw512:
14043 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
14044
14045 case X86::BI__builtin_ia32_vpshrdvd128:
14046 case X86::BI__builtin_ia32_vpshrdvd256:
14047 case X86::BI__builtin_ia32_vpshrdvd512:
14048 case X86::BI__builtin_ia32_vpshrdvq128:
14049 case X86::BI__builtin_ia32_vpshrdvq256:
14050 case X86::BI__builtin_ia32_vpshrdvq512:
14051 case X86::BI__builtin_ia32_vpshrdvw128:
14052 case X86::BI__builtin_ia32_vpshrdvw256:
14053 case X86::BI__builtin_ia32_vpshrdvw512:
14054 // Ops 0 and 1 are swapped.
14055 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
14056
14057 // Reductions
14058 case X86::BI__builtin_ia32_reduce_add_d512:
14059 case X86::BI__builtin_ia32_reduce_add_q512: {
14060 Function *F =
14061 CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
14062 return Builder.CreateCall(F, {Ops[0]});
14063 }
14064 case X86::BI__builtin_ia32_reduce_and_d512:
14065 case X86::BI__builtin_ia32_reduce_and_q512: {
14066 Function *F =
14067 CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
14068 return Builder.CreateCall(F, {Ops[0]});
14069 }
14070 case X86::BI__builtin_ia32_reduce_fadd_pd512:
14071 case X86::BI__builtin_ia32_reduce_fadd_ps512: {
14072 Function *F =
14073 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
14074 Builder.getFastMathFlags().setAllowReassoc();
14075 return Builder.CreateCall(F, {Ops[0], Ops[1]});
14076 }
14077 case X86::BI__builtin_ia32_reduce_fmul_pd512:
14078 case X86::BI__builtin_ia32_reduce_fmul_ps512: {
14079 Function *F =
14080 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
14081 Builder.getFastMathFlags().setAllowReassoc();
14082 return Builder.CreateCall(F, {Ops[0], Ops[1]});
14083 }
14084 case X86::BI__builtin_ia32_reduce_fmax_pd512:
14085 case X86::BI__builtin_ia32_reduce_fmax_ps512: {
14086 Function *F =
14087 CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
14088 Builder.getFastMathFlags().setNoNaNs();
14089 return Builder.CreateCall(F, {Ops[0]});
14090 }
14091 case X86::BI__builtin_ia32_reduce_fmin_pd512:
14092 case X86::BI__builtin_ia32_reduce_fmin_ps512: {
14093 Function *F =
14094 CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
14095 Builder.getFastMathFlags().setNoNaNs();
14096 return Builder.CreateCall(F, {Ops[0]});
14097 }
14098 case X86::BI__builtin_ia32_reduce_mul_d512:
14099 case X86::BI__builtin_ia32_reduce_mul_q512: {
14100 Function *F =
14101 CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
14102 return Builder.CreateCall(F, {Ops[0]});
14103 }
14104 case X86::BI__builtin_ia32_reduce_or_d512:
14105 case X86::BI__builtin_ia32_reduce_or_q512: {
14106 Function *F =
14107 CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
14108 return Builder.CreateCall(F, {Ops[0]});
14109 }
14110 case X86::BI__builtin_ia32_reduce_smax_d512:
14111 case X86::BI__builtin_ia32_reduce_smax_q512: {
14112 Function *F =
14113 CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
14114 return Builder.CreateCall(F, {Ops[0]});
14115 }
14116 case X86::BI__builtin_ia32_reduce_smin_d512:
14117 case X86::BI__builtin_ia32_reduce_smin_q512: {
14118 Function *F =
14119 CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
14120 return Builder.CreateCall(F, {Ops[0]});
14121 }
14122 case X86::BI__builtin_ia32_reduce_umax_d512:
14123 case X86::BI__builtin_ia32_reduce_umax_q512: {
14124 Function *F =
14125 CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
14126 return Builder.CreateCall(F, {Ops[0]});
14127 }
14128 case X86::BI__builtin_ia32_reduce_umin_d512:
14129 case X86::BI__builtin_ia32_reduce_umin_q512: {
14130 Function *F =
14131 CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
14132 return Builder.CreateCall(F, {Ops[0]});
14133 }
14134
14135 // 3DNow!
14136 case X86::BI__builtin_ia32_pswapdsf:
14137 case X86::BI__builtin_ia32_pswapdsi: {
14138 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
14139 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
14140 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
14141 return Builder.CreateCall(F, Ops, "pswapd");
14142 }
14143 case X86::BI__builtin_ia32_rdrand16_step:
14144 case X86::BI__builtin_ia32_rdrand32_step:
14145 case X86::BI__builtin_ia32_rdrand64_step:
14146 case X86::BI__builtin_ia32_rdseed16_step:
14147 case X86::BI__builtin_ia32_rdseed32_step:
14148 case X86::BI__builtin_ia32_rdseed64_step: {
14149 Intrinsic::ID ID;
14150 switch (BuiltinID) {
14151 default: llvm_unreachable("Unsupported intrinsic!");
14152 case X86::BI__builtin_ia32_rdrand16_step:
14153 ID = Intrinsic::x86_rdrand_16;
14154 break;
14155 case X86::BI__builtin_ia32_rdrand32_step:
14156 ID = Intrinsic::x86_rdrand_32;
14157 break;
14158 case X86::BI__builtin_ia32_rdrand64_step:
14159 ID = Intrinsic::x86_rdrand_64;
14160 break;
14161 case X86::BI__builtin_ia32_rdseed16_step:
14162 ID = Intrinsic::x86_rdseed_16;
14163 break;
14164 case X86::BI__builtin_ia32_rdseed32_step:
14165 ID = Intrinsic::x86_rdseed_32;
14166 break;
14167 case X86::BI__builtin_ia32_rdseed64_step:
14168 ID = Intrinsic::x86_rdseed_64;
14169 break;
14170 }
14171
14172 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
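// The rdrand/rdseed step intrinsics return a {value, success-flag} pair: the
// generated value is stored through the pointer in Ops[0] and the flag (1 on
// success) becomes the builtin's return value.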
14173 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
14174 Ops[0]);
14175 return Builder.CreateExtractValue(Call, 1);
14176 }
14177 case X86::BI__builtin_ia32_addcarryx_u32:
14178 case X86::BI__builtin_ia32_addcarryx_u64:
14179 case X86::BI__builtin_ia32_subborrow_u32:
14180 case X86::BI__builtin_ia32_subborrow_u64: {
14181 Intrinsic::ID IID;
14182 switch (BuiltinID) {
14183 default: llvm_unreachable("Unsupported intrinsic!");
14184 case X86::BI__builtin_ia32_addcarryx_u32:
14185 IID = Intrinsic::x86_addcarry_32;
14186 break;
14187 case X86::BI__builtin_ia32_addcarryx_u64:
14188 IID = Intrinsic::x86_addcarry_64;
14189 break;
14190 case X86::BI__builtin_ia32_subborrow_u32:
14191 IID = Intrinsic::x86_subborrow_32;
14192 break;
14193 case X86::BI__builtin_ia32_subborrow_u64:
14194 IID = Intrinsic::x86_subborrow_64;
14195 break;
14196 }
14197
14198 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
14199 { Ops[0], Ops[1], Ops[2] });
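// The addcarry/subborrow intrinsics return a {carry-out, result} pair:
// element 1 (the wide sum/difference) is stored through the out pointer in
// Ops[3] below, and element 0 (the carry/borrow flag) is the builtin's
// return value.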
14200 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14201 Ops[3]);
14202 return Builder.CreateExtractValue(Call, 0);
14203 }
14204
14205 case X86::BI__builtin_ia32_fpclassps128_mask:
14206 case X86::BI__builtin_ia32_fpclassps256_mask:
14207 case X86::BI__builtin_ia32_fpclassps512_mask:
14208 case X86::BI__builtin_ia32_fpclasspd128_mask:
14209 case X86::BI__builtin_ia32_fpclasspd256_mask:
14210 case X86::BI__builtin_ia32_fpclasspd512_mask: {
14211 unsigned NumElts =
14212 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14213 Value *MaskIn = Ops[2];
14214 Ops.erase(&Ops[2]);
14215
14216 Intrinsic::ID ID;
14217 switch (BuiltinID) {
14218 default: llvm_unreachable("Unsupported intrinsic!");
14219 case X86::BI__builtin_ia32_fpclassps128_mask:
14220 ID = Intrinsic::x86_avx512_fpclass_ps_128;
14221 break;
14222 case X86::BI__builtin_ia32_fpclassps256_mask:
14223 ID = Intrinsic::x86_avx512_fpclass_ps_256;
14224 break;
14225 case X86::BI__builtin_ia32_fpclassps512_mask:
14226 ID = Intrinsic::x86_avx512_fpclass_ps_512;
14227 break;
14228 case X86::BI__builtin_ia32_fpclasspd128_mask:
14229 ID = Intrinsic::x86_avx512_fpclass_pd_128;
14230 break;
14231 case X86::BI__builtin_ia32_fpclasspd256_mask:
14232 ID = Intrinsic::x86_avx512_fpclass_pd_256;
14233 break;
14234 case X86::BI__builtin_ia32_fpclasspd512_mask:
14235 ID = Intrinsic::x86_avx512_fpclass_pd_512;
14236 break;
14237 }
14238
14239 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14240 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
14241 }
14242
14243 case X86::BI__builtin_ia32_vp2intersect_q_512:
14244 case X86::BI__builtin_ia32_vp2intersect_q_256:
14245 case X86::BI__builtin_ia32_vp2intersect_q_128:
14246 case X86::BI__builtin_ia32_vp2intersect_d_512:
14247 case X86::BI__builtin_ia32_vp2intersect_d_256:
14248 case X86::BI__builtin_ia32_vp2intersect_d_128: {
14249 unsigned NumElts =
14250 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14251 Intrinsic::ID ID;
14252
14253 switch (BuiltinID) {
14254 default: llvm_unreachable("Unsupported intrinsic!");
14255 case X86::BI__builtin_ia32_vp2intersect_q_512:
14256 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
14257 break;
14258 case X86::BI__builtin_ia32_vp2intersect_q_256:
14259 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
14260 break;
14261 case X86::BI__builtin_ia32_vp2intersect_q_128:
14262 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
14263 break;
14264 case X86::BI__builtin_ia32_vp2intersect_d_512:
14265 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
14266 break;
14267 case X86::BI__builtin_ia32_vp2intersect_d_256:
14268 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
14269 break;
14270 case X86::BI__builtin_ia32_vp2intersect_d_128:
14271 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
14272 break;
14273 }
14274
14275 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
14276 Value *Result = Builder.CreateExtractValue(Call, 0);
14277 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
14278 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
14279
14280 Result = Builder.CreateExtractValue(Call, 1);
14281 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
14282 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
14283 }
14284
14285 case X86::BI__builtin_ia32_vpmultishiftqb128:
14286 case X86::BI__builtin_ia32_vpmultishiftqb256:
14287 case X86::BI__builtin_ia32_vpmultishiftqb512: {
14288 Intrinsic::ID ID;
14289 switch (BuiltinID) {
14290 default: llvm_unreachable("Unsupported intrinsic!");
14291 case X86::BI__builtin_ia32_vpmultishiftqb128:
14292 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
14293 break;
14294 case X86::BI__builtin_ia32_vpmultishiftqb256:
14295 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
14296 break;
14297 case X86::BI__builtin_ia32_vpmultishiftqb512:
14298 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
14299 break;
14300 }
14301
14302 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14303 }
14304
14305 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14306 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14307 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
14308 unsigned NumElts =
14309 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14310 Value *MaskIn = Ops[2];
14311 Ops.erase(&Ops[2]);
14312
14313 Intrinsic::ID ID;
14314 switch (BuiltinID) {
14315 default: llvm_unreachable("Unsupported intrinsic!");
14316 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14317 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
14318 break;
14319 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14320 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
14321 break;
14322 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
14323 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
14324 break;
14325 }
14326
14327 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14328 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
14329 }
14330
14331 // packed comparison intrinsics
14332 case X86::BI__builtin_ia32_cmpeqps:
14333 case X86::BI__builtin_ia32_cmpeqpd:
14334 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
14335 case X86::BI__builtin_ia32_cmpltps:
14336 case X86::BI__builtin_ia32_cmpltpd:
14337 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
14338 case X86::BI__builtin_ia32_cmpleps:
14339 case X86::BI__builtin_ia32_cmplepd:
14340 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
14341 case X86::BI__builtin_ia32_cmpunordps:
14342 case X86::BI__builtin_ia32_cmpunordpd:
14343 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
14344 case X86::BI__builtin_ia32_cmpneqps:
14345 case X86::BI__builtin_ia32_cmpneqpd:
14346 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
14347 case X86::BI__builtin_ia32_cmpnltps:
14348 case X86::BI__builtin_ia32_cmpnltpd:
14349 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
14350 case X86::BI__builtin_ia32_cmpnleps:
14351 case X86::BI__builtin_ia32_cmpnlepd:
14352 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
14353 case X86::BI__builtin_ia32_cmpordps:
14354 case X86::BI__builtin_ia32_cmpordpd:
14355 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
14356 case X86::BI__builtin_ia32_cmpps128_mask:
14357 case X86::BI__builtin_ia32_cmpps256_mask:
14358 case X86::BI__builtin_ia32_cmpps512_mask:
14359 case X86::BI__builtin_ia32_cmppd128_mask:
14360 case X86::BI__builtin_ia32_cmppd256_mask:
14361 case X86::BI__builtin_ia32_cmppd512_mask:
14362 IsMaskFCmp = true;
14363 LLVM_FALLTHROUGH;
14364 case X86::BI__builtin_ia32_cmpps:
14365 case X86::BI__builtin_ia32_cmpps256:
14366 case X86::BI__builtin_ia32_cmppd:
14367 case X86::BI__builtin_ia32_cmppd256: {
14368 // Lower vector comparisons to fcmp instructions, ignoring the requested
14369 // signalling behaviour and the requested rounding mode.
14370 // This is only possible if the fp-model is not strict and FENV_ACCESS is
14371 // off.
14372
14373 // The third argument is the comparison condition, an integer in the
14374 // range [0, 31].
14375 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
14376
14377 // Lower to an IR fcmp instruction, ignoring the requested signaling
14378 // behaviour: e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to
14379 // FCMP_OGT.
14380 FCmpInst::Predicate Pred;
14381 bool IsSignaling;
14382 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
14383 // behavior is inverted. We'll handle that after the switch.
14384 switch (CC & 0xf) {
14385 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
14386 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
14387 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
14388 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
14389 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
14390 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
14391 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
14392 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
14393 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
14394 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
14395 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
14396 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
14397 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
14398 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
14399 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
14400 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
14401 default: llvm_unreachable("Unhandled CC");
14402 }
14403
14404 // Invert the signalling behavior for 16-31.
14405 if (CC & 0x10)
14406 IsSignaling = !IsSignaling;
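// E.g. CC == 0x01 (_CMP_LT_OS) and CC == 0x11 (_CMP_LT_OQ) both map to
// FCMP_OLT; only the signalling flag computed above differs between them.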
14407
14408 // If the predicate is true or false and we're using constrained intrinsics,
14409 // we don't have a compare intrinsic we can use. Just use the legacy X86
14410 // specific intrinsic.
14411 // If the intrinsic is mask enabled and we're using constrained intrinsics,
14412 // use the legacy X86 specific intrinsic.
14413 if (Builder.getIsFPConstrained() &&
14414 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
14415 IsMaskFCmp)) {
14416
14417 Intrinsic::ID IID;
14418 switch (BuiltinID) {
14419 default: llvm_unreachable("Unexpected builtin");
14420 case X86::BI__builtin_ia32_cmpps:
14421 IID = Intrinsic::x86_sse_cmp_ps;
14422 break;
14423 case X86::BI__builtin_ia32_cmpps256:
14424 IID = Intrinsic::x86_avx_cmp_ps_256;
14425 break;
14426 case X86::BI__builtin_ia32_cmppd:
14427 IID = Intrinsic::x86_sse2_cmp_pd;
14428 break;
14429 case X86::BI__builtin_ia32_cmppd256:
14430 IID = Intrinsic::x86_avx_cmp_pd_256;
14431 break;
14432 case X86::BI__builtin_ia32_cmpps512_mask:
14433 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
14434 break;
14435 case X86::BI__builtin_ia32_cmppd512_mask:
14436 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
14437 break;
14438 case X86::BI__builtin_ia32_cmpps128_mask:
14439 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
14440 break;
14441 case X86::BI__builtin_ia32_cmpps256_mask:
14442 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
14443 break;
14444 case X86::BI__builtin_ia32_cmppd128_mask:
14445 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
14446 break;
14447 case X86::BI__builtin_ia32_cmppd256_mask:
14448 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
14449 break;
14450 }
14451
14452 Function *Intr = CGM.getIntrinsic(IID);
14453 if (IsMaskFCmp) {
14454 unsigned NumElts =
14455 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14456 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
14457 Value *Cmp = Builder.CreateCall(Intr, Ops);
14458 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
14459 }
14460
14461 return Builder.CreateCall(Intr, Ops);
14462 }
14463
14464 // Builtins without the _mask suffix return a vector of integers
14465 // of the same width as the input vectors
14466 if (IsMaskFCmp) {
14467 // We ignore SAE if strict FP is disabled. We only keep precise
14468 // exception behavior under strict FP.
14469 // NOTE: If strict FP does ever go through here a CGFPOptionsRAII
14470 // object will be required.
14471 unsigned NumElts =
14472 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14473 Value *Cmp;
14474 if (IsSignaling)
14475 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
14476 else
14477 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
14478 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
14479 }
14480
14481 return getVectorFCmpIR(Pred, IsSignaling);
14482 }
14483
14484 // SSE scalar comparison intrinsics
14485 case X86::BI__builtin_ia32_cmpeqss:
14486 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
14487 case X86::BI__builtin_ia32_cmpltss:
14488 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
14489 case X86::BI__builtin_ia32_cmpless:
14490 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
14491 case X86::BI__builtin_ia32_cmpunordss:
14492 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
14493 case X86::BI__builtin_ia32_cmpneqss:
14494 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
14495 case X86::BI__builtin_ia32_cmpnltss:
14496 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
14497 case X86::BI__builtin_ia32_cmpnless:
14498 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
14499 case X86::BI__builtin_ia32_cmpordss:
14500 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
14501 case X86::BI__builtin_ia32_cmpeqsd:
14502 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
14503 case X86::BI__builtin_ia32_cmpltsd:
14504 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
14505 case X86::BI__builtin_ia32_cmplesd:
14506 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
14507 case X86::BI__builtin_ia32_cmpunordsd:
14508 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
14509 case X86::BI__builtin_ia32_cmpneqsd:
14510 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
14511 case X86::BI__builtin_ia32_cmpnltsd:
14512 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
14513 case X86::BI__builtin_ia32_cmpnlesd:
14514 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
14515 case X86::BI__builtin_ia32_cmpordsd:
14516 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
14517
14518 // f16c half2float intrinsics
14519 case X86::BI__builtin_ia32_vcvtph2ps:
14520 case X86::BI__builtin_ia32_vcvtph2ps256:
14521 case X86::BI__builtin_ia32_vcvtph2ps_mask:
14522 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
14523 case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
14524 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14525 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
14526 }
14527
14528 // AVX512 bf16 intrinsics
14529 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
14530 Ops[2] = getMaskVecValue(
14531 *this, Ops[2],
14532 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
14533 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
14534 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14535 }
14536 case X86::BI__builtin_ia32_cvtsbf162ss_32:
14537 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
14538
14539 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14540 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
14541 Intrinsic::ID IID;
14542 switch (BuiltinID) {
14543 default: llvm_unreachable("Unsupported intrinsic!")::llvm::llvm_unreachable_internal("Unsupported intrinsic!", "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14543)
;
14544 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14545 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
14546 break;
14547 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
14548 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
14549 break;
14550 }
14551 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
14552 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14553 }
14554
14555 case X86::BI__emul:
14556 case X86::BI__emulu: {
14557 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
14558 bool isSigned = (BuiltinID == X86::BI__emul);
14559 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
14560 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
14561 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
14562 }
14563 case X86::BI__mulh:
14564 case X86::BI__umulh:
14565 case X86::BI_mul128:
14566 case X86::BI_umul128: {
14567 llvm::Type *ResType = ConvertType(E->getType());
14568 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
14569
14570 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
14571 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
14572 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
14573
14574 Value *MulResult, *HigherBits;
14575 if (IsSigned) {
14576 MulResult = Builder.CreateNSWMul(LHS, RHS);
14577 HigherBits = Builder.CreateAShr(MulResult, 64);
14578 } else {
14579 MulResult = Builder.CreateNUWMul(LHS, RHS);
14580 HigherBits = Builder.CreateLShr(MulResult, 64);
14581 }
14582 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
14583
14584 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
14585 return HigherBits;
14586
14587 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
14588 Builder.CreateStore(HigherBits, HighBitsAddress);
14589 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
14590 }
14591
14592 case X86::BI__faststorefence: {
14593 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14594 llvm::SyncScope::System);
14595 }
14596 case X86::BI__shiftleft128:
14597 case X86::BI__shiftright128: {
14598 llvm::Function *F = CGM.getIntrinsic(
14599 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
14600 Int64Ty);
14601 // Flip low/high ops and zero-extend amount to matching type.
14602 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
14603 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
14604 std::swap(Ops[0], Ops[1]);
14605 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
14606 return Builder.CreateCall(F, Ops);
14607 }
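// Editor's illustration (not part of the original source): with the operand
// swap and zero-extension above, a call such as
//   unsigned __int64 r = __shiftleft128(Low, High, 8);
// becomes roughly
//   %r = call i64 @llvm.fshl.i64(i64 %High, i64 %Low, i64 8)
// e.g. High = 0x00000000DEADBEEF, Low = 1 yields r == 0x000000DEADBEEF00.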
14608 case X86::BI_ReadWriteBarrier:
14609 case X86::BI_ReadBarrier:
14610 case X86::BI_WriteBarrier: {
14611 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14612 llvm::SyncScope::SingleThread);
14613 }
14614
14615 case X86::BI_AddressOfReturnAddress: {
14616 Function *F =
14617 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14618 return Builder.CreateCall(F);
14619 }
14620 case X86::BI__stosb: {
14621 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
14622 // instruction, but it will create a memset that won't be optimized away.
14623 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14624 }
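// Editor's illustration (not part of the original source): the volatile flag
// keeps the store sequence from being deleted, so
//   __stosb(Dst, 0xAA, 64);
// lowers to roughly
//   call void @llvm.memset.p0i8.i64(i8* align 1 %Dst, i8 -86, i64 64, i1 true)
// where the final i1 true marks the memset as volatile.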
14625 case X86::BI__ud2:
14626 // llvm.trap makes a ud2a instruction on x86.
14627 return EmitTrapCall(Intrinsic::trap);
14628 case X86::BI__int2c: {
14629 // This syscall signals a driver assertion failure in x86 NT kernels.
14630 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14631 llvm::InlineAsm *IA =
14632 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14633 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14634 getLLVMContext(), llvm::AttributeList::FunctionIndex,
14635 llvm::Attribute::NoReturn);
14636 llvm::CallInst *CI = Builder.CreateCall(IA);
14637 CI->setAttributes(NoReturnAttr);
14638 return CI;
14639 }
14640 case X86::BI__readfsbyte:
14641 case X86::BI__readfsword:
14642 case X86::BI__readfsdword:
14643 case X86::BI__readfsqword: {
14644 llvm::Type *IntTy = ConvertType(E->getType());
14645 Value *Ptr =
14646 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14647 LoadInst *Load = Builder.CreateAlignedLoad(
14648 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14649 Load->setVolatile(true);
14650 return Load;
14651 }
14652 case X86::BI__readgsbyte:
14653 case X86::BI__readgsword:
14654 case X86::BI__readgsdword:
14655 case X86::BI__readgsqword: {
14656 llvm::Type *IntTy = ConvertType(E->getType());
14657 Value *Ptr =
14658 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14659 LoadInst *Load = Builder.CreateAlignedLoad(
14660 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14661 Load->setVolatile(true);
14662 return Load;
14663 }
14664 case X86::BI__builtin_ia32_paddsb512:
14665 case X86::BI__builtin_ia32_paddsw512:
14666 case X86::BI__builtin_ia32_paddsb256:
14667 case X86::BI__builtin_ia32_paddsw256:
14668 case X86::BI__builtin_ia32_paddsb128:
14669 case X86::BI__builtin_ia32_paddsw128:
14670 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14671 case X86::BI__builtin_ia32_paddusb512:
14672 case X86::BI__builtin_ia32_paddusw512:
14673 case X86::BI__builtin_ia32_paddusb256:
14674 case X86::BI__builtin_ia32_paddusw256:
14675 case X86::BI__builtin_ia32_paddusb128:
14676 case X86::BI__builtin_ia32_paddusw128:
14677 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14678 case X86::BI__builtin_ia32_psubsb512:
14679 case X86::BI__builtin_ia32_psubsw512:
14680 case X86::BI__builtin_ia32_psubsb256:
14681 case X86::BI__builtin_ia32_psubsw256:
14682 case X86::BI__builtin_ia32_psubsb128:
14683 case X86::BI__builtin_ia32_psubsw128:
14684 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14685 case X86::BI__builtin_ia32_psubusb512:
14686 case X86::BI__builtin_ia32_psubusw512:
14687 case X86::BI__builtin_ia32_psubusb256:
14688 case X86::BI__builtin_ia32_psubusw256:
14689 case X86::BI__builtin_ia32_psubusb128:
14690 case X86::BI__builtin_ia32_psubusw128:
14691 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
14692 case X86::BI__builtin_ia32_encodekey128_u32: {
14693 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
14694
14695 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
14696
14697 for (int i = 0; i < 6; ++i) {
14698 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14699 Value *Ptr = Builder.CreateConstGEP1_32(Ops[2], i * 16);
14700 Ptr = Builder.CreateBitCast(
14701 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14702 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14703 }
14704
14705 return Builder.CreateExtractValue(Call, 0);
14706 }
14707 case X86::BI__builtin_ia32_encodekey256_u32: {
14708 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
14709
14710 Value *Call =
14711 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
14712
14713 for (int i = 0; i < 7; ++i) {
14714 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14715 Value *Ptr = Builder.CreateConstGEP1_32(Ops[3], i * 16);
14716 Ptr = Builder.CreateBitCast(
14717 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14718 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14719 }
14720
14721 return Builder.CreateExtractValue(Call, 0);
14722 }
14723 case X86::BI__builtin_ia32_aesenc128kl_u8:
14724 case X86::BI__builtin_ia32_aesdec128kl_u8:
14725 case X86::BI__builtin_ia32_aesenc256kl_u8:
14726 case X86::BI__builtin_ia32_aesdec256kl_u8: {
14727 Intrinsic::ID IID;
14728 switch (BuiltinID) {
14729 default: llvm_unreachable("Unexpected builtin")::llvm::llvm_unreachable_internal("Unexpected builtin", "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14729)
;
14730 case X86::BI__builtin_ia32_aesenc128kl_u8:
14731 IID = Intrinsic::x86_aesenc128kl;
14732 break;
14733 case X86::BI__builtin_ia32_aesdec128kl_u8:
14734 IID = Intrinsic::x86_aesdec128kl;
14735 break;
14736 case X86::BI__builtin_ia32_aesenc256kl_u8:
14737 IID = Intrinsic::x86_aesenc256kl;
14738 break;
14739 case X86::BI__builtin_ia32_aesdec256kl_u8:
14740 IID = Intrinsic::x86_aesdec256kl;
14741 break;
14742 }
14743
14744 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
14745
14746 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14747 Ops[0]);
14748
14749 return Builder.CreateExtractValue(Call, 0);
14750 }
14751 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14752 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14753 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14754 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
14755 Intrinsic::ID IID;
14756 switch (BuiltinID) {
14757 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14758 IID = Intrinsic::x86_aesencwide128kl;
14759 break;
14760 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14761 IID = Intrinsic::x86_aesdecwide128kl;
14762 break;
14763 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14764 IID = Intrinsic::x86_aesencwide256kl;
14765 break;
14766 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
14767 IID = Intrinsic::x86_aesdecwide256kl;
14768 break;
14769 }
14770
14771 llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
14772 Value *InOps[9];
14773 InOps[0] = Ops[2];
14774 for (int i = 0; i != 8; ++i) {
14775 Value *Ptr = Builder.CreateConstGEP1_32(Ops[1], i);
14776 InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
14777 }
14778
14779 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
14780
14781 for (int i = 0; i != 8; ++i) {
14782 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14783 Value *Ptr = Builder.CreateConstGEP1_32(Ops[0], i);
14784 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
14785 }
14786
14787 return Builder.CreateExtractValue(Call, 0);
14788 }
14789 }
14790}
14791
14792Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
14793 const CallExpr *E) {
14794 SmallVector<Value*, 4> Ops;
14795
14796 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
14797 Ops.push_back(EmitScalarExpr(E->getArg(i)));
14798
14799 Intrinsic::ID ID = Intrinsic::not_intrinsic;
14800
14801 switch (BuiltinID) {
14802 default: return nullptr;
14803
14804 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
14805 // call __builtin_readcyclecounter.
14806 case PPC::BI__builtin_ppc_get_timebase:
14807 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
14808
14809 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
14810 case PPC::BI__builtin_altivec_lvx:
14811 case PPC::BI__builtin_altivec_lvxl:
14812 case PPC::BI__builtin_altivec_lvebx:
14813 case PPC::BI__builtin_altivec_lvehx:
14814 case PPC::BI__builtin_altivec_lvewx:
14815 case PPC::BI__builtin_altivec_lvsl:
14816 case PPC::BI__builtin_altivec_lvsr:
14817 case PPC::BI__builtin_vsx_lxvd2x:
14818 case PPC::BI__builtin_vsx_lxvw4x:
14819 case PPC::BI__builtin_vsx_lxvd2x_be:
14820 case PPC::BI__builtin_vsx_lxvw4x_be:
14821 case PPC::BI__builtin_vsx_lxvl:
14822 case PPC::BI__builtin_vsx_lxvll:
14823 {
14824 if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
14825 BuiltinID == PPC::BI__builtin_vsx_lxvll){
14826 Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
14827 }else {
14828 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14829 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14830 Ops.pop_back();
14831 }
14832
14833 switch (BuiltinID) {
14834 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!")::llvm::llvm_unreachable_internal("Unsupported ld/lvsl/lvsr intrinsic!"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14834)
;
14835 case PPC::BI__builtin_altivec_lvx:
14836 ID = Intrinsic::ppc_altivec_lvx;
14837 break;
14838 case PPC::BI__builtin_altivec_lvxl:
14839 ID = Intrinsic::ppc_altivec_lvxl;
14840 break;
14841 case PPC::BI__builtin_altivec_lvebx:
14842 ID = Intrinsic::ppc_altivec_lvebx;
14843 break;
14844 case PPC::BI__builtin_altivec_lvehx:
14845 ID = Intrinsic::ppc_altivec_lvehx;
14846 break;
14847 case PPC::BI__builtin_altivec_lvewx:
14848 ID = Intrinsic::ppc_altivec_lvewx;
14849 break;
14850 case PPC::BI__builtin_altivec_lvsl:
14851 ID = Intrinsic::ppc_altivec_lvsl;
14852 break;
14853 case PPC::BI__builtin_altivec_lvsr:
14854 ID = Intrinsic::ppc_altivec_lvsr;
14855 break;
14856 case PPC::BI__builtin_vsx_lxvd2x:
14857 ID = Intrinsic::ppc_vsx_lxvd2x;
14858 break;
14859 case PPC::BI__builtin_vsx_lxvw4x:
14860 ID = Intrinsic::ppc_vsx_lxvw4x;
14861 break;
14862 case PPC::BI__builtin_vsx_lxvd2x_be:
14863 ID = Intrinsic::ppc_vsx_lxvd2x_be;
14864 break;
14865 case PPC::BI__builtin_vsx_lxvw4x_be:
14866 ID = Intrinsic::ppc_vsx_lxvw4x_be;
14867 break;
14868 case PPC::BI__builtin_vsx_lxvl:
14869 ID = Intrinsic::ppc_vsx_lxvl;
14870 break;
14871 case PPC::BI__builtin_vsx_lxvll:
14872 ID = Intrinsic::ppc_vsx_lxvll;
14873 break;
14874 }
14875 llvm::Function *F = CGM.getIntrinsic(ID);
14876 return Builder.CreateCall(F, Ops, "");
14877 }
14878
14879 // vec_st, vec_xst_be
14880 case PPC::BI__builtin_altivec_stvx:
14881 case PPC::BI__builtin_altivec_stvxl:
14882 case PPC::BI__builtin_altivec_stvebx:
14883 case PPC::BI__builtin_altivec_stvehx:
14884 case PPC::BI__builtin_altivec_stvewx:
14885 case PPC::BI__builtin_vsx_stxvd2x:
14886 case PPC::BI__builtin_vsx_stxvw4x:
14887 case PPC::BI__builtin_vsx_stxvd2x_be:
14888 case PPC::BI__builtin_vsx_stxvw4x_be:
14889 case PPC::BI__builtin_vsx_stxvl:
14890 case PPC::BI__builtin_vsx_stxvll:
14891 {
14892 if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
14893 BuiltinID == PPC::BI__builtin_vsx_stxvll ){
14894 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14895 }else {
14896 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14897 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14898 Ops.pop_back();
14899 }
14900
14901 switch (BuiltinID) {
14902 default: llvm_unreachable("Unsupported st intrinsic!")::llvm::llvm_unreachable_internal("Unsupported st intrinsic!"
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14902)
;
14903 case PPC::BI__builtin_altivec_stvx:
14904 ID = Intrinsic::ppc_altivec_stvx;
14905 break;
14906 case PPC::BI__builtin_altivec_stvxl:
14907 ID = Intrinsic::ppc_altivec_stvxl;
14908 break;
14909 case PPC::BI__builtin_altivec_stvebx:
14910 ID = Intrinsic::ppc_altivec_stvebx;
14911 break;
14912 case PPC::BI__builtin_altivec_stvehx:
14913 ID = Intrinsic::ppc_altivec_stvehx;
14914 break;
14915 case PPC::BI__builtin_altivec_stvewx:
14916 ID = Intrinsic::ppc_altivec_stvewx;
14917 break;
14918 case PPC::BI__builtin_vsx_stxvd2x:
14919 ID = Intrinsic::ppc_vsx_stxvd2x;
14920 break;
14921 case PPC::BI__builtin_vsx_stxvw4x:
14922 ID = Intrinsic::ppc_vsx_stxvw4x;
14923 break;
14924 case PPC::BI__builtin_vsx_stxvd2x_be:
14925 ID = Intrinsic::ppc_vsx_stxvd2x_be;
14926 break;
14927 case PPC::BI__builtin_vsx_stxvw4x_be:
14928 ID = Intrinsic::ppc_vsx_stxvw4x_be;
14929 break;
14930 case PPC::BI__builtin_vsx_stxvl:
14931 ID = Intrinsic::ppc_vsx_stxvl;
14932 break;
14933 case PPC::BI__builtin_vsx_stxvll:
14934 ID = Intrinsic::ppc_vsx_stxvll;
14935 break;
14936 }
14937 llvm::Function *F = CGM.getIntrinsic(ID);
14938 return Builder.CreateCall(F, Ops, "");
14939 }
14940 // Square root
14941 case PPC::BI__builtin_vsx_xvsqrtsp:
14942 case PPC::BI__builtin_vsx_xvsqrtdp: {
14943 llvm::Type *ResultType = ConvertType(E->getType());
14944 Value *X = EmitScalarExpr(E->getArg(0));
14945 if (Builder.getIsFPConstrained()) {
14946 llvm::Function *F = CGM.getIntrinsic(
14947 Intrinsic::experimental_constrained_sqrt, ResultType);
14948 return Builder.CreateConstrainedFPCall(F, X);
14949 } else {
14950 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14951 return Builder.CreateCall(F, X);
14952 }
14953 }
14954 // Count leading zeros
14955 case PPC::BI__builtin_altivec_vclzb:
14956 case PPC::BI__builtin_altivec_vclzh:
14957 case PPC::BI__builtin_altivec_vclzw:
14958 case PPC::BI__builtin_altivec_vclzd: {
14959 llvm::Type *ResultType = ConvertType(E->getType());
14960 Value *X = EmitScalarExpr(E->getArg(0));
14961 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14962 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14963 return Builder.CreateCall(F, {X, Undef});
14964 }
14965 case PPC::BI__builtin_altivec_vctzb:
14966 case PPC::BI__builtin_altivec_vctzh:
14967 case PPC::BI__builtin_altivec_vctzw:
14968 case PPC::BI__builtin_altivec_vctzd: {
14969 llvm::Type *ResultType = ConvertType(E->getType());
14970 Value *X = EmitScalarExpr(E->getArg(0));
14971 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14972 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14973 return Builder.CreateCall(F, {X, Undef});
14974 }
14975 case PPC::BI__builtin_altivec_vec_replace_elt:
14976 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
14977 // The third argument of vec_replace_elt and vec_replace_unaligned must
14978 // be a compile time constant and will be lowered to either the vinsw
14979 // or the vinsd instruction.
14980 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14981 assert(ArgCI &&(static_cast <bool> (ArgCI && "Third Arg to vinsw/vinsd intrinsic must be a constant integer!"
) ? void (0) : __assert_fail ("ArgCI && \"Third Arg to vinsw/vinsd intrinsic must be a constant integer!\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14982, __extension__ __PRETTY_FUNCTION__))
14982 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!")(static_cast <bool> (ArgCI && "Third Arg to vinsw/vinsd intrinsic must be a constant integer!"
) ? void (0) : __assert_fail ("ArgCI && \"Third Arg to vinsw/vinsd intrinsic must be a constant integer!\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 14982, __extension__ __PRETTY_FUNCTION__))
;
14983 llvm::Type *ResultType = ConvertType(E->getType());
14984 llvm::Function *F = nullptr;
14985 Value *Call = nullptr;
14986 int64_t ConstArg = ArgCI->getSExtValue();
14987 unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
14988 bool Is32Bit = false;
14989 assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
14990 // The input to vec_replace_elt is an element index, not a byte index.
14991 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
14992 ConstArg *= ArgWidth / 8;
14993 if (ArgWidth == 32) {
14994 Is32Bit = true;
14995 // When the second argument is 32 bits, it can either be an integer or
14996 // a float. The vinsw intrinsic is used in this case.
14997 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
14999 // Fix the constant according to endianness.
14999 if (getTarget().isLittleEndian())
15000 ConstArg = 12 - ConstArg;
15001 } else {
15002 // When the second argument is 64 bits, it can either be a long long or
15003 // a double. The vinsd intrinsic is used in this case.
15004 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
15005 // Fix the constant for little endian.
15006 if (getTarget().isLittleEndian())
15007 ConstArg = 8 - ConstArg;
15008 }
15009 Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
15010 // Depending on ArgWidth, the input vector could be a float or a double.
15011 // If the input vector is a float type, bitcast the inputs to integers. Or,
15012 // if the input vector is a double, bitcast the inputs to 64-bit integers.
15013 if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
15014 Ops[0] = Builder.CreateBitCast(
15015 Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
15016 : llvm::FixedVectorType::get(Int64Ty, 2));
15017 Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
15018 }
15019 // Emit the call to vinsw or vinsd.
15020 Call = Builder.CreateCall(F, Ops);
15021 // Depending on the builtin, bitcast to the appropriate result type.
15022 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
15023 !Ops[1]->getType()->isIntegerTy())
15024 return Builder.CreateBitCast(Call, ResultType);
15025 else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
15026 Ops[1]->getType()->isIntegerTy())
15027 return Call;
15028 else
15029 return Builder.CreateBitCast(Call,
15030 llvm::FixedVectorType::get(Int8Ty, 16));
15031 }
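// Editor's illustration (not part of the original source): for
// vec_replace_elt on a vector of 32-bit elements with element index 1, the
// code above first scales the index to a byte offset (1 * 4 == 4) and, on a
// little-endian target, flips it to 12 - 4 == 8 before emitting
// llvm.ppc.altivec.vinsw; on a big-endian target the offset stays 4.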
15032 case PPC::BI__builtin_altivec_vpopcntb:
15033 case PPC::BI__builtin_altivec_vpopcnth:
15034 case PPC::BI__builtin_altivec_vpopcntw:
15035 case PPC::BI__builtin_altivec_vpopcntd: {
15036 llvm::Type *ResultType = ConvertType(E->getType());
15037 Value *X = EmitScalarExpr(E->getArg(0));
15038 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15039 return Builder.CreateCall(F, X);
15040 }
15041 case PPC::BI__builtin_altivec_vadduqm:
15042 case PPC::BI__builtin_altivec_vsubuqm: {
15043 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
15044 Ops[0] =
15045 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
15046 Ops[1] =
15047 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
15048 if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
15049 return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
15050 else
15051 return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
15052 }
15053 // Copy sign
15054 case PPC::BI__builtin_vsx_xvcpsgnsp:
15055 case PPC::BI__builtin_vsx_xvcpsgndp: {
15056 llvm::Type *ResultType = ConvertType(E->getType());
15057 Value *X = EmitScalarExpr(E->getArg(0));
15058 Value *Y = EmitScalarExpr(E->getArg(1));
15059 ID = Intrinsic::copysign;
15060 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
15061 return Builder.CreateCall(F, {X, Y});
15062 }
15063 // Rounding/truncation
15064 case PPC::BI__builtin_vsx_xvrspip:
15065 case PPC::BI__builtin_vsx_xvrdpip:
15066 case PPC::BI__builtin_vsx_xvrdpim:
15067 case PPC::BI__builtin_vsx_xvrspim:
15068 case PPC::BI__builtin_vsx_xvrdpi:
15069 case PPC::BI__builtin_vsx_xvrspi:
15070 case PPC::BI__builtin_vsx_xvrdpic:
15071 case PPC::BI__builtin_vsx_xvrspic:
15072 case PPC::BI__builtin_vsx_xvrdpiz:
15073 case PPC::BI__builtin_vsx_xvrspiz: {
15074 llvm::Type *ResultType = ConvertType(E->getType());
15075 Value *X = EmitScalarExpr(E->getArg(0));
15076 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
15077 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
15078 ID = Builder.getIsFPConstrained()
15079 ? Intrinsic::experimental_constrained_floor
15080 : Intrinsic::floor;
15081 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
15082 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
15083 ID = Builder.getIsFPConstrained()
15084 ? Intrinsic::experimental_constrained_round
15085 : Intrinsic::round;
15086 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
15087 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
15088 ID = Builder.getIsFPConstrained()
15089 ? Intrinsic::experimental_constrained_rint
15090 : Intrinsic::rint;
15091 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
15092 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
15093 ID = Builder.getIsFPConstrained()
15094 ? Intrinsic::experimental_constrained_ceil
15095 : Intrinsic::ceil;
15096 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
15097 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
15098 ID = Builder.getIsFPConstrained()
15099 ? Intrinsic::experimental_constrained_trunc
15100 : Intrinsic::trunc;
15101 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
15102 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
15103 : Builder.CreateCall(F, X);
15104 }
15105
15106 // Absolute value
15107 case PPC::BI__builtin_vsx_xvabsdp:
15108 case PPC::BI__builtin_vsx_xvabssp: {
15109 llvm::Type *ResultType = ConvertType(E->getType());
15110 Value *X = EmitScalarExpr(E->getArg(0));
15111 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15112 return Builder.CreateCall(F, X);
15113 }
15114
15115 // Fastmath by default
15116 case PPC::BI__builtin_ppc_recipdivf:
15117 case PPC::BI__builtin_ppc_recipdivd:
15118 case PPC::BI__builtin_ppc_rsqrtf:
15119 case PPC::BI__builtin_ppc_rsqrtd: {
15120 Builder.getFastMathFlags().setFast();
15121 llvm::Type *ResultType = ConvertType(E->getType());
15122 Value *X = EmitScalarExpr(E->getArg(0));
15123
15124 if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
15125 BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
15126 Value *Y = EmitScalarExpr(E->getArg(1));
15127 return Builder.CreateFDiv(X, Y, "recipdiv");
15128 }
15129 auto *One = ConstantFP::get(ResultType, 1.0);
15130 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15131 return Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
15132 }
15133
15134 // FMA variations
15135 case PPC::BI__builtin_vsx_xvmaddadp:
15136 case PPC::BI__builtin_vsx_xvmaddasp:
15137 case PPC::BI__builtin_vsx_xvnmaddadp:
15138 case PPC::BI__builtin_vsx_xvnmaddasp:
15139 case PPC::BI__builtin_vsx_xvmsubadp:
15140 case PPC::BI__builtin_vsx_xvmsubasp:
15141 case PPC::BI__builtin_vsx_xvnmsubadp:
15142 case PPC::BI__builtin_vsx_xvnmsubasp: {
15143 llvm::Type *ResultType = ConvertType(E->getType());
15144 Value *X = EmitScalarExpr(E->getArg(0));
15145 Value *Y = EmitScalarExpr(E->getArg(1));
15146 Value *Z = EmitScalarExpr(E->getArg(2));
15147 llvm::Function *F;
15148 if (Builder.getIsFPConstrained())
15149 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15150 else
15151 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15152 switch (BuiltinID) {
15153 case PPC::BI__builtin_vsx_xvmaddadp:
15154 case PPC::BI__builtin_vsx_xvmaddasp:
15155 if (Builder.getIsFPConstrained())
15156 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15157 else
15158 return Builder.CreateCall(F, {X, Y, Z});
15159 case PPC::BI__builtin_vsx_xvnmaddadp:
15160 case PPC::BI__builtin_vsx_xvnmaddasp:
15161 if (Builder.getIsFPConstrained())
15162 return Builder.CreateFNeg(
15163 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15164 else
15165 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15166 case PPC::BI__builtin_vsx_xvmsubadp:
15167 case PPC::BI__builtin_vsx_xvmsubasp:
15168 if (Builder.getIsFPConstrained())
15169 return Builder.CreateConstrainedFPCall(
15170 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15171 else
15172 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15173 case PPC::BI__builtin_vsx_xvnmsubadp:
15174 case PPC::BI__builtin_vsx_xvnmsubasp:
15175 if (Builder.getIsFPConstrained())
15176 return Builder.CreateFNeg(
15177 Builder.CreateConstrainedFPCall(
15178 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
15179 "neg");
15180 else
15181 return Builder.CreateFNeg(
15182 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
15183 "neg");
15184 }
15185 llvm_unreachable("Unknown FMA operation")::llvm::llvm_unreachable_internal("Unknown FMA operation", "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15185)
;
15186 return nullptr; // Suppress no-return warning
15187 }
15188
15189 case PPC::BI__builtin_vsx_insertword: {
15190 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
15191
15192 // Third argument is a compile time constant int. It must be clamped to
15193 // the range [0, 12].
15194 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15195 assert(ArgCI &&(static_cast <bool> (ArgCI && "Third arg to xxinsertw intrinsic must be constant integer"
) ? void (0) : __assert_fail ("ArgCI && \"Third arg to xxinsertw intrinsic must be constant integer\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15196, __extension__ __PRETTY_FUNCTION__))
15196 "Third arg to xxinsertw intrinsic must be constant integer")(static_cast <bool> (ArgCI && "Third arg to xxinsertw intrinsic must be constant integer"
) ? void (0) : __assert_fail ("ArgCI && \"Third arg to xxinsertw intrinsic must be constant integer\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15196, __extension__ __PRETTY_FUNCTION__))
;
15197 const int64_t MaxIndex = 12;
15198 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
15199
15200 // The builtin semantics don't exactly match the xxinsertw instruction's
15201 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
15202 // word from the first argument, and inserts it in the second argument. The
15203 // instruction extracts the word from its second input register and inserts
15204 // it into its first input register, so swap the first and second arguments.
15205 std::swap(Ops[0], Ops[1]);
15206
15207 // Need to cast the second argument from a vector of unsigned int to a
15208 // vector of long long.
15209 Ops[1] =
15210 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
15211
15212 if (getTarget().isLittleEndian()) {
15213 // Reverse the double words in the vector we will extract from.
15214 Ops[0] =
15215 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15216 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
15217
15218 // Reverse the index.
15219 Index = MaxIndex - Index;
15220 }
15221
15222 // Intrinsic expects the first arg to be a vector of int.
15223 Ops[0] =
15224 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
15225 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
15226 return Builder.CreateCall(F, Ops);
15227 }
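// Editor's illustration (not part of the original source): for
// __builtin_vsx_insertword with a third argument of 4, the index is already
// inside [0, 12]; on a little-endian target it becomes 12 - 4 == 8 and the
// doublewords of the vector being extracted from are reversed, so the same
// word lands in the same lane as on a big-endian target.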
15228
15229 case PPC::BI__builtin_vsx_extractuword: {
15230 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
15231
15232 // Intrinsic expects the first argument to be a vector of doublewords.
15233 Ops[0] =
15234 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15235
15236 // The second argument is a compile time constant int that needs to
15237 // be clamped to the range [0, 12].
15238 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
15239 assert(ArgCI &&(static_cast <bool> (ArgCI && "Second Arg to xxextractuw intrinsic must be a constant integer!"
) ? void (0) : __assert_fail ("ArgCI && \"Second Arg to xxextractuw intrinsic must be a constant integer!\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15240, __extension__ __PRETTY_FUNCTION__))
15240 "Second Arg to xxextractuw intrinsic must be a constant integer!")(static_cast <bool> (ArgCI && "Second Arg to xxextractuw intrinsic must be a constant integer!"
) ? void (0) : __assert_fail ("ArgCI && \"Second Arg to xxextractuw intrinsic must be a constant integer!\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15240, __extension__ __PRETTY_FUNCTION__))
;
15241 const int64_t MaxIndex = 12;
15242 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
15243
15244 if (getTarget().isLittleEndian()) {
15245 // Reverse the index.
15246 Index = MaxIndex - Index;
15247 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
15248
15249 // Emit the call, then reverse the double words of the results vector.
15250 Value *Call = Builder.CreateCall(F, Ops);
15251
15252 Value *ShuffleCall =
15253 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
15254 return ShuffleCall;
15255 } else {
15256 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
15257 return Builder.CreateCall(F, Ops);
15258 }
15259 }
15260
15261 case PPC::BI__builtin_vsx_xxpermdi: {
15262 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15263 assert(ArgCI && "Third arg must be constant integer!")(static_cast <bool> (ArgCI && "Third arg must be constant integer!"
) ? void (0) : __assert_fail ("ArgCI && \"Third arg must be constant integer!\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15263, __extension__ __PRETTY_FUNCTION__))
;
15264
15265 unsigned Index = ArgCI->getZExtValue();
15266 Ops[0] =
15267 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15268 Ops[1] =
15269 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
15270
15271 // Account for endianness by treating this as just a shuffle. So we use the
15272 // same indices for both LE and BE in order to produce expected results in
15273 // both cases.
15274 int ElemIdx0 = (Index & 2) >> 1;
15275 int ElemIdx1 = 2 + (Index & 1);
15276
15277 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
15278 Value *ShuffleCall =
15279 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
15280 QualType BIRetType = E->getType();
15281 auto RetTy = ConvertType(BIRetType);
15282 return Builder.CreateBitCast(ShuffleCall, RetTy);
15283 }
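// Editor's illustration (not part of the original source): the index
// arithmetic above turns the xxpermdi immediate into shuffle elements, e.g.
// for Index == 3:
//   ElemIdx0 = (3 & 2) >> 1 == 1   // second doubleword of Ops[0]
//   ElemIdx1 = 2 + (3 & 1)  == 3   // second doubleword of Ops[1]
// giving roughly
//   shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>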
15284
15285 case PPC::BI__builtin_vsx_xxsldwi: {
15286 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15287 assert(ArgCI && "Third argument must be a compile time constant")(static_cast <bool> (ArgCI && "Third argument must be a compile time constant"
) ? void (0) : __assert_fail ("ArgCI && \"Third argument must be a compile time constant\""
, "/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/lib/CodeGen/CGBuiltin.cpp"
, 15287, __extension__ __PRETTY_FUNCTION__))
;
15288 unsigned Index = ArgCI->getZExtValue() & 0x3;
15289 Ops[0] =
15290 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
15291 Ops[1] =
15292 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
15293
15294 // Create a shuffle mask
15295 int ElemIdx0;
15296 int ElemIdx1;
15297 int ElemIdx2;
15298 int ElemIdx3;
15299 if (getTarget().isLittleEndian()) {
15300 // Little endian element N comes from element 8+N-Index of the
15301 // concatenated wide vector (of course, using modulo arithmetic on
15302 // the total number of elements).
15303 ElemIdx0 = (8 - Index) % 8;
15304 ElemIdx1 = (9 - Index) % 8;
15305 ElemIdx2 = (10 - Index) % 8;
15306 ElemIdx3 = (11 - Index) % 8;
15307 } else {
15308 // Big endian ElemIdx<N> = Index + N
15309 ElemIdx0 = Index;
15310 ElemIdx1 = Index + 1;
15311 ElemIdx2 = Index + 2;
15312 ElemIdx3 = Index + 3;
15313 }
15314
15315 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
15316 Value *ShuffleCall =
15317 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
15318 QualType BIRetType = E->getType();
15319 auto RetTy = ConvertType(BIRetType);
15320 return Builder.CreateBitCast(ShuffleCall, RetTy);
15321 }
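// Editor's illustration (not part of the original source): for
// __builtin_vsx_xxsldwi with Index == 1 the mask above is {1, 2, 3, 4} on a
// big-endian target, while the little-endian arithmetic ((8+N-Index) % 8)
// gives {7, 0, 1, 2}, so both layouts shift the concatenated inputs by one
// word.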
15322
15323 case PPC::BI__builtin_pack_vector_int128: {
15324 bool isLittleEndian = getTarget().isLittleEndian();
15325 Value *UndefValue =
15326 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
15327 Value *Res = Builder.CreateInsertElement(
15328 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
15329 Res = Builder.CreateInsertElement(Res, Ops[1],
15330 (uint64_t)(isLittleEndian ? 0 : 1));
15331 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
15332 }
15333
15334 case PPC::BI__builtin_unpack_vector_int128: {
15335 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
15336 Value *Unpacked = Builder.CreateBitCast(
15337 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
15338
15339 if (getTarget().isLittleEndian())
15340 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
15341
15342 return Builder.CreateExtractElement(Unpacked, Index);
15343 }
15344
15345 // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
15346 // Some of the MMA instructions accumulate their result into an existing
15347 // accumulator whereas the others generate a new accumulator. So we need to
15348 // use custom code generation to expand a builtin call with a pointer into a
15349 // load (if the corresponding instruction accumulates its result) followed by
15350 // the call to the intrinsic and a store of the result.
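// Editor's sketch (not part of the original source): for an accumulating
// builtin such as __builtin_mma_xvf32gerpp(&Acc, A, B), the expansion below
// is roughly
//   %acc0 = load <512 x i1>, <512 x i1>* %AccPtr
//   %acc1 = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %acc0,
//                                                    <16 x i8> %A, <16 x i8> %B)
//   store <512 x i1> %acc1, <512 x i1>* %AccPtr
// while non-accumulating builtins skip the initial load.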
15351#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
15352 case PPC::BI__builtin_##Name:
15353#include "clang/Basic/BuiltinsPPC.def"
15354 {
15355 // The first argument of these builtins is a pointer used to store their
15356 // result. However, the llvm intrinsics return their result in multiple
15357 // return values. So, here we emit code extracting these values from the
15358 // intrinsic results and storing them using that pointer.
15359 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
15360 BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
15361 BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
15362 unsigned NumVecs = 2;
15363 auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
15364 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
15365 NumVecs = 4;
15366 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
15367 }
15368 llvm::Function *F = CGM.getIntrinsic(Intrinsic);
15369 Address Addr = EmitPointerWithAlignment(E->getArg(1));
15370 Value *Vec = Builder.CreateLoad(Addr);
15371 Value *Call = Builder.CreateCall(F, {Vec});
15372 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
15373 Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
15374 for (unsigned i=0; i<NumVecs; i++) {
15375 Value *Vec = Builder.CreateExtractValue(Call, i);
15376 llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
15377 Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
15378 Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
15379 }
15380 return Call;
15381 }
15382 bool Accumulate;
15383 switch (BuiltinID) {
15384 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
15385 case PPC::BI__builtin_##Name: \
15386 ID = Intrinsic::ppc_##Intr; \
15387 Accumulate = Acc; \
15388 break;
15389 #include "clang/Basic/BuiltinsPPC.def"
15390 }
15391 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
15392 BuiltinID == PPC::BI__builtin_vsx_stxvp ||
15393 BuiltinID == PPC::BI__builtin_mma_lxvp ||
15394 BuiltinID == PPC::BI__builtin_mma_stxvp) {
15395 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
15396 BuiltinID == PPC::BI__builtin_mma_lxvp) {
15397 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
15398 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
15399 } else {
15400 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
15401 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
15402 }
15403 Ops.pop_back();
15404 llvm::Function *F = CGM.getIntrinsic(ID);
15405 return Builder.CreateCall(F, Ops, "");
15406 }
15407 SmallVector<Value*, 4> CallOps;
15408 if (Accumulate) {
15409 Address Addr = EmitPointerWithAlignment(E->getArg(0));
15410 Value *Acc = Builder.CreateLoad(Addr);
15411 CallOps.push_back(Acc);
15412 }
15413 for (unsigned i=1; i<Ops.size(); i++)
15414 CallOps.push_back(Ops[i]);
15415 llvm::Function *F = CGM.getIntrinsic(ID);
15416 Value *Call = Builder.CreateCall(F, CallOps);
15417 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
15418 }
15419 }
15420}
15421
15422namespace {
15423// If \p E is not a null pointer, insert address space cast to match return
15424// type of \p E if necessary.
15425Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
15426 const CallExpr *E = nullptr) {
15427 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
15428 auto *Call = CGF.Builder.CreateCall(F);
15429 Call->addAttribute(
15430 AttributeList::ReturnIndex,
15431 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
15432 Call->addAttribute(AttributeList::ReturnIndex,
15433 Attribute::getWithAlignment(Call->getContext(), Align(4)));
15434 if (!E)
15435 return Call;
15436 QualType BuiltinRetType = E->getType();
15437 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
15438 if (RetTy == Call->getType())
15439 return Call;
15440 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
15441}
15442
15443// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
15444Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
15445 const unsigned XOffset = 4;
15446 auto *DP = EmitAMDGPUDispatchPtr(CGF);
15447 // Indexing the HSA kernel_dispatch_packet struct.
15448 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
15449 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
15450 auto *DstTy =
15451 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
15452 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
15453 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
15454 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
15455 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
15456 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
15457 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
15458 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
15459 llvm::MDNode::get(CGF.getLLVMContext(), None));
15460 return LD;
15461}
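// Editor's note (not part of the original source): the offsets above index
// the 16-bit workgroup_size_{x,y,z} fields of the HSA kernel dispatch packet,
// which start at byte 4, so e.g. workgroup_size_y is the i16 load at
// dispatch_ptr + 4 + 1 * 2 == byte offset 6. EmitAMDGPUGridSize below does
// the same for the 32-bit grid_size fields starting at byte 12.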
15462
15463// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
15464Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
15465 const unsigned XOffset = 12;
15466 auto *DP = EmitAMDGPUDispatchPtr(CGF);
15467 // Indexing the HSA kernel_dispatch_packet struct.
15468 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
15469 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
15470 auto *DstTy =
15471 CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
15472 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
15473 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
15474 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
15475 llvm::MDNode::get(CGF.getLLVMContext(), None));
15476 return LD;
15477}
15478} // namespace
15479
15480// For processing memory ordering and memory scope arguments of various
15481// amdgcn builtins.
15482// \p Order takes a C++11-compatible memory-ordering specifier and converts
15483// it into LLVM's memory ordering specifier using the atomic C ABI, and writes
15484// to \p AO. \p Scope takes a const char * and converts it into AMDGCN
15485// specific SyncScopeID and writes it to \p SSID.
15486bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
15487 llvm::AtomicOrdering &AO,
15488 llvm::SyncScope::ID &SSID) {
15489 if (isa<llvm::ConstantInt>(Order)) {
15490 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
15491
15492 // Map C11/C++11 memory ordering to LLVM memory ordering
15493 assert(llvm::isValidAtomicOrderingCABI(ord));
15494 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
15495 case llvm::AtomicOrderingCABI::acquire:
15496 case llvm::AtomicOrderingCABI::consume:
15497 AO = llvm::AtomicOrdering::Acquire;
15498 break;
15499 case llvm::AtomicOrderingCABI::release:
15500 AO = llvm::AtomicOrdering::Release;
15501 break;
15502 case llvm::AtomicOrderingCABI::acq_rel:
15503 AO = llvm::AtomicOrdering::AcquireRelease;
15504 break;
15505 case llvm::AtomicOrderingCABI::seq_cst:
15506 AO = llvm::AtomicOrdering::SequentiallyConsistent;
15507 break;
15508 case llvm::AtomicOrderingCABI::relaxed:
15509 AO = llvm::AtomicOrdering::Monotonic;
15510 break;
15511 }
15512
15513 StringRef scp;
15514 llvm::getConstantStringInfo(Scope, scp);
15515 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
15516 return true;
15517 }
15518 return false;
15519}
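// Editor's illustration (not part of the original source): with the mapping
// above, a call such as
//   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
// lowers to roughly
//   fence syncscope("workgroup") seq_cst
// and __ATOMIC_RELAXED would map to a monotonic ordering instead.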
15520
15521Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
15522 const CallExpr *E) {
15523 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
15524 llvm::SyncScope::ID SSID;
15525 switch (BuiltinID) {
15526 case AMDGPU::BI__builtin_amdgcn_div_scale:
15527 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
15528 // Translate from the intrinsic's struct return to the builtin's out
15529 // argument.
15530
15531 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
15532
15533 llvm::Value *X = EmitScalarExpr(E->getArg(0));
15534 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
15535 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
15536
15537 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
15538 X->getType());
15539
15540 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
15541
15542 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
15543 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
15544
15545 llvm::Type *RealFlagType
15546 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
15547
15548 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
15549 Builder.CreateStore(FlagExt, FlagOutPtr);
15550 return Result;
15551 }
15552 case AMDGPU::BI__builtin_amdgcn_div_fmas:
15553 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
15554 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15555 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15556 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15557 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
15558
15559 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
15560 Src0->getType());
15561 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
15562 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
15563 }
15564
15565 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
15566 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
15567 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
15568 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
15569 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
15570 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
15571 llvm::SmallVector<llvm::Value *, 6> Args;
15572 for (unsigned I = 0; I != E->getNumArgs(); ++I)
15573 Args.push_back(EmitScalarExpr(E->getArg(I)));
15574 assert(Args.size() == 5 || Args.size() == 6);
15575 if (Args.size() == 5)
15576 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
15577 Function *F =
15578 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
15579 return Builder.CreateCall(F, Args);
15580 }
15581 case AMDGPU::BI__builtin_amdgcn_div_fixup:
15582 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
15583 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
15584 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
15585 case AMDGPU::BI__builtin_amdgcn_trig_preop:
15586 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
15587 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
15588 case AMDGPU::BI__builtin_amdgcn_rcp:
15589 case AMDGPU::BI__builtin_amdgcn_rcpf:
15590 case AMDGPU::BI__builtin_amdgcn_rcph:
15591 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
15592 case AMDGPU::BI__builtin_amdgcn_sqrt:
15593 case AMDGPU::BI__builtin_amdgcn_sqrtf:
15594 case AMDGPU::BI__builtin_amdgcn_sqrth:
15595 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
15596 case AMDGPU::BI__builtin_amdgcn_rsq:
15597 case AMDGPU::BI__builtin_amdgcn_rsqf:
15598 case AMDGPU::BI__builtin_amdgcn_rsqh:
15599 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
15600 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
15601 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
15602 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
15603 case AMDGPU::BI__builtin_amdgcn_sinf:
15604 case AMDGPU::BI__builtin_amdgcn_sinh:
15605 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
15606 case AMDGPU::BI__builtin_amdgcn_cosf:
15607 case AMDGPU::BI__builtin_amdgcn_cosh:
15608 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
15609 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
15610 return EmitAMDGPUDispatchPtr(*this, E);
15611 case AMDGPU::BI__builtin_amdgcn_log_clampf:
15612 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
15613 case AMDGPU::BI__builtin_amdgcn_ldexp:
15614 case AMDGPU::BI__builtin_amdgcn_ldexpf:
15615 case AMDGPU::BI__builtin_amdgcn_ldexph:
15616 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
15617 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
15618 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
15619 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
15620 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
15621 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
15622 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
15623 Value *Src0 = EmitScalarExpr(E->getArg(0));
15624 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
15625 { Builder.getInt32Ty(), Src0->getType() });
15626 return Builder.CreateCall(F, Src0);
15627 }
15628 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
15629 Value *Src0 = EmitScalarExpr(E->getArg(0));
15630 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
15631 { Builder.getInt16Ty(), Src0->getType() });
15632 return Builder.CreateCall(F, Src0);
15633 }
15634 case AMDGPU::BI__builtin_amdgcn_fract:
15635 case AMDGPU::BI__builtin_amdgcn_fractf:
15636 case AMDGPU::BI__builtin_amdgcn_fracth:
15637 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
15638 case AMDGPU::BI__builtin_amdgcn_lerp:
15639 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
15640 case AMDGPU::BI__builtin_amdgcn_ubfe:
15641 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
15642 case AMDGPU::BI__builtin_amdgcn_sbfe:
15643 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
15644 case AMDGPU::BI__builtin_amdgcn_uicmp:
15645 case AMDGPU::BI__builtin_amdgcn_uicmpl:
15646 case AMDGPU::BI__builtin_amdgcn_sicmp:
15647 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
15648 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15649 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15650 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15651
15652 // FIXME-GFX10: How should 32 bit mask be handled?
15653 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
15654 { Builder.getInt64Ty(), Src0->getType() });
15655 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15656 }
15657 case AMDGPU::BI__builtin_amdgcn_fcmp:
15658 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
15659 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15660 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15661 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15662
15663 // FIXME-GFX10: How should 32 bit mask be handled?
15664 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
15665 { Builder.getInt64Ty(), Src0->getType() });
15666 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15667 }
15668 case AMDGPU::BI__builtin_amdgcn_class:
15669 case AMDGPU::BI__builtin_amdgcn_classf:
15670 case AMDGPU::BI__builtin_amdgcn_classh:
15671 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
15672 case AMDGPU::BI__builtin_amdgcn_fmed3f:
15673 case AMDGPU::BI__builtin_amdgcn_fmed3h:
15674 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
15675 case AMDGPU::BI__builtin_amdgcn_ds_append:
15676 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
15677 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
15678 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
15679 Value *Src0 = EmitScalarExpr(E->getArg(0));
15680 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
15681 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
15682 }
15683 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
15684 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
15685 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
15686 Intrinsic::ID Intrin;
15687 switch (BuiltinID) {
15688 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
15689 Intrin = Intrinsic::amdgcn_ds_fadd;
15690 break;
15691 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
15692 Intrin = Intrinsic::amdgcn_ds_fmin;
15693 break;
15694 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
15695 Intrin = Intrinsic::amdgcn_ds_fmax;
15696 break;
15697 }
15698 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15699 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15700 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15701 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
15702 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
15703 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
15704 llvm::FunctionType *FTy = F->getFunctionType();
15705 llvm::Type *PTy = FTy->getParamType(0);
15706 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
15707 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
15708 }
15709 case AMDGPU::BI__builtin_amdgcn_read_exec: {
15710 CallInst *CI = cast<CallInst>(
15711 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
15712 CI->setConvergent();
15713 return CI;
15714 }
15715 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
15716 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
15717 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
15718 "exec_lo" : "exec_hi";
15719 CallInst *CI = cast<CallInst>(
15720 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
15721 CI->setConvergent();
15722 return CI;
15723 }
15724 // amdgcn workitem
15725 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
15726 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
15727 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
15728 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
15729 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
15730 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
15731
15732 // amdgcn workgroup size
15733 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
15734 return EmitAMDGPUWorkGroupSize(*this, 0);
15735 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
15736 return EmitAMDGPUWorkGroupSize(*this, 1);
15737 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
15738 return EmitAMDGPUWorkGroupSize(*this, 2);
15739
15740 // amdgcn grid size
15741 case AMDGPU::BI__builtin_amdgcn_grid_size_x:
15742 return EmitAMDGPUGridSize(*this, 0);
15743 case AMDGPU::BI__builtin_amdgcn_grid_size_y:
15744 return EmitAMDGPUGridSize(*this, 1);
15745 case AMDGPU::BI__builtin_amdgcn_grid_size_z:
15746 return EmitAMDGPUGridSize(*this, 2);
15747
15748 // r600 intrinsics
15749 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
15750 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
15751 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
15752 case AMDGPU::BI__builtin_r600_read_tidig_x:
15753 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
15754 case AMDGPU::BI__builtin_r600_read_tidig_y:
15755 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
15756 case AMDGPU::BI__builtin_r600_read_tidig_z:
15757 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
15758 case AMDGPU::BI__builtin_amdgcn_alignbit: {
15759 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15760 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15761 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15762 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
15763 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15764 }
15765
15766 case AMDGPU::BI__builtin_amdgcn_fence: {
15767 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
15768 EmitScalarExpr(E->getArg(1)), AO, SSID))
15769 return Builder.CreateFence(AO, SSID);
15770 LLVM_FALLTHROUGH;
15771 }
15772 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15773 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15774 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15775 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
15776 unsigned BuiltinAtomicOp;
15777 llvm::Type *ResultType = ConvertType(E->getType());
15778
15779 switch (BuiltinID) {
15780 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15781 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15782 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
15783 break;
15784 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15785 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
15786 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
15787 break;
15788 }
15789
15790 Value *Ptr = EmitScalarExpr(E->getArg(0));
15791 Value *Val = EmitScalarExpr(E->getArg(1));
15792
15793 llvm::Function *F =
15794 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
15795
15796 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
15797 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
15798
15799 // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
15800 // scope as unsigned values
15801 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
15802 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
15803
15804 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
15805 bool Volatile =
15806 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
15807 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
15808
15809 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
15810 }
15811 LLVM_FALLTHROUGH;
15812 }
15813 default:
15814 return nullptr;
15815 }
15816}
15817
15818/// Handle a SystemZ function in which the final argument is a pointer
15819/// to an int that receives the post-instruction CC value. At the LLVM level
15820/// this is represented as a function that returns a {result, cc} pair.
15821static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
15822 unsigned IntrinsicID,
15823 const CallExpr *E) {
15824 unsigned NumArgs = E->getNumArgs() - 1;
15825 SmallVector<Value *, 8> Args(NumArgs);
15826 for (unsigned I = 0; I < NumArgs; ++I)
15827 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
15828 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
15829 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
15830 Value *Call = CGF.Builder.CreateCall(F, Args);
15831 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
15832 CGF.Builder.CreateStore(CC, CCPtr);
15833 return CGF.Builder.CreateExtractValue(Call, 0);
15834}
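// Editorial illustration (not part of the original source): for a CC-producing
// builtin such as __builtin_s390_vceqbs(a, b, &cc), dispatched through
// INTRINSIC_WITH_CC below, the helper above emits roughly
//   %pair = call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %a, <16 x i8> %b)
//   %cc = extractvalue { <16 x i8>, i32 } %pair, 1   ; stored to *cc
// and returns extractvalue of element 0 as the builtin's result value.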
15835
15836Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
15837 const CallExpr *E) {
15838 switch (BuiltinID) {
15839 case SystemZ::BI__builtin_tbegin: {
15840 Value *TDB = EmitScalarExpr(E->getArg(0));
15841 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15842 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
15843 return Builder.CreateCall(F, {TDB, Control});
15844 }
15845 case SystemZ::BI__builtin_tbegin_nofloat: {
15846 Value *TDB = EmitScalarExpr(E->getArg(0));
15847 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15848 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
15849 return Builder.CreateCall(F, {TDB, Control});
15850 }
15851 case SystemZ::BI__builtin_tbeginc: {
15852 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
15853 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
15854 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
15855 return Builder.CreateCall(F, {TDB, Control});
15856 }
15857 case SystemZ::BI__builtin_tabort: {
15858 Value *Data = EmitScalarExpr(E->getArg(0));
15859 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
15860 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
15861 }
15862 case SystemZ::BI__builtin_non_tx_store: {
15863 Value *Address = EmitScalarExpr(E->getArg(0));
15864 Value *Data = EmitScalarExpr(E->getArg(1));
15865 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
15866 return Builder.CreateCall(F, {Data, Address});
15867 }
15868
15869 // Vector builtins. Note that most vector builtins are mapped automatically
15870 // to target-specific LLVM intrinsics. The ones handled specially here can
15871 // be represented via standard LLVM IR, which is preferable to enable common
15872 // LLVM optimizations.
15873
15874 case SystemZ::BI__builtin_s390_vpopctb:
15875 case SystemZ::BI__builtin_s390_vpopcth:
15876 case SystemZ::BI__builtin_s390_vpopctf:
15877 case SystemZ::BI__builtin_s390_vpopctg: {
15878 llvm::Type *ResultType = ConvertType(E->getType());
15879 Value *X = EmitScalarExpr(E->getArg(0));
15880 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15881 return Builder.CreateCall(F, X);
15882 }
15883
15884 case SystemZ::BI__builtin_s390_vclzb:
15885 case SystemZ::BI__builtin_s390_vclzh:
15886 case SystemZ::BI__builtin_s390_vclzf:
15887 case SystemZ::BI__builtin_s390_vclzg: {
15888 llvm::Type *ResultType = ConvertType(E->getType());
15889 Value *X = EmitScalarExpr(E->getArg(0));
15890 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15891 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
15892 return Builder.CreateCall(F, {X, Undef});
15893 }
15894
15895 case SystemZ::BI__builtin_s390_vctzb:
15896 case SystemZ::BI__builtin_s390_vctzh:
15897 case SystemZ::BI__builtin_s390_vctzf:
15898 case SystemZ::BI__builtin_s390_vctzg: {
15899 llvm::Type *ResultType = ConvertType(E->getType());
15900 Value *X = EmitScalarExpr(E->getArg(0));
15901 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15902 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15903 return Builder.CreateCall(F, {X, Undef});
15904 }
15905
15906 case SystemZ::BI__builtin_s390_vfsqsb:
15907 case SystemZ::BI__builtin_s390_vfsqdb: {
15908 llvm::Type *ResultType = ConvertType(E->getType());
15909 Value *X = EmitScalarExpr(E->getArg(0));
15910 if (Builder.getIsFPConstrained()) {
15911 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
15912 return Builder.CreateConstrainedFPCall(F, { X });
15913 } else {
15914 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15915 return Builder.CreateCall(F, X);
15916 }
15917 }
15918 case SystemZ::BI__builtin_s390_vfmasb:
15919 case SystemZ::BI__builtin_s390_vfmadb: {
15920 llvm::Type *ResultType = ConvertType(E->getType());
15921 Value *X = EmitScalarExpr(E->getArg(0));
15922 Value *Y = EmitScalarExpr(E->getArg(1));
15923 Value *Z = EmitScalarExpr(E->getArg(2));
15924 if (Builder.getIsFPConstrained()) {
15925 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15926 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15927 } else {
15928 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15929 return Builder.CreateCall(F, {X, Y, Z});
15930 }
15931 }
15932 case SystemZ::BI__builtin_s390_vfmssb:
15933 case SystemZ::BI__builtin_s390_vfmsdb: {
15934 llvm::Type *ResultType = ConvertType(E->getType());
15935 Value *X = EmitScalarExpr(E->getArg(0));
15936 Value *Y = EmitScalarExpr(E->getArg(1));
15937 Value *Z = EmitScalarExpr(E->getArg(2));
15938 if (Builder.getIsFPConstrained()) {
15939 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15940 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15941 } else {
15942 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15943 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15944 }
15945 }
15946 case SystemZ::BI__builtin_s390_vfnmasb:
15947 case SystemZ::BI__builtin_s390_vfnmadb: {
15948 llvm::Type *ResultType = ConvertType(E->getType());
15949 Value *X = EmitScalarExpr(E->getArg(0));
15950 Value *Y = EmitScalarExpr(E->getArg(1));
15951 Value *Z = EmitScalarExpr(E->getArg(2));
15952 if (Builder.getIsFPConstrained()) {
15953 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15954 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15955 } else {
15956 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15957 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15958 }
15959 }
15960 case SystemZ::BI__builtin_s390_vfnmssb:
15961 case SystemZ::BI__builtin_s390_vfnmsdb: {
15962 llvm::Type *ResultType = ConvertType(E->getType());
15963 Value *X = EmitScalarExpr(E->getArg(0));
15964 Value *Y = EmitScalarExpr(E->getArg(1));
15965 Value *Z = EmitScalarExpr(E->getArg(2));
15966 if (Builder.getIsFPConstrained()) {
15967 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15968 Value *NegZ = Builder.CreateFNeg(Z, "sub");
15969 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
15970 } else {
15971 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15972 Value *NegZ = Builder.CreateFNeg(Z, "neg");
15973 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
15974 }
15975 }
15976 case SystemZ::BI__builtin_s390_vflpsb:
15977 case SystemZ::BI__builtin_s390_vflpdb: {
15978 llvm::Type *ResultType = ConvertType(E->getType());
15979 Value *X = EmitScalarExpr(E->getArg(0));
15980 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15981 return Builder.CreateCall(F, X);
15982 }
15983 case SystemZ::BI__builtin_s390_vflnsb:
15984 case SystemZ::BI__builtin_s390_vflndb: {
15985 llvm::Type *ResultType = ConvertType(E->getType());
15986 Value *X = EmitScalarExpr(E->getArg(0));
15987 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15988 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
15989 }
15990 case SystemZ::BI__builtin_s390_vfisb:
15991 case SystemZ::BI__builtin_s390_vfidb: {
15992 llvm::Type *ResultType = ConvertType(E->getType());
15993 Value *X = EmitScalarExpr(E->getArg(0));
15994 // Constant-fold the M4 and M5 mask arguments.
15995 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
15996 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
15997 // Check whether this instance can be represented via an LLVM standard
15998 // intrinsic. We only support some combinations of M4 and M5.
15999 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16000 Intrinsic::ID CI;
16001 switch (M4.getZExtValue()) {
16002 default: break;
16003 case 0: // IEEE-inexact exception allowed
16004 switch (M5.getZExtValue()) {
16005 default: break;
16006 case 0: ID = Intrinsic::rint;
16007 CI = Intrinsic::experimental_constrained_rint; break;
16008 }
16009 break;
16010 case 4: // IEEE-inexact exception suppressed
16011 switch (M5.getZExtValue()) {
16012 default: break;
16013 case 0: ID = Intrinsic::nearbyint;
16014 CI = Intrinsic::experimental_constrained_nearbyint; break;
16015 case 1: ID = Intrinsic::round;
16016 CI = Intrinsic::experimental_constrained_round; break;
16017 case 5: ID = Intrinsic::trunc;
16018 CI = Intrinsic::experimental_constrained_trunc; break;
16019 case 6: ID = Intrinsic::ceil;
16020 CI = Intrinsic::experimental_constrained_ceil; break;
16021 case 7: ID = Intrinsic::floor;
16022 CI = Intrinsic::experimental_constrained_floor; break;
16023 }
16024 break;
16025 }
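// Editorial illustration (not part of the original source): e.g. M4 == 4 with
// M5 == 6 selects Intrinsic::ceil (or experimental_constrained_ceil under
// strict FP), while an unsupported pair such as M4 == 4, M5 == 4 leaves ID at
// not_intrinsic and falls through to the target-specific s390.vfisb/s390.vfidb
// call below with the raw M4/M5 values.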
16026 if (ID != Intrinsic::not_intrinsic) {
16027 if (Builder.getIsFPConstrained()) {
16028 Function *F = CGM.getIntrinsic(CI, ResultType);
16029 return Builder.CreateConstrainedFPCall(F, X);
16030 } else {
16031 Function *F = CGM.getIntrinsic(ID, ResultType);
16032 return Builder.CreateCall(F, X);
16033 }
16034 }
16035 switch (BuiltinID) { // FIXME: constrained version?
16036 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
16037 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
16038 default: llvm_unreachable("Unknown BuiltinID");
16039 }
16040 Function *F = CGM.getIntrinsic(ID);
16041 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16042 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
16043 return Builder.CreateCall(F, {X, M4Value, M5Value});
16044 }
16045 case SystemZ::BI__builtin_s390_vfmaxsb:
16046 case SystemZ::BI__builtin_s390_vfmaxdb: {
16047 llvm::Type *ResultType = ConvertType(E->getType());
16048 Value *X = EmitScalarExpr(E->getArg(0));
16049 Value *Y = EmitScalarExpr(E->getArg(1));
16050 // Constant-fold the M4 mask argument.
16051 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
16052 // Check whether this instance can be represented via an LLVM standard
16053 // intrinsic. We only support some values of M4.
16054 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16055 Intrinsic::ID CI;
16056 switch (M4.getZExtValue()) {
16057 default: break;
16058 case 4: ID = Intrinsic::maxnum;
16059 CI = Intrinsic::experimental_constrained_maxnum; break;
16060 }
16061 if (ID != Intrinsic::not_intrinsic) {
16062 if (Builder.getIsFPConstrained()) {
16063 Function *F = CGM.getIntrinsic(CI, ResultType);
16064 return Builder.CreateConstrainedFPCall(F, {X, Y});
16065 } else {
16066 Function *F = CGM.getIntrinsic(ID, ResultType);
16067 return Builder.CreateCall(F, {X, Y});
16068 }
16069 }
16070 switch (BuiltinID) {
16071 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
16072 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
16073 default: llvm_unreachable("Unknown BuiltinID");
16074 }
16075 Function *F = CGM.getIntrinsic(ID);
16076 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16077 return Builder.CreateCall(F, {X, Y, M4Value});
16078 }
16079 case SystemZ::BI__builtin_s390_vfminsb:
16080 case SystemZ::BI__builtin_s390_vfmindb: {
16081 llvm::Type *ResultType = ConvertType(E->getType());
16082 Value *X = EmitScalarExpr(E->getArg(0));
16083 Value *Y = EmitScalarExpr(E->getArg(1));
16084 // Constant-fold the M4 mask argument.
16085 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
16086 // Check whether this instance can be represented via an LLVM standard
16087 // intrinsic. We only support some values of M4.
16088 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16089 Intrinsic::ID CI;
16090 switch (M4.getZExtValue()) {
16091 default: break;
16092 case 4: ID = Intrinsic::minnum;
16093 CI = Intrinsic::experimental_constrained_minnum; break;
16094 }
16095 if (ID != Intrinsic::not_intrinsic) {
16096 if (Builder.getIsFPConstrained()) {
16097 Function *F = CGM.getIntrinsic(CI, ResultType);
16098 return Builder.CreateConstrainedFPCall(F, {X, Y});
16099 } else {
16100 Function *F = CGM.getIntrinsic(ID, ResultType);
16101 return Builder.CreateCall(F, {X, Y});
16102 }
16103 }
16104 switch (BuiltinID) {
16105 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
16106 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
16107 default: llvm_unreachable("Unknown BuiltinID");
16108 }
16109 Function *F = CGM.getIntrinsic(ID);
16110 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16111 return Builder.CreateCall(F, {X, Y, M4Value});
16112 }
16113
16114 case SystemZ::BI__builtin_s390_vlbrh:
16115 case SystemZ::BI__builtin_s390_vlbrf:
16116 case SystemZ::BI__builtin_s390_vlbrg: {
16117 llvm::Type *ResultType = ConvertType(E->getType());
16118 Value *X = EmitScalarExpr(E->getArg(0));
16119 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
16120 return Builder.CreateCall(F, X);
16121 }
16122
16123 // Vector intrinsics that output the post-instruction CC value.
16124
16125#define INTRINSIC_WITH_CC(NAME) \
16126 case SystemZ::BI__builtin_##NAME: \
16127 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
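// Editorial illustration (not part of the original source): e.g.
// INTRINSIC_WITH_CC(s390_vpkshs); expands to
//   case SystemZ::BI__builtin_s390_vpkshs:
//     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);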
16128
16129 INTRINSIC_WITH_CC(s390_vpkshs);
16130 INTRINSIC_WITH_CC(s390_vpksfs);
16131 INTRINSIC_WITH_CC(s390_vpksgs);
16132
16133 INTRINSIC_WITH_CC(s390_vpklshs);
16134 INTRINSIC_WITH_CC(s390_vpklsfs);
16135 INTRINSIC_WITH_CC(s390_vpklsgs);
16136
16137 INTRINSIC_WITH_CC(s390_vceqbs);
16138 INTRINSIC_WITH_CC(s390_vceqhs);
16139 INTRINSIC_WITH_CC(s390_vceqfs);
16140 INTRINSIC_WITH_CC(s390_vceqgs);
16141
16142 INTRINSIC_WITH_CC(s390_vchbs);
16143 INTRINSIC_WITH_CC(s390_vchhs);
16144 INTRINSIC_WITH_CC(s390_vchfs);
16145 INTRINSIC_WITH_CC(s390_vchgs);
16146
16147 INTRINSIC_WITH_CC(s390_vchlbs);
16148 INTRINSIC_WITH_CC(s390_vchlhs);
16149 INTRINSIC_WITH_CC(s390_vchlfs);
16150 INTRINSIC_WITH_CC(s390_vchlgs);
16151
16152 INTRINSIC_WITH_CC(s390_vfaebs);
16153 INTRINSIC_WITH_CC(s390_vfaehs);
16154 INTRINSIC_WITH_CC(s390_vfaefs);
16155
16156 INTRINSIC_WITH_CC(s390_vfaezbs);
16157 INTRINSIC_WITH_CC(s390_vfaezhs);
16158 INTRINSIC_WITH_CC(s390_vfaezfs);
16159
16160 INTRINSIC_WITH_CC(s390_vfeebs);
16161 INTRINSIC_WITH_CC(s390_vfeehs);
16162 INTRINSIC_WITH_CC(s390_vfeefs);
16163
16164 INTRINSIC_WITH_CC(s390_vfeezbs);
16165 INTRINSIC_WITH_CC(s390_vfeezhs);
16166 INTRINSIC_WITH_CC(s390_vfeezfs);
16167
16168 INTRINSIC_WITH_CC(s390_vfenebs);
16169 INTRINSIC_WITH_CC(s390_vfenehs);
16170 INTRINSIC_WITH_CC(s390_vfenefs);
16171
16172 INTRINSIC_WITH_CC(s390_vfenezbs);
16173 INTRINSIC_WITH_CC(s390_vfenezhs);
16174 INTRINSIC_WITH_CC(s390_vfenezfs);
16175
16176 INTRINSIC_WITH_CC(s390_vistrbs);
16177 INTRINSIC_WITH_CC(s390_vistrhs);
16178 INTRINSIC_WITH_CC(s390_vistrfs);
16179
16180 INTRINSIC_WITH_CC(s390_vstrcbs);
16181 INTRINSIC_WITH_CC(s390_vstrchs);
16182 INTRINSIC_WITH_CC(s390_vstrcfs);
16183
16184 INTRINSIC_WITH_CC(s390_vstrczbs);
16185 INTRINSIC_WITH_CC(s390_vstrczhs);
16186 INTRINSIC_WITH_CC(s390_vstrczfs);
16187
16188 INTRINSIC_WITH_CC(s390_vfcesbs);
16189 INTRINSIC_WITH_CC(s390_vfcedbs);
16190 INTRINSIC_WITH_CC(s390_vfchsbs);
16191 INTRINSIC_WITH_CC(s390_vfchdbs);
16192 INTRINSIC_WITH_CC(s390_vfchesbs);
16193 INTRINSIC_WITH_CC(s390_vfchedbs);
16194
16195 INTRINSIC_WITH_CC(s390_vftcisb);
16196 INTRINSIC_WITH_CC(s390_vftcidb);
16197
16198 INTRINSIC_WITH_CC(s390_vstrsb);
16199 INTRINSIC_WITH_CC(s390_vstrsh);
16200 INTRINSIC_WITH_CC(s390_vstrsf);
16201
16202 INTRINSIC_WITH_CC(s390_vstrszb);
16203 INTRINSIC_WITH_CC(s390_vstrszh);
16204 INTRINSIC_WITH_CC(s390_vstrszf);
16205
16206#undef INTRINSIC_WITH_CC
16207
16208 default:
16209 return nullptr;
16210 }
16211}
16212
16213namespace {
16214// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
16215struct NVPTXMmaLdstInfo {
16216 unsigned NumResults; // Number of elements to load/store
16217 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
16218 unsigned IID_col;
16219 unsigned IID_row;
16220};
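// Editorial illustration (not part of the original source): a fragment that
// only supports one layout leaves the other slot 0. E.g. __imma_m8n8k32_ld_a_s4
// below returns {1, /*IID_col=*/0, MMA_INTR(m8n8k32_load_a_s4, row)}, so a
// column-major request resolves to IID == 0 and EmitNVPTXBuiltinExpr returns
// nullptr for that combination.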
16221
16222#define MMA_INTR(geom_op_type, layout) \
16223 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
16224#define MMA_LDST(n, geom_op_type) \
16225 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
16226
16227static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
16228 switch (BuiltinID) {
16229 // FP MMA loads
16230 case NVPTX::BI__hmma_m16n16k16_ld_a:
16231 return MMA_LDST(8, m16n16k16_load_a_f16);
16232 case NVPTX::BI__hmma_m16n16k16_ld_b:
16233 return MMA_LDST(8, m16n16k16_load_b_f16);
16234 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
16235 return MMA_LDST(4, m16n16k16_load_c_f16);
16236 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
16237 return MMA_LDST(8, m16n16k16_load_c_f32);
16238 case NVPTX::BI__hmma_m32n8k16_ld_a:
16239 return MMA_LDST(8, m32n8k16_load_a_f16);
16240 case NVPTX::BI__hmma_m32n8k16_ld_b:
16241 return MMA_LDST(8, m32n8k16_load_b_f16);
16242 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
16243 return MMA_LDST(4, m32n8k16_load_c_f16);
16244 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
16245 return MMA_LDST(8, m32n8k16_load_c_f32);
16246 case NVPTX::BI__hmma_m8n32k16_ld_a:
16247 return MMA_LDST(8, m8n32k16_load_a_f16);
16248 case NVPTX::BI__hmma_m8n32k16_ld_b:
16249 return MMA_LDST(8, m8n32k16_load_b_f16);
16250 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
16251 return MMA_LDST(4, m8n32k16_load_c_f16);
16252 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
16253 return MMA_LDST(8, m8n32k16_load_c_f32);
16254
16255 // Integer MMA loads
16256 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
16257 return MMA_LDST(2, m16n16k16_load_a_s8);
16258 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
16259 return MMA_LDST(2, m16n16k16_load_a_u8);
16260 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
16261 return MMA_LDST(2, m16n16k16_load_b_s8);
16262 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
16263 return MMA_LDST(2, m16n16k16_load_b_u8);
16264 case NVPTX::BI__imma_m16n16k16_ld_c:
16265 return MMA_LDST(8, m16n16k16_load_c_s32);
16266 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
16267 return MMA_LDST(4, m32n8k16_load_a_s8);
16268 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
16269 return MMA_LDST(4, m32n8k16_load_a_u8);
16270 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
16271 return MMA_LDST(1, m32n8k16_load_b_s8);
16272 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
16273 return MMA_LDST(1, m32n8k16_load_b_u8);
16274 case NVPTX::BI__imma_m32n8k16_ld_c:
16275 return MMA_LDST(8, m32n8k16_load_c_s32);
16276 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
16277 return MMA_LDST(1, m8n32k16_load_a_s8);
16278 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
16279 return MMA_LDST(1, m8n32k16_load_a_u8);
16280 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
16281 return MMA_LDST(4, m8n32k16_load_b_s8);
16282 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
16283 return MMA_LDST(4, m8n32k16_load_b_u8);
16284 case NVPTX::BI__imma_m8n32k16_ld_c:
16285 return MMA_LDST(8, m8n32k16_load_c_s32);
16286
16287 // Sub-integer MMA loads.
16288 // Only row/col layout is supported by A/B fragments.
16289 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
16290 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
16291 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
16292 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
16293 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
16294 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
16295 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
16296 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
16297 case NVPTX::BI__imma_m8n8k32_ld_c:
16298 return MMA_LDST(2, m8n8k32_load_c_s32);
16299 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
16300 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
16301 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
16302 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
16303 case NVPTX::BI__bmma_m8n8k128_ld_c:
16304 return MMA_LDST(2, m8n8k128_load_c_s32);
16305
16306 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
16307 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
16308 // use fragment C for both loads and stores.
16309 // FP MMA stores.
16310 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
16311 return MMA_LDST(4, m16n16k16_store_d_f16);
16312 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
16313 return MMA_LDST(8, m16n16k16_store_d_f32);
16314 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
16315 return MMA_LDST(4, m32n8k16_store_d_f16);
16316 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
16317 return MMA_LDST(8, m32n8k16_store_d_f32);
16318 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
16319 return MMA_LDST(4, m8n32k16_store_d_f16);
16320 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
16321 return MMA_LDST(8, m8n32k16_store_d_f32);
16322
16323 // Integer and sub-integer MMA stores.
16324 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
16325 // name, integer loads/stores use LLVM's i32.
16326 case NVPTX::BI__imma_m16n16k16_st_c_i32:
16327 return MMA_LDST(8, m16n16k16_store_d_s32);
16328 case NVPTX::BI__imma_m32n8k16_st_c_i32:
16329 return MMA_LDST(8, m32n8k16_store_d_s32);
16330 case NVPTX::BI__imma_m8n32k16_st_c_i32:
16331 return MMA_LDST(8, m8n32k16_store_d_s32);
16332 case NVPTX::BI__imma_m8n8k32_st_c_i32:
16333 return MMA_LDST(2, m8n8k32_store_d_s32);
16334 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
16335 return MMA_LDST(2, m8n8k128_store_d_s32);
16336
16337 default:
16338 llvm_unreachable("Unknown MMA builtin");
16339 }
16340}
16341#undef MMA_LDST
16342#undef MMA_INTR
16343
16344
16345struct NVPTXMmaInfo {
16346 unsigned NumEltsA;
16347 unsigned NumEltsB;
16348 unsigned NumEltsC;
16349 unsigned NumEltsD;
16350 std::array<unsigned, 8> Variants;
16351
16352 unsigned getMMAIntrinsic(int Layout, bool Satf) {
16353 unsigned Index = Layout * 2 + Satf;
16354 if (Index >= Variants.size())
16355 return 0;
16356 return Variants[Index];
16357 }
16358};
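// Editorial illustration (not part of the original source): Variants is indexed
// as Layout * 2 + Satf, with layouts ordered row_row, row_col, col_row, col_col
// to match MMA_VARIANTS below. E.g. Layout == 1 (row.col) with Satf set yields
// Index == 3, the row_col ..._satfinite entry; for MMA_VARIANTS_I4 any layout
// other than row.col maps to a 0 slot, so getMMAIntrinsic returns 0 and the
// caller bails out with nullptr.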
16359
16360 // Returns an intrinsic that matches Layout and Satf for valid combinations of
16361 // Layout and Satf, 0 otherwise.
16362static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
16363 // clang-format off
16364#define MMA_VARIANTS(geom, type) {{ \
16365 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
16366 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
16367 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
16368 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
16369 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
16370 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
16371 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
16372 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
16373 }}
16374// Sub-integer MMA only supports row.col layout.
16375#define MMA_VARIANTS_I4(geom, type) {{ \
16376 0, \
16377 0, \
16378 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
16379 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
16380 0, \
16381 0, \
16382 0, \
16383 0 \
16384 }}
16385// b1 MMA does not support .satfinite.
16386#define MMA_VARIANTS_B1(geom, type) {{ \
16387 0, \
16388 0, \
16389 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
16390 0, \
16391 0, \
16392 0, \
16393 0, \
16394 0 \
16395 }}
16396 // clang-format on
16397 switch (BuiltinID) {
16398 // FP MMA
16399 // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
16400 // NumEltsN of return value are ordered as A,B,C,D.
16401 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
16402 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
16403 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
16404 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
16405 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
16406 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
16407 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
16408 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
16409 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
16410 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
16411 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
16412 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
16413 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
16414 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
16415 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
16416 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
16417 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
16418 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
16419 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
16420 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
16421 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
16422 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
16423 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
16424 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
16425
16426 // Integer MMA
16427 case NVPTX::BI__imma_m16n16k16_mma_s8:
16428 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
16429 case NVPTX::BI__imma_m16n16k16_mma_u8:
16430 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
16431 case NVPTX::BI__imma_m32n8k16_mma_s8:
16432 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
16433 case NVPTX::BI__imma_m32n8k16_mma_u8:
16434 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
16435 case NVPTX::BI__imma_m8n32k16_mma_s8:
16436 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
16437 case NVPTX::BI__imma_m8n32k16_mma_u8:
16438 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
16439
16440 // Sub-integer MMA
16441 case NVPTX::BI__imma_m8n8k32_mma_s4:
16442 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
16443 case NVPTX::BI__imma_m8n8k32_mma_u4:
16444 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
16445 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
16446 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
16447 default:
16448 llvm_unreachable("Unexpected builtin ID.");
16449 }
16450#undef MMA_VARIANTS
16451#undef MMA_VARIANTS_I4
16452#undef MMA_VARIANTS_B1
16453}
16454
16455} // namespace
16456
16457Value *
16458CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
16459 auto MakeLdg = [&](unsigned IntrinsicID) {
16460 Value *Ptr = EmitScalarExpr(E->getArg(0));
16461 clang::CharUnits Align =
16462 CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
16463 return Builder.CreateCall(
16464 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
16465 Ptr->getType()}),
16466 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
16467 };
16468 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
16469 Value *Ptr = EmitScalarExpr(E->getArg(0));
16470 return Builder.CreateCall(
16471 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
16472 Ptr->getType()}),
16473 {Ptr, EmitScalarExpr(E->getArg(1))});
16474 };
16475 switch (BuiltinID) {
16476 case NVPTX::BI__nvvm_atom_add_gen_i:
16477 case NVPTX::BI__nvvm_atom_add_gen_l:
16478 case NVPTX::BI__nvvm_atom_add_gen_ll:
16479 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
16480
16481 case NVPTX::BI__nvvm_atom_sub_gen_i:
16482 case NVPTX::BI__nvvm_atom_sub_gen_l:
16483 case NVPTX::BI__nvvm_atom_sub_gen_ll:
16484 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
16485
16486 case NVPTX::BI__nvvm_atom_and_gen_i:
16487 case NVPTX::BI__nvvm_atom_and_gen_l:
16488 case NVPTX::BI__nvvm_atom_and_gen_ll:
16489 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
16490
16491 case NVPTX::BI__nvvm_atom_or_gen_i:
16492 case NVPTX::BI__nvvm_atom_or_gen_l:
16493 case NVPTX::BI__nvvm_atom_or_gen_ll:
16494 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
16495
16496 case NVPTX::BI__nvvm_atom_xor_gen_i:
16497 case NVPTX::BI__nvvm_atom_xor_gen_l:
16498 case NVPTX::BI__nvvm_atom_xor_gen_ll:
16499 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
16500
16501 case NVPTX::BI__nvvm_atom_xchg_gen_i:
16502 case NVPTX::BI__nvvm_atom_xchg_gen_l:
16503 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
16504 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
16505
16506 case NVPTX::BI__nvvm_atom_max_gen_i:
16507 case NVPTX::BI__nvvm_atom_max_gen_l:
16508 case NVPTX::BI__nvvm_atom_max_gen_ll:
16509 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
16510
16511 case NVPTX::BI__nvvm_atom_max_gen_ui:
16512 case NVPTX::BI__nvvm_atom_max_gen_ul:
16513 case NVPTX::BI__nvvm_atom_max_gen_ull:
16514 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
16515
16516 case NVPTX::BI__nvvm_atom_min_gen_i:
16517 case NVPTX::BI__nvvm_atom_min_gen_l:
16518 case NVPTX::BI__nvvm_atom_min_gen_ll:
16519 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
16520
16521 case NVPTX::BI__nvvm_atom_min_gen_ui:
16522 case NVPTX::BI__nvvm_atom_min_gen_ul:
16523 case NVPTX::BI__nvvm_atom_min_gen_ull:
16524 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
16525
16526 case NVPTX::BI__nvvm_atom_cas_gen_i:
16527 case NVPTX::BI__nvvm_atom_cas_gen_l:
16528 case NVPTX::BI__nvvm_atom_cas_gen_ll:
16529 // __nvvm_atom_cas_gen_* should return the old value rather than the
16530 // success flag.
16531 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
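// Editorial note (not part of the original source): an LLVM cmpxchg yields a
// {old value, i1 success} pair; passing ReturnBool == false makes the helper
// hand back element 0 (the old value) rather than the success flag, as the
// comment above requires.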
16532
16533 case NVPTX::BI__nvvm_atom_add_gen_f:
16534 case NVPTX::BI__nvvm_atom_add_gen_d: {
16535 Value *Ptr = EmitScalarExpr(E->getArg(0));
16536 Value *Val = EmitScalarExpr(E->getArg(1));
16537 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
16538 AtomicOrdering::SequentiallyConsistent);
16539 }
16540
16541 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
16542 Value *Ptr = EmitScalarExpr(E->getArg(0));
16543 Value *Val = EmitScalarExpr(E->getArg(1));
16544 Function *FnALI32 =
16545 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
16546 return Builder.CreateCall(FnALI32, {Ptr, Val});
16547 }
16548
16549 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
16550 Value *Ptr = EmitScalarExpr(E->getArg(0));
16551 Value *Val = EmitScalarExpr(E->getArg(1));
16552 Function *FnALD32 =
16553 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
16554 return Builder.CreateCall(FnALD32, {Ptr, Val});
16555 }
16556
16557 case NVPTX::BI__nvvm_ldg_c:
16558 case NVPTX::BI__nvvm_ldg_c2:
16559 case NVPTX::BI__nvvm_ldg_c4:
16560 case NVPTX::BI__nvvm_ldg_s:
16561 case NVPTX::BI__nvvm_ldg_s2:
16562 case NVPTX::BI__nvvm_ldg_s4:
16563 case NVPTX::BI__nvvm_ldg_i:
16564 case NVPTX::BI__nvvm_ldg_i2:
16565 case NVPTX::BI__nvvm_ldg_i4:
16566 case NVPTX::BI__nvvm_ldg_l:
16567 case NVPTX::BI__nvvm_ldg_ll:
16568 case NVPTX::BI__nvvm_ldg_ll2:
16569 case NVPTX::BI__nvvm_ldg_uc:
16570 case NVPTX::BI__nvvm_ldg_uc2:
16571 case NVPTX::BI__nvvm_ldg_uc4:
16572 case NVPTX::BI__nvvm_ldg_us:
16573 case NVPTX::BI__nvvm_ldg_us2:
16574 case NVPTX::BI__nvvm_ldg_us4:
16575 case NVPTX::BI__nvvm_ldg_ui:
16576 case NVPTX::BI__nvvm_ldg_ui2:
16577 case NVPTX::BI__nvvm_ldg_ui4:
16578 case NVPTX::BI__nvvm_ldg_ul:
16579 case NVPTX::BI__nvvm_ldg_ull:
16580 case NVPTX::BI__nvvm_ldg_ull2:
16581 // PTX Interoperability section 2.2: "For a vector with an even number of
16582 // elements, its alignment is set to number of elements times the alignment
16583 // of its member: n*alignof(t)."
16584 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
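// Editorial illustration (not part of the original source): by the rule quoted
// above, a 4-element int vector loaded via __nvvm_ldg_i4 gets alignment
// 4 * alignof(int) == 16 bytes; CGM.getNaturalPointeeTypeAlignment() reflects
// this and MakeLdg passes it as the i32 alignment operand of the
// llvm.nvvm.ldg.global.i intrinsic.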
16585 case NVPTX::BI__nvvm_ldg_f:
16586 case NVPTX::BI__nvvm_ldg_f2:
16587 case NVPTX::BI__nvvm_ldg_f4:
16588 case NVPTX::BI__nvvm_ldg_d:
16589 case NVPTX::BI__nvvm_ldg_d2:
16590 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
16591
16592 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
16593 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
16594 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
16595 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
16596 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
16597 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
16598 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
16599 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
16600 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
16601 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
16602 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
16603 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
16604 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
16605 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
16606 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
16607 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
16608 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
16609 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
16610 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
16611 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
16612 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
16613 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
16614 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
16615 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
16616 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
16617 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
16618 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
16619 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
16620 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
16621 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
16622 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
16623 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
16624 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
16625 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
16626 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
16627 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
16628 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
16629 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
16630 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
16631 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
16632 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
16633 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
16634 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
16635 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
16636 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
16637 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
16638 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
16639 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
16640 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
16641 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
16642 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
16643 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
16644 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
16645 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
16646 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
16647 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
16648 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
16649 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
16650 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
16651 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
16652 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
16653 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
16654 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
16655 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
16656 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
16657 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
16658 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
16659 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
16660 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
16661 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
16662 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
16663 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
16664 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
16665 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
16666 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
16667 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
16668 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
16669 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
16670 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
16671 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
16672 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
16673 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
16674 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
16675 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
16676 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
16677 Value *Ptr = EmitScalarExpr(E->getArg(0));
16678 return Builder.CreateCall(
16679 CGM.getIntrinsic(
16680 Intrinsic::nvvm_atomic_cas_gen_i_cta,
16681 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
16682 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
16683 }
16684 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
16685 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
16686 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
16687 Value *Ptr = EmitScalarExpr(E->getArg(0));
16688 return Builder.CreateCall(
16689 CGM.getIntrinsic(
16690 Intrinsic::nvvm_atomic_cas_gen_i_sys,
16691 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
16692 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
16693 }
16694 case NVPTX::BI__nvvm_match_all_sync_i32p:
16695 case NVPTX::BI__nvvm_match_all_sync_i64p: {
16696 Value *Mask = EmitScalarExpr(E->getArg(0));
16697 Value *Val = EmitScalarExpr(E->getArg(1));
16698 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
16699 Value *ResultPair = Builder.CreateCall(
16700 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
16701 ? Intrinsic::nvvm_match_all_sync_i32p
16702 : Intrinsic::nvvm_match_all_sync_i64p),
16703 {Mask, Val});
16704 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
16705 PredOutPtr.getElementType());
16706 Builder.CreateStore(Pred, PredOutPtr);
16707 return Builder.CreateExtractValue(ResultPair, 0);
16708 }
16709
16710 // FP MMA loads
16711 case NVPTX::BI__hmma_m16n16k16_ld_a:
16712 case NVPTX::BI__hmma_m16n16k16_ld_b:
16713 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
16714 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
16715 case NVPTX::BI__hmma_m32n8k16_ld_a:
16716 case NVPTX::BI__hmma_m32n8k16_ld_b:
16717 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
16718 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
16719 case NVPTX::BI__hmma_m8n32k16_ld_a:
16720 case NVPTX::BI__hmma_m8n32k16_ld_b:
16721 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
16722 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
16723 // Integer MMA loads.
16724 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
16725 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
16726 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
16727 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
16728 case NVPTX::BI__imma_m16n16k16_ld_c:
16729 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
16730 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
16731 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
16732 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
16733 case NVPTX::BI__imma_m32n8k16_ld_c:
16734 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
16735 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
16736 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
16737 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
16738 case NVPTX::BI__imma_m8n32k16_ld_c:
16739 // Sub-integer MMA loads.
16740 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
16741 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
16742 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
16743 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
16744 case NVPTX::BI__imma_m8n8k32_ld_c:
16745 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
16746 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
16747 case NVPTX::BI__bmma_m8n8k128_ld_c:
16748 {
16749 Address Dst = EmitPointerWithAlignment(E->getArg(0));
16750 Value *Src = EmitScalarExpr(E->getArg(1));
16751 Value *Ldm = EmitScalarExpr(E->getArg(2));
16752 Optional<llvm::APSInt> isColMajorArg =
16753 E->getArg(3)->getIntegerConstantExpr(getContext());
16754 if (!isColMajorArg)
16755 return nullptr;
16756 bool isColMajor = isColMajorArg->getSExtValue();
16757 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
16758 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
16759 if (IID == 0)
16760 return nullptr;
16761
16762 Value *Result =
16763 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
16764
16765 // Save returned values.
16766 assert(II.NumResults);
16767 if (II.NumResults == 1) {
16768 Builder.CreateAlignedStore(Result, Dst.getPointer(),
16769 CharUnits::fromQuantity(4));
16770 } else {
16771 for (unsigned i = 0; i < II.NumResults; ++i) {
16772 Builder.CreateAlignedStore(
16773 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
16774 Dst.getElementType()),
16775 Builder.CreateGEP(Dst.getPointer(),
16776 llvm::ConstantInt::get(IntTy, i)),
16777 CharUnits::fromQuantity(4));
16778 }
16779 }
16780 return Result;
16781 }
16782
16783 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
16784 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
16785 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
16786 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
16787 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
16788 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
16789 case NVPTX::BI__imma_m16n16k16_st_c_i32:
16790 case NVPTX::BI__imma_m32n8k16_st_c_i32:
16791 case NVPTX::BI__imma_m8n32k16_st_c_i32:
16792 case NVPTX::BI__imma_m8n8k32_st_c_i32:
16793 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
16794 Value *Dst = EmitScalarExpr(E->getArg(0));
16795 Address Src = EmitPointerWithAlignment(E->getArg(1));
16796 Value *Ldm = EmitScalarExpr(E->getArg(2));
16797 Optional<llvm::APSInt> isColMajorArg =
16798 E->getArg(3)->getIntegerConstantExpr(getContext());
16799 if (!isColMajorArg)
16800 return nullptr;
16801 bool isColMajor = isColMajorArg->getSExtValue();
16802 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
16803 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
16804 if (IID == 0)
16805 return nullptr;
16806 Function *Intrinsic =
16807 CGM.getIntrinsic(IID, Dst->getType());
16808 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
16809 SmallVector<Value *, 10> Values = {Dst};
16810 for (unsigned i = 0; i < II.NumResults; ++i) {
16811 Value *V = Builder.CreateAlignedLoad(
16812 Src.getElementType(),
16813 Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
16814 llvm::ConstantInt::get(IntTy, i)),
16815 CharUnits::fromQuantity(4));
16816 Values.push_back(Builder.CreateBitCast(V, ParamType));
16817 }
16818 Values.push_back(Ldm);
16819 Value *Result = Builder.CreateCall(Intrinsic, Values);
16820 return Result;
16821 }
16822
16823 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
16824 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
16825 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
16826 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
16827 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
16828 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
16829 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
16830 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
16831 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
16832 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
16833 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
16834 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
16835 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
16836 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
16837 case NVPTX::BI__imma_m16n16k16_mma_s8:
16838 case NVPTX::BI__imma_m16n16k16_mma_u8:
16839 case NVPTX::BI__imma_m32n8k16_mma_s8:
16840 case NVPTX::BI__imma_m32n8k16_mma_u8:
16841 case NVPTX::BI__imma_m8n32k16_mma_s8:
16842 case NVPTX::BI__imma_m8n32k16_mma_u8:
16843 case NVPTX::BI__imma_m8n8k32_mma_s4:
16844 case NVPTX::BI__imma_m8n8k32_mma_u4:
16845 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
16846 Address Dst = EmitPointerWithAlignment(E->getArg(0));
16847 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
16848 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
16849 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
16850 Optional<llvm::APSInt> LayoutArg =
16851 E->getArg(4)->getIntegerConstantExpr(getContext());
16852 if (!LayoutArg)
16853 return nullptr;
16854 int Layout = LayoutArg->getSExtValue();
16855 if (Layout < 0 || Layout > 3)
16856 return nullptr;
16857 llvm::APSInt SatfArg;
16858 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
16859 SatfArg = 0; // .b1 does not have satf argument.
16860 else if (Optional<llvm::APSInt> OptSatfArg =
16861 E->getArg(5)->getIntegerConstantExpr(getContext()))
16862 SatfArg = *OptSatfArg;
16863 else
16864 return nullptr;
16865 bool Satf = SatfArg.getSExtValue();
16866 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
16867 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
16868 if (IID == 0) // Unsupported combination of Layout/Satf.
16869 return nullptr;
16870
16871 SmallVector<Value *, 24> Values;
16872 Function *Intrinsic = CGM.getIntrinsic(IID);
16873 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
16874 // Load A
16875 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
16876 Value *V = Builder.CreateAlignedLoad(
16877 SrcA.getElementType(),
16878 Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
16879 llvm::ConstantInt::get(IntTy, i)),
16880 CharUnits::fromQuantity(4));
16881 Values.push_back(Builder.CreateBitCast(V, AType));
16882 }
16883 // Load B
16884 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
16885 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
16886 Value *V = Builder.CreateAlignedLoad(
16887 SrcB.getElementType(),
16888 Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
16889 llvm::ConstantInt::get(IntTy, i)),
16890 CharUnits::fromQuantity(4));
16891 Values.push_back(Builder.CreateBitCast(V, BType));
16892 }
16893 // Load C
16894 llvm::Type *CType =
16895 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
16896 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
16897 Value *V = Builder.CreateAlignedLoad(
16898 SrcC.getElementType(),
16899 Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
16900 llvm::ConstantInt::get(IntTy, i)),
16901 CharUnits::fromQuantity(4));
16902 Values.push_back(Builder.CreateBitCast(V, CType));
16903 }
16904 Value *Result = Builder.CreateCall(Intrinsic, Values);
16905 llvm::Type *DType = Dst.getElementType();
16906 for (unsigned i = 0; i < MI.NumEltsD; ++i)
16907 Builder.CreateAlignedStore(
16908 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
16909 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
16910 CharUnits::fromQuantity(4));
16911 return Result;
16912 }
16913 default:
16914 return nullptr;
16915 }
16916}
16917
16918namespace {
16919struct BuiltinAlignArgs {
16920 llvm::Value *Src = nullptr;
16921 llvm::Type *SrcType = nullptr;
16922 llvm::Value *Alignment = nullptr;
16923 llvm::Value *Mask = nullptr;
16924 llvm::IntegerType *IntType = nullptr;
16925
16926 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
16927 QualType AstType = E->getArg(0)->getType();
16928 if (AstType->isArrayType())
16929 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
16930 else
16931 Src = CGF.EmitScalarExpr(E->getArg(0));
16932 SrcType = Src->getType();
16933 if (SrcType->isPointerTy()) {
16934 IntType = IntegerType::get(
16935 CGF.getLLVMContext(),
16936 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
16937 } else {
16938 assert(SrcType->isIntegerTy());
16939 IntType = cast<llvm::IntegerType>(SrcType);
16940 }
16941 Alignment = CGF.EmitScalarExpr(E->getArg(1));
16942 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
16943 auto *One = llvm::ConstantInt::get(IntType, 1);
16944 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
16945 }
16946};
16947} // namespace
16948
16949/// Generate (x & (y-1)) == 0.
16950RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
16951 BuiltinAlignArgs Args(E, *this);
16952 llvm::Value *SrcAddress = Args.Src;
16953 if (Args.SrcType->isPointerTy())
16954 SrcAddress =
16955 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
16956 return RValue::get(Builder.CreateICmpEQ(
16957 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
16958 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
16959}
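// Editorial worked example (not part of the original source): with Alignment ==
// 8 the mask is 7, so __builtin_is_aligned(p, 8) tests (uintptr_t)p & 7 == 0;
// an address of 0x1008 passes, while 0x100c fails because 0x100c & 7 == 4.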
16960
16961/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
16962/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
16963 /// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
16964/// TODO: actually use ptrmask once most optimization passes know about it.
16965RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
16966 BuiltinAlignArgs Args(E, *this);
16967 llvm::Value *SrcAddr = Args.Src;
16968 if (Args.Src->getType()->isPointerTy())
16969 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
16970 llvm::Value *SrcForMask = SrcAddr;
16971 if (AlignUp) {
16972 // When aligning up we have to first add the mask to ensure we go over the
16973 // next alignment value and then align down to the next valid multiple.
16974 // By adding the mask, we ensure that align_up on an already aligned
16975 // value will not change the value.
16976 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
16977 }
16978 // Invert the mask to only clear the lower bits.
16979 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
16980 llvm::Value *Result =
16981 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
16982 if (Args.Src->getType()->isPointerTy()) {
16983 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
16984 // Result = Builder.CreateIntrinsic(
16985 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
16986 // {SrcForMask, InvertedMask}, nullptr, "aligned_result");
16987 Result->setName("aligned_intptr");
16988 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
16989 // The result must point to the same underlying allocation. This means we
16990 // can use an inbounds GEP to enable better optimization.
16991 Value *Base = EmitCastToVoidPtr(Args.Src);
16992 if (getLangOpts().isSignedOverflowDefined())
16993 Result = Builder.CreateGEP(Base, Difference, "aligned_result");
16994 else
16995 Result = EmitCheckedInBoundsGEP(Base, Difference,
16996 /*SignedIndices=*/true,
16997 /*isSubtraction=*/!AlignUp,
16998 E->getExprLoc(), "aligned_result");
16999 Result = Builder.CreatePointerCast(Result, Args.SrcType);
17000 // Emit an alignment assumption to ensure that the new alignment is
17001 // propagated to loads/stores, etc.
17002 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
17003 }
17004 assert(Result->getType() == Args.SrcType);
17005 return RValue::get(Result);
17006}
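// Editorial worked example (not part of the original source): with Alignment ==
// 16 the mask is 15. Align-down of 0x1234 is 0x1234 & ~15 == 0x1230; align-up
// first adds the mask, (0x1234 + 15) & ~15 == 0x1243 & ~15 == 0x1240, and an
// already aligned 0x1240 is unchanged since 0x1240 + 15 == 0x124f masks back to
// 0x1240. For pointer arguments the Difference (+12 / -4 in this example) is
// then applied through an inbounds GEP on the i8* base so the result provably
// stays within the same allocation.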
17007
17008Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
17009 const CallExpr *E) {
17010 switch (BuiltinID) {
17011 case WebAssembly::BI__builtin_wasm_memory_size: {
17012 llvm::Type *ResultType = ConvertType(E->getType());
17013 Value *I = EmitScalarExpr(E->getArg(0));
17014 Function *Callee =
17015 CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
17016 return Builder.CreateCall(Callee, I);
17017 }
17018 case WebAssembly::BI__builtin_wasm_memory_grow: {
17019 llvm::Type *ResultType = ConvertType(E->getType());
17020 Value *Args[] = {EmitScalarExpr(E->getArg(0)),
17021 EmitScalarExpr(E->getArg(1))};
17022 Function *Callee =
17023 CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
17024 return Builder.CreateCall(Callee, Args);
17025 }
17026 case WebAssembly::BI__builtin_wasm_tls_size: {
17027 llvm::Type *ResultType = ConvertType(E->getType());
17028 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
17029 return Builder.CreateCall(Callee);
17030 }
17031 case WebAssembly::BI__builtin_wasm_tls_align: {
17032 llvm::Type *ResultType = ConvertType(E->getType());
17033 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
17034 return Builder.CreateCall(Callee);
17035 }
17036 case WebAssembly::BI__builtin_wasm_tls_base: {
17037 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
17038 return Builder.CreateCall(Callee);
17039 }
17040 case WebAssembly::BI__builtin_wasm_throw: {
17041 Value *Tag = EmitScalarExpr(E->getArg(0));
17042 Value *Obj = EmitScalarExpr(E->getArg(1));
17043 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
17044 return Builder.CreateCall(Callee, {Tag, Obj});
17045 }
17046 case WebAssembly::BI__builtin_wasm_rethrow: {
17047 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
17048 return Builder.CreateCall(Callee);
17049 }
17050 case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
17051 Value *Addr = EmitScalarExpr(E->getArg(0));
17052 Value *Expected = EmitScalarExpr(E->getArg(1));
17053 Value *Timeout = EmitScalarExpr(E->getArg(2));
17054 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
17055 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
17056 }
17057 case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
17058 Value *Addr = EmitScalarExpr(E->getArg(0));
17059 Value *Expected = EmitScalarExpr(E->getArg(1));
17060 Value *Timeout = EmitScalarExpr(E->getArg(2));
17061 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
17062 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
17063 }
17064 case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
17065 Value *Addr = EmitScalarExpr(E->getArg(0));
17066 Value *Count = EmitScalarExpr(E->getArg(1));
17067 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
17068 return Builder.CreateCall(Callee, {Addr, Count});
17069 }
17070 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
17071 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
17072 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
17073 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
17074 Value *Src = EmitScalarExpr(E->getArg(0));
17075 llvm::Type *ResT = ConvertType(E->getType());
17076 Function *Callee =
17077 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
17078 return Builder.CreateCall(Callee, {Src});
17079 }
17080 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
17081 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
17082 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
17083 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
17084 Value *Src = EmitScalarExpr(E->getArg(0));
17085 llvm::Type *ResT = ConvertType(E->getType());
17086 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
17087 {ResT, Src->getType()});
17088 return Builder.CreateCall(Callee, {Src});
17089 }
17090 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
17091 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
17092 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
17093 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
17094 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
17095 Value *Src = EmitScalarExpr(E->getArg(0));
17096 llvm::Type *ResT = ConvertType(E->getType());
17097 Function *Callee =
17098 CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
17099 return Builder.CreateCall(Callee, {Src});
17100 }
17101 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
17102 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
17103 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
17104 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
17105 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
17106 Value *Src = EmitScalarExpr(E->getArg(0));
17107 llvm::Type *ResT = ConvertType(E->getType());
17108 Function *Callee =
17109 CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
17110 return Builder.CreateCall(Callee, {Src});
17111 }
17112 case WebAssembly::BI__builtin_wasm_min_f32:
17113 case WebAssembly::BI__builtin_wasm_min_f64:
17114 case WebAssembly::BI__builtin_wasm_min_f32x4:
17115 case WebAssembly::BI__builtin_wasm_min_f64x2: {
17116 Value *LHS = EmitScalarExpr(E->getArg(0));
17117 Value *RHS = EmitScalarExpr(E->getArg(1));
17118 Function *Callee =
17119 CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
17120 return Builder.CreateCall(Callee, {LHS, RHS});
17121 }
17122 case WebAssembly::BI__builtin_wasm_max_f32:
17123 case WebAssembly::BI__builtin_wasm_max_f64:
17124 case WebAssembly::BI__builtin_wasm_max_f32x4:
17125 case WebAssembly::BI__builtin_wasm_max_f64x2: {
17126 Value *LHS = EmitScalarExpr(E->getArg(0));
17127 Value *RHS = EmitScalarExpr(E->getArg(1));
17128 Function *Callee =
17129 CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
17130 return Builder.CreateCall(Callee, {LHS, RHS});
17131 }
17132 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
17133 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
17134 Value *LHS = EmitScalarExpr(E->getArg(0));
17135 Value *RHS = EmitScalarExpr(E->getArg(1));
17136 Function *Callee =
17137 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
17138 return Builder.CreateCall(Callee, {LHS, RHS});
17139 }
17140 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
17141 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
17142 Value *LHS = EmitScalarExpr(E->getArg(0));
17143 Value *RHS = EmitScalarExpr(E->getArg(1));
17144 Function *Callee =
17145 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
17146 return Builder.CreateCall(Callee, {LHS, RHS});
17147 }
17148 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
17149 case WebAssembly::BI__builtin_wasm_floor_f32x4:
17150 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
17151 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
17152 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
17153 case WebAssembly::BI__builtin_wasm_floor_f64x2:
17154 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
17155 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
17156 unsigned IntNo;
17157 switch (BuiltinID) {
17158 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
17159 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
17160 IntNo = Intrinsic::ceil;
17161 break;
17162 case WebAssembly::BI__builtin_wasm_floor_f32x4:
17163 case WebAssembly::BI__builtin_wasm_floor_f64x2:
17164 IntNo = Intrinsic::floor;
17165 break;
17166 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
17167 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
17168 IntNo = Intrinsic::trunc;
17169 break;
17170 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
17171 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
17172 IntNo = Intrinsic::nearbyint;
17173 break;
17174 default:
17175 llvm_unreachable("unexpected builtin ID");
17176 }
17177 Value *Value = EmitScalarExpr(E->getArg(0));
17178 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
17179 return Builder.CreateCall(Callee, Value);
17180 }
17181 case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
17182 Value *Src = EmitScalarExpr(E->getArg(0));
17183 Value *Indices = EmitScalarExpr(E->getArg(1));
17184 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
17185 return Builder.CreateCall(Callee, {Src, Indices});
17186 }
17187 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
17188 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
17189 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
17190 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
17191 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
17192 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
17193 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
17194 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
17195 llvm::APSInt LaneConst =
17196 *E->getArg(1)->getIntegerConstantExpr(getContext());
17197 Value *Vec = EmitScalarExpr(E->getArg(0));
17198 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
17199 Value *Extract = Builder.CreateExtractElement(Vec, Lane);
17200 switch (BuiltinID) {
17201 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
17202 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
17203 return Builder.CreateSExt(Extract, ConvertType(E->getType()));
17204 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
17205 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
17206 return Builder.CreateZExt(Extract, ConvertType(E->getType()));
17207 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
17208 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
17209 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
17210 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
17211 return Extract;
17212 default:
17213 llvm_unreachable("unexpected builtin ID");
17214 }
17215 }
17216 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
17217 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
17218 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
17219 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
17220 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
17221 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
17222 llvm::APSInt LaneConst =
17223 *E->getArg(1)->getIntegerConstantExpr(getContext());
17224 Value *Vec = EmitScalarExpr(E->getArg(0));
17225 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
17226 Value *Val = EmitScalarExpr(E->getArg(2));
17227 switch (BuiltinID) {
17228 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
17229 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
17230 llvm::Type *ElemType =
17231 cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
17232 Value *Trunc = Builder.CreateTrunc(Val, ElemType);
17233 return Builder.CreateInsertElement(Vec, Trunc, Lane);
17234 }
17235 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
17236 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
17237 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
17238 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
17239 return Builder.CreateInsertElement(Vec, Val, Lane);
17240 default:
17241 llvm_unreachable("unexpected builtin ID");
17242 }
17243 }
17244 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
17245 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
17246 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
17247 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
17248 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
17249 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
17250 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
17251 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
17252 unsigned IntNo;
17253 switch (BuiltinID) {
17254 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
17255 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
17256 IntNo = Intrinsic::sadd_sat;
17257 break;
17258 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
17259 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
17260 IntNo = Intrinsic::uadd_sat;
17261 break;
17262 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
17263 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
17264 IntNo = Intrinsic::wasm_sub_sat_signed;
17265 break;
17266 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
17267 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
17268 IntNo = Intrinsic::wasm_sub_sat_unsigned;
17269 break;
17270 default:
17271 llvm_unreachable("unexpected builtin ID");
17272 }
17273 Value *LHS = EmitScalarExpr(E->getArg(0));
17274 Value *RHS = EmitScalarExpr(E->getArg(1));
17275 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
17276 return Builder.CreateCall(Callee, {LHS, RHS});
17277 }
17278 case WebAssembly::BI__builtin_wasm_abs_i8x16:
17279 case WebAssembly::BI__builtin_wasm_abs_i16x8:
17280 case WebAssembly::BI__builtin_wasm_abs_i32x4:
17281 case WebAssembly::BI__builtin_wasm_abs_i64x2: {
17282 Value *Vec = EmitScalarExpr(E->getArg(0));
17283 Value *Neg = Builder.CreateNeg(Vec, "neg");
17284 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
17285 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
17286 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
17287 }
17288 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
17289 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
17290 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
17291 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
17292 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
17293 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
17294 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
17295 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
17296 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
17297 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
17298 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
17299 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
17300 Value *LHS = EmitScalarExpr(E->getArg(0));
17301 Value *RHS = EmitScalarExpr(E->getArg(1));
17302 Value *ICmp;
17303 switch (BuiltinID) {
17304 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
17305 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
17306 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
17307 ICmp = Builder.CreateICmpSLT(LHS, RHS);
17308 break;
17309 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
17310 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
17311 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
17312 ICmp = Builder.CreateICmpULT(LHS, RHS);
17313 break;
17314 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
17315 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
17316 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
17317 ICmp = Builder.CreateICmpSGT(LHS, RHS);
17318 break;
17319 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
17320 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
17321 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
17322 ICmp = Builder.CreateICmpUGT(LHS, RHS);
17323 break;
17324 default:
17325 llvm_unreachable("unexpected builtin ID");
17326 }
17327 return Builder.CreateSelect(ICmp, LHS, RHS);
17328 }
17329 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
17330 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
17331 Value *LHS = EmitScalarExpr(E->getArg(0));
17332 Value *RHS = EmitScalarExpr(E->getArg(1));
17333 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
17334 ConvertType(E->getType()));
17335 return Builder.CreateCall(Callee, {LHS, RHS});
17336 }
17337 case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
17338 Value *LHS = EmitScalarExpr(E->getArg(0));
17339 Value *RHS = EmitScalarExpr(E->getArg(1));
17340 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
17341 return Builder.CreateCall(Callee, {LHS, RHS});
17342 }
17343 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
17344 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
17345 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
17346 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
17347 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
17348 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
17349 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
17350 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
17351 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
17352 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
17353 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
17354 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2: {
17355 Value *LHS = EmitScalarExpr(E->getArg(0));
17356 Value *RHS = EmitScalarExpr(E->getArg(1));
17357 unsigned IntNo;
17358 switch (BuiltinID) {
17359 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
17360 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
17361 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
17362 IntNo = Intrinsic::wasm_extmul_low_signed;
17363 break;
17364 case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
17365 case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
17366 case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
17367 IntNo = Intrinsic::wasm_extmul_low_unsigned;
17368 break;
17369 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
17370 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
17371 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
17372 IntNo = Intrinsic::wasm_extmul_high_signed;
17373 break;
17374 case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
17375 case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
17376 case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2:
17377 IntNo = Intrinsic::wasm_extmul_high_unsigned;
17378 break;
17379 default:
17380 llvm_unreachable("unexpected builtin ID");
17381 }
17382
17383 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
17384 return Builder.CreateCall(Callee, {LHS, RHS});
17385 }
17386 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
17387 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
17388 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
17389 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
17390 Value *Vec = EmitScalarExpr(E->getArg(0));
17391 unsigned IntNo;
17392 switch (BuiltinID) {
17393 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
17394 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
17395 IntNo = Intrinsic::wasm_extadd_pairwise_signed;
17396 break;
17397 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
17398 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
17399 IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
17400 break;
17401 default:
17402 llvm_unreachable("unexpected builtin ID");
17403 }
17404
17405 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
17406 return Builder.CreateCall(Callee, Vec);
17407 }
17408 case WebAssembly::BI__builtin_wasm_bitselect: {
17409 Value *V1 = EmitScalarExpr(E->getArg(0));
17410 Value *V2 = EmitScalarExpr(E->getArg(1));
17411 Value *C = EmitScalarExpr(E->getArg(2));
17412 Function *Callee =
17413 CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
17414 return Builder.CreateCall(Callee, {V1, V2, C});
17415 }
17416 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
17417 Value *LHS = EmitScalarExpr(E->getArg(0));
17418 Value *RHS = EmitScalarExpr(E->getArg(1));
17419 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
17420 return Builder.CreateCall(Callee, {LHS, RHS});
17421 }
17422 case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
17423 Value *Vec = EmitScalarExpr(E->getArg(0));
17424 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
17425 return Builder.CreateCall(Callee, {Vec});
17426 }
17427 case WebAssembly::BI__builtin_wasm_any_true_v128:
17428 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
17429 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
17430 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
17431 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
17432 unsigned IntNo;
17433 switch (BuiltinID) {
17434 case WebAssembly::BI__builtin_wasm_any_true_v128:
17435 IntNo = Intrinsic::wasm_anytrue;
17436 break;
17437 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
17438 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
17439 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
17440 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
17441 IntNo = Intrinsic::wasm_alltrue;
17442 break;
17443 default:
17444 llvm_unreachable("unexpected builtin ID");
17445 }
17446 Value *Vec = EmitScalarExpr(E->getArg(0));
17447 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
17448 return Builder.CreateCall(Callee, {Vec});
17449 }
17450 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
17451 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
17452 case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
17453 case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
17454 Value *Vec = EmitScalarExpr(E->getArg(0));
17455 Function *Callee =
17456 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
17457 return Builder.CreateCall(Callee, {Vec});
17458 }
17459 case WebAssembly::BI__builtin_wasm_abs_f32x4:
17460 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
17461 Value *Vec = EmitScalarExpr(E->getArg(0));
17462 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
17463 return Builder.CreateCall(Callee, {Vec});
17464 }
17465 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
17466 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
17467 Value *Vec = EmitScalarExpr(E->getArg(0));
17468 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
17469 return Builder.CreateCall(Callee, {Vec});
17470 }
17471 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
17472 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
17473 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
17474 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
17475 Value *Low = EmitScalarExpr(E->getArg(0));
17476 Value *High = EmitScalarExpr(E->getArg(1));
17477 unsigned IntNo;
17478 switch (BuiltinID) {
17479 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
17480 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
17481 IntNo = Intrinsic::wasm_narrow_signed;
17482 break;
17483 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
17484 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
17485 IntNo = Intrinsic::wasm_narrow_unsigned;
17486 break;
17487 default:
17488 llvm_unreachable("unexpected builtin ID");
17489 }
17490 Function *Callee =
17491 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
17492 return Builder.CreateCall(Callee, {Low, High});
17493 }
17494 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
17495 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
17496 Value *Vec = EmitScalarExpr(E->getArg(0));
17497 unsigned IntNo;
17498 switch (BuiltinID) {
17499 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
17500 IntNo = Intrinsic::fptosi_sat;
17501 break;
17502 case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
17503 IntNo = Intrinsic::fptoui_sat;
17504 break;
17505 default:
17506 llvm_unreachable("unexpected builtin ID");
17507 }
17508 llvm::Type *SrcT = Vec->getType();
17509 llvm::Type *TruncT =
17510 SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
17511 Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
17512 Value *Trunc = Builder.CreateCall(Callee, Vec);
17513 Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
17514 Value *ConcatMask =
17515 llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
17516 Builder.getInt32(2), Builder.getInt32(3)});
17517 return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
17518 }
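// Illustrative note (not from the analyzed source, assumed semantics): the
// saturating conversion in the case above yields a two-element <2 x i32>;
// shuffling it against the zero splat with mask <0, 1, 2, 3> concatenates the
// two halves, so the final <4 x i32> result is roughly
//   { sat_trunc(v[0]), sat_trunc(v[1]), 0, 0 }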
17519 case WebAssembly::BI__builtin_wasm_demote_zero_f64x2_f32x4: {
17520 Value *Vec = EmitScalarExpr(E->getArg(0));
17521 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_demote_zero);
17522 return Builder.CreateCall(Callee, Vec);
17523 }
17524 case WebAssembly::BI__builtin_wasm_promote_low_f32x4_f64x2: {
17525 Value *Vec = EmitScalarExpr(E->getArg(0));
17526 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_promote_low);
17527 return Builder.CreateCall(Callee, Vec);
17528 }
17529 case WebAssembly::BI__builtin_wasm_load32_zero: {
17530 Value *Ptr = EmitScalarExpr(E->getArg(0));
17531 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
17532 return Builder.CreateCall(Callee, {Ptr});
17533 }
17534 case WebAssembly::BI__builtin_wasm_load64_zero: {
17535 Value *Ptr = EmitScalarExpr(E->getArg(0));
17536 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
17537 return Builder.CreateCall(Callee, {Ptr});
17538 }
17539 case WebAssembly::BI__builtin_wasm_load8_lane:
17540 case WebAssembly::BI__builtin_wasm_load16_lane:
17541 case WebAssembly::BI__builtin_wasm_load32_lane:
17542 case WebAssembly::BI__builtin_wasm_load64_lane:
17543 case WebAssembly::BI__builtin_wasm_store8_lane:
17544 case WebAssembly::BI__builtin_wasm_store16_lane:
17545 case WebAssembly::BI__builtin_wasm_store32_lane:
17546 case WebAssembly::BI__builtin_wasm_store64_lane: {
17547 Value *Ptr = EmitScalarExpr(E->getArg(0));
17548 Value *Vec = EmitScalarExpr(E->getArg(1));
17549 Optional<llvm::APSInt> LaneIdxConst =
17550 E->getArg(2)->getIntegerConstantExpr(getContext());
17551 assert(LaneIdxConst && "Constant arg isn't actually constant?");
17552 Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
17553 unsigned IntNo;
17554 switch (BuiltinID) {
17555 case WebAssembly::BI__builtin_wasm_load8_lane:
17556 IntNo = Intrinsic::wasm_load8_lane;
17557 break;
17558 case WebAssembly::BI__builtin_wasm_load16_lane:
17559 IntNo = Intrinsic::wasm_load16_lane;
17560 break;
17561 case WebAssembly::BI__builtin_wasm_load32_lane:
17562 IntNo = Intrinsic::wasm_load32_lane;
17563 break;
17564 case WebAssembly::BI__builtin_wasm_load64_lane:
17565 IntNo = Intrinsic::wasm_load64_lane;
17566 break;
17567 case WebAssembly::BI__builtin_wasm_store8_lane:
17568 IntNo = Intrinsic::wasm_store8_lane;
17569 break;
17570 case WebAssembly::BI__builtin_wasm_store16_lane:
17571 IntNo = Intrinsic::wasm_store16_lane;
17572 break;
17573 case WebAssembly::BI__builtin_wasm_store32_lane:
17574 IntNo = Intrinsic::wasm_store32_lane;
17575 break;
17576 case WebAssembly::BI__builtin_wasm_store64_lane:
17577 IntNo = Intrinsic::wasm_store64_lane;
17578 break;
17579 default:
17580 llvm_unreachable("unexpected builtin ID");
17581 }
17582 Function *Callee = CGM.getIntrinsic(IntNo);
17583 return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
17584 }
17585 case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
17586 Value *Ops[18];
17587 size_t OpIdx = 0;
17588 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
17589 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
17590 while (OpIdx < 18) {
17591 Optional<llvm::APSInt> LaneConst =
17592 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
17593 assert(LaneConst && "Constant arg isn't actually constant?");
17594 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
17595 }
17596 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
17597 return Builder.CreateCall(Callee, Ops);
17598 }
17599 default:
17600 return nullptr;
17601 }
17602}
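// Illustrative usage sketch (assumed builtin signature, not from the analyzed
// source): for the first case above, a call such as
//   unsigned long pages = __builtin_wasm_memory_size(0);  // memory index 0
// is lowered to a single call of the overloaded intrinsic at the result type
// picked by ConvertType(E->getType()), roughly
//   %pages = call i32 @llvm.wasm.memory.size.i32(i32 0)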
17603
17604static std::pair<Intrinsic::ID, unsigned>
17605getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
17606 struct Info {
17607 unsigned BuiltinID;
17608 Intrinsic::ID IntrinsicID;
17609 unsigned VecLen;
17610 };
17611 Info Infos[] = {
17612#define CUSTOM_BUILTIN_MAPPING(x,s) \
17613 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
17614 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
17615 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
17616 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
17617 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
17618 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
17619 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
17620 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
17621 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
17622 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
17623 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
17624 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
17625 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
17626 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
17627 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
17628 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
17629 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
17630 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
17631 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
17632 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
17633 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
17634 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
17635 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
17636 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
17637 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
17638 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
17639 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
17640 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
17641 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
17642 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
17643 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
17644#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
17645#undef CUSTOM_BUILTIN_MAPPING
17646 };
17647
17648 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
17649 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
17650 (void)SortOnce;
17651
17652 const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
17653 Info{BuiltinID, 0, 0}, CmpInfo);
17654 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
17655 return {Intrinsic::not_intrinsic, 0};
17656
17657 return {F->IntrinsicID, F->VecLen};
17658}
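// Minimal sketch (generic names assumed, not part of the analyzed source) of
// the lookup idiom used above: sort a static table exactly once, then
// binary-search it on every query.
//
//   struct Entry { unsigned Key; int Payload; };
//   static Entry Table[] = {{3, 30}, {1, 10}, {2, 20}};
//   auto Less = [](Entry A, Entry B) { return A.Key < B.Key; };
//   static const bool Sorted = (llvm::sort(Table, Less), true);  // runs once
//   (void)Sorted;
//   const Entry *It = std::lower_bound(std::begin(Table), std::end(Table),
//                                      Entry{Key, 0}, Less);
//   bool Found = It != std::end(Table) && It->Key == Key;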
17659
17660Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
17661 const CallExpr *E) {
17662 Intrinsic::ID ID;
17663 unsigned VecLen;
17664 std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
17665
17666 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
17667 // The base pointer is passed by address, so it needs to be loaded.
17668 Address A = EmitPointerWithAlignment(E->getArg(0));
17669 Address BP = Address(
17670 Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
17671 llvm::Value *Base = Builder.CreateLoad(BP);
17672 // The treatment of both loads and stores is the same: the arguments for
17673 // the builtin are the same as the arguments for the intrinsic.
17674 // Load:
17675 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
17676 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
17677 // Store:
17678 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
17679 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
17680 SmallVector<llvm::Value*,5> Ops = { Base };
17681 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
17682 Ops.push_back(EmitScalarExpr(E->getArg(i)));
17683
17684 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
17685 // The load intrinsics generate two results (Value, NewBase), stores
17686 // generate one (NewBase). The new base address needs to be stored.
17687 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
17688 : Result;
17689 llvm::Value *LV = Builder.CreateBitCast(
17690 EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
17691 Address Dest = EmitPointerWithAlignment(E->getArg(0));
17692 llvm::Value *RetVal =
17693 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
17694 if (IsLoad)
17695 RetVal = Builder.CreateExtractValue(Result, 0);
17696 return RetVal;
17697 };
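// Illustrative mapping for MakeCircOp (assumed argument shapes, mirroring the
// comment above): a circular load builtin such as
//   int v = __builtin_HEXAGON_L2_loadri_pci(&Base, Inc, Mod, Start);
// becomes one intrinsic call whose aggregate result is {Value, NewBase}; the
// loaded value is returned and NewBase is stored back through the first
// (by-address) argument.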
17698
17699 // Handle the conversion of bit-reverse load intrinsics to bit code.
17700 // The intrinsic call after this function only reads from memory and the
17701 // write to memory is handled by the store instruction.
17702 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
17703 // The intrinsic generates one result, which is the new value for the base
17704 // pointer. It needs to be returned. The result of the load instruction is
17705 // passed to the intrinsic by address, so the value needs to be stored.
17706 llvm::Value *BaseAddress =
17707 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
17708
17709 // Expressions like &(*pt++) will be incremented per evaluation.
17710 // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
17711 // once per call.
17712 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
17713 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
17714 DestAddr.getAlignment());
17715 llvm::Value *DestAddress = DestAddr.getPointer();
17716
17717 // Operands are Base, Dest, Modifier.
17718 // The intrinsic format in LLVM IR is defined as
17719 // { ValueType, i8* } (i8*, i32).
17720 llvm::Value *Result = Builder.CreateCall(
17721 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
17722
17723 // The value needs to be stored as the variable is passed by reference.
17724 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
17725
17726 // The store needs to be truncated to fit the destination type.
17727 // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
17728 // to be handled with stores of the respective destination type.
17729 DestVal = Builder.CreateTrunc(DestVal, DestTy);
17730
17731 llvm::Value *DestForStore =
17732 Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
17733 Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
17734 // The updated value of the base pointer is returned.
17735 return Builder.CreateExtractValue(Result, 1);
17736 };
17737
17738 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
17739 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
17740 : Intrinsic::hexagon_V6_vandvrt;
17741 return Builder.CreateCall(CGM.getIntrinsic(ID),
17742 {Vec, Builder.getInt32(-1)});
17743 };
17744 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
17745 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
17746 : Intrinsic::hexagon_V6_vandqrt;
17747 return Builder.CreateCall(CGM.getIntrinsic(ID),
17748 {Pred, Builder.getInt32(-1)});
17749 };
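// Illustrative note (assumed semantics): V2Q and Q2V round-trip between an
// HVX vector and the boolean predicate type that some intrinsics expect, e.g.
//   llvm::Value *Pred = V2Q(Vec);  // vector -> predicate via vandvrt(Vec, -1)
//   llvm::Value *Back = Q2V(Pred); // predicate -> vector via vandqrt(Pred, -1)
// so vector-typed builtin arguments can feed predicate-typed intrinsic
// parameters, and predicate results can be stored back as vectors.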
17750
17751 switch (BuiltinID) {
17752 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
17753 // and the corresponding C/C++ builtins use loads/stores to update
17754 // the predicate.
17755 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
17756 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
17757 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
17758 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
17759 // Get the type from the 0-th argument.
17760 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
17761 Address PredAddr = Builder.CreateBitCast(
17762 EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
17763 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
17764 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
17765 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
17766
17767 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
17768 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
17769 PredAddr.getAlignment());
17770 return Builder.CreateExtractValue(Result, 0);
17771 }
17772
17773 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
17774 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
17775 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
17776 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
17777 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
17778 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
17779 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
17780 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
17781 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
17782 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
17783 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
17784 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
17785 return MakeCircOp(ID, /*IsLoad=*/true);
17786 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
17787 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
17788 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
17789 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
17790 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
17791 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
17792 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
17793 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
17794 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
17795 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
17796 return MakeCircOp(ID, /*IsLoad=*/false);
17797 case Hexagon::BI__builtin_brev_ldub:
17798 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
17799 case Hexagon::BI__builtin_brev_ldb:
17800 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
17801 case Hexagon::BI__builtin_brev_lduh:
17802 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
17803 case Hexagon::BI__builtin_brev_ldh:
17804 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
17805 case Hexagon::BI__builtin_brev_ldw:
17806 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
17807 case Hexagon::BI__builtin_brev_ldd:
17808 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
17809
17810 default: {
17811 if (ID == Intrinsic::not_intrinsic)
17812 return nullptr;
17813
17814 auto IsVectorPredTy = [](llvm::Type *T) {
17815 return T->isVectorTy() &&
17816 cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
17817 };
17818
17819 llvm::Function *IntrFn = CGM.getIntrinsic(ID);
17820 llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
17821 SmallVector<llvm::Value*,4> Ops;
17822 for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
17823 llvm::Type *T = IntrTy->getParamType(i);
17824 const Expr *A = E->getArg(i);
17825 if (IsVectorPredTy(T)) {
17826 // There will be an implicit cast to a boolean vector. Strip it.
17827 if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
17828 if (Cast->getCastKind() == CK_BitCast)
17829 A = Cast->getSubExpr();
17830 }
17831 Ops.push_back(V2Q(EmitScalarExpr(A)));
17832 } else {
17833 Ops.push_back(EmitScalarExpr(A));
17834 }
17835 }
17836
17837 llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
17838 if (IsVectorPredTy(IntrTy->getReturnType()))
17839 Call = Q2V(Call);
17840
17841 return Call;
17842 } // default
17843 } // switch
17844
17845 return nullptr;
17846}
17847
17848Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
17849 const CallExpr *E,
17850 ReturnValueSlot ReturnValue) {
17851 SmallVector<Value *, 4> Ops;
17852 llvm::Type *ResultType = ConvertType(E->getType());
17853
17854 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
17855 Ops.push_back(EmitScalarExpr(E->getArg(i)));
17856
17857 Intrinsic::ID ID = Intrinsic::not_intrinsic;
17858
17859 // Required for overloaded intrinsics.
17860 llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
17861 switch (BuiltinID) {
17862 default: llvm_unreachable("unexpected builtin ID");
17863 case RISCV::BI__builtin_riscv_orc_b_32:
17864 case RISCV::BI__builtin_riscv_orc_b_64:
17865 case RISCV::BI__builtin_riscv_clmul:
17866 case RISCV::BI__builtin_riscv_clmulh:
17867 case RISCV::BI__builtin_riscv_clmulr:
17868 case RISCV::BI__builtin_riscv_bcompress_32:
17869 case RISCV::BI__builtin_riscv_bcompress_64:
17870 case RISCV::BI__builtin_riscv_bdecompress_32:
17871 case RISCV::BI__builtin_riscv_bdecompress_64:
17872 case RISCV::BI__builtin_riscv_grev_32:
17873 case RISCV::BI__builtin_riscv_grev_64:
17874 case RISCV::BI__builtin_riscv_gorc_32:
17875 case RISCV::BI__builtin_riscv_gorc_64:
17876 case RISCV::BI__builtin_riscv_shfl_32:
17877 case RISCV::BI__builtin_riscv_shfl_64:
17878 case RISCV::BI__builtin_riscv_unshfl_32:
17879 case RISCV::BI__builtin_riscv_unshfl_64:
17880 case RISCV::BI__builtin_riscv_xperm_n:
17881 case RISCV::BI__builtin_riscv_xperm_b:
17882 case RISCV::BI__builtin_riscv_xperm_h:
17883 case RISCV::BI__builtin_riscv_xperm_w:
17884 case RISCV::BI__builtin_riscv_crc32_b:
17885 case RISCV::BI__builtin_riscv_crc32_h:
17886 case RISCV::BI__builtin_riscv_crc32_w:
17887 case RISCV::BI__builtin_riscv_crc32_d:
17888 case RISCV::BI__builtin_riscv_crc32c_b:
17889 case RISCV::BI__builtin_riscv_crc32c_h:
17890 case RISCV::BI__builtin_riscv_crc32c_w:
17891 case RISCV::BI__builtin_riscv_crc32c_d: {
17892 switch (BuiltinID) {
17893 default: llvm_unreachable("unexpected builtin ID");
17894 // Zbb
17895 case RISCV::BI__builtin_riscv_orc_b_32:
17896 case RISCV::BI__builtin_riscv_orc_b_64:
17897 ID = Intrinsic::riscv_orc_b;
17898 break;
17899
17900 // Zbc
17901 case RISCV::BI__builtin_riscv_clmul:
17902 ID = Intrinsic::riscv_clmul;
17903 break;
17904 case RISCV::BI__builtin_riscv_clmulh:
17905 ID = Intrinsic::riscv_clmulh;
17906 break;
17907 case RISCV::BI__builtin_riscv_clmulr:
17908 ID = Intrinsic::riscv_clmulr;
17909 break;
17910
17911 // Zbe
17912 case RISCV::BI__builtin_riscv_bcompress_32:
17913 case RISCV::BI__builtin_riscv_bcompress_64:
17914 ID = Intrinsic::riscv_bcompress;
17915 break;
17916 case RISCV::BI__builtin_riscv_bdecompress_32:
17917 case RISCV::BI__builtin_riscv_bdecompress_64:
17918 ID = Intrinsic::riscv_bdecompress;
17919 break;
17920
17921 // Zbp
17922 case RISCV::BI__builtin_riscv_grev_32:
17923 case RISCV::BI__builtin_riscv_grev_64:
17924 ID = Intrinsic::riscv_grev;
17925 break;
17926 case RISCV::BI__builtin_riscv_gorc_32:
17927 case RISCV::BI__builtin_riscv_gorc_64:
17928 ID = Intrinsic::riscv_gorc;
17929 break;
17930 case RISCV::BI__builtin_riscv_shfl_32:
17931 case RISCV::BI__builtin_riscv_shfl_64:
17932 ID = Intrinsic::riscv_shfl;
17933 break;
17934 case RISCV::BI__builtin_riscv_unshfl_32:
17935 case RISCV::BI__builtin_riscv_unshfl_64:
17936 ID = Intrinsic::riscv_unshfl;
17937 break;
17938 case RISCV::BI__builtin_riscv_xperm_n:
17939 ID = Intrinsic::riscv_xperm_n;
17940 break;
17941 case RISCV::BI__builtin_riscv_xperm_b:
17942 ID = Intrinsic::riscv_xperm_b;
17943 break;
17944 case RISCV::BI__builtin_riscv_xperm_h:
17945 ID = Intrinsic::riscv_xperm_h;
17946 break;
17947 case RISCV::BI__builtin_riscv_xperm_w:
17948 ID = Intrinsic::riscv_xperm_w;
17949 break;
17950
17951 // Zbr
17952 case RISCV::BI__builtin_riscv_crc32_b:
17953 ID = Intrinsic::riscv_crc32_b;
17954 break;
17955 case RISCV::BI__builtin_riscv_crc32_h:
17956 ID = Intrinsic::riscv_crc32_h;
17957 break;
17958 case RISCV::BI__builtin_riscv_crc32_w:
17959 ID = Intrinsic::riscv_crc32_w;
17960 break;
17961 case RISCV::BI__builtin_riscv_crc32_d:
17962 ID = Intrinsic::riscv_crc32_d;
17963 break;
17964 case RISCV::BI__builtin_riscv_crc32c_b:
17965 ID = Intrinsic::riscv_crc32c_b;
17966 break;
17967 case RISCV::BI__builtin_riscv_crc32c_h:
17968 ID = Intrinsic::riscv_crc32c_h;
17969 break;
17970 case RISCV::BI__builtin_riscv_crc32c_w:
17971 ID = Intrinsic::riscv_crc32c_w;
17972 break;
17973 case RISCV::BI__builtin_riscv_crc32c_d:
17974 ID = Intrinsic::riscv_crc32c_d;
17975 break;
17976 }
17977
17978 IntrinsicTypes = {ResultType};
17979 break;
17980 }
17981 // Vector builtins are handled from here.
17982#include "clang/Basic/riscv_vector_builtin_cg.inc"
17983 }
17984
17985 assert(ID != Intrinsic::not_intrinsic);
17986
17987 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
17988 return Builder.CreateCall(F, Ops, "");
17989}
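// Illustrative lowering sketch (assumed, not from the analyzed source):
// because IntrinsicTypes carries the result type, a scalar builtin such as
//   uint32_t y = __builtin_riscv_orc_b_32(x);
// resolves to the overloaded intrinsic instantiated at that width, roughly
//   %y = call i32 @llvm.riscv.orc.b.i32(i32 %x)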

/build/llvm-toolchain-snapshot-13~++20210616111117+5c1639fe064b/clang/include/clang/AST/DeclBase.h

1//===- DeclBase.h - Base Classes for representing declarations --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the Decl and DeclContext interfaces.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CLANG_AST_DECLBASE_H
14#define LLVM_CLANG_AST_DECLBASE_H
15
16#include "clang/AST/ASTDumperUtils.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/DeclarationName.h"
19#include "clang/Basic/IdentifierTable.h"
20#include "clang/Basic/LLVM.h"
21#include "clang/Basic/SourceLocation.h"
22#include "clang/Basic/Specifiers.h"
23#include "llvm/ADT/ArrayRef.h"
24#include "llvm/ADT/PointerIntPair.h"
25#include "llvm/ADT/PointerUnion.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/Support/Casting.h"
29#include "llvm/Support/Compiler.h"
30#include "llvm/Support/PrettyStackTrace.h"
31#include "llvm/Support/VersionTuple.h"
32#include <algorithm>
33#include <cassert>
34#include <cstddef>
35#include <iterator>
36#include <string>
37#include <type_traits>
38#include <utility>
39
40namespace clang {
41
42class ASTContext;
43class ASTMutationListener;
44class Attr;
45class BlockDecl;
46class DeclContext;
47class ExternalSourceSymbolAttr;
48class FunctionDecl;
49class FunctionType;
50class IdentifierInfo;
51enum Linkage : unsigned char;
52class LinkageSpecDecl;
53class Module;
54class NamedDecl;
55class ObjCCategoryDecl;
56class ObjCCategoryImplDecl;
57class ObjCContainerDecl;
58class ObjCImplDecl;
59class ObjCImplementationDecl;
60class ObjCInterfaceDecl;
61class ObjCMethodDecl;
62class ObjCProtocolDecl;
63struct PrintingPolicy;
64class RecordDecl;
65class SourceManager;
66class Stmt;
67class StoredDeclsMap;
68class TemplateDecl;
69class TemplateParameterList;
70class TranslationUnitDecl;
71class UsingDirectiveDecl;
72
73/// Captures the result of checking the availability of a
74/// declaration.
75enum AvailabilityResult {
76 AR_Available = 0,
77 AR_NotYetIntroduced,
78 AR_Deprecated,
79 AR_Unavailable
80};
81
82/// Decl - This represents one declaration (or definition), e.g. a variable,
83/// typedef, function, struct, etc.
84///
85/// Note: There are objects tacked on before the *beginning* of Decl
86/// (and its subclasses) in its Decl::operator new(). Proper alignment
87/// of all subclasses (not requiring more than the alignment of Decl) is
88/// asserted in DeclBase.cpp.
89class alignas(8) Decl {
90public:
91 /// Lists the kind of concrete classes of Decl.
92 enum Kind {
93#define DECL(DERIVED, BASE) DERIVED,
94#define ABSTRACT_DECL(DECL)
95#define DECL_RANGE(BASE, START, END) \
96 first##BASE = START, last##BASE = END,
97#define LAST_DECL_RANGE(BASE, START, END) \
98 first##BASE = START, last##BASE = END
99#include "clang/AST/DeclNodes.inc"
100 };
101
102 /// A placeholder type used to construct an empty shell of a
103 /// decl-derived type that will be filled in later (e.g., by some
104 /// deserialization method).
105 struct EmptyShell {};
106
107 /// IdentifierNamespace - The different namespaces in which
108 /// declarations may appear. According to C99 6.2.3, there are
109 /// four namespaces: labels, tags, members, and ordinary
110 /// identifiers. C++ describes lookup completely differently:
111 /// certain lookups merely "ignore" certain kinds of declarations,
112 /// usually based on whether the declaration is of a type, etc.
113 ///
114 /// These are meant as bitmasks, so that searches in
115 /// C++ can look into the "tag" namespace during ordinary lookup.
116 ///
117 /// Decl currently provides 15 bits of IDNS bits.
118 enum IdentifierNamespace {
119 /// Labels, declared with 'x:' and referenced with 'goto x'.
120 IDNS_Label = 0x0001,
121
122 /// Tags, declared with 'struct foo;' and referenced with
123 /// 'struct foo'. All tags are also types. This is what
124 /// elaborated-type-specifiers look for in C.
125 /// This also contains names that conflict with tags in the
126 /// same scope but that are otherwise ordinary names (non-type
127 /// template parameters and indirect field declarations).
128 IDNS_Tag = 0x0002,
129
130 /// Types, declared with 'struct foo', typedefs, etc.
131 /// This is what elaborated-type-specifiers look for in C++,
132 /// but note that it's ill-formed to find a non-tag.
133 IDNS_Type = 0x0004,
134
135 /// Members, declared with object declarations within tag
136 /// definitions. In C, these can only be found by "qualified"
137 /// lookup in member expressions. In C++, they're found by
138 /// normal lookup.
139 IDNS_Member = 0x0008,
140
141 /// Namespaces, declared with 'namespace foo {}'.
142 /// Lookup for nested-name-specifiers find these.
143 IDNS_Namespace = 0x0010,
144
145 /// Ordinary names. In C, everything that's not a label, tag,
146 /// member, or function-local extern ends up here.
147 IDNS_Ordinary = 0x0020,
148
149 /// Objective C \@protocol.
150 IDNS_ObjCProtocol = 0x0040,
151
152 /// This declaration is a friend function. A friend function
153 /// declaration is always in this namespace but may also be in
154 /// IDNS_Ordinary if it was previously declared.
155 IDNS_OrdinaryFriend = 0x0080,
156
157 /// This declaration is a friend class. A friend class
158 /// declaration is always in this namespace but may also be in
159 /// IDNS_Tag|IDNS_Type if it was previously declared.
160 IDNS_TagFriend = 0x0100,
161
162 /// This declaration is a using declaration. A using declaration
163 /// *introduces* a number of other declarations into the current
164 /// scope, and those declarations use the IDNS of their targets,
165 /// but the actual using declarations go in this namespace.
166 IDNS_Using = 0x0200,
167
168 /// This declaration is a C++ operator declared in a non-class
169 /// context. All such operators are also in IDNS_Ordinary.
170 /// C++ lexical operator lookup looks for these.
171 IDNS_NonMemberOperator = 0x0400,
172
173 /// This declaration is a function-local extern declaration of a
174 /// variable or function. This may also be IDNS_Ordinary if it
175 /// has been declared outside any function. These act mostly like
176 /// invisible friend declarations, but are also visible to unqualified
177 /// lookup within the scope of the declaring function.
178 IDNS_LocalExtern = 0x0800,
179
180 /// This declaration is an OpenMP user defined reduction construction.
181 IDNS_OMPReduction = 0x1000,
182
183 /// This declaration is an OpenMP user defined mapper.
184 IDNS_OMPMapper = 0x2000,
185 };
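// Illustrative usage (assumed accessor, not shown in this excerpt): since
// these are bit flags, a lookup mask can combine namespaces, e.g.
//   unsigned Mask = IDNS_Ordinary | IDNS_Tag;
//   bool FoundByLookup = (D->getIdentifierNamespace() & Mask) != 0;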
186
187 /// ObjCDeclQualifier - 'Qualifiers' written next to the return and
188 /// parameter types in method declarations. Other than remembering
189 /// them and mangling them into the method's signature string, these
190 /// are ignored by the compiler; they are consumed by certain
191 /// remote-messaging frameworks.
192 ///
193 /// in, inout, and out are mutually exclusive and apply only to
194 /// method parameters. bycopy and byref are mutually exclusive and
195 /// apply only to method parameters (?). oneway applies only to
196 /// results. All of these expect their corresponding parameter to
197 /// have a particular type. None of this is currently enforced by
198 /// clang.
199 ///
200 /// This should be kept in sync with ObjCDeclSpec::ObjCDeclQualifier.
201 enum ObjCDeclQualifier {
202 OBJC_TQ_None = 0x0,
203 OBJC_TQ_In = 0x1,
204 OBJC_TQ_Inout = 0x2,
205 OBJC_TQ_Out = 0x4,
206 OBJC_TQ_Bycopy = 0x8,
207 OBJC_TQ_Byref = 0x10,
208 OBJC_TQ_Oneway = 0x20,
209
210 /// The nullability qualifier is set when the nullability of the
211 /// result or parameter was expressed via a context-sensitive
212 /// keyword.
213 OBJC_TQ_CSNullability = 0x40
214 };
215
216 /// The kind of ownership a declaration has, for visibility purposes.
217 /// This enumeration is designed such that higher values represent higher
218 /// levels of name hiding.
219 enum class ModuleOwnershipKind : unsigned {
220 /// This declaration is not owned by a module.
221 Unowned,
222
223 /// This declaration has an owning module, but is globally visible
224 /// (typically because its owning module is visible and we know that
225 /// modules cannot later become hidden in this compilation).
226 /// After serialization and deserialization, this will be converted
227 /// to VisibleWhenImported.
228 Visible,
229
230 /// This declaration has an owning module, and is visible when that
231 /// module is imported.
232 VisibleWhenImported,
233
234 /// This declaration has an owning module, but is only visible to
235 /// lookups that occur within that module.
236 ModulePrivate
237 };
238
239protected:
240 /// The next declaration within the same lexical
241 /// DeclContext. These pointers form the linked list that is
242 /// traversed via DeclContext's decls_begin()/decls_end().
243 ///
244 /// The extra two bits are used for the ModuleOwnershipKind.
245 llvm::PointerIntPair<Decl *, 2, ModuleOwnershipKind> NextInContextAndBits;
246
247private:
248 friend class DeclContext;
249
250 struct MultipleDC {
251 DeclContext *SemanticDC;
252 DeclContext *LexicalDC;
253 };
254
255 /// DeclCtx - Holds either a DeclContext* or a MultipleDC*.
256 /// For declarations that don't contain C++ scope specifiers, it contains
257 /// the DeclContext where the Decl was declared.
258 /// For declarations with C++ scope specifiers, it contains a MultipleDC*
259 /// with the context where it semantically belongs (SemanticDC) and the
260 /// context where it was lexically declared (LexicalDC).
261 /// e.g.:
262 ///
263 /// namespace A {
264 /// void f(); // SemanticDC == LexicalDC == 'namespace A'
265 /// }
266 /// void A::f(); // SemanticDC == namespace 'A'
267 /// // LexicalDC == global namespace
268 llvm::PointerUnion<DeclContext*, MultipleDC*> DeclCtx;
269
270 bool isInSemaDC() const { return DeclCtx.is<DeclContext*>(); }
271 bool isOutOfSemaDC() const { return DeclCtx.is<MultipleDC*>(); }
272
273 MultipleDC *getMultipleDC() const {
274 return DeclCtx.get<MultipleDC*>();
275 }
276
277 DeclContext *getSemanticDC() const {
278 return DeclCtx.get<DeclContext*>();
279 }
280
281 /// Loc - The location of this decl.
282 SourceLocation Loc;
283
284 /// DeclKind - This indicates which class this is.
285 unsigned DeclKind : 7;
286
287 /// InvalidDecl - This indicates a semantic error occurred.
288 unsigned InvalidDecl : 1;
289
290 /// HasAttrs - This indicates whether the decl has attributes or not.
291 unsigned HasAttrs : 1;
292
293 /// Implicit - Whether this declaration was implicitly generated by
294 /// the implementation rather than explicitly written by the user.
295 unsigned Implicit : 1;
296
297 /// Whether this declaration was "used", meaning that a definition is
298 /// required.
299 unsigned Used : 1;
300
301 /// Whether this declaration was "referenced".
302 /// The difference from 'Used' is whether the reference appears in an
303 /// evaluated context or not, e.g. functions used in uninstantiated templates
304 /// are regarded as "referenced" but not "used".
305 unsigned Referenced : 1;
306
307 /// Whether this declaration is a top-level declaration (function,
308 /// global variable, etc.) that is lexically inside an objc container
309 /// definition.
310 unsigned TopLevelDeclInObjCContainer : 1;
311
312 /// Whether statistic collection is enabled.
313 static bool StatisticsEnabled;
314
315protected:
316 friend class ASTDeclReader;
317 friend class ASTDeclWriter;
318 friend class ASTNodeImporter;
319 friend class ASTReader;
320 friend class CXXClassMemberWrapper;
321 friend class LinkageComputer;
322 template<typename decl_type> friend class Redeclarable;
323
324 /// Access - Used by C++ decls for the access specifier.
325 // NOTE: VC++ treats enums as signed, avoid using the AccessSpecifier enum
326 unsigned Access : 2;
327
328 /// Whether this declaration was loaded from an AST file.
329 unsigned FromASTFile : 1;
330
331 /// IdentifierNamespace - This specifies what IDNS_* namespace this lives in.
332 unsigned IdentifierNamespace : 14;
333
334 /// If 0, we have not computed the linkage of this declaration.
335 /// Otherwise, it is the linkage + 1.
336 mutable unsigned CacheValidAndLinkage : 3;
337
338 /// Allocate memory for a deserialized declaration.
339 ///
340 /// This routine must be used to allocate memory for any declaration that is
341 /// deserialized from a module file.
342 ///
343 /// \param Size The size of the allocated object.
344 /// \param Ctx The context in which we will allocate memory.
345 /// \param ID The global ID of the deserialized declaration.
346 /// \param Extra The amount of extra space to allocate after the object.
347 void *operator new(std::size_t Size, const ASTContext &Ctx, unsigned ID,
348 std::size_t Extra = 0);
349
350 /// Allocate memory for a non-deserialized declaration.
351 void *operator new(std::size_t Size, const ASTContext &Ctx,
352 DeclContext *Parent, std::size_t Extra = 0);
353
354private:
355 bool AccessDeclContextSanity() const;
356
357 /// Get the module ownership kind to use for a local lexical child of \p DC,
358 /// which may be either a local or (rarely) an imported declaration.
359 static ModuleOwnershipKind getModuleOwnershipKindForChildOf(DeclContext *DC) {
360 if (DC) {
361 auto *D = cast<Decl>(DC);
362 auto MOK = D->getModuleOwnershipKind();
363 if (MOK != ModuleOwnershipKind::Unowned &&
364 (!D->isFromASTFile() || D->hasLocalOwningModuleStorage()))
365 return MOK;
366 // If D is not local and we have no local module storage, then we don't
367 // need to track module ownership at all.
368 }
369 return ModuleOwnershipKind::Unowned;
370 }
371
372public:
373 Decl() = delete;
374 Decl(const Decl&) = delete;
375 Decl(Decl &&) = delete;
376 Decl &operator=(const Decl&) = delete;
377 Decl &operator=(Decl&&) = delete;
378
379protected:
380 Decl(Kind DK, DeclContext *DC, SourceLocation L)
381 : NextInContextAndBits(nullptr, getModuleOwnershipKindForChildOf(DC)),
382 DeclCtx(DC), Loc(L), DeclKind(DK), InvalidDecl(false), HasAttrs(false),
383 Implicit(false), Used(false), Referenced(false),
384 TopLevelDeclInObjCContainer(false), Access(AS_none), FromASTFile(0),
385 IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
386 CacheValidAndLinkage(0) {
387 if (StatisticsEnabled) add(DK);
388 }
389
390 Decl(Kind DK, EmptyShell Empty)
391 : DeclKind(DK), InvalidDecl(false), HasAttrs(false), Implicit(false),
392 Used(false), Referenced(false), TopLevelDeclInObjCContainer(false),
393 Access(AS_none), FromASTFile(0),
394 IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
395 CacheValidAndLinkage(0) {
396 if (StatisticsEnabled) add(DK);
397 }
398
399 virtual ~Decl();
400
401 /// Update a potentially out-of-date declaration.
402 void updateOutOfDate(IdentifierInfo &II) const;
403
404 Linkage getCachedLinkage() const {
405 return Linkage(CacheValidAndLinkage - 1);
406 }
407
408 void setCachedLinkage(Linkage L) const {
409 CacheValidAndLinkage = L + 1;
410 }
411
412 bool hasCachedLinkage() const {
413 return CacheValidAndLinkage;
414 }
415
416public:
417 /// Source range that this declaration covers.
418 virtual SourceRange getSourceRange() const LLVM_READONLY {
419 return SourceRange(getLocation(), getLocation());
420 }
421
422 SourceLocation getBeginLoc() const LLVM_READONLY {
423 return getSourceRange().getBegin();
424 }
425
426 SourceLocation getEndLoc() const LLVM_READONLY {
427 return getSourceRange().getEnd();
428 }
429
430 SourceLocation getLocation() const { return Loc; }
431 void setLocation(SourceLocation L) { Loc = L; }
432
433 Kind getKind() const { return static_cast<Kind>(DeclKind); }
434 const char *getDeclKindName() const;
435
436 Decl *getNextDeclInContext() { return NextInContextAndBits.getPointer(); }
437 const Decl *getNextDeclInContext() const {return NextInContextAndBits.getPointer();}
438
439 DeclContext *getDeclContext() {
440 if (isInSemaDC())
441 return getSemanticDC();
442 return getMultipleDC()->SemanticDC;
443 }
444 const DeclContext *getDeclContext() const {
445 return const_cast<Decl*>(this)->getDeclContext();
446 }
447
448 /// Find the innermost non-closure ancestor of this declaration,
449 /// walking up through blocks, lambdas, etc. If that ancestor is
450 /// not a code context (!isFunctionOrMethod()), returns null.
451 ///
452 /// A declaration may be its own non-closure context.
453 Decl *getNonClosureContext();
454 const Decl *getNonClosureContext() const {
455 return const_cast<Decl*>(this)->getNonClosureContext();
456 }
457
458 TranslationUnitDecl *getTranslationUnitDecl();
459 const TranslationUnitDecl *getTranslationUnitDecl() const {
460 return const_cast<Decl*>(this)->getTranslationUnitDecl();
461 }
462
463 bool isInAnonymousNamespace() const;
464
465 bool isInStdNamespace() const;
466
467 ASTContext &getASTContext() const LLVM_READONLY;
468
469 /// Helper to get the language options from the ASTContext.
470 /// Defined out of line to avoid depending on ASTContext.h.
471 const LangOptions &getLangOpts() const LLVM_READONLY;
472
473 void setAccess(AccessSpecifier AS) {
474 Access = AS;
475 assert(AccessDeclContextSanity());
476 }
477
478 AccessSpecifier getAccess() const {
479 assert(AccessDeclContextSanity());
480 return AccessSpecifier(Access);
481 }
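Note (usage sketch, not part of DeclBase.h): how the access accessors above are typically consumed; `Member` is a hypothetical `const Decl *` pointing at a class member declaration.
    if (Member->getAccess() == AS_private) {
      // The member was declared in a private section of its class.
    }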
482
483 /// Retrieve the access specifier for this declaration, even though
484 /// it may not yet have been properly set.
485 AccessSpecifier getAccessUnsafe() const {
486 return AccessSpecifier(Access);
487 }
488
489 bool hasAttrs() const { return HasAttrs; }
490
491 void setAttrs(const AttrVec& Attrs) {
492 return setAttrsImpl(Attrs, getASTContext());
493 }
494
495 AttrVec &getAttrs() {
496 return const_cast<AttrVec&>(const_cast<const Decl*>(this)->getAttrs());
497 }
498
499 const AttrVec &getAttrs() const;
500 void dropAttrs();
501 void addAttr(Attr *A);
502
503 using attr_iterator = AttrVec::const_iterator;
504 using attr_range = llvm::iterator_range<attr_iterator>;
505
506 attr_range attrs() const {
507 return attr_range(attr_begin(), attr_end());
508 }
509
510 attr_iterator attr_begin() const {
511 return hasAttrs() ? getAttrs().begin() : nullptr;
512 }
513 attr_iterator attr_end() const {
514 return hasAttrs() ? getAttrs().end() : nullptr;
515 }
516
517 template <typename T>
518 void dropAttr() {
519 if (!HasAttrs) return;
520
521 AttrVec &Vec = getAttrs();
522 llvm::erase_if(Vec, [](Attr *A) { return isa<T>(A); });
523
524 if (Vec.empty())
525 HasAttrs = false;
526 }
527
528 template <typename T>
529 llvm::iterator_range<specific_attr_iterator<T>> specific_attrs() const {
530 return llvm::make_range(specific_attr_begin<T>(), specific_attr_end<T>());
531 }
532
533 template <typename T>
534 specific_attr_iterator<T> specific_attr_begin() const {
535 return specific_attr_iterator<T>(attr_begin());
536 }
537
538 template <typename T>
539 specific_attr_iterator<T> specific_attr_end() const {
540 return specific_attr_iterator<T>(attr_end());
541 }
542
543 template<typename T> T *getAttr() const {
544 return hasAttrs() ? getSpecificAttr<T>(getAttrs()) : nullptr;
545 }
546
547 template<typename T> bool hasAttr() const {
548 return hasAttrs() && hasSpecificAttr<T>(getAttrs());
4. Returning zero, which participates in a condition later
549 }
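Note (usage sketch, not part of DeclBase.h): the attribute accessors above used together; `D` is a hypothetical `const Decl *`, and the snippet assumes clang/AST/Attr.h is included so the attribute classes are complete.
    if (D->hasAttr<UsedAttr>()) {
      // __attribute__((used)) was written on some declaration of D.
    }
    if (const auto *DA = D->getAttr<DeprecatedAttr>())
      llvm::errs() << "deprecated: " << DA->getMessage() << "\n";
    for (const AnnotateAttr *AA : D->specific_attrs<AnnotateAttr>())
      llvm::errs() << "annotation: " << AA->getAnnotation() << "\n";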
550
551 /// getMaxAlignment - return the maximum alignment specified by attributes
552 /// on this decl, 0 if there are none.
553 unsigned getMaxAlignment() const;
554
555 /// setInvalidDecl - Indicates the Decl had a semantic error. This
556 /// allows for graceful error recovery.
557 void setInvalidDecl(bool Invalid = true);
558 bool isInvalidDecl() const { return (bool) InvalidDecl; }
559
560 /// isImplicit - Indicates whether the declaration was implicitly
561 /// generated by the implementation. If false, this declaration
562 /// was written explicitly in the source code.
563 bool isImplicit() const { return Implicit; }
564 void setImplicit(bool I = true) { Implicit = I; }
565
566 /// Whether *any* (re-)declaration of the entity was used, meaning that
567 /// a definition is required.
568 ///
569 /// \param CheckUsedAttr When true, also consider the "used" attribute
570 /// (in addition to the "used" bit set by \c setUsed()) when determining
571 /// whether the function is used.
572 bool isUsed(bool CheckUsedAttr = true) const;
573
574 /// Set whether the declaration is used, in the sense of odr-use.
575 ///
576 /// This should only be used immediately after creating a declaration.
577 /// It intentionally doesn't notify any listeners.
578 void setIsUsed() { getCanonicalDecl()->Used = true; }
579
580 /// Mark the declaration used, in the sense of odr-use.
581 ///
582 /// This notifies any mutation listeners in addition to setting a bit
583 /// indicating the declaration is used.
584 void markUsed(ASTContext &C);
585
586 /// Whether any declaration of this entity was referenced.
587 bool isReferenced() const;
588
589 /// Whether this declaration was referenced. This should not be relied
590 /// upon for anything other than debugging.
591 bool isThisDeclarationReferenced() const { return Referenced; }
592
593 void setReferenced(bool R = true) { Referenced = R; }
594
595 /// Whether this declaration is a top-level declaration (function,
596 /// global variable, etc.) that is lexically inside an objc container
597 /// definition.
598 bool isTopLevelDeclInObjCContainer() const {
599 return TopLevelDeclInObjCContainer;
600 }
601
602 void setTopLevelDeclInObjCContainer(bool V = true) {
603 TopLevelDeclInObjCContainer = V;
604 }
605
606 /// Looks on this and related declarations for an applicable
607 /// external source symbol attribute.
608 ExternalSourceSymbolAttr *getExternalSourceSymbolAttr() const;
609
610 /// Whether this declaration was marked as being private to the
611 /// module in which it was defined.
612 bool isModulePrivate() const {
613 return getModuleOwnershipKind() == ModuleOwnershipKind::ModulePrivate;
614 }
615
616 /// Return true if this declaration has an attribute which acts as
617 /// definition of the entity, such as 'alias' or 'ifunc'.
618 bool hasDefiningAttr() const;
619
620 /// Return this declaration's defining attribute if it has one.
621 const Attr *getDefiningAttr() const;
622
623protected:
624 /// Specify that this declaration was marked as being private
625 /// to the module in which it was defined.
626 void setModulePrivate() {
627 // The module-private specifier has no effect on unowned declarations.
628 // FIXME: We should track this in some way for source fidelity.
629 if (getModuleOwnershipKind() == ModuleOwnershipKind::Unowned)
630 return;
631 setModuleOwnershipKind(ModuleOwnershipKind::ModulePrivate);
632 }
633
634public:
635 /// Set the FromASTFile flag. This indicates that this declaration
636 /// was deserialized and not parsed from source code and enables
637 /// features such as module ownership information.
638 void setFromASTFile() {
639 FromASTFile = true;
640 }
641
642 /// Set the owning module ID. This may only be called for
643 /// deserialized Decls.
644 void setOwningModuleID(unsigned ID) {
645 assert(isFromASTFile() && "Only works on a deserialized declaration");
646 *((unsigned*)this - 2) = ID;
647 }
648
649public:
650 /// Determine the availability of the given declaration.
651 ///
652 /// This routine will determine the most restrictive availability of
653 /// the given declaration (e.g., preferring 'unavailable' to
654 /// 'deprecated').
655 ///
656 /// \param Message If non-NULL and the result is not \c
657 /// AR_Available, will be set to a (possibly empty) message
658 /// describing why the declaration has not been introduced, is
659 /// deprecated, or is unavailable.
660 ///
661 /// \param EnclosingVersion The version to compare with. If empty, assume the
662 /// deployment target version.
663 ///
664 /// \param RealizedPlatform If non-NULL and the availability result is found
665 /// in an availability attribute, it will be set to the platform that is written in
666 /// that attribute.
667 AvailabilityResult
668 getAvailability(std::string *Message = nullptr,
669 VersionTuple EnclosingVersion = VersionTuple(),
670 StringRef *RealizedPlatform = nullptr) const;
671
672 /// Retrieve the version of the target platform in which this
673 /// declaration was introduced.
674 ///
675 /// \returns An empty version tuple if this declaration has no 'introduced'
676 /// availability attributes, or the version tuple that's specified in the
677 /// attribute otherwise.
678 VersionTuple getVersionIntroduced() const;
679
680 /// Determine whether this declaration is marked 'deprecated'.
681 ///
682 /// \param Message If non-NULL and the declaration is deprecated,
683 /// this will be set to the message describing why the declaration
684 /// was deprecated (which may be empty).
685 bool isDeprecated(std::string *Message = nullptr) const {
686 return getAvailability(Message) == AR_Deprecated;
687 }
688
689 /// Determine whether this declaration is marked 'unavailable'.
690 ///
691 /// \param Message If non-NULL and the declaration is unavailable,
692 /// this will be set to the message describing why the declaration
693 /// was made unavailable (which may be empty).
694 bool isUnavailable(std::string *Message = nullptr) const {
695 return getAvailability(Message) == AR_Unavailable;
696 }
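Note (usage sketch, not part of DeclBase.h): querying availability together with its diagnostic message; `D` is a hypothetical `const Decl *`.
    std::string Msg;
    if (D->isDeprecated(&Msg))
      llvm::errs() << "deprecated: " << Msg << "\n";
    else if (D->getAvailability(&Msg) == AR_Unavailable)
      llvm::errs() << "unavailable: " << Msg << "\n";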
697
698 /// Determine whether this is a weak-imported symbol.
699 ///
700 /// Weak-imported symbols are typically marked with the
701 /// 'weak_import' attribute, but may also be marked with an
702 /// 'availability' attribute where we're targeting a platform prior to
703 /// the introduction of this feature.
704 bool isWeakImported() const;
705
706 /// Determines whether this symbol can be weak-imported,
707 /// e.g., whether it would be well-formed to add the weak_import
708 /// attribute.
709 ///
710 /// \param IsDefinition Set to \c true to indicate that this
711 /// declaration cannot be weak-imported because it has a definition.
712 bool canBeWeakImported(bool &IsDefinition) const;
713
714 /// Determine whether this declaration came from an AST file (such as
715 /// a precompiled header or module) rather than having been parsed.
716 bool isFromASTFile() const { return FromASTFile; }
717
718 /// Retrieve the global declaration ID associated with this
719 /// declaration, which specifies where this Decl was loaded from.
720 unsigned getGlobalID() const {
721 if (isFromASTFile())
722 return *((const unsigned*)this - 1);
723 return 0;
724 }
725
726 /// Retrieve the global ID of the module that owns this particular
727 /// declaration.
728 unsigned getOwningModuleID() const {
729 if (isFromASTFile())
730 return *((const unsigned*)this - 2);
731 return 0;
732 }
733
734private:
735 Module *getOwningModuleSlow() const;
736
737protected:
738 bool hasLocalOwningModuleStorage() const;
739
740public:
741 /// Get the imported owning module, if this decl is from an imported
742 /// (non-local) module.
743 Module *getImportedOwningModule() const {
744 if (!isFromASTFile() || !hasOwningModule())
745 return nullptr;
746
747 return getOwningModuleSlow();
748 }
749
750 /// Get the local owning module, if known. Returns nullptr if owner is
751 /// not yet known or declaration is not from a module.
752 Module *getLocalOwningModule() const {
753 if (isFromASTFile() || !hasOwningModule())
754 return nullptr;
755
756 assert(hasLocalOwningModuleStorage() &&
757        "owned local decl but no local module storage");
758 return reinterpret_cast<Module *const *>(this)[-1];
759 }
760 void setLocalOwningModule(Module *M) {
761 assert(!isFromASTFile() && hasOwningModule() &&
762        hasLocalOwningModuleStorage() &&
763        "should not have a cached owning module");
764 reinterpret_cast<Module **>(this)[-1] = M;
765 }
766
767 /// Is this declaration owned by some module?
768 bool hasOwningModule() const {
769 return getModuleOwnershipKind() != ModuleOwnershipKind::Unowned;
770 }
771
772 /// Get the module that owns this declaration (for visibility purposes).
773 Module *getOwningModule() const {
774 return isFromASTFile() ? getImportedOwningModule() : getLocalOwningModule();
775 }
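Note (usage sketch, not part of DeclBase.h): asking for the visibility-owning module; `D` is a hypothetical `const Decl *` in a modules-enabled AST.
    if (Module *M = D->getOwningModule())
      llvm::errs() << "owned by " << M->getFullModuleName() << "\n";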
776
777 /// Get the module that owns this declaration for linkage purposes.
778 /// Such a module exists only under the C++ Modules TS.
779 ///
780 /// \param IgnoreLinkage Ignore the linkage of the entity; assume that
781 /// all declarations in a global module fragment are unowned.
782 Module *getOwningModuleForLinkage(bool IgnoreLinkage = false) const;
783
784 /// Determine whether this declaration is definitely visible to name lookup,
785 /// independent of whether the owning module is visible.
786 /// Note: The declaration may be visible even if this returns \c false if the
787 /// owning module is visible within the query context. This is a low-level
788 /// helper function; most code should be calling Sema::isVisible() instead.
789 bool isUnconditionallyVisible() const {
790 return (int)getModuleOwnershipKind() <= (int)ModuleOwnershipKind::Visible;
791 }
792
793 /// Set that this declaration is globally visible, even if it came from a
794 /// module that is not visible.
795 void setVisibleDespiteOwningModule() {
796 if (!isUnconditionallyVisible())
797 setModuleOwnershipKind(ModuleOwnershipKind::Visible);
798 }
799
800 /// Get the kind of module ownership for this declaration.
801 ModuleOwnershipKind getModuleOwnershipKind() const {
802 return NextInContextAndBits.getInt();
803 }
804
805 /// Set whether this declaration is hidden from name lookup.
806 void setModuleOwnershipKind(ModuleOwnershipKind MOK) {
807 assert(!(getModuleOwnershipKind() == ModuleOwnershipKind::Unowned &&
808          MOK != ModuleOwnershipKind::Unowned && !isFromASTFile() &&
809          !hasLocalOwningModuleStorage()) &&
810        "no storage available for owning module for this declaration");
811 NextInContextAndBits.setInt(MOK);
812 }
813
814 unsigned getIdentifierNamespace() const {
815 return IdentifierNamespace;
816 }
817
818 bool isInIdentifierNamespace(unsigned NS) const {
819 return getIdentifierNamespace() & NS;
820 }
821
822 static unsigned getIdentifierNamespaceForKind(Kind DK);
823
824 bool hasTagIdentifierNamespace() const {
825 return isTagIdentifierNamespace(getIdentifierNamespace());
826 }
827
828 static bool isTagIdentifierNamespace(unsigned NS) {
829 // TagDecls have Tag and Type set and may also have TagFriend.
830 return (NS & ~IDNS_TagFriend) == (IDNS_Tag | IDNS_Type);
831 }
832
833 /// getLexicalDeclContext - The declaration context where this Decl was
834 /// lexically declared (LexicalDC). May be different from
835 /// getDeclContext() (SemanticDC).
836 /// e.g.:
837 ///
838 /// namespace A {
839 /// void f(); // SemanticDC == LexicalDC == 'namespace A'
840 /// }
841 /// void A::f(); // SemanticDC == namespace 'A'
842 /// // LexicalDC == global namespace
843 DeclContext *getLexicalDeclContext() {
844 if (isInSemaDC())
845 return getSemanticDC();
846 return getMultipleDC()->LexicalDC;
847 }
848 const DeclContext *getLexicalDeclContext() const {
849 return const_cast<Decl*>(this)->getLexicalDeclContext();
850 }
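Note (usage sketch, not part of DeclBase.h): the semantic/lexical split for the out-of-line 'void A::f()' from the comment above; `FD` is a hypothetical `const FunctionDecl *` for that definition.
    const DeclContext *SemaDC = FD->getDeclContext();        // namespace 'A'
    const DeclContext *LexDC  = FD->getLexicalDeclContext(); // translation unit
    assert(SemaDC != LexDC && FD->isOutOfLine());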
851
852 /// Determine whether this declaration is declared out of line (outside its
853 /// semantic context).
854 virtual bool isOutOfLine() const;
855
856 /// setDeclContext - Set both the semantic and lexical DeclContext
857 /// to DC.
858 void setDeclContext(DeclContext *DC);
859
860 void setLexicalDeclContext(DeclContext *DC);
861
862 /// Determine whether this declaration is a templated entity (whether it is
863 /// within the scope of a template parameter).
864 bool isTemplated() const;
865
866 /// Determine the number of levels of template parameter surrounding this
867 /// declaration.
868 unsigned getTemplateDepth() const;
869
870 /// isDefinedOutsideFunctionOrMethod - This predicate returns true if this
871 /// scoped decl is defined outside the current function or method. This is
872 /// roughly global variables and functions, but also handles enums (which
873 /// could be defined inside or outside a function etc).
874 bool isDefinedOutsideFunctionOrMethod() const {
875 return getParentFunctionOrMethod() == nullptr;
876 }
877
878 /// Determine whether a substitution into this declaration would occur as
879 /// part of a substitution into a dependent local scope. Such a substitution
880 /// transitively substitutes into all constructs nested within this
881 /// declaration.
882 ///
883 /// This recognizes non-defining declarations as well as members of local
884 /// classes and lambdas:
885 /// \code
886 /// template<typename T> void foo() { void bar(); }
887 /// template<typename T> void foo2() { class ABC { void bar(); }; }
888 /// template<typename T> inline int x = [](){ return 0; }();
889 /// \endcode
890 bool isInLocalScopeForInstantiation() const;
891
892 /// If this decl is defined inside a function/method/block it returns
893 /// the corresponding DeclContext, otherwise it returns null.
894 const DeclContext *getParentFunctionOrMethod() const;
895 DeclContext *getParentFunctionOrMethod() {
896 return const_cast<DeclContext*>(
897 const_cast<const Decl*>(this)->getParentFunctionOrMethod());
898 }
899
900 /// Retrieves the "canonical" declaration of the given declaration.
901 virtual Decl *getCanonicalDecl() { return this; }
902 const Decl *getCanonicalDecl() const {
903 return const_cast<Decl*>(this)->getCanonicalDecl();
904 }
905
906 /// Whether this particular Decl is a canonical one.
907 bool isCanonicalDecl() const { return getCanonicalDecl() == this; }
908
909protected:
910 /// Returns the next redeclaration or itself if this is the only decl.
911 ///
912 /// Decl subclasses that can be redeclared should override this method so that
913 /// Decl::redecl_iterator can iterate over them.
914 virtual Decl *getNextRedeclarationImpl() { return this; }
915
916 /// Implementation of getPreviousDecl(), to be overridden by any
917 /// subclass that has a redeclaration chain.
918 virtual Decl *getPreviousDeclImpl() { return nullptr; }
919
920 /// Implementation of getMostRecentDecl(), to be overridden by any
921 /// subclass that has a redeclaration chain.
922 virtual Decl *getMostRecentDeclImpl() { return this; }
923
924public:
925 /// Iterates through all the redeclarations of the same decl.
926 class redecl_iterator {
927 /// Current - The current declaration.
928 Decl *Current = nullptr;
929 Decl *Starter;
930
931 public:
932 using value_type = Decl *;
933 using reference = const value_type &;
934 using pointer = const value_type *;
935 using iterator_category = std::forward_iterator_tag;
936 using difference_type = std::ptrdiff_t;
937
938 redecl_iterator() = default;
939 explicit redecl_iterator(Decl *C) : Current(C), Starter(C) {}
940
941 reference operator*() const { return Current; }
942 value_type operator->() const { return Current; }
943
944 redecl_iterator& operator++() {
945 assert(Current && "Advancing while iterator has reached end");
946 // Get either previous decl or latest decl.
947 Decl *Next = Current->getNextRedeclarationImpl();
948 assert(Next && "Should return next redeclaration or itself, never null!");
949 Current = (Next != Starter) ? Next : nullptr;
950 return *this;
951 }
952
953 redecl_iterator operator++(int) {
954 redecl_iterator tmp(*this);
955 ++(*this);
956 return tmp;
957 }
958
959 friend bool operator==(redecl_iterator x, redecl_iterator y) {
960 return x.Current == y.Current;
961 }
962
963 friend bool operator!=(redecl_iterator x, redecl_iterator y) {
964 return x.Current != y.Current;
965 }
966 };
967
968 using redecl_range = llvm::iterator_range<redecl_iterator>;
969
970 /// Returns an iterator range for all the redeclarations of the same
971 /// decl. It will iterate at least once (when this decl is the only one).
972 redecl_range redecls() const {
973 return redecl_range(redecls_begin(), redecls_end());
974 }
975
976 redecl_iterator redecls_begin() const {
977 return redecl_iterator(const_cast<Decl *>(this));
978 }
979
980 redecl_iterator redecls_end() const { return redecl_iterator(); }
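Note (usage sketch, not part of DeclBase.h): walking a redeclaration chain; `D` is a hypothetical `const Decl *`.
    unsigned NumRedecls = 0;
    for (const Decl *Redecl : D->redecls()) {
      (void)Redecl;      // the range visits D itself, so the count is >= 1
      ++NumRedecls;
    }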
981
982 /// Retrieve the previous declaration that declares the same entity
983 /// as this declaration, or NULL if there is no previous declaration.
984 Decl *getPreviousDecl() { return getPreviousDeclImpl(); }
985
986 /// Retrieve the previous declaration that declares the same entity
987 /// as this declaration, or NULL if there is no previous declaration.
988 const Decl *getPreviousDecl() const {
989 return const_cast<Decl *>(this)->getPreviousDeclImpl();
990 }
991
992 /// True if this is the first declaration in its redeclaration chain.
993 bool isFirstDecl() const {
994 return getPreviousDecl() == nullptr;
995 }
996
997 /// Retrieve the most recent declaration that declares the same entity
998 /// as this declaration (which may be this declaration).
999 Decl *getMostRecentDecl() { return getMostRecentDeclImpl(); }
1000
1001 /// Retrieve the most recent declaration that declares the same entity
1002 /// as this declaration (which may be this declaration).
1003 const Decl *getMostRecentDecl() const {
1004 return const_cast<Decl *>(this)->getMostRecentDeclImpl();
1005 }
1006
1007 /// getBody - If this Decl represents a declaration for a body of code,
1008 /// such as a function or method definition, this method returns the
1009 /// top-level Stmt* of that body. Otherwise this method returns null.
1010 virtual Stmt* getBody() const { return nullptr; }
1011
1012 /// Returns true if this \c Decl represents a declaration for a body of
1013 /// code, such as a function or method definition.
1014 /// Note that \c hasBody can also return true if any redeclaration of this
1015 /// \c Decl represents a declaration for a body of code.
1016 virtual bool hasBody() const { return getBody() != nullptr; }
1017
1018 /// getBodyRBrace - Gets the right brace of the body, if a body exists.
1019 /// This works whether the body is a CompoundStmt or a CXXTryStmt.
1020 SourceLocation getBodyRBrace() const;
1021
1022 // global temp stats (until we have a per-module visitor)
1023 static void add(Kind k);
1024 static void EnableStatistics();
1025 static void PrintStats();
1026
1027 /// isTemplateParameter - Determines whether this declaration is a
1028 /// template parameter.
1029 bool isTemplateParameter() const;
1030
1031 /// isTemplateParameterPack - Determines whether this declaration is a
1032 /// template parameter pack.
1033 bool isTemplateParameterPack() const;
1034
1035 /// Whether this declaration is a parameter pack.
1036 bool isParameterPack() const;
1037
1038 /// Returns true if this declaration is a template.
1039 bool isTemplateDecl() const;
1040
1041 /// Whether this declaration is a function or function template.
1042 bool isFunctionOrFunctionTemplate() const {
1043 return (DeclKind >= Decl::firstFunction &&
1044 DeclKind <= Decl::lastFunction) ||
1045 DeclKind == FunctionTemplate;
1046 }
1047
1048 /// If this is a declaration that describes some template, this
1049 /// method returns that template declaration.
1050 ///
1051 /// Note that this returns nullptr for partial specializations, because they
1052 /// are not modeled as TemplateDecls. Use getDescribedTemplateParams to handle
1053 /// those cases.
1054 TemplateDecl *getDescribedTemplate() const;
1055
1056 /// If this is a declaration that describes some template or partial
1057 /// specialization, this returns the corresponding template parameter list.
1058 const TemplateParameterList *getDescribedTemplateParams() const;
1059
1060 /// Returns the function itself, or the templated function if this is a
1061 /// function template.
1062 FunctionDecl *getAsFunction() LLVM_READONLY;
1063
1064 const FunctionDecl *getAsFunction() const {
1065 return const_cast<Decl *>(this)->getAsFunction();
1066 }
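Note (usage sketch, not part of DeclBase.h): handling functions and function templates uniformly; `D` is a hypothetical `const Decl *`.
    if (const FunctionDecl *FD = D->getAsFunction())
      llvm::errs() << FD->getNumParams() << " parameter(s)\n";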
1067
1068 /// Changes the namespace of this declaration to reflect that it's
1069 /// a function-local extern declaration.
1070 ///
1071 /// These declarations appear in the lexical context of the extern
1072 /// declaration, but in the semantic context of the enclosing namespace
1073 /// scope.
1074 void setLocalExternDecl() {
1075 Decl *Prev = getPreviousDecl();
1076 IdentifierNamespace &= ~IDNS_Ordinary;
1077
1078 // It's OK for the declaration to still have the "invisible friend" flag or
1079 // the "conflicts with tag declarations in this scope" flag for the outer
1080 // scope.
1081 assert((IdentifierNamespace & ~(IDNS_OrdinaryFriend | IDNS_Tag)) == 0 &&
1082        "namespace is not ordinary");
1083
1084 IdentifierNamespace |= IDNS_LocalExtern;
1085 if (Prev && Prev->getIdentifierNamespace() & IDNS_Ordinary)
1086 IdentifierNamespace |= IDNS_Ordinary;
1087 }
1088
1089 /// Determine whether this is a block-scope declaration with linkage.
1090 /// This will either be a local variable declaration declared 'extern', or a
1091 /// local function declaration.
1092 bool isLocalExternDecl() {
1093 return IdentifierNamespace & IDNS_LocalExtern;
1094 }
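Note (illustration, not part of DeclBase.h): the C++ construct these bits describe.
    void g() {
      extern int n;   // block-scope declaration with linkage;
    }                 // isLocalExternDecl() is true for this declaration of 'n'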
1095
1096 /// Changes the namespace of this declaration to reflect that it's
1097 /// the object of a friend declaration.
1098 ///
1099 /// These declarations appear in the lexical context of the friending
1100 /// class, but in the semantic context of the actual entity. This property
1101 /// applies only to a specific decl object; other redeclarations of the
1102 /// same entity may not (and probably don't) share this property.
1103 void setObjectOfFriendDecl(bool PerformFriendInjection = false) {
1104 unsigned OldNS = IdentifierNamespace;
1105 assert((OldNS & (IDNS_Tag | IDNS_Ordinary |
1106                  IDNS_TagFriend | IDNS_OrdinaryFriend |
1107                  IDNS_LocalExtern | IDNS_NonMemberOperator)) &&
1108        "namespace includes neither ordinary nor tag");
1109 assert(!(OldNS & ~(IDNS_Tag | IDNS_Ordinary | IDNS_Type |
1110                    IDNS_TagFriend | IDNS_OrdinaryFriend |
1111                    IDNS_LocalExtern | IDNS_NonMemberOperator)) &&
1112        "namespace includes other than ordinary or tag");
1113
1114 Decl *Prev = getPreviousDecl();
1115 IdentifierNamespace &= ~(IDNS_Ordinary | IDNS_Tag | IDNS_Type);
1116
1117 if (OldNS & (IDNS_Tag | IDNS_TagFriend)) {
1118 IdentifierNamespace |= IDNS_TagFriend;
1119 if (PerformFriendInjection ||
1120 (Prev && Prev->getIdentifierNamespace() & IDNS_Tag))
1121 IdentifierNamespace |= IDNS_Tag | IDNS_Type;
1122 }
1123
1124 if (OldNS & (IDNS_Ordinary | IDNS_OrdinaryFriend |
1125 IDNS_LocalExtern | IDNS_NonMemberOperator)) {
1126 IdentifierNamespace |= IDNS_OrdinaryFriend;
1127 if (PerformFriendInjection ||
1128 (Prev && Prev->getIdentifierNamespace() & IDNS_Ordinary))
1129 IdentifierNamespace |= IDNS_Ordinary;
1130 }
1131 }
1132
1133 enum FriendObjectKind {
1134 FOK_None, ///< Not a friend object.
1135 FOK_Declared, ///< A friend of a previously-declared entity.
1136 FOK_Undeclared ///< A friend of a previously-undeclared entity.
1137 };
1138
1139 /// Determines whether this declaration is the object of a
1140 /// friend declaration and, if so, what kind.
1141 ///
1142 /// There is currently no direct way to find the associated FriendDecl.
1143 FriendObjectKind getFriendObjectKind() const {
1144 unsigned mask =
1145 (IdentifierNamespace & (IDNS_TagFriend | IDNS_OrdinaryFriend));
1146 if (!mask) return FOK_None;
1147 return (IdentifierNamespace & (IDNS_Tag | IDNS_Ordinary) ? FOK_Declared
1148 : FOK_Undeclared);
1149 }
1150
1151 /// Specifies that this declaration is a C++ overloaded non-member.
1152 void setNonMemberOperator() {
1153 assert(getKind() == Function || getKind() == FunctionTemplate);
1154 assert((IdentifierNamespace & IDNS_Ordinary) &&
1155        "visible non-member operators should be in ordinary namespace");
1156 IdentifierNamespace |= IDNS_NonMemberOperator;
1157 }
1158
1159 static bool classofKind(Kind K) { return true; }
1160 static DeclContext *castToDeclContext(const Decl *);
1161 static Decl *castFromDeclContext(const DeclContext *);
1162
1163 void print(raw_ostream &Out, unsigned Indentation = 0,
1164 bool PrintInstantiation = false) const;
1165 void print(raw_ostream &Out, const PrintingPolicy &Policy,
1166 unsigned Indentation = 0, bool PrintInstantiation = false) const;
1167 static void printGroup(Decl** Begin, unsigned NumDecls,
1168 raw_ostream &Out, const PrintingPolicy &Policy,
1169 unsigned Indentation = 0);
1170
1171 // Debuggers don't usually respect default arguments.
1172 void dump() const;
1173
1174 // Same as dump(), but forces color printing.
1175 void dumpColor() const;
1176
1177 void dump(raw_ostream &Out, bool Deserialize = false,
1178 ASTDumpOutputFormat OutputFormat = ADOF_Default) const;
1179
1180 /// \return Unique reproducible object identifier
1181 int64_t getID() const;
1182
1183 /// Looks through the Decl's underlying type to extract a FunctionType
1184 /// when possible. Will return null if the type underlying the Decl does not
1185 /// have a FunctionType.
1186 const FunctionType *getFunctionType(bool BlocksToo = true) const;
1187
1188private:
1189 void setAttrsImpl(const AttrVec& Attrs, ASTContext &Ctx);
1190 void setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
1191 ASTContext &Ctx);
1192
1193protected:
1194 ASTMutationListener *getASTMutationListener() const;
1195};
1196
1197/// Determine whether two declarations declare the same entity.
1198inline bool declaresSameEntity(const Decl *D1, const Decl *D2) {
1199 if (!D1 || !D2)
1200 return false;
1201
1202 if (D1 == D2)
1203 return true;
1204
1205 return D1->getCanonicalDecl() == D2->getCanonicalDecl();
1206}
1207
1208/// PrettyStackTraceDecl - If a crash occurs, indicate that it happened when
1209/// doing something to a specific decl.
1210class PrettyStackTraceDecl : public llvm::PrettyStackTraceEntry {
1211 const Decl *TheDecl;
1212 SourceLocation Loc;
1213 SourceManager &SM;
1214 const char *Message;
1215
1216public:
1217 PrettyStackTraceDecl(const Decl *theDecl, SourceLocation L,
1218 SourceManager &sm, const char *Msg)
1219 : TheDecl(theDecl), Loc(L), SM(sm), Message(Msg) {}
1220
1221 void print(raw_ostream &OS) const override;
1222};
1223} // namespace clang
1224
1225 // Required to determine the layout of PointerUnion<NamedDecl*> before the
1226 // NamedDecl definition has been seen; it is first used in DeclListNode::operator*.
1227namespace llvm {
1228 template <> struct PointerLikeTypeTraits<::clang::NamedDecl *> {
1229 static inline void *getAsVoidPointer(::clang::NamedDecl *P) { return P; }
1230 static inline ::clang::NamedDecl *getFromVoidPointer(void *P) {
1231 return static_cast<::clang::NamedDecl *>(P);
1232 }
1233 static constexpr int NumLowBitsAvailable = 3;
1234 };
1235}
1236
1237namespace clang {
1238/// A list storing NamedDecls in the lookup tables.
1239class DeclListNode {
1240 friend class ASTContext; // allocate, deallocate nodes.
1241 friend class StoredDeclsList;
1242public:
1243 using Decls = llvm::PointerUnion<NamedDecl*, DeclListNode*>;
1244 class iterator {
1245 friend class DeclContextLookupResult;
1246 friend class StoredDeclsList;
1247
1248 Decls Ptr;
1249 iterator(Decls Node) : Ptr(Node) { }
1250 public:
1251 using difference_type = ptrdiff_t;
1252 using value_type = NamedDecl*;
1253 using pointer = void;
1254 using reference = value_type;
1255 using iterator_category = std::forward_iterator_tag;
1256
1257 iterator() = default;
1258
1259 reference operator*() const {
1260 assert(Ptr && "dereferencing end() iterator");
1261 if (DeclListNode *CurNode = Ptr.dyn_cast<DeclListNode*>())
1262 return CurNode->D;
1263 return Ptr.get<NamedDecl*>();
1264 }
1265 void operator->() const { } // Unsupported.
1266 bool operator==(const iterator &X) const { return Ptr == X.Ptr; }
1267 bool operator!=(const iterator &X) const { return Ptr != X.Ptr; }
1268 inline iterator &operator++() { // ++It
1269 assert(!Ptr.isNull() && "Advancing empty iterator");
1270
1271 if (DeclListNode *CurNode = Ptr.dyn_cast<DeclListNode*>())
1272 Ptr = CurNode->Rest;
1273 else
1274 Ptr = nullptr;
1275 return *this;
1276 }
1277 iterator operator++(int) { // It++
1278 iterator temp = *this;
1279 ++(*this);
1280 return temp;
1281 }
1282 // Enables the pattern for (iterator I =..., E = I.end(); I != E; ++I)
1283 iterator end() { return iterator(); }
1284 };
1285private:
1286 NamedDecl *D = nullptr;
1287 Decls Rest = nullptr;
1288 DeclListNode(NamedDecl *ND) : D(ND) {}
1289};
1290
1291/// The results of name lookup within a DeclContext.
1292class DeclContextLookupResult {
1293 using Decls = DeclListNode::Decls;
1294
1295 /// When in collection form, this is what the Data pointer points to.
1296 Decls Result;
1297
1298public:
1299 DeclContextLookupResult() = default;
1300 DeclContextLookupResult(Decls Result) : Result(Result) {}
1301
1302 using iterator = DeclListNode::iterator;
1303 using const_iterator = iterator;
1304 using reference = iterator::reference;
1305
1306 iterator begin() { return iterator(Result); }
1307 iterator end() { return iterator(); }
1308 const_iterator begin() const {
1309 return const_cast<DeclContextLookupResult*>(this)->begin();
1310 }
1311 const_iterator end() const { return iterator(); }
1312
1313 bool empty() const { return Result.isNull(); }
1314 bool isSingleResult() const { return Result.dyn_cast<NamedDecl*>(); }
1315 reference front() const { return *begin(); }
1316
1317 // Find the first declaration of the given type in the list. Note that this
1318 // is not in general the earliest-declared declaration, and should only be
1319 // used when it's not possible for there to be more than one match or where
1320 // it doesn't matter which one is found.
1321 template<class T> T *find_first() const {
1322 for (auto *D : *this)
1323 if (T *Decl = dyn_cast<T>(D))
1324 return Decl;
1325
1326 return nullptr;
1327 }
1328};
1329
1330/// DeclContext - This is used only as base class of specific decl types that
1331/// can act as declaration contexts. These decls are (only the top classes
1332/// that directly derive from DeclContext are mentioned, not their subclasses):
1333///
1334/// TranslationUnitDecl
1335/// ExternCContext
1336/// NamespaceDecl
1337/// TagDecl
1338/// OMPDeclareReductionDecl
1339/// OMPDeclareMapperDecl
1340/// FunctionDecl
1341/// ObjCMethodDecl
1342/// ObjCContainerDecl
1343/// LinkageSpecDecl
1344/// ExportDecl
1345/// BlockDecl
1346/// CapturedDecl
1347class DeclContext {
1348 /// For makeDeclVisibleInContextImpl
1349 friend class ASTDeclReader;
1350 /// For reconcileExternalVisibleStorage, CreateStoredDeclsMap,
1351 /// hasNeedToReconcileExternalVisibleStorage
1352 friend class ExternalASTSource;
1353 /// For CreateStoredDeclsMap
1354 friend class DependentDiagnostic;
1355 /// For hasNeedToReconcileExternalVisibleStorage,
1356 /// hasLazyLocalLexicalLookups, hasLazyExternalLexicalLookups
1357 friend class ASTWriter;
1358
1359 // We use uint64_t in the bit-fields below since some bit-fields
1360 // cross the unsigned boundary and this breaks the packing.
1361
1362 /// Stores the bits used by DeclContext.
1363 /// If NumDeclContextBits is modified, the ctor of DeclContext and the accessor
1364 /// methods in DeclContext should be updated appropriately.
1365 class DeclContextBitfields {
1366 friend class DeclContext;
1367 /// DeclKind - This indicates which class this is.
1368 uint64_t DeclKind : 7;
1369
1370 /// Whether this declaration context also has some external
1371 /// storage that contains additional declarations that are lexically
1372 /// part of this context.
1373 mutable uint64_t ExternalLexicalStorage : 1;
1374
1375 /// Whether this declaration context also has some external
1376 /// storage that contains additional declarations that are visible
1377 /// in this context.
1378 mutable uint64_t ExternalVisibleStorage : 1;
1379
1380 /// Whether this declaration context has had externally visible
1381 /// storage added since the last lookup. In this case, \c LookupPtr's
1382 /// invariant may not hold and needs to be fixed before we perform
1383 /// another lookup.
1384 mutable uint64_t NeedToReconcileExternalVisibleStorage : 1;
1385
1386 /// If \c true, this context may have local lexical declarations
1387 /// that are missing from the lookup table.
1388 mutable uint64_t HasLazyLocalLexicalLookups : 1;
1389
1390 /// If \c true, the external source may have lexical declarations
1391 /// that are missing from the lookup table.
1392 mutable uint64_t HasLazyExternalLexicalLookups : 1;
1393
1394 /// If \c true, lookups should only return identifiers from
1395 /// DeclContext scope (for example TranslationUnit). Used in
1396 /// LookupQualifiedName()
1397 mutable uint64_t UseQualifiedLookup : 1;
1398 };
1399
1400 /// Number of bits in DeclContextBitfields.
1401 enum { NumDeclContextBits = 13 };
1402
1403 /// Stores the bits used by TagDecl.
1404 /// If NumTagDeclBits is modified, the accessor
1405 /// methods in TagDecl should be updated appropriately.
1406 class TagDeclBitfields {
1407 friend class TagDecl;
1408 /// For the bits in DeclContextBitfields
1409 uint64_t : NumDeclContextBits;
1410
1411 /// The TagKind enum.
1412 uint64_t TagDeclKind : 3;
1413
1414 /// True if this is a definition ("struct foo {};"), false if it is a
1415 /// declaration ("struct foo;"). It is not considered a definition
1416 /// until the definition has been fully processed.
1417 uint64_t IsCompleteDefinition : 1;
1418
1419 /// True if this is currently being defined.
1420 uint64_t IsBeingDefined : 1;
1421
1422 /// True if this tag declaration is "embedded" (i.e., defined or declared
1423 /// for the very first time) in the syntax of a declarator.
1424 uint64_t IsEmbeddedInDeclarator : 1;
1425
1426 /// True if this tag is free standing, e.g. "struct foo;".
1427 uint64_t IsFreeStanding : 1;
1428
1429 /// Indicates whether it is possible for declarations of this kind
1430 /// to have an out-of-date definition.
1431 ///
1432 /// This option is only enabled when modules are enabled.
1433 uint64_t MayHaveOutOfDateDef : 1;
1434
1435 /// Has the full definition of this type been required by a use somewhere in
1436 /// the TU.
1437 uint64_t IsCompleteDefinitionRequired : 1;
1438 };
1439
1440 /// Number of non-inherited bits in TagDeclBitfields.
1441 enum { NumTagDeclBits = 9 };
1442
1443 /// Stores the bits used by EnumDecl.
1444 /// If NumEnumDeclBits is modified, the accessor
1445 /// methods in EnumDecl should be updated appropriately.
1446 class EnumDeclBitfields {
1447 friend class EnumDecl;
1448 /// For the bits in DeclContextBitfields.
1449 uint64_t : NumDeclContextBits;
1450 /// For the bits in TagDeclBitfields.
1451 uint64_t : NumTagDeclBits;
1452
1453 /// Width in bits required to store all the non-negative
1454 /// enumerators of this enum.
1455 uint64_t NumPositiveBits : 8;
1456
1457 /// Width in bits required to store all the negative
1458 /// enumerators of this enum.
1459 uint64_t NumNegativeBits : 8;
1460
1461 /// True if this tag declaration is a scoped enumeration. Only
1462 /// possible in C++11 mode.
1463 uint64_t IsScoped : 1;
1464
1465 /// If this tag declaration is a scoped enum,
1466 /// then this is true if the scoped enum was declared using the class
1467 /// tag, false if it was declared with the struct tag. No meaning is
1468 /// associated if this tag declaration is not a scoped enum.
1469 uint64_t IsScopedUsingClassTag : 1;
1470
1471 /// True if this is an enumeration with fixed underlying type. Only
1472 /// possible in C++11, Microsoft extensions, or Objective C mode.
1473 uint64_t IsFixed : 1;
1474
1475 /// True if a valid hash is stored in ODRHash.
1476 uint64_t HasODRHash : 1;
1477 };
1478
1479 /// Number of non-inherited bits in EnumDeclBitfields.
1480 enum { NumEnumDeclBits = 20 };
1481
1482 /// Stores the bits used by RecordDecl.
1483 /// If NumRecordDeclBits is modified, the accessor
1484 /// methods in RecordDecl should be updated appropriately.
1485 class RecordDeclBitfields {
1486 friend class RecordDecl;
1487 /// For the bits in DeclContextBitfields.
1488 uint64_t : NumDeclContextBits;
1489 /// For the bits in TagDeclBitfields.
1490 uint64_t : NumTagDeclBits;
1491
1492 /// This is true if this struct ends with a flexible
1493 /// array member (e.g. int X[]) or if this union contains a struct that does.
1494 /// If so, this cannot be contained in arrays or other structs as a member.
1495 uint64_t HasFlexibleArrayMember : 1;
1496
1497 /// Whether this is the type of an anonymous struct or union.
1498 uint64_t AnonymousStructOrUnion : 1;
1499
1500 /// This is true if this struct has at least one member
1501 /// containing an Objective-C object pointer type.
1502 uint64_t HasObjectMember : 1;
1503
1504 /// This is true if struct has at least one member of
1505 /// 'volatile' type.
1506 uint64_t HasVolatileMember : 1;
1507
1508 /// Whether the field declarations of this record have been loaded
1509 /// from external storage. To avoid unnecessary deserialization of
1510 /// methods/nested types we allow deserialization of just the fields
1511 /// when needed.
1512 mutable uint64_t LoadedFieldsFromExternalStorage : 1;
1513
1514 /// Basic properties of non-trivial C structs.
1515 uint64_t NonTrivialToPrimitiveDefaultInitialize : 1;
1516 uint64_t NonTrivialToPrimitiveCopy : 1;
1517 uint64_t NonTrivialToPrimitiveDestroy : 1;
1518
1519 /// The following bits indicate whether this is or contains a C union that
1520 /// is non-trivial to default-initialize, destruct, or copy. These bits
1521 /// imply the associated basic non-triviality predicates declared above.
1522 uint64_t HasNonTrivialToPrimitiveDefaultInitializeCUnion : 1;
1523 uint64_t HasNonTrivialToPrimitiveDestructCUnion : 1;
1524 uint64_t HasNonTrivialToPrimitiveCopyCUnion : 1;
1525
1526 /// Indicates whether this struct is destroyed in the callee.
1527 uint64_t ParamDestroyedInCallee : 1;
1528
1529 /// Represents the way this type is passed to a function.
1530 uint64_t ArgPassingRestrictions : 2;
1531 };
1532
1533 /// Number of non-inherited bits in RecordDeclBitfields.
1534 enum { NumRecordDeclBits = 14 };
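
As a concrete illustration of the HasFlexibleArrayMember bit above (a sketch, not part of this header), a C struct such as the following would have that bit set on its RecordDecl and could not itself appear as an array element or as a member of another aggregate:

    // Hypothetical example: 'Packet' ends in a flexible array member.
    struct Packet {
      int Len;
      char Data[];   // flexible array member => HasFlexibleArrayMember
    };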
1535
1536 /// Stores the bits used by OMPDeclareReductionDecl.
1537 /// If modified, NumOMPDeclareReductionDeclBits and the accessor
1538 /// methods in OMPDeclareReductionDecl should be updated appropriately.
1539 class OMPDeclareReductionDeclBitfields {
1540 friend class OMPDeclareReductionDecl;
1541 /// For the bits in DeclContextBitfields
1542 uint64_t : NumDeclContextBits;
1543
1544 /// Kind of initializer:
1545 /// function call or omp_priv<init_expr> initialization.
1546 uint64_t InitializerKind : 2;
1547 };
1548
1549 /// Number of non-inherited bits in OMPDeclareReductionDeclBitfields.
1550 enum { NumOMPDeclareReductionDeclBits = 2 };
1551
1552 /// Stores the bits used by FunctionDecl.
1553 /// If modified, NumFunctionDeclBits and the accessor
1554 /// methods in FunctionDecl and CXXDeductionGuideDecl
1555 /// (for IsCopyDeductionCandidate) should be updated appropriately.
1556 class FunctionDeclBitfields {
1557 friend class FunctionDecl;
1558 /// For IsCopyDeductionCandidate
1559 friend class CXXDeductionGuideDecl;
1560 /// For the bits in DeclContextBitfields.
1561 uint64_t : NumDeclContextBits;
1562
1563 uint64_t SClass : 3;
1564 uint64_t IsInline : 1;
1565 uint64_t IsInlineSpecified : 1;
1566
1567 uint64_t IsVirtualAsWritten : 1;
1568 uint64_t IsPure : 1;
1569 uint64_t HasInheritedPrototype : 1;
1570 uint64_t HasWrittenPrototype : 1;
1571 uint64_t IsDeleted : 1;
1572 /// Used by CXXMethodDecl
1573 uint64_t IsTrivial : 1;
1574
1575 /// This flag indicates whether this function is trivial for the purpose of
1576 /// calls. This is meaningful only when this function is a copy/move
1577 /// constructor or a destructor.
1578 uint64_t IsTrivialForCall : 1;
1579
1580 uint64_t IsDefaulted : 1;
1581 uint64_t IsExplicitlyDefaulted : 1;
1582 uint64_t HasDefaultedFunctionInfo : 1;
1583 uint64_t HasImplicitReturnZero : 1;
1584 uint64_t IsLateTemplateParsed : 1;
1585
1586 /// Kind of constexpr specifier as defined by ConstexprSpecKind.
1587 uint64_t ConstexprKind : 2;
1588 uint64_t InstantiationIsPending : 1;
1589
1590 /// Indicates if the function uses __try.
1591 uint64_t UsesSEHTry : 1;
1592
1593 /// Indicates if the function was a definition
1594 /// but its body was skipped.
1595 uint64_t HasSkippedBody : 1;
1596
1597 /// Indicates if the function declaration will
1598 /// have a body, once we're done parsing it.
1599 uint64_t WillHaveBody : 1;
1600
1601 /// Indicates that this function is a multiversioned
1602 /// function using attribute 'target'.
1603 uint64_t IsMultiVersion : 1;
1604
1605 /// [C++17] Only used by CXXDeductionGuideDecl. Indicates that
1606 /// the Deduction Guide is the implicitly generated 'copy
1607 /// deduction candidate' (used during overload resolution).
1608 uint64_t IsCopyDeductionCandidate : 1;
1609
1610 /// Store the ODRHash after first calculation.
1611 uint64_t HasODRHash : 1;
1612
1613 /// Indicates if the function uses Floating Point Constrained Intrinsics
1614 uint64_t UsesFPIntrin : 1;
1615 };
1616
1617 /// Number of non-inherited bits in FunctionDeclBitfields.
1618 enum { NumFunctionDeclBits = 27 };
1619
1620 /// Stores the bits used by CXXConstructorDecl. If modified,
1621 /// NumCXXConstructorDeclBits and the accessor
1622 /// methods in CXXConstructorDecl should be updated appropriately.
1623 class CXXConstructorDeclBitfields {
1624 friend class CXXConstructorDecl;
1625 /// For the bits in DeclContextBitfields.
1626 uint64_t : NumDeclContextBits;
1627 /// For the bits in FunctionDeclBitfields.
1628 uint64_t : NumFunctionDeclBits;
1629
1630 /// 24 bits to fit in the remaining available space.
1631 /// Note that this makes CXXConstructorDeclBitfields take
1632 /// exactly 64 bits and thus the width of NumCtorInitializers
1633 /// will need to be shrunk if some bit is added to DeclContextBitfields,
1634 /// FunctionDeclBitfields or CXXConstructorDeclBitfields.
1635 uint64_t NumCtorInitializers : 21;
1636 uint64_t IsInheritingConstructor : 1;
1637
1638 /// Whether this constructor has a trailing-allocated explicit specifier.
1639 uint64_t HasTrailingExplicitSpecifier : 1;
1640 /// If this constructor doesn't have a trailing-allocated explicit specifier,
1641 /// whether this constructor was declared explicit.
1642 uint64_t IsSimpleExplicit : 1;
1643 };
1644
1645 /// Number of non-inherited bits in CXXConstructorDeclBitfields.
1646 enum {
1647 NumCXXConstructorDeclBits = 64 - NumDeclContextBits - NumFunctionDeclBits
1648 };
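
A quick check of the accounting described above, assuming NumDeclContextBits is 13 as defined earlier in this header: 64 - 13 - 27 leaves 24 bits, which is exactly what the fields consume (21 + 1 + 1 + 1). A hypothetical compile-time restatement:

    // Sketch only: the constructor bitfields fill the remaining 64-bit word,
    // assuming NumDeclContextBits == 13 and NumFunctionDeclBits == 27.
    static_assert(21 + 1 + 1 + 1 == 64 - 13 - 27,
                  "CXXConstructorDeclBitfields should exactly fill 64 bits");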
1649
1650 /// Stores the bits used by ObjCMethodDecl.
1651 /// If modified, NumObjCMethodDeclBits and the accessor
1652 /// methods in ObjCMethodDecl should be updated appropriately.
1653 class ObjCMethodDeclBitfields {
1654 friend class ObjCMethodDecl;
1655
1656 /// For the bits in DeclContextBitfields.
1657 uint64_t : NumDeclContextBits;
1658
1659 /// The conventional meaning of this method; an ObjCMethodFamily.
1660 /// This is not serialized; instead, it is computed on demand and
1661 /// cached.
1662 mutable uint64_t Family : ObjCMethodFamilyBitWidth;
1663
1664 /// instance (true) or class (false) method.
1665 uint64_t IsInstance : 1;
1666 uint64_t IsVariadic : 1;
1667
1668 /// True if this method is the getter or setter for an explicit property.
1669 uint64_t IsPropertyAccessor : 1;
1670
1671 /// True if this method is a synthesized property accessor stub.
1672 uint64_t IsSynthesizedAccessorStub : 1;
1673
1674 /// Method has a definition.
1675 uint64_t IsDefined : 1;
1676
1677 /// Method redeclaration in the same interface.
1678 uint64_t IsRedeclaration : 1;
1679
1680 /// Is redeclared in the same interface.
1681 mutable uint64_t HasRedeclaration : 1;
1682
1683 /// \@required/\@optional
1684 uint64_t DeclImplementation : 2;
1685
1686 /// in, inout, etc.
1687 uint64_t objcDeclQualifier : 7;
1688
1689 /// Indicates whether this method has a related result type.
1690 uint64_t RelatedResultType : 1;
1691
1692 /// Whether the locations of the selector identifiers are in a
1693 /// "standard" position, an enum SelectorLocationsKind.
1694 uint64_t SelLocsKind : 2;
1695
1696 /// Whether this method overrides any other in the class hierarchy.
1697 ///
1698 /// A method is said to override any method in the class's
1699 /// base classes, its protocols, or its categories' protocols, that has
1700 /// the same selector and is of the same kind (class or instance).
1701 /// A method in an implementation is not considered as overriding the same
1702 /// method in the interface or its categories.
1703 uint64_t IsOverriding : 1;
1704
1705 /// Indicates if the method was a definition but its body was skipped.
1706 uint64_t HasSkippedBody : 1;
1707 };
1708
1709 /// Number of non-inherited bits in ObjCMethodDeclBitfields.
1710 enum { NumObjCMethodDeclBits = 24 };
1711
1712 /// Stores the bits used by ObjCContainerDecl.
1713 /// If modified, NumObjCContainerDeclBits and the accessor
1714 /// methods in ObjCContainerDecl should be updated appropriately.
1715 class ObjCContainerDeclBitfields {
1716 friend class ObjCContainerDecl;
1717 /// For the bits in DeclContextBitfields
1718 uint32_t : NumDeclContextBits;
1719
1720 // Not a bitfield but this saves space.
1721 // Note that ObjCContainerDeclBitfields is full.
1722 SourceLocation AtStart;
1723 };
1724
1725 /// Number of non-inherited bits in ObjCContainerDeclBitfields.
1726 /// Note that here we rely on the fact that SourceLocation is 32 bits
1727 /// wide. We check this with the static_assert in the ctor of DeclContext.
1728 enum { NumObjCContainerDeclBits = 64 - NumDeclContextBits };
1729
1730 /// Stores the bits used by LinkageSpecDecl.
1731 /// If modified, NumLinkageSpecDeclBits and the accessor
1732 /// methods in LinkageSpecDecl should be updated appropriately.
1733 class LinkageSpecDeclBitfields {
1734 friend class LinkageSpecDecl;
1735 /// For the bits in DeclContextBitfields.
1736 uint64_t : NumDeclContextBits;
1737
1738 /// The language for this linkage specification with values
1739 /// in the enum LinkageSpecDecl::LanguageIDs.
1740 uint64_t Language : 3;
1741
1742 /// True if this linkage spec has braces.
1743 /// This is needed so that hasBraces() returns the correct result while the
1744 /// linkage spec body is being parsed. Once RBraceLoc has been set this is
1745 /// not used, so it doesn't need to be serialized.
1746 uint64_t HasBraces : 1;
1747 };
1748
1749 /// Number of non-inherited bits in LinkageSpecDeclBitfields.
1750 enum { NumLinkageSpecDeclBits = 4 };
1751
1752 /// Stores the bits used by BlockDecl.
1753 /// If modified, NumBlockDeclBits and the accessor
1754 /// methods in BlockDecl should be updated appropriately.
1755 class BlockDeclBitfields {
1756 friend class BlockDecl;
1757 /// For the bits in DeclContextBitfields.
1758 uint64_t : NumDeclContextBits;
1759
1760 uint64_t IsVariadic : 1;
1761 uint64_t CapturesCXXThis : 1;
1762 uint64_t BlockMissingReturnType : 1;
1763 uint64_t IsConversionFromLambda : 1;
1764
1765 /// A bit that indicates this block is passed directly to a function as a
1766 /// non-escaping parameter.
1767 uint64_t DoesNotEscape : 1;
1768
1769 /// A bit that indicates whether it's possible to avoid copying this block to
1770 /// the heap when it initializes or is assigned to a local variable with
1771 /// automatic storage.
1772 uint64_t CanAvoidCopyToHeap : 1;
1773 };
1774
1775 /// Number of non-inherited bits in BlockDeclBitfields.
1776 enum { NumBlockDeclBits = 6 };
1777
1778 /// Pointer to the data structure used to lookup declarations
1779 /// within this context (or a DependentStoredDeclsMap if this is a
1780 /// dependent context). We maintain the invariant that, if the map
1781 /// contains an entry for a DeclarationName (and we haven't lazily
1782 /// omitted anything), then it contains all relevant entries for that
1783 /// name (modulo the hasExternalDecls() flag).
1784 mutable StoredDeclsMap *LookupPtr = nullptr;
1785
1786protected:
1787 /// This anonymous union stores the bits belonging to DeclContext and classes
1788 /// deriving from it. The goal is to use otherwise wasted
1789 /// space in DeclContext to store data belonging to derived classes.
1790 /// The space saved is especially significant when pointers are aligned
1791 /// to 8 bytes. In this case, due to alignment requirements, we have a
1792 /// little less than 8 bytes free in DeclContext which we can use.
1793 /// We check that none of the classes in this union is larger than
1794 /// 8 bytes with static_asserts in the ctor of DeclContext.
1795 union {
1796 DeclContextBitfields DeclContextBits;
1797 TagDeclBitfields TagDeclBits;
1798 EnumDeclBitfields EnumDeclBits;
1799 RecordDeclBitfields RecordDeclBits;
1800 OMPDeclareReductionDeclBitfields OMPDeclareReductionDeclBits;
1801 FunctionDeclBitfields FunctionDeclBits;
1802 CXXConstructorDeclBitfields CXXConstructorDeclBits;
1803 ObjCMethodDeclBitfields ObjCMethodDeclBits;
1804 ObjCContainerDeclBitfields ObjCContainerDeclBits;
1805 LinkageSpecDeclBitfields LinkageSpecDeclBits;
1806 BlockDeclBitfields BlockDeclBits;
1807
1808 static_assert(sizeof(DeclContextBitfields) <= 8,
1809 "DeclContextBitfields is larger than 8 bytes!");
1810 static_assert(sizeof(TagDeclBitfields) <= 8,
1811 "TagDeclBitfields is larger than 8 bytes!");
1812 static_assert(sizeof(EnumDeclBitfields) <= 8,
1813 "EnumDeclBitfields is larger than 8 bytes!");
1814 static_assert(sizeof(RecordDeclBitfields) <= 8,
1815 "RecordDeclBitfields is larger than 8 bytes!");
1816 static_assert(sizeof(OMPDeclareReductionDeclBitfields) <= 8,
1817 "OMPDeclareReductionDeclBitfields is larger than 8 bytes!");
1818 static_assert(sizeof(FunctionDeclBitfields) <= 8,
1819 "FunctionDeclBitfields is larger than 8 bytes!");
1820 static_assert(sizeof(CXXConstructorDeclBitfields) <= 8,
1821 "CXXConstructorDeclBitfields is larger than 8 bytes!");
1822 static_assert(sizeof(ObjCMethodDeclBitfields) <= 8,
1823 "ObjCMethodDeclBitfields is larger than 8 bytes!");
1824 static_assert(sizeof(ObjCContainerDeclBitfields) <= 8,
1825 "ObjCContainerDeclBitfields is larger than 8 bytes!");
1826 static_assert(sizeof(LinkageSpecDeclBitfields) <= 8,
1827 "LinkageSpecDeclBitfields is larger than 8 bytes!");
1828 static_assert(sizeof(BlockDeclBitfields) <= 8,
1829 "BlockDeclBitfields is larger than 8 bytes!");
1830 };
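
A minimal sketch of how a derived class uses this union (the real setters live in the respective Decl subclasses, e.g. FunctionDecl; the helper name below is hypothetical):

    // Writes function-specific flags through the FunctionDeclBits union member.
    void setInlineSpecifiedSketch(bool Specified) {
      FunctionDeclBits.IsInlineSpecified = Specified;
      FunctionDeclBits.IsInline = Specified;
    }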
1831
1832 /// FirstDecl - The first declaration stored within this declaration
1833 /// context.
1834 mutable Decl *FirstDecl = nullptr;
1835
1836 /// LastDecl - The last declaration stored within this declaration
1837 /// context. FIXME: We could probably cache this value somewhere
1838 /// outside of the DeclContext, to reduce the size of DeclContext by
1839 /// another pointer.
1840 mutable Decl *LastDecl = nullptr;
1841
1842 /// Build up a chain of declarations.
1843 ///
1844 /// \returns the first/last pair of declarations.
1845 static std::pair<Decl *, Decl *>
1846 BuildDeclChain(ArrayRef<Decl*> Decls, bool FieldsAlreadyLoaded);
1847
1848 DeclContext(Decl::Kind K);
1849
1850public:
1851 ~DeclContext();
1852
1853 Decl::Kind getDeclKind() const {
1854 return static_cast<Decl::Kind>(DeclContextBits.DeclKind);
1855 }
1856
1857 const char *getDeclKindName() const;
1858
1859 /// getParent - Returns the containing DeclContext.
1860 DeclContext *getParent() {
1861 return cast<Decl>(this)->getDeclContext();
1862 }
1863 const DeclContext *getParent() const {
1864 return const_cast<DeclContext*>(this)->getParent();
1865 }
1866
1867 /// getLexicalParent - Returns the containing lexical DeclContext. May be
1868 /// different from getParent, e.g.:
1869 ///
1870 /// namespace A {
1871 /// struct S;
1872 /// }
1873 /// struct A::S {}; // getParent() == namespace 'A'
1874 /// // getLexicalParent() == translation unit
1875 ///
1876 DeclContext *getLexicalParent() {
1877 return cast<Decl>(this)->getLexicalDeclContext();
1878 }
1879 const DeclContext *getLexicalParent() const {
1880 return const_cast<DeclContext*>(this)->getLexicalParent();
1881 }
1882
1883 DeclContext *getLookupParent();
1884
1885 const DeclContext *getLookupParent() const {
1886 return const_cast<DeclContext*>(this)->getLookupParent();
1887 }
1888
1889 ASTContext &getParentASTContext() const {
1890 return cast<Decl>(this)->getASTContext();
1891 }
1892
1893 bool isClosure() const { return getDeclKind() == Decl::Block; }
1894
1895 /// Return this DeclContext if it is a BlockDecl. Otherwise, return the
1896 /// innermost enclosing BlockDecl or null if there are no enclosing blocks.
1897 const BlockDecl *getInnermostBlockDecl() const;
1898
1899 bool isObjCContainer() const {
1900 switch (getDeclKind()) {
1901 case Decl::ObjCCategory:
1902 case Decl::ObjCCategoryImpl:
1903 case Decl::ObjCImplementation:
1904 case Decl::ObjCInterface:
1905 case Decl::ObjCProtocol:
1906 return true;
1907 default:
1908 return false;
1909 }
1910 }
1911
1912 bool isFunctionOrMethod() const {
1913 switch (getDeclKind()) {
1914 case Decl::Block:
1915 case Decl::Captured:
1916 case Decl::ObjCMethod:
1917 return true;
1918 default:
1919 return getDeclKind() >= Decl::firstFunction &&
1920 getDeclKind() <= Decl::lastFunction;
1921 }
1922 }
1923
1924 /// Test whether the context supports looking up names.
1925 bool isLookupContext() const {
1926 return !isFunctionOrMethod() && getDeclKind() != Decl::LinkageSpec &&
1927 getDeclKind() != Decl::Export;
1928 }
1929
1930 bool isFileContext() const {
1931 return getDeclKind() == Decl::TranslationUnit ||
1932 getDeclKind() == Decl::Namespace;
1933 }
1934
1935 bool isTranslationUnit() const {
1936 return getDeclKind() == Decl::TranslationUnit;
1937 }
1938
1939 bool isRecord() const {
1940 return getDeclKind() >= Decl::firstRecord &&
1941 getDeclKind() <= Decl::lastRecord;
1942 }
1943
1944 bool isNamespace() const { return getDeclKind() == Decl::Namespace; }
1945
1946 bool isStdNamespace() const;
1947
1948 bool isInlineNamespace() const;
1949
1950 /// Determines whether this context is dependent on a
1951 /// template parameter.
1952 bool isDependentContext() const;
1953
1954 /// isTransparentContext - Determines whether this context is a
1955 /// "transparent" context, meaning that the members declared in this
1956 /// context are semantically declared in the nearest enclosing
1957 /// non-transparent (opaque) context but are lexically declared in
1958 /// this context. For example, consider the enumerators of an
1959 /// enumeration type:
1960 /// @code
1961 /// enum E {
1962 /// Val1
1963 /// };
1964 /// @endcode
1965 /// Here, E is a transparent context, so its enumerator (Val1) will
1966 /// appear (semantically) as if it were declared in the same context as E.
1967 /// Examples of transparent contexts include: enumerations (except for
1968 /// C++0x scoped enums), and C++ linkage specifications.
1969 bool isTransparentContext() const;
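
Linkage specifications behave the same way; as a small illustration (not part of this header), 'f' below is lexically inside the linkage spec but semantically a member of the enclosing translation unit:

    extern "C" {
      void f();   // the extern "C" block is a transparent DeclContext
    }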
1970
1971 /// Determines whether this context or some of its ancestors is a
1972 /// linkage specification context that specifies C linkage.
1973 bool isExternCContext() const;
1974
1975 /// Retrieve the nearest enclosing C linkage specification context.
1976 const LinkageSpecDecl *getExternCContext() const;
1977
1978 /// Determines whether this context or some of its ancestors is a
1979 /// linkage specification context that specifies C++ linkage.
1980 bool isExternCXXContext() const;
1981
1982 /// Determine whether this declaration context is equivalent
1983 /// to the declaration context DC.
1984 bool Equals(const DeclContext *DC) const {
1985 return DC && this->getPrimaryContext() == DC->getPrimaryContext();
1986 }
1987
1988 /// Determine whether this declaration context encloses the
1989 /// declaration context DC.
1990 bool Encloses(const DeclContext *DC) const;
1991
1992 /// Find the nearest non-closure ancestor of this context,
1993 /// i.e. the innermost semantic parent of this context which is not
1994 /// a closure. A context may be its own non-closure ancestor.
1995 Decl *getNonClosureAncestor();
1996 const Decl *getNonClosureAncestor() const {
1997 return const_cast<DeclContext*>(this)->getNonClosureAncestor();
1998 }
1999
2000 /// getPrimaryContext - There may be many different
2001 /// declarations of the same entity (including forward declarations
2002 /// of classes, multiple definitions of namespaces, etc.), each with
2003 /// a different set of declarations. This routine returns the
2004 /// "primary" DeclContext structure, which will contain the
2005 /// information needed to perform name lookup into this context.
2006 DeclContext *getPrimaryContext();
2007 const DeclContext *getPrimaryContext() const {
2008 return const_cast<DeclContext*>(this)->getPrimaryContext();
2009 }
2010
2011 /// getRedeclContext - Retrieve the context in which an entity conflicts with
2012 /// other entities of the same name, or where it is a redeclaration if the
2013 /// two entities are compatible. This skips through transparent contexts.
2014 DeclContext *getRedeclContext();
2015 const DeclContext *getRedeclContext() const {
2016 return const_cast<DeclContext *>(this)->getRedeclContext();
2017 }
2018
2019 /// Retrieve the nearest enclosing namespace context.
2020 DeclContext *getEnclosingNamespaceContext();
2021 const DeclContext *getEnclosingNamespaceContext() const {
2022 return const_cast<DeclContext *>(this)->getEnclosingNamespaceContext();
2023 }
2024
2025 /// Retrieve the outermost lexically enclosing record context.
2026 RecordDecl *getOuterLexicalRecordContext();
2027 const RecordDecl *getOuterLexicalRecordContext() const {
2028 return const_cast<DeclContext *>(this)->getOuterLexicalRecordContext();
2029 }
2030
2031 /// Test if this context is part of the enclosing namespace set of
2032 /// the context NS, as defined in C++0x [namespace.def]p9. If either context
2033 /// isn't a namespace, this is equivalent to Equals().
2034 ///
2035 /// The enclosing namespace set of a namespace is the namespace and, if it is
2036 /// inline, its enclosing namespace, recursively.
2037 bool InEnclosingNamespaceSetOf(const DeclContext *NS) const;
2038
2039 /// Collects all of the declaration contexts that are semantically
2040 /// connected to this declaration context.
2041 ///
2042 /// For declaration contexts that have multiple semantically connected but
2043 /// syntactically distinct contexts, such as C++ namespaces, this routine
2044 /// retrieves the complete set of such declaration contexts in source order.
2045 /// For example, given:
2046 ///
2047 /// \code
2048 /// namespace N {
2049 /// int x;
2050 /// }
2051 /// namespace N {
2052 /// int y;
2053 /// }
2054 /// \endcode
2055 ///
2056 /// The \c Contexts parameter will contain both definitions of N.
2057 ///
2058 /// \param Contexts Will be cleared and set to the set of declaration
2059 /// contexts that are semantically connected to this declaration context,
2060 /// in source order, including this context (which may be the only result,
2061 /// for non-namespace contexts).
2062 void collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts);
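
A usage sketch, assuming DC points at either definition of N from the example above:

    llvm::SmallVector<clang::DeclContext *, 2> Contexts;
    DC->collectAllContexts(Contexts);
    // Contexts now holds both definitions of N, in source order.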
2063
2064 /// decl_iterator - Iterates through the declarations stored
2065 /// within this context.
2066 class decl_iterator {
2067 /// Current - The current declaration.
2068 Decl *Current = nullptr;
2069
2070 public:
2071 using value_type = Decl *;
2072 using reference = const value_type &;
2073 using pointer = const value_type *;
2074 using iterator_category = std::forward_iterator_tag;
2075 using difference_type = std::ptrdiff_t;
2076
2077 decl_iterator() = default;
2078 explicit decl_iterator(Decl *C) : Current(C) {}
2079
2080 reference operator*() const { return Current; }
2081
2082 // This doesn't meet the iterator requirements, but it's convenient
2083 value_type operator->() const { return Current; }
2084
2085 decl_iterator& operator++() {
2086 Current = Current->getNextDeclInContext();
2087 return *this;
2088 }
2089
2090 decl_iterator operator++(int) {
2091 decl_iterator tmp(*this);
2092 ++(*this);
2093 return tmp;
2094 }
2095
2096 friend bool operator==(decl_iterator x, decl_iterator y) {
2097 return x.Current == y.Current;
2098 }
2099
2100 friend bool operator!=(decl_iterator x, decl_iterator y) {
2101 return x.Current != y.Current;
2102 }
2103 };
2104
2105 using decl_range = llvm::iterator_range<decl_iterator>;
2106
2107 /// decls_begin/decls_end - Iterate over the declarations stored in
2108 /// this context.
2109 decl_range decls() const { return decl_range(decls_begin(), decls_end()); }
2110 decl_iterator decls_begin() const;
2111 decl_iterator decls_end() const { return decl_iterator(); }
2112 bool decls_empty() const;
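
A typical iteration sketch, assuming DC is a valid DeclContext pointer; note that decls() may deserialize from an external source, unlike noload_decls() below:

    for (clang::Decl *D : DC->decls())
      if (auto *ND = llvm::dyn_cast<clang::NamedDecl>(D))
        llvm::errs() << ND->getDeclKindName() << " " << ND->getNameAsString() << "\n";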
2113
2114 /// noload_decls_begin/end - Iterate over the declarations stored in this
2115 /// context that are currently loaded; don't attempt to retrieve anything
2116 /// from an external source.
2117 decl_range noload_decls() const {
2118 return decl_range(noload_decls_begin(), noload_decls_end());
2119 }
2120 decl_iterator noload_decls_begin() const { return decl_iterator(FirstDecl); }
2121 decl_iterator noload_decls_end() const { return decl_iterator(); }
2122
2123 /// specific_decl_iterator - Iterates over a subrange of
2124 /// declarations stored in a DeclContext, providing only those that
2125 /// are of type SpecificDecl (or a class derived from it). This
2126 /// iterator is used, for example, to provide iteration over just
2127 /// the fields within a RecordDecl (with SpecificDecl = FieldDecl).
2128 template<typename SpecificDecl>
2129 class specific_decl_iterator {
2130 /// Current - The current, underlying declaration iterator, which
2131 /// will either be NULL or will point to a declaration of
2132 /// type SpecificDecl.
2133 DeclContext::decl_iterator Current;
2134
2135 /// SkipToNextDecl - Advances the current position up to the next
2136 /// declaration of type SpecificDecl that also meets the criteria
2137 /// required by Acceptable.
2138 void SkipToNextDecl() {
2139 while (*Current && !isa<SpecificDecl>(*Current))
2140 ++Current;
2141 }
2142
2143 public:
2144 using value_type = SpecificDecl *;
2145 // TODO: Add reference and pointer types (with some appropriate proxy type)
2146 // if we ever have a need for them.
2147 using reference = void;
2148 using pointer = void;
2149 using difference_type =
2150 std::iterator_traits<DeclContext::decl_iterator>::difference_type;
2151 using iterator_category = std::forward_iterator_tag;
2152
2153 specific_decl_iterator() = default;
2154
2155 /// specific_decl_iterator - Construct a new iterator over a
2156 /// subset of the declarations in the range [C,
2157 /// end-of-declarations) that are of type SpecificDecl (or a class
2158 /// derived from it); any other declarations in the range are
2159 /// skipped. For example, with SpecificDecl = FieldDecl this visits
2160 /// just the fields stored in a RecordDecl, which is how its
2161 /// field_begin() iterator is implemented. (For filtering with a
2162 /// run-time predicate as well, see filtered_decl_iterator below.)
2163 explicit specific_decl_iterator(DeclContext::decl_iterator C) : Current(C) {
2164 SkipToNextDecl();
2165 }
2166
2167 value_type operator*() const { return cast<SpecificDecl>(*Current); }
2168
2169 // This doesn't meet the iterator requirements, but it's convenient
2170 value_type operator->() const { return **this; }
2171
2172 specific_decl_iterator& operator++() {
2173 ++Current;
2174 SkipToNextDecl();
2175 return *this;
2176 }
2177
2178 specific_decl_iterator operator++(int) {
2179 specific_decl_iterator tmp(*this);
2180 ++(*this);
2181 return tmp;
2182 }
2183
2184 friend bool operator==(const specific_decl_iterator& x,
2185 const specific_decl_iterator& y) {
2186 return x.Current == y.Current;
2187 }
2188
2189 friend bool operator!=(const specific_decl_iterator& x,
2190 const specific_decl_iterator& y) {
2191 return x.Current != y.Current;
2192 }
2193 };
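
A sketch of visiting only the FieldDecls stored in a context DC (this is essentially how RecordDecl's field_begin()/field_end() are built):

    clang::DeclContext::specific_decl_iterator<clang::FieldDecl>
        I(DC->decls_begin()), E(DC->decls_end());
    for (; I != E; ++I) {
      clang::FieldDecl *FD = *I;   // only FieldDecls are visited
      (void)FD;
    }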
2194
2195 /// Iterates over a filtered subrange of declarations stored
2196 /// in a DeclContext.
2197 ///
2198 /// This iterator visits only those declarations that are of type
2199 /// SpecificDecl (or a class derived from it) and that meet some
2200 /// additional run-time criteria. This iterator is used, for
2201 /// example, to provide access to the instance methods within an
2202 /// Objective-C interface (with SpecificDecl = ObjCMethodDecl and
2203 /// Acceptable = ObjCMethodDecl::isInstanceMethod).
2204 template<typename SpecificDecl, bool (SpecificDecl::*Acceptable)() const>
2205 class filtered_decl_iterator {
2206 /// Current - The current, underlying declaration iterator, which
2207 /// will either be NULL or will point to a declaration of
2208 /// type SpecificDecl.
2209 DeclContext::decl_iterator Current;
2210
2211 /// SkipToNextDecl - Advances the current position up to the next
2212 /// declaration of type SpecificDecl that also meets the criteria
2213 /// required by Acceptable.
2214 void SkipToNextDecl() {
2215 while (*Current &&
2216 (!isa<SpecificDecl>(*Current) ||
2217 (Acceptable && !(cast<SpecificDecl>(*Current)->*Acceptable)())))
2218 ++Current;
2219 }
2220
2221 public:
2222 using value_type = SpecificDecl *;
2223 // TODO: Add reference and pointer types (with some appropriate proxy type)
2224 // if we ever have a need for them.
2225 using reference = void;
2226 using pointer = void;
2227 using difference_type =
2228 std::iterator_traits<DeclContext::decl_iterator>::difference_type;
2229 using iterator_category = std::forward_iterator_tag;
2230
2231 filtered_decl_iterator() = default;
2232
2233 /// filtered_decl_iterator - Construct a new iterator over a
2234 /// subset of the declarations in the range [C,
2235 /// end-of-declarations). If Acceptable is non-null, it is a pointer to a
2236 /// member function of SpecificDecl that should return true for
2237 /// all of the SpecificDecl instances that will be in the subset
2238 /// of iterators. For example, if you want Objective-C instance
2239 /// methods, SpecificDecl will be ObjCMethodDecl and Acceptable will be
2240 /// &ObjCMethodDecl::isInstanceMethod.
2241 explicit filtered_decl_iterator(DeclContext::decl_iterator C) : Current(C) {
2242 SkipToNextDecl();
2243 }
2244
2245 value_type operator*() const { return cast<SpecificDecl>(*Current); }
2246 value_type operator->() const { return cast<SpecificDecl>(*Current); }
2247
2248 filtered_decl_iterator& operator++() {
2249 ++Current;
2250 SkipToNextDecl();
2251 return *this;
2252 }
2253
2254 filtered_decl_iterator operator++(int) {
2255 filtered_decl_iterator tmp(*this);
2256 ++(*this);
2257 return tmp;
2258 }
2259
2260 friend bool operator==(const filtered_decl_iterator& x,
2261 const filtered_decl_iterator& y) {
2262 return x.Current == y.Current;
2263 }
2264
2265 friend bool operator!=(const filtered_decl_iterator& x,
2266 const filtered_decl_iterator& y) {
2267 return x.Current != y.Current;
2268 }
2269 };
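
A sketch mirroring the Objective-C example from the comment above, assuming DC is the DeclContext of an ObjCContainerDecl:

    using instmeth_iter =
        clang::DeclContext::filtered_decl_iterator<clang::ObjCMethodDecl,
                                                   &clang::ObjCMethodDecl::isInstanceMethod>;
    for (instmeth_iter I(DC->decls_begin()), E(DC->decls_end()); I != E; ++I)
      (*I)->dump();   // *I is an instance ObjCMethodDecl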
2270
2271 /// Add the declaration D into this context.
2272 ///
2273 /// This routine should be invoked when the declaration D has first
2274 /// been declared, to place D into the context where it was
2275 /// (lexically) defined. Every declaration must be added to one
2276 /// (and only one!) context, where it can be visited via
2277 /// [decls_begin(), decls_end()). Once a declaration has been added
2278 /// to its lexical context, the corresponding DeclContext owns the
2279 /// declaration.
2280 ///
2281 /// If D is also a NamedDecl, it will be made visible within its
2282 /// semantic context via makeDeclVisibleInContext.
2283 void addDecl(Decl *D);
2284
2285 /// Add the declaration D into this context, but suppress
2286 /// searches for external declarations with the same name.
2287 ///
2288 /// Although analogous in function to addDecl, this removes an
2289 /// important check. This is only useful if the Decl is being
2290 /// added in response to an external search; in all other cases,
2291 /// addDecl() is the right function to use.
2292 /// See the ASTImporter for use cases.
2293 void addDeclInternal(Decl *D);
2294
2295 /// Add the declaration D to this context without modifying
2296 /// any lookup tables.
2297 ///
2298 /// This is useful for some operations in dependent contexts where
2299 /// the semantic context might not be dependent; this basically
2300 /// only happens with friends.
2301 void addHiddenDecl(Decl *D);
2302
2303 /// Removes a declaration from this context.
2304 void removeDecl(Decl *D);
2305
2306 /// Checks whether a declaration is in this context.
2307 bool containsDecl(Decl *D) const;
2308
2309 /// Checks whether a declaration is in this context.
2310 /// This also loads the Decls from the external source before the check.
2311 bool containsDeclAndLoad(Decl *D) const;
2312
2313 using lookup_result = DeclContextLookupResult;
2314 using lookup_iterator = lookup_result::iterator;
2315
2316 /// lookup - Find the declarations (if any) with the given Name in
2317 /// this context. Returns a range of iterators that contains all of
2318 /// the declarations with this name, with object, function, member,
2319 /// and enumerator names preceding any tag name. Note that this
2320 /// routine will not look into parent contexts.
2321 lookup_result lookup(DeclarationName Name) const;
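
A lookup sketch; Ctx is assumed to be the ASTContext and "value" a hypothetical identifier. Only this context is searched, never its parents:

    clang::DeclarationName Name(&Ctx.Idents.get("value"));
    for (clang::NamedDecl *ND : DC->lookup(Name))
      ND->dump();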
2322
2323 /// Find the declarations with the given name that are visible
2324 /// within this context; don't attempt to retrieve anything from an
2325 /// external source.
2326 lookup_result noload_lookup(DeclarationName Name);
2327
2328 /// A simplistic name lookup mechanism that performs name lookup
2329 /// into this declaration context without consulting the external source.
2330 ///
2331 /// This function should almost never be used, because it subverts the
2332 /// usual relationship between a DeclContext and the external source.
2333 /// See the ASTImporter for the (few, but important) use cases.
2334 ///
2335 /// FIXME: This is very inefficient; replace uses of it with uses of
2336 /// noload_lookup.
2337 void localUncachedLookup(DeclarationName Name,
2338 SmallVectorImpl<NamedDecl *> &Results);
2339
2340 /// Makes a declaration visible within this context.
2341 ///
2342 /// This routine makes the declaration D visible to name lookup
2343 /// within this context and, if this is a transparent context,
2344 /// within its parent contexts up to the first enclosing
2345 /// non-transparent context. Making a declaration visible within a
2346 /// context does not transfer ownership of a declaration, and a
2347 /// declaration can be visible in many contexts that aren't its
2348 /// lexical context.
2349 ///
2350 /// If D is a redeclaration of an existing declaration that is
2351 /// visible from this context, as determined by
2352 /// NamedDecl::declarationReplaces, the previous declaration will be
2353 /// replaced with D.
2354 void makeDeclVisibleInContext(NamedDecl *D);
2355
2356 /// all_lookups_iterator - An iterator that provides a view over the results
2357 /// of looking up every possible name.
2358 class all_lookups_iterator;
2359
2360 using lookups_range = llvm::iterator_range<all_lookups_iterator>;
2361
2362 lookups_range lookups() const;
2363 // Like lookups(), but avoids loading external declarations.
2364 // If PreserveInternalState, avoids building lookup data structures too.
2365 lookups_range noload_lookups(bool PreserveInternalState) const;
2366
2367 /// Iterators over all possible lookups within this context.
2368 all_lookups_iterator lookups_begin() const;
2369 all_lookups_iterator lookups_end() const;
2370
2371 /// Iterators over all possible lookups within this context that are
2372 /// currently loaded; don't attempt to retrieve anything from an external
2373 /// source.
2374 all_lookups_iterator noload_lookups_begin() const;
2375 all_lookups_iterator noload_lookups_end() const;
2376
2377 struct udir_iterator;
2378
2379 using udir_iterator_base =
2380 llvm::iterator_adaptor_base<udir_iterator, lookup_iterator,
2381 typename lookup_iterator::iterator_category,
2382 UsingDirectiveDecl *>;
2383
2384 struct udir_iterator : udir_iterator_base {
2385 udir_iterator(lookup_iterator I) : udir_iterator_base(I) {}
2386
2387 UsingDirectiveDecl *operator*() const;
2388 };
2389
2390 using udir_range = llvm::iterator_range<udir_iterator>;
2391
2392 udir_range using_directives() const;
2393
2394 // These are all defined in DependentDiagnostic.h.
2395 class ddiag_iterator;
2396
2397 using ddiag_range = llvm::iterator_range<DeclContext::ddiag_iterator>;
2398
2399 inline ddiag_range ddiags() const;
2400
2401 // Low-level accessors
2402
2403 /// Mark that there are external lexical declarations that we need
2404 /// to include in our lookup table (and that are not available as external
2405 /// visible lookups). These extra lookup results will be found by walking
2406 /// the lexical declarations of this context. This should be used only if
2407 /// setHasExternalLexicalStorage() has been called on any decl context for
2408 /// which this is the primary context.
2409 void setMustBuildLookupTable() {
2410 assert(this == getPrimaryContext() &&
2411 "should only be called on primary context");
2412 DeclContextBits.HasLazyExternalLexicalLookups = true;
2413 }
2414
2415 /// Retrieve the internal representation of the lookup structure.
2416 /// This may omit some names if we are lazily building the structure.
2417 StoredDeclsMap *getLookupPtr() const { return LookupPtr; }
2418
2419 /// Ensure the lookup structure is fully-built and return it.
2420 StoredDeclsMap *buildLookup();
2421
2422 /// Whether this DeclContext has external storage containing
2423 /// additional declarations that are lexically in this context.
2424 bool hasExternalLexicalStorage() const {
2425 return DeclContextBits.ExternalLexicalStorage;
2426 }
2427
2428 /// State whether this DeclContext has external storage for
2429 /// declarations lexically in this context.
2430 void setHasExternalLexicalStorage(bool ES = true) const {
2431 DeclContextBits.ExternalLexicalStorage = ES;
2432 }
2433
2434 /// Whether this DeclContext has external storage containing
2435 /// additional declarations that are visible in this context.
2436 bool hasExternalVisibleStorage() const {
2437 return DeclContextBits.ExternalVisibleStorage;
2438 }
2439
2440 /// State whether this DeclContext has external storage for
2441 /// declarations visible in this context.
2442 void setHasExternalVisibleStorage(bool ES = true) const {
2443 DeclContextBits.ExternalVisibleStorage = ES;
2444 if (ES && LookupPtr)
2445 DeclContextBits.NeedToReconcileExternalVisibleStorage = true;
2446 }
2447
2448 /// Determine whether the given declaration is stored in the list of
2449 /// declarations lexically within this context.
2450 bool isDeclInLexicalTraversal(const Decl *D) const {
2451 return D && (D->NextInContextAndBits.getPointer() || D == FirstDecl ||
2452 D == LastDecl);
2453 }
2454
2455 bool setUseQualifiedLookup(bool use = true) const {
2456 bool old_value = DeclContextBits.UseQualifiedLookup;
2457 DeclContextBits.UseQualifiedLookup = use;
2458 return old_value;
2459 }
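
Because the previous value is returned, callers can use a simple save/restore idiom (a sketch):

    bool OldUseQualified = DC->setUseQualifiedLookup(true);
    // ... perform lookups that should behave as qualified lookups ...
    DC->setUseQualifiedLookup(OldUseQualified);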
2460
2461 bool shouldUseQualifiedLookup() const {
2462 return DeclContextBits.UseQualifiedLookup;
2463 }
2464
2465 static bool classof(const Decl *D);
2466 static bool classof(const DeclContext *D) { return true; }
2467
2468 void dumpDeclContext() const;
2469 void dumpLookups() const;
2470 void dumpLookups(llvm::raw_ostream &OS, bool DumpDecls = false,
2471 bool Deserialize = false) const;
2472
2473private:
2474 /// Whether this declaration context has had externally visible
2475 /// storage added since the last lookup. In this case, \c LookupPtr's
2476 /// invariant may not hold and needs to be fixed before we perform
2477 /// another lookup.
2478 bool hasNeedToReconcileExternalVisibleStorage() const {
2479 return DeclContextBits.NeedToReconcileExternalVisibleStorage;
2480 }
2481
2482 /// State that this declaration context has had externally visible
2483 /// storage added since the last lookup. In this case, \c LookupPtr's
2484 /// invariant may not hold and needs to be fixed before we perform
2485 /// another lookup.
2486 void setNeedToReconcileExternalVisibleStorage(bool Need = true) const {
2487 DeclContextBits.NeedToReconcileExternalVisibleStorage = Need;
2488 }
2489
2490 /// If \c true, this context may have local lexical declarations
2491 /// that are missing from the lookup table.
2492 bool hasLazyLocalLexicalLookups() const {
2493 return DeclContextBits.HasLazyLocalLexicalLookups;
2494 }
2495
2496 /// If \c true, this context may have local lexical declarations
2497 /// that are missing from the lookup table.
2498 void setHasLazyLocalLexicalLookups(bool HasLLLL = true) const {
2499 DeclContextBits.HasLazyLocalLexicalLookups = HasLLLL;
2500 }
2501
2502 /// If \c true, the external source may have lexical declarations
2503 /// that are missing from the lookup table.
2504 bool hasLazyExternalLexicalLookups() const {
2505 return DeclContextBits.HasLazyExternalLexicalLookups;
2506 }
2507
2508 /// If \c true, the external source may have lexical declarations
2509 /// that are missing from the lookup table.
2510 void setHasLazyExternalLexicalLookups(bool HasLELL = true) const {
2511 DeclContextBits.HasLazyExternalLexicalLookups = HasLELL;
2512 }
2513
2514 void reconcileExternalVisibleStorage() const;
2515 bool LoadLexicalDeclsFromExternalStorage() const;
2516
2517 /// Makes a declaration visible within this context, but
2518 /// suppresses searches for external declarations with the same
2519 /// name.
2520 ///
2521 /// Analogous to makeDeclVisibleInContext, but for the exclusive
2522 /// use of addDeclInternal().
2523 void makeDeclVisibleInContextInternal(NamedDecl *D);
2524
2525 StoredDeclsMap *CreateStoredDeclsMap(ASTContext &C) const;
2526
2527 void loadLazyLocalLexicalLookups();
2528 void buildLookupImpl(DeclContext *DCtx, bool Internal);
2529 void makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
2530 bool Rediscoverable);
2531 void makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal);
2532};
2533
2534inline bool Decl::isTemplateParameter() const {
2535 return getKind() == TemplateTypeParm || getKind() == NonTypeTemplateParm ||
2536 getKind() == TemplateTemplateParm;
2537}
2538
2539// Specialization selected when ToTy is not a known subclass of DeclContext.
2540template <class ToTy,
2541 bool IsKnownSubtype = ::std::is_base_of<DeclContext, ToTy>::value>
2542struct cast_convert_decl_context {
2543 static const ToTy *doit(const DeclContext *Val) {
2544 return static_cast<const ToTy*>(Decl::castFromDeclContext(Val));
2545 }
2546
2547 static ToTy *doit(DeclContext *Val) {
2548 return static_cast<ToTy*>(Decl::castFromDeclContext(Val));
2549 }
2550};
2551
2552// Specialization selected when ToTy is a known subclass of DeclContext.
2553template <class ToTy>
2554struct cast_convert_decl_context<ToTy, true> {
2555 static const ToTy *doit(const DeclContext *Val) {
2556 return static_cast<const ToTy*>(Val);
2557 }
2558
2559 static ToTy *doit(DeclContext *Val) {
2560 return static_cast<ToTy*>(Val);
2561 }
2562};
2563
2564} // namespace clang
2565
2566namespace llvm {
2567
2568/// isa<T>(DeclContext*)
2569template <typename To>
2570struct isa_impl<To, ::clang::DeclContext> {
2571 static bool doit(const ::clang::DeclContext &Val) {
2572 return To::classofKind(Val.getDeclKind());
2573 }
2574};
2575
2576/// cast<T>(DeclContext*)
2577template<class ToTy>
2578struct cast_convert_val<ToTy,
2579 const ::clang::DeclContext,const ::clang::DeclContext> {
2580 static const ToTy &doit(const ::clang::DeclContext &Val) {
2581 return *::clang::cast_convert_decl_context<ToTy>::doit(&Val);
2582 }
2583};
2584
2585template<class ToTy>
2586struct cast_convert_val<ToTy, ::clang::DeclContext, ::clang::DeclContext> {
2587 static ToTy &doit(::clang::DeclContext &Val) {
2588 return *::clang::cast_convert_decl_context<ToTy>::doit(&Val);
2589 }
2590};
2591
2592template<class ToTy>
2593struct cast_convert_val<ToTy,
2594 const ::clang::DeclContext*, const ::clang::DeclContext*> {
2595 static const ToTy *doit(const ::clang::DeclContext *Val) {
2596 return ::clang::cast_convert_decl_context<ToTy>::doit(Val);
2597 }
2598};
2599
2600template<class ToTy>
2601struct cast_convert_val<ToTy, ::clang::DeclContext*, ::clang::DeclContext*> {
2602 static ToTy *doit(::clang::DeclContext *Val) {
2603 return ::clang::cast_convert_decl_context<ToTy>::doit(Val);
2604 }
2605};
2606
2607/// Implement cast_convert_val for Decl -> DeclContext conversions.
2608template<class FromTy>
2609struct cast_convert_val< ::clang::DeclContext, FromTy, FromTy> {
2610 static ::clang::DeclContext &doit(const FromTy &Val) {
2611 return *FromTy::castToDeclContext(&Val);
2612 }
2613};
2614
2615template<class FromTy>
2616struct cast_convert_val< ::clang::DeclContext, FromTy*, FromTy*> {
2617 static ::clang::DeclContext *doit(const FromTy *Val) {
2618 return FromTy::castToDeclContext(Val);
2619 }
2620};
2621
2622template<class FromTy>
2623struct cast_convert_val< const ::clang::DeclContext, FromTy, FromTy> {
2624 static const ::clang::DeclContext &doit(const FromTy &Val) {
2625 return *FromTy::castToDeclContext(&Val);
2626 }
2627};
2628
2629template<class FromTy>
2630struct cast_convert_val< const ::clang::DeclContext, FromTy*, FromTy*> {
2631 static const ::clang::DeclContext *doit(const FromTy *Val) {
2632 return FromTy::castToDeclContext(Val);
2633 }
2634};
2635
2636} // namespace llvm
2637
2638#endif // LLVM_CLANG_AST_DECLBASE_H