File: | build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/clang/lib/CodeGen/CGBuiltin.cpp |
Warning: | line 1027, column 22: Value stored to 'RetType' during its initialization is never read |
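A dead-store diagnostic of this kind fires when an initializer is computed and stored, but every path through the following code overwrites the variable before the stored value is read. A minimal sketch of the pattern (hypothetical code, not from this file):

    int width = defaultWidth();   // flagged: this value is never read
    switch (kind) {
    case A: width = 64; break;    // every case reassigns before any use
    case B: width = 32; break;
    }

The reported occurrence is marked inline in the listing below, at source line 1027.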
1 | //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This contains code to emit Builtin calls as LLVM code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CGCUDARuntime.h" |
14 | #include "CGCXXABI.h" |
15 | #include "CGObjCRuntime.h" |
16 | #include "CGOpenCLRuntime.h" |
17 | #include "CGRecordLayout.h" |
18 | #include "CodeGenFunction.h" |
19 | #include "CodeGenModule.h" |
20 | #include "ConstantEmitter.h" |
21 | #include "PatternInit.h" |
22 | #include "TargetInfo.h" |
23 | #include "clang/AST/ASTContext.h" |
24 | #include "clang/AST/Attr.h" |
25 | #include "clang/AST/Decl.h" |
26 | #include "clang/AST/OSLog.h" |
27 | #include "clang/Basic/TargetBuiltins.h" |
28 | #include "clang/Basic/TargetInfo.h" |
29 | #include "clang/CodeGen/CGFunctionInfo.h" |
30 | #include "llvm/ADT/APFloat.h" |
31 | #include "llvm/ADT/APInt.h" |
32 | #include "llvm/ADT/SmallPtrSet.h" |
33 | #include "llvm/ADT/StringExtras.h" |
34 | #include "llvm/Analysis/ValueTracking.h" |
35 | #include "llvm/IR/DataLayout.h" |
36 | #include "llvm/IR/InlineAsm.h" |
37 | #include "llvm/IR/Intrinsics.h" |
38 | #include "llvm/IR/IntrinsicsAArch64.h" |
39 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
40 | #include "llvm/IR/IntrinsicsARM.h" |
41 | #include "llvm/IR/IntrinsicsBPF.h" |
42 | #include "llvm/IR/IntrinsicsHexagon.h" |
43 | #include "llvm/IR/IntrinsicsNVPTX.h" |
44 | #include "llvm/IR/IntrinsicsPowerPC.h" |
45 | #include "llvm/IR/IntrinsicsR600.h" |
46 | #include "llvm/IR/IntrinsicsRISCV.h" |
47 | #include "llvm/IR/IntrinsicsS390.h" |
48 | #include "llvm/IR/IntrinsicsVE.h" |
49 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
50 | #include "llvm/IR/IntrinsicsX86.h" |
51 | #include "llvm/IR/MDBuilder.h" |
52 | #include "llvm/IR/MatrixBuilder.h" |
53 | #include "llvm/Support/ConvertUTF.h" |
54 | #include "llvm/Support/ScopedPrinter.h" |
55 | #include "llvm/Support/X86TargetParser.h" |
56 | #include <sstream> |
57 | |
58 | using namespace clang; |
59 | using namespace CodeGen; |
60 | using namespace llvm; |
61 | |
62 | static |
63 | int64_t clamp(int64_t Value, int64_t Low, int64_t High) { |
64 | return std::min(High, std::max(Low, Value)); |
65 | } |
66 | |
67 | static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, |
68 | Align AlignmentInBytes) { |
69 | ConstantInt *Byte; |
70 | switch (CGF.getLangOpts().getTrivialAutoVarInit()) { |
71 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
72 | // Nothing to initialize. |
73 | return; |
74 | case LangOptions::TrivialAutoVarInitKind::Zero: |
75 | Byte = CGF.Builder.getInt8(0x00); |
76 | break; |
77 | case LangOptions::TrivialAutoVarInitKind::Pattern: { |
78 | llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext()); |
79 | Byte = llvm::dyn_cast<llvm::ConstantInt>( |
80 | initializationPatternFor(CGF.CGM, Int8)); |
81 | break; |
82 | } |
83 | } |
84 | if (CGF.CGM.stopAutoInit()) |
85 | return; |
86 | auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes); |
87 | I->addAnnotationMetadata("auto-init"); |
88 | } |
89 | |
90 | /// getBuiltinLibFunction - Given a builtin id for a function like |
91 | /// "__builtin_fabsf", return a Function* for "fabsf". |
92 | llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
93 | unsigned BuiltinID) { |
94 | assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
95 | |
96 | // Get the name, skip over the __builtin_ prefix (if necessary). |
97 | StringRef Name; |
98 | GlobalDecl D(FD); |
99 | |
100 | // TODO: This list should be expanded or refactored after all GCC-compatible |
101 | // std libcall builtins are implemented. |
102 | static SmallDenseMap<unsigned, StringRef, 8> F128Builtins{ |
103 | {Builtin::BI__builtin_printf, "__printfieee128"}, |
104 | {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"}, |
105 | {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"}, |
106 | {Builtin::BI__builtin_sprintf, "__sprintfieee128"}, |
107 | {Builtin::BI__builtin_snprintf, "__snprintfieee128"}, |
108 | {Builtin::BI__builtin_fprintf, "__fprintfieee128"}, |
109 | {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"}, |
110 | }; |
111 | |
112 | // If the builtin has been declared explicitly with an assembler label, |
113 | // use the mangled name. This differs from the plain label on platforms |
114 | // that prefix labels. |
115 | if (FD->hasAttr<AsmLabelAttr>()) |
116 | Name = getMangledName(D); |
117 | else { |
118 | // TODO: This mutation should also be applied to targets other than PPC, |
119 | // once the backend supports IEEE 128-bit style libcalls. |
120 | if (getTriple().isPPC64() && |
121 | &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() && |
122 | F128Builtins.find(BuiltinID) != F128Builtins.end()) |
123 | Name = F128Builtins[BuiltinID]; |
124 | else |
125 | Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
126 | } |
127 | |
128 | llvm::FunctionType *Ty = |
129 | cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
130 | |
131 | return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); |
132 | } |
133 | |
134 | /// Emit the conversions required to turn the given value into an |
135 | /// integer of the given size. |
136 | static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
137 | QualType T, llvm::IntegerType *IntType) { |
138 | V = CGF.EmitToMemory(V, T); |
139 | |
140 | if (V->getType()->isPointerTy()) |
141 | return CGF.Builder.CreatePtrToInt(V, IntType); |
142 | |
143 | assert(V->getType() == IntType); |
144 | return V; |
145 | } |
146 | |
147 | static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
148 | QualType T, llvm::Type *ResultType) { |
149 | V = CGF.EmitFromMemory(V, T); |
150 | |
151 | if (ResultType->isPointerTy()) |
152 | return CGF.Builder.CreateIntToPtr(V, ResultType); |
153 | |
154 | assert(V->getType() == ResultType); |
155 | return V; |
156 | } |
157 | |
158 | /// Utility to insert an atomic instruction based on Intrinsic::ID |
159 | /// and the expression node. |
160 | static Value *MakeBinaryAtomicValue( |
161 | CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, |
162 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
163 | |
164 | QualType T = E->getType(); |
165 | assert(E->getArg(0)->getType()->isPointerType()); |
166 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
167 | E->getArg(0)->getType()->getPointeeType())); |
168 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
169 | |
170 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
171 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
172 | |
173 | llvm::IntegerType *IntType = |
174 | llvm::IntegerType::get(CGF.getLLVMContext(), |
175 | CGF.getContext().getTypeSize(T)); |
176 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
177 | |
178 | llvm::Value *Args[2]; |
179 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
180 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
181 | llvm::Type *ValueType = Args[1]->getType(); |
182 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
183 | |
184 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
185 | Kind, Args[0], Args[1], Ordering); |
186 | return EmitFromInt(CGF, Result, T, ValueType); |
187 | } |
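// Illustrative sketch: for a call like __sync_fetch_and_add(&x, 1) on an int, |
// this helper emits roughly |
//   %old = atomicrmw add i32* %x, i32 1 seq_cst |
// and returns %old converted back to the source type, i.e. the pre-update |
// value, matching the fetch-and-<op> semantics. |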
188 | |
189 | static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
190 | Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
191 | Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
192 | |
193 | // Convert the type of the pointer to a pointer to the stored type. |
194 | Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
195 | unsigned SrcAddrSpace = Address->getType()->getPointerAddressSpace(); |
196 | Value *BC = CGF.Builder.CreateBitCast( |
197 | Address, llvm::PointerType::get(Val->getType(), SrcAddrSpace), "cast"); |
198 | LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
199 | LV.setNontemporal(true); |
200 | CGF.EmitStoreOfScalar(Val, LV, false); |
201 | return nullptr; |
202 | } |
203 | |
204 | static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
205 | Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
206 | |
207 | LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
208 | LV.setNontemporal(true); |
209 | return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
210 | } |
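// Illustrative sketch: __builtin_nontemporal_store(v, p) on an int emits a |
// normal store tagged with nontemporal metadata, roughly |
//   store i32 %v, i32* %p, align 4, !nontemporal !0 |
// and the matching load helper does the same for loads. |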
211 | |
212 | static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
213 | llvm::AtomicRMWInst::BinOp Kind, |
214 | const CallExpr *E) { |
215 | return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
216 | } |
217 | |
218 | /// Utility to insert an atomic instruction based on Intrinsic::ID and |
219 | /// the expression node, where the return value is the result of the |
220 | /// operation. |
221 | static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
222 | llvm::AtomicRMWInst::BinOp Kind, |
223 | const CallExpr *E, |
224 | Instruction::BinaryOps Op, |
225 | bool Invert = false) { |
226 | QualType T = E->getType(); |
227 | assert(E->getArg(0)->getType()->isPointerType()); |
228 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
229 | E->getArg(0)->getType()->getPointeeType())); |
230 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
231 | |
232 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
233 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
234 | |
235 | llvm::IntegerType *IntType = |
236 | llvm::IntegerType::get(CGF.getLLVMContext(), |
237 | CGF.getContext().getTypeSize(T)); |
238 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
239 | |
240 | llvm::Value *Args[2]; |
241 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
242 | llvm::Type *ValueType = Args[1]->getType(); |
243 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
244 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
245 | |
246 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
247 | Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
248 | Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
249 | if (Invert) |
250 | Result = |
251 | CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
252 | llvm::ConstantInt::getAllOnesValue(IntType)); |
253 | Result = EmitFromInt(CGF, Result, T, ValueType); |
254 | return RValue::get(Result); |
255 | } |
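// Illustrative sketch: for __sync_add_and_fetch(&x, 1) this emits the |
// atomicrmw and then re-applies the operation to recover the post-update |
// value, roughly |
//   %old = atomicrmw add i32* %x, i32 1 seq_cst |
//   %new = add i32 %old, 1 |
// The Invert flag serves __sync_nand_and_fetch, whose result must be |
// bitwise-negated after the binop. |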
256 | |
257 | /// Utility to insert an atomic cmpxchg instruction. |
258 | /// |
259 | /// @param CGF The current codegen function. |
260 | /// @param E Builtin call expression to convert to cmpxchg. |
261 | /// arg0 - address to operate on |
262 | /// arg1 - value to compare with |
263 | /// arg2 - new value |
264 | /// @param ReturnBool Specifies whether to return success flag of |
265 | /// cmpxchg result or the old value. |
266 | /// |
267 | /// @returns result of cmpxchg, according to ReturnBool |
268 | /// |
269 | /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics, |
270 | /// invoke the function EmitAtomicCmpXchgForMSIntrin. |
271 | static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
272 | bool ReturnBool) { |
273 | QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
274 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
275 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
276 | |
277 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
278 | CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
279 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
280 | |
281 | Value *Args[3]; |
282 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
283 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
284 | llvm::Type *ValueType = Args[1]->getType(); |
285 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
286 | Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
287 | |
288 | Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
289 | Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
290 | llvm::AtomicOrdering::SequentiallyConsistent); |
291 | if (ReturnBool) |
292 | // Extract boolean success flag and zext it to int. |
293 | return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
294 | CGF.ConvertType(E->getType())); |
295 | else |
296 | // Extract old value and emit it using the same type as compare value. |
297 | return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
298 | ValueType); |
299 | } |
300 | |
301 | /// This function should be invoked to emit atomic cmpxchg for Microsoft's |
302 | /// _InterlockedCompareExchange* intrinsics which have the following signature: |
303 | /// T _InterlockedCompareExchange(T volatile *Destination, |
304 | /// T Exchange, |
305 | /// T Comparand); |
306 | /// |
307 | /// Whereas the llvm 'cmpxchg' instruction has the following syntax: |
308 | /// cmpxchg *Destination, Comparand, Exchange. |
309 | /// So we need to swap Comparand and Exchange when invoking |
310 | /// CreateAtomicCmpXchg. That is the reason we could not use the above utility |
311 | /// function MakeAtomicCmpXchgValue since it expects the arguments to be |
312 | /// already swapped. |
313 | |
314 | static |
315 | Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, |
316 | AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { |
317 | assert(E->getArg(0)->getType()->isPointerType()); |
318 | assert(CGF.getContext().hasSameUnqualifiedType( |
319 | E->getType(), E->getArg(0)->getType()->getPointeeType())); |
320 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
321 | E->getArg(1)->getType())); |
322 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
323 | E->getArg(2)->getType())); |
324 | |
325 | auto *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
326 | auto *Comparand = CGF.EmitScalarExpr(E->getArg(2)); |
327 | auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); |
328 | |
329 | // For Release ordering, the failure ordering should be Monotonic. |
330 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? |
331 | AtomicOrdering::Monotonic : |
332 | SuccessOrdering; |
333 | |
334 | // The atomic instruction is marked volatile for consistency with MSVC. This |
335 | // blocks the few atomics optimizations that LLVM has. If we want to optimize |
336 | // _Interlocked* operations in the future, we will have to remove the volatile |
337 | // marker. |
338 | auto *Result = CGF.Builder.CreateAtomicCmpXchg( |
339 | Destination, Comparand, Exchange, |
340 | SuccessOrdering, FailureOrdering); |
341 | Result->setVolatile(true); |
342 | return CGF.Builder.CreateExtractValue(Result, 0); |
343 | } |
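// Illustrative sketch: _InterlockedCompareExchange(&Dest, Exch, Comp) becomes |
//   %pair = cmpxchg volatile i32* %Dest, i32 %Comp, i32 %Exch seq_cst seq_cst |
//   %old  = extractvalue { i32, i1 } %pair, 0 |
// with the operand swap described above: cmpxchg takes the comparand before |
// the new value. |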
344 | |
345 | // 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are |
346 | // prototyped like this: |
347 | // |
348 | // unsigned char _InterlockedCompareExchange128...( |
349 | // __int64 volatile * _Destination, |
350 | // __int64 _ExchangeHigh, |
351 | // __int64 _ExchangeLow, |
352 | // __int64 * _ComparandResult); |
353 | static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, |
354 | const CallExpr *E, |
355 | AtomicOrdering SuccessOrdering) { |
356 | assert(E->getNumArgs() == 4); |
357 | llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
358 | llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1)); |
359 | llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2)); |
360 | llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3)); |
361 | |
362 | assert(Destination->getType()->isPointerTy()); |
363 | assert(!ExchangeHigh->getType()->isPointerTy()); |
364 | assert(!ExchangeLow->getType()->isPointerTy()); |
365 | assert(ComparandPtr->getType()->isPointerTy()); |
366 | |
367 | // For Release ordering, the failure ordering should be Monotonic. |
368 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release |
369 | ? AtomicOrdering::Monotonic |
370 | : SuccessOrdering; |
371 | |
372 | // Convert to i128 pointers and values. |
373 | llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128); |
374 | llvm::Type *Int128PtrTy = Int128Ty->getPointerTo(); |
375 | Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy); |
376 | Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy), |
377 | Int128Ty, CGF.getContext().toCharUnitsFromBits(128)); |
378 | |
379 | // (((i128)hi) << 64) | ((i128)lo) |
380 | ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty); |
381 | ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty); |
382 | ExchangeHigh = |
383 | CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64)); |
384 | llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow); |
385 | |
386 | // Load the comparand for the instruction. |
387 | llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult); |
388 | |
389 | auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
390 | SuccessOrdering, FailureOrdering); |
391 | |
392 | // The atomic instruction is marked volatile for consistency with MSVC. This |
393 | // blocks the few atomics optimizations that LLVM has. If we want to optimize |
394 | // _Interlocked* operations in the future, we will have to remove the volatile |
395 | // marker. |
396 | CXI->setVolatile(true); |
397 | |
398 | // Store the result as an outparameter. |
399 | CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0), |
400 | ComparandResult); |
401 | |
402 | // Get the success boolean and zero extend it to i8. |
403 | Value *Success = CGF.Builder.CreateExtractValue(CXI, 1); |
404 | return CGF.Builder.CreateZExt(Success, CGF.Int8Ty); |
405 | } |
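// Illustrative worked example: the two 64-bit halves form a single i128 |
// exchange value; for hi = 1, lo = 2 the computation |
//   ((i128)1 << 64) | (i128)2 |
// yields 0x00000000000000010000000000000002 before the i128 cmpxchg. |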
406 | |
407 | static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, |
408 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
409 | assert(E->getArg(0)->getType()->isPointerType()); |
410 | |
411 | auto *IntTy = CGF.ConvertType(E->getType()); |
412 | auto *Result = CGF.Builder.CreateAtomicRMW( |
413 | AtomicRMWInst::Add, |
414 | CGF.EmitScalarExpr(E->getArg(0)), |
415 | ConstantInt::get(IntTy, 1), |
416 | Ordering); |
417 | return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1)); |
418 | } |
419 | |
420 | static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, |
421 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
422 | assert(E->getArg(0)->getType()->isPointerType()); |
423 | |
424 | auto *IntTy = CGF.ConvertType(E->getType()); |
425 | auto *Result = CGF.Builder.CreateAtomicRMW( |
426 | AtomicRMWInst::Sub, |
427 | CGF.EmitScalarExpr(E->getArg(0)), |
428 | ConstantInt::get(IntTy, 1), |
429 | Ordering); |
430 | return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1)); |
431 | } |
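// Illustrative sketch: _InterlockedIncrement returns the post-update value, |
// so the atomicrmw (which yields the old value) is followed by a matching |
// add of 1, roughly |
//   %old = atomicrmw add i32* %p, i32 1 seq_cst |
//   %new = add i32 %old, 1   ; returned |
// and _InterlockedDecrement mirrors this with sub. |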
432 | |
433 | // Build a plain volatile load. |
434 | static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { |
435 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
436 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
437 | CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy); |
438 | llvm::Type *ITy = |
439 | llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8); |
440 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
441 | llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize); |
442 | Load->setVolatile(true); |
443 | return Load; |
444 | } |
445 | |
446 | // Build a plain volatile store. |
447 | static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { |
448 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
449 | Value *Value = CGF.EmitScalarExpr(E->getArg(1)); |
450 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
451 | CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy); |
452 | llvm::Type *ITy = |
453 | llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8); |
454 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
455 | llvm::StoreInst *Store = |
456 | CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize); |
457 | Store->setVolatile(true); |
458 | return Store; |
459 | } |
460 | |
461 | // Emit a simple mangled intrinsic that has 1 argument and a return type |
462 | // matching the argument type. Depending on mode, this may be a constrained |
463 | // floating-point intrinsic. |
464 | static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
465 | const CallExpr *E, unsigned IntrinsicID, |
466 | unsigned ConstrainedIntrinsicID) { |
467 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
468 | |
469 | if (CGF.Builder.getIsFPConstrained()) { |
470 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
471 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
472 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0 }); |
473 | } else { |
474 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
475 | return CGF.Builder.CreateCall(F, Src0); |
476 | } |
477 | } |
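// Illustrative sketch: in strict FP mode (e.g. -ffp-exception-behavior=strict) |
// a sqrt builtin lowers to llvm.experimental.constrained.sqrt.f64, which |
// carries rounding-mode and exception-behavior operands, instead of the plain |
// llvm.sqrt.f64. |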
478 | |
479 | // Emit an intrinsic that has 2 operands of the same type as its result. |
480 | // Depending on mode, this may be a constrained floating-point intrinsic. |
481 | static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
482 | const CallExpr *E, unsigned IntrinsicID, |
483 | unsigned ConstrainedIntrinsicID) { |
484 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
485 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
486 | |
487 | if (CGF.Builder.getIsFPConstrained()) { |
488 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
489 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
490 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 }); |
491 | } else { |
492 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
493 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
494 | } |
495 | } |
496 | |
497 | // Emit an intrinsic that has 3 operands of the same type as its result. |
498 | // Depending on mode, this may be a constrained floating-point intrinsic. |
499 | static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
500 | const CallExpr *E, unsigned IntrinsicID, |
501 | unsigned ConstrainedIntrinsicID) { |
502 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
503 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
504 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
505 | |
506 | if (CGF.Builder.getIsFPConstrained()) { |
507 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
508 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
509 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 }); |
510 | } else { |
511 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
512 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
513 | } |
514 | } |
515 | |
516 | // Emit an intrinsic where all operands are of the same type as the result. |
517 | // Depending on mode, this may be a constrained floating-point intrinsic. |
518 | static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
519 | unsigned IntrinsicID, |
520 | unsigned ConstrainedIntrinsicID, |
521 | llvm::Type *Ty, |
522 | ArrayRef<Value *> Args) { |
523 | Function *F; |
524 | if (CGF.Builder.getIsFPConstrained()) |
525 | F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty); |
526 | else |
527 | F = CGF.CGM.getIntrinsic(IntrinsicID, Ty); |
528 | |
529 | if (CGF.Builder.getIsFPConstrained()) |
530 | return CGF.Builder.CreateConstrainedFPCall(F, Args); |
531 | else |
532 | return CGF.Builder.CreateCall(F, Args); |
533 | } |
534 | |
535 | // Emit a simple mangled intrinsic that has 1 argument and a return type |
536 | // matching the argument type. |
537 | static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
538 | unsigned IntrinsicID, |
539 | llvm::StringRef Name = "") { |
540 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
541 | |
542 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
543 | return CGF.Builder.CreateCall(F, Src0, Name); |
544 | } |
545 | |
546 | // Emit an intrinsic that has 2 operands of the same type as its result. |
547 | static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
548 | const CallExpr *E, |
549 | unsigned IntrinsicID) { |
550 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
551 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
552 | |
553 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
554 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
555 | } |
556 | |
557 | // Emit an intrinsic that has 3 operands of the same type as its result. |
558 | static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
559 | const CallExpr *E, |
560 | unsigned IntrinsicID) { |
561 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
562 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
563 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
564 | |
565 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
566 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
567 | } |
568 | |
569 | // Emit an intrinsic that has 1 float or double operand, and 1 integer. |
570 | static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
571 | const CallExpr *E, |
572 | unsigned IntrinsicID) { |
573 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
574 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
575 | |
576 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
577 | return CGF.Builder.CreateCall(F, {Src0, Src1}); |
578 | } |
579 | |
580 | // Emit an intrinsic that has overloaded integer result and fp operand. |
581 | static Value * |
582 | emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
583 | unsigned IntrinsicID, |
584 | unsigned ConstrainedIntrinsicID) { |
585 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
586 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
587 | |
588 | if (CGF.Builder.getIsFPConstrained()) { |
589 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
590 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, |
591 | {ResultType, Src0->getType()}); |
592 | return CGF.Builder.CreateConstrainedFPCall(F, {Src0}); |
593 | } else { |
594 | Function *F = |
595 | CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()}); |
596 | return CGF.Builder.CreateCall(F, Src0); |
597 | } |
598 | } |
599 | |
600 | /// EmitFAbs - Emit a call to @llvm.fabs(). |
601 | static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
602 | Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
603 | llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
604 | Call->setDoesNotAccessMemory(); |
605 | return Call; |
606 | } |
607 | |
608 | /// Emit the computation of the sign bit for a floating point value. Returns |
609 | /// the i1 sign bit value. |
610 | static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
611 | LLVMContext &C = CGF.CGM.getLLVMContext(); |
612 | |
613 | llvm::Type *Ty = V->getType(); |
614 | int Width = Ty->getPrimitiveSizeInBits(); |
615 | llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
616 | V = CGF.Builder.CreateBitCast(V, IntTy); |
617 | if (Ty->isPPC_FP128Ty()) { |
618 | // We want the sign bit of the higher-order double. The bitcast we just |
619 | // did works as if the double-double was stored to memory and then |
620 | // read as an i128. The "store" will put the higher-order double in the |
621 | // lower address in both little- and big-Endian modes, but the "load" |
622 | // will treat those bits as a different part of the i128: the low bits in |
623 | // little-Endian, the high bits in big-Endian. Therefore, on big-Endian |
624 | // we need to shift the high bits down to the low before truncating. |
625 | Width >>= 1; |
626 | if (CGF.getTarget().isBigEndian()) { |
627 | Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
628 | V = CGF.Builder.CreateLShr(V, ShiftCst); |
629 | } |
630 | // We are truncating value in order to extract the higher-order |
631 | // double, which we will be using to extract the sign from. |
632 | IntTy = llvm::IntegerType::get(C, Width); |
633 | V = CGF.Builder.CreateTrunc(V, IntTy); |
634 | } |
635 | Value *Zero = llvm::Constant::getNullValue(IntTy); |
636 | return CGF.Builder.CreateICmpSLT(V, Zero); |
637 | } |
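// Illustrative sketch: for a plain double this reduces to a bitcast plus a |
// signed compare against zero, |
//   %bits = bitcast double %v to i64 |
//   %sign = icmp slt i64 %bits, 0 |
// which is true exactly when the IEEE sign bit is set (including for -0.0). |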
638 | |
639 | static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
640 | const CallExpr *E, llvm::Constant *calleeValue) { |
641 | CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD)); |
642 | return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); |
643 | } |
644 | |
645 | /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
646 | /// depending on IntrinsicID. |
647 | /// |
648 | /// \arg CGF The current codegen function. |
649 | /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
650 | /// \arg X The first argument to the llvm.*.with.overflow.*. |
651 | /// \arg Y The second argument to the llvm.*.with.overflow.*. |
652 | /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
653 | /// \returns The result (i.e. sum/product) returned by the intrinsic. |
654 | static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
655 | const llvm::Intrinsic::ID IntrinsicID, |
656 | llvm::Value *X, llvm::Value *Y, |
657 | llvm::Value *&Carry) { |
658 | // Make sure we have integers of the same width. |
659 | assert(X->getType() == Y->getType() && |
660 | "Arguments must be the same type. (Did you forget to make sure both " |
661 | "arguments have the same integer width?)"); |
662 | |
663 | Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
664 | llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
665 | Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
666 | return CGF.Builder.CreateExtractValue(Tmp, 0); |
667 | } |
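// Illustrative sketch: for Intrinsic::sadd_with_overflow on i32 this emits |
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y) |
// returning the i32 sum and storing the i1 overflow flag into Carry. |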
668 | |
669 | static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
670 | unsigned IntrinsicID, |
671 | int low, int high) { |
672 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
673 | llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
674 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
675 | llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
676 | Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
677 | return Call; |
678 | } |
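// Illustrative sketch: the call's result is annotated with range metadata, |
// so a builtin constrained to [0, 1024) carries !range !{i32 0, i32 1024}. |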
679 | |
680 | namespace { |
681 | struct WidthAndSignedness { |
682 | unsigned Width; |
683 | bool Signed; |
684 | }; |
685 | } |
686 | |
687 | static WidthAndSignedness |
688 | getIntegerWidthAndSignedness(const clang::ASTContext &context, |
689 | const clang::QualType Type) { |
690 | assert(Type->isIntegerType() && "Given type is not an integer."); |
691 | unsigned Width = Type->isBooleanType() ? 1 |
692 | : Type->isBitIntType() ? context.getIntWidth(Type) |
693 | : context.getTypeInfo(Type).Width; |
694 | bool Signed = Type->isSignedIntegerType(); |
695 | return {Width, Signed}; |
696 | } |
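// Illustrative examples: bool yields {1, unsigned}, a signed _BitInt(7) |
// yields {7, signed}, and unsigned int on a typical target yields |
// {32, unsigned}. |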
697 | |
698 | // Given one or more integer types, this function produces an integer type that |
699 | // encompasses them: any value in one of the given types could be expressed in |
700 | // the encompassing type. |
701 | static struct WidthAndSignedness |
702 | EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
703 | assert(Types.size() > 0 && "Empty list of types."); |
704 | |
705 | // If any of the given types is signed, we must return a signed type. |
706 | bool Signed = false; |
707 | for (const auto &Type : Types) { |
708 | Signed |= Type.Signed; |
709 | } |
710 | |
711 | // The encompassing type must have a width greater than or equal to the width |
712 | // of the specified types. Additionally, if the encompassing type is signed, |
713 | // its width must be strictly greater than the width of any unsigned types |
714 | // given. |
715 | unsigned Width = 0; |
716 | for (const auto &Type : Types) { |
717 | unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
718 | if (Width < MinWidth) { |
719 | Width = MinWidth; |
720 | } |
721 | } |
722 | |
723 | return {Width, Signed}; |
724 | } |
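// Illustrative worked example: encompassing {unsigned 32-bit, signed 16-bit} |
// yields {33, signed}; the result must be signed, and a signed type needs one |
// extra bit to cover every unsigned 32-bit value. |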
725 | |
726 | Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
727 | llvm::Type *DestType = Int8PtrTy; |
728 | if (ArgValue->getType() != DestType) |
729 | ArgValue = |
730 | Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
731 | |
732 | Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
733 | return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
734 | } |
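// Illustrative sketch: __builtin_va_start(ap, last) lowers to |
//   call void @llvm.va_start(i8* %ap.cast) |
// after bitcasting the va_list argument to i8*. |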
735 | |
736 | /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
737 | /// __builtin_object_size(p, @p To) is correct |
738 | static bool areBOSTypesCompatible(int From, int To) { |
739 | // Note: Our __builtin_object_size implementation currently treats Type=0 and |
740 | // Type=2 identically. Encoding this implementation detail here may make |
741 | // improving __builtin_object_size difficult in the future, so it's omitted. |
742 | return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
743 | } |
744 | |
745 | static llvm::Value * |
746 | getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
747 | return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); |
748 | } |
749 | |
750 | llvm::Value * |
751 | CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
752 | llvm::IntegerType *ResType, |
753 | llvm::Value *EmittedE, |
754 | bool IsDynamic) { |
755 | uint64_t ObjectSize; |
756 | if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
757 | return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); |
758 | return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); |
759 | } |
760 | |
761 | /// Returns a Value corresponding to the size of the given expression. |
762 | /// This Value may be either of the following: |
763 | /// - A llvm::Argument (if E is a param with the pass_object_size attribute on |
764 | /// it) |
765 | /// - A call to the @llvm.objectsize intrinsic |
766 | /// |
767 | /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null |
768 | /// and we wouldn't otherwise try to reference a pass_object_size parameter, |
769 | /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. |
770 | llvm::Value * |
771 | CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
772 | llvm::IntegerType *ResType, |
773 | llvm::Value *EmittedE, bool IsDynamic) { |
774 | // We need to reference an argument if the pointer is a parameter with the |
775 | // pass_object_size attribute. |
776 | if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
777 | auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
778 | auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
779 | if (Param != nullptr && PS != nullptr && |
780 | areBOSTypesCompatible(PS->getType(), Type)) { |
781 | auto Iter = SizeArguments.find(Param); |
782 | assert(Iter != SizeArguments.end()); |
783 | |
784 | const ImplicitParamDecl *D = Iter->second; |
785 | auto DIter = LocalDeclMap.find(D); |
786 | assert(DIter != LocalDeclMap.end()); |
787 | |
788 | return EmitLoadOfScalar(DIter->second, /*Volatile=*/false, |
789 | getContext().getSizeType(), E->getBeginLoc()); |
790 | } |
791 | } |
792 | |
793 | // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
794 | // evaluate E for side-effects. In either case, we shouldn't lower to |
795 | // @llvm.objectsize. |
796 | if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) |
797 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
798 | |
799 | Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
800 | assert(Ptr->getType()->isPointerTy() && |
801 | "Non-pointer passed to __builtin_object_size?"); |
802 | |
803 | Function *F = |
804 | CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); |
805 | |
806 | // LLVM only supports 0 and 2, so make sure we pass that along as a boolean. |
807 | Value *Min = Builder.getInt1((Type & 2) != 0); |
808 | // For GCC compatibility, __builtin_object_size treats NULL as unknown size. |
809 | Value *NullIsUnknown = Builder.getTrue(); |
810 | Value *Dynamic = Builder.getInt1(IsDynamic); |
811 | return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}); |
812 | } |
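// Illustrative sketch: absent a pass_object_size parameter, |
// __builtin_object_size(p, 2) lowers to roughly |
//   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true, i1 false) |
// i.e. min = true (Type & 2), null-is-unknown = true, dynamic = false. |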
813 | |
814 | namespace { |
815 | /// A struct to generically describe a bit test intrinsic. |
816 | struct BitTest { |
817 | enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; |
818 | enum InterlockingKind : uint8_t { |
819 | Unlocked, |
820 | Sequential, |
821 | Acquire, |
822 | Release, |
823 | NoFence |
824 | }; |
825 | |
826 | ActionKind Action; |
827 | InterlockingKind Interlocking; |
828 | bool Is64Bit; |
829 | |
830 | static BitTest decodeBitTestBuiltin(unsigned BuiltinID); |
831 | }; |
832 | } // namespace |
833 | |
834 | BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { |
835 | switch (BuiltinID) { |
836 | // Main portable variants. |
837 | case Builtin::BI_bittest: |
838 | return {TestOnly, Unlocked, false}; |
839 | case Builtin::BI_bittestandcomplement: |
840 | return {Complement, Unlocked, false}; |
841 | case Builtin::BI_bittestandreset: |
842 | return {Reset, Unlocked, false}; |
843 | case Builtin::BI_bittestandset: |
844 | return {Set, Unlocked, false}; |
845 | case Builtin::BI_interlockedbittestandreset: |
846 | return {Reset, Sequential, false}; |
847 | case Builtin::BI_interlockedbittestandset: |
848 | return {Set, Sequential, false}; |
849 | |
850 | // X86-specific 64-bit variants. |
851 | case Builtin::BI_bittest64: |
852 | return {TestOnly, Unlocked, true}; |
853 | case Builtin::BI_bittestandcomplement64: |
854 | return {Complement, Unlocked, true}; |
855 | case Builtin::BI_bittestandreset64: |
856 | return {Reset, Unlocked, true}; |
857 | case Builtin::BI_bittestandset64: |
858 | return {Set, Unlocked, true}; |
859 | case Builtin::BI_interlockedbittestandreset64: |
860 | return {Reset, Sequential, true}; |
861 | case Builtin::BI_interlockedbittestandset64: |
862 | return {Set, Sequential, true}; |
863 | |
864 | // ARM/AArch64-specific ordering variants. |
865 | case Builtin::BI_interlockedbittestandset_acq: |
866 | return {Set, Acquire, false}; |
867 | case Builtin::BI_interlockedbittestandset_rel: |
868 | return {Set, Release, false}; |
869 | case Builtin::BI_interlockedbittestandset_nf: |
870 | return {Set, NoFence, false}; |
871 | case Builtin::BI_interlockedbittestandreset_acq: |
872 | return {Reset, Acquire, false}; |
873 | case Builtin::BI_interlockedbittestandreset_rel: |
874 | return {Reset, Release, false}; |
875 | case Builtin::BI_interlockedbittestandreset_nf: |
876 | return {Reset, NoFence, false}; |
877 | } |
878 | llvm_unreachable("expected only bittest intrinsics"); |
879 | } |
880 | |
881 | static char bitActionToX86BTCode(BitTest::ActionKind A) { |
882 | switch (A) { |
883 | case BitTest::TestOnly: return '\0'; |
884 | case BitTest::Complement: return 'c'; |
885 | case BitTest::Reset: return 'r'; |
886 | case BitTest::Set: return 's'; |
887 | } |
888 | llvm_unreachable("invalid action"); |
889 | } |
890 | |
891 | static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, |
892 | BitTest BT, |
893 | const CallExpr *E, Value *BitBase, |
894 | Value *BitPos) { |
895 | char Action = bitActionToX86BTCode(BT.Action); |
896 | char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; |
897 | |
898 | // Build the assembly. |
899 | SmallString<64> Asm; |
900 | raw_svector_ostream AsmOS(Asm); |
901 | if (BT.Interlocking != BitTest::Unlocked) |
902 | AsmOS << "lock "; |
903 | AsmOS << "bt"; |
904 | if (Action) |
905 | AsmOS << Action; |
906 | AsmOS << SizeSuffix << " $2, ($1)"; |
907 | |
908 | // Build the constraints. FIXME: We should support immediates when possible. |
909 | std::string Constraints = "={@ccc},r,r,~{cc},~{memory}"; |
910 | std::string MachineClobbers = CGF.getTarget().getClobbers(); |
911 | if (!MachineClobbers.empty()) { |
912 | Constraints += ','; |
913 | Constraints += MachineClobbers; |
914 | } |
915 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
916 | CGF.getLLVMContext(), |
917 | CGF.getContext().getTypeSize(E->getArg(1)->getType())); |
918 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
919 | llvm::FunctionType *FTy = |
920 | llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false); |
921 | |
922 | llvm::InlineAsm *IA = |
923 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
924 | return CGF.Builder.CreateCall(IA, {BitBase, BitPos}); |
925 | } |
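// Illustrative sketch: _interlockedbittestandset builds the asm string |
//   "lock btsl $2, ($1)" |
// with constraints "={@ccc},r,r,~{cc},~{memory}", so the tested bit comes |
// back through the carry flag. |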
926 | |
927 | static llvm::AtomicOrdering |
928 | getBitTestAtomicOrdering(BitTest::InterlockingKind I) { |
929 | switch (I) { |
930 | case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; |
931 | case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; |
932 | case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; |
933 | case BitTest::Release: return llvm::AtomicOrdering::Release; |
934 | case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; |
935 | } |
936 | llvm_unreachable("invalid interlocking"); |
937 | } |
938 | |
939 | /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of |
940 | /// bits and a bit position and read and optionally modify the bit at that |
941 | /// position. The position index can be arbitrarily large, i.e. it can be larger |
942 | /// than 31 or 63, so we need an indexed load in the general case. |
943 | static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, |
944 | unsigned BuiltinID, |
945 | const CallExpr *E) { |
946 | Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); |
947 | Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); |
948 | |
949 | BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); |
950 | |
951 | // X86 has special BT, BTC, BTR, and BTS instructions that handle the array |
952 | // indexing operation internally. Use them if possible. |
953 | if (CGF.getTarget().getTriple().isX86()) |
954 | return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); |
955 | |
956 | // Otherwise, use generic code to load one byte and test the bit. Use all but |
957 | // the bottom three bits as the array index, and the bottom three bits to form |
958 | // a mask. |
959 | // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0; |
960 | Value *ByteIndex = CGF.Builder.CreateAShr( |
961 | BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); |
962 | Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy); |
963 | Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8, |
964 | ByteIndex, "bittest.byteaddr"), |
965 | CGF.Int8Ty, CharUnits::One()); |
966 | Value *PosLow = |
967 | CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty), |
968 | llvm::ConstantInt::get(CGF.Int8Ty, 0x7)); |
969 | |
970 | // The updating instructions will need a mask. |
971 | Value *Mask = nullptr; |
972 | if (BT.Action != BitTest::TestOnly) { |
973 | Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow, |
974 | "bittest.mask"); |
975 | } |
976 | |
977 | // Check the action and ordering of the interlocked intrinsics. |
978 | llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking); |
979 | |
980 | Value *OldByte = nullptr; |
981 | if (Ordering != llvm::AtomicOrdering::NotAtomic) { |
982 | // Emit a combined atomicrmw load/store operation for the interlocked |
983 | // intrinsics. |
984 | llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; |
985 | if (BT.Action == BitTest::Reset) { |
986 | Mask = CGF.Builder.CreateNot(Mask); |
987 | RMWOp = llvm::AtomicRMWInst::And; |
988 | } |
989 | OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask, |
990 | Ordering); |
991 | } else { |
992 | // Emit a plain load for the non-interlocked intrinsics. |
993 | OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte"); |
994 | Value *NewByte = nullptr; |
995 | switch (BT.Action) { |
996 | case BitTest::TestOnly: |
997 | // Don't store anything. |
998 | break; |
999 | case BitTest::Complement: |
1000 | NewByte = CGF.Builder.CreateXor(OldByte, Mask); |
1001 | break; |
1002 | case BitTest::Reset: |
1003 | NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask)); |
1004 | break; |
1005 | case BitTest::Set: |
1006 | NewByte = CGF.Builder.CreateOr(OldByte, Mask); |
1007 | break; |
1008 | } |
1009 | if (NewByte) |
1010 | CGF.Builder.CreateStore(NewByte, ByteAddr); |
1011 | } |
1012 | |
1013 | // However we loaded the old byte, either by plain load or atomicrmw, shift |
1014 | // the bit into the low position and mask it to 0 or 1. |
1015 | Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr"); |
1016 | return CGF.Builder.CreateAnd( |
1017 | ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"); |
1018 | } |
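// Illustrative worked example for the generic path: for BitPos == 35 the |
// byte index is 35 >> 3 == 4 and the in-byte position is 35 & 7 == 3, so |
// the result is (BitBaseI8[4] >> 3) & 1. |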
1019 | |
1020 | static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF, |
1021 | unsigned BuiltinID, |
1022 | const CallExpr *E) { |
1023 | Value *Addr = CGF.EmitScalarExpr(E->getArg(0)); |
1024 | |
1025 | SmallString<64> Asm; |
1026 | raw_svector_ostream AsmOS(Asm); |
1027 | llvm::IntegerType *RetType = CGF.Int32Ty; |
Value stored to 'RetType' during its initialization is never read | |
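// Note on the diagnostic: every reachable case of the switch below assigns |
// RetType before it is read, and the default case is llvm_unreachable, so |
// the Int32Ty initializer above is dead. A possible cleanup (a sketch, not |
// a committed fix) is to declare the variable uninitialized: |
//   llvm::IntegerType *RetType; // set in every reachable switch case |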
1028 | |
1029 | switch (BuiltinID) { |
1030 | case clang::PPC::BI__builtin_ppc_ldarx: |
1031 | AsmOS << "ldarx "; |
1032 | RetType = CGF.Int64Ty; |
1033 | break; |
1034 | case clang::PPC::BI__builtin_ppc_lwarx: |
1035 | AsmOS << "lwarx "; |
1036 | RetType = CGF.Int32Ty; |
1037 | break; |
1038 | case clang::PPC::BI__builtin_ppc_lharx: |
1039 | AsmOS << "lharx "; |
1040 | RetType = CGF.Int16Ty; |
1041 | break; |
1042 | case clang::PPC::BI__builtin_ppc_lbarx: |
1043 | AsmOS << "lbarx "; |
1044 | RetType = CGF.Int8Ty; |
1045 | break; |
1046 | default: |
1047 | llvm_unreachable("Expected only PowerPC load reserve intrinsics"); |
1048 | } |
1049 | |
1050 | AsmOS << "$0, ${1:y}"; |
1051 | |
1052 | std::string Constraints = "=r,*Z,~{memory}"; |
1053 | std::string MachineClobbers = CGF.getTarget().getClobbers(); |
1054 | if (!MachineClobbers.empty()) { |
1055 | Constraints += ','; |
1056 | Constraints += MachineClobbers; |
1057 | } |
1058 | |
1059 | llvm::Type *IntPtrType = RetType->getPointerTo(); |
1060 | llvm::FunctionType *FTy = |
1061 | llvm::FunctionType::get(RetType, {IntPtrType}, false); |
1062 | |
1063 | llvm::InlineAsm *IA = |
1064 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
1065 | llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr}); |
1066 | CI->addParamAttr( |
1067 | 0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType)); |
1068 | return CI; |
1069 | } |
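A hedged usage sketch (the builtin is real; the variable names are illustrative): a 32-bit call such as

    volatile int Word = 0;
    int Old = __builtin_ppc_lwarx(&Word); // becomes inline asm "lwarx $0, ${1:y}"

reaches this helper and is emitted as the single load-reserve instruction with the "=r,*Z,~{memory}" constraint string built above.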
1070 | |
1071 | namespace { |
1072 | enum class MSVCSetJmpKind { |
1073 | _setjmpex, |
1074 | _setjmp3, |
1075 | _setjmp |
1076 | }; |
1077 | } |
1078 | |
1079 | /// MSVC handles setjmp a bit differently on different platforms. On every |
1080 | /// architecture except 32-bit x86, the frame address is passed. On x86, extra |
1081 | /// parameters can be passed as variadic arguments, but we always pass none. |
1082 | static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, |
1083 | const CallExpr *E) { |
1084 | llvm::Value *Arg1 = nullptr; |
1085 | llvm::Type *Arg1Ty = nullptr; |
1086 | StringRef Name; |
1087 | bool IsVarArg = false; |
1088 | if (SJKind == MSVCSetJmpKind::_setjmp3) { |
1089 | Name = "_setjmp3"; |
1090 | Arg1Ty = CGF.Int32Ty; |
1091 | Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0); |
1092 | IsVarArg = true; |
1093 | } else { |
1094 | Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex"; |
1095 | Arg1Ty = CGF.Int8PtrTy; |
1096 | if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) { |
1097 | Arg1 = CGF.Builder.CreateCall( |
1098 | CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy)); |
1099 | } else |
1100 | Arg1 = CGF.Builder.CreateCall( |
1101 | CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy), |
1102 | llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
1103 | } |
1104 | |
1105 | // Mark the call site and declaration with ReturnsTwice. |
1106 | llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; |
1107 | llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( |
1108 | CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1109 | llvm::Attribute::ReturnsTwice); |
1110 | llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction( |
1111 | llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name, |
1112 | ReturnsTwiceAttr, /*Local=*/true); |
1113 | |
1114 | llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( |
1115 | CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); |
1116 | llvm::Value *Args[] = {Buf, Arg1}; |
1117 | llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args); |
1118 | CB->setAttributes(ReturnsTwiceAttr); |
1119 | return RValue::get(CB); |
1120 | } |
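A rough sketch of the non-_setjmp3 path (illustrative; assumes <setjmp.h>): the buffer is bitcast to i8*, the second argument comes from llvm.sponentry on AArch64 or llvm.frameaddress(0) elsewhere, and both the callee and the call site carry returns_twice:

    jmp_buf Buf;
    if (_setjmp(Buf) == 0) {
      // first return; roughly: call i32 @_setjmp(i8* %buf, i8* %frameaddr)
    }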
1121 | |
1122 | // Many MSVC builtins are available on x64, ARM, and AArch64; to avoid
1123 | // repeating code, we handle them here.
1124 | enum class CodeGenFunction::MSVCIntrin { |
1125 | _BitScanForward, |
1126 | _BitScanReverse, |
1127 | _InterlockedAnd, |
1128 | _InterlockedDecrement, |
1129 | _InterlockedExchange, |
1130 | _InterlockedExchangeAdd, |
1131 | _InterlockedExchangeSub, |
1132 | _InterlockedIncrement, |
1133 | _InterlockedOr, |
1134 | _InterlockedXor, |
1135 | _InterlockedExchangeAdd_acq, |
1136 | _InterlockedExchangeAdd_rel, |
1137 | _InterlockedExchangeAdd_nf, |
1138 | _InterlockedExchange_acq, |
1139 | _InterlockedExchange_rel, |
1140 | _InterlockedExchange_nf, |
1141 | _InterlockedCompareExchange_acq, |
1142 | _InterlockedCompareExchange_rel, |
1143 | _InterlockedCompareExchange_nf, |
1144 | _InterlockedCompareExchange128, |
1145 | _InterlockedCompareExchange128_acq, |
1146 | _InterlockedCompareExchange128_rel, |
1147 | _InterlockedCompareExchange128_nf, |
1148 | _InterlockedOr_acq, |
1149 | _InterlockedOr_rel, |
1150 | _InterlockedOr_nf, |
1151 | _InterlockedXor_acq, |
1152 | _InterlockedXor_rel, |
1153 | _InterlockedXor_nf, |
1154 | _InterlockedAnd_acq, |
1155 | _InterlockedAnd_rel, |
1156 | _InterlockedAnd_nf, |
1157 | _InterlockedIncrement_acq, |
1158 | _InterlockedIncrement_rel, |
1159 | _InterlockedIncrement_nf, |
1160 | _InterlockedDecrement_acq, |
1161 | _InterlockedDecrement_rel, |
1162 | _InterlockedDecrement_nf, |
1163 | __fastfail, |
1164 | }; |
1165 | |
1166 | static Optional<CodeGenFunction::MSVCIntrin> |
1167 | translateArmToMsvcIntrin(unsigned BuiltinID) { |
1168 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1169 | switch (BuiltinID) { |
1170 | default: |
1171 | return None; |
1172 | case ARM::BI_BitScanForward: |
1173 | case ARM::BI_BitScanForward64: |
1174 | return MSVCIntrin::_BitScanForward; |
1175 | case ARM::BI_BitScanReverse: |
1176 | case ARM::BI_BitScanReverse64: |
1177 | return MSVCIntrin::_BitScanReverse; |
1178 | case ARM::BI_InterlockedAnd64: |
1179 | return MSVCIntrin::_InterlockedAnd; |
1180 | case ARM::BI_InterlockedExchange64: |
1181 | return MSVCIntrin::_InterlockedExchange; |
1182 | case ARM::BI_InterlockedExchangeAdd64: |
1183 | return MSVCIntrin::_InterlockedExchangeAdd; |
1184 | case ARM::BI_InterlockedExchangeSub64: |
1185 | return MSVCIntrin::_InterlockedExchangeSub; |
1186 | case ARM::BI_InterlockedOr64: |
1187 | return MSVCIntrin::_InterlockedOr; |
1188 | case ARM::BI_InterlockedXor64: |
1189 | return MSVCIntrin::_InterlockedXor; |
1190 | case ARM::BI_InterlockedDecrement64: |
1191 | return MSVCIntrin::_InterlockedDecrement; |
1192 | case ARM::BI_InterlockedIncrement64: |
1193 | return MSVCIntrin::_InterlockedIncrement; |
1194 | case ARM::BI_InterlockedExchangeAdd8_acq: |
1195 | case ARM::BI_InterlockedExchangeAdd16_acq: |
1196 | case ARM::BI_InterlockedExchangeAdd_acq: |
1197 | case ARM::BI_InterlockedExchangeAdd64_acq: |
1198 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1199 | case ARM::BI_InterlockedExchangeAdd8_rel: |
1200 | case ARM::BI_InterlockedExchangeAdd16_rel: |
1201 | case ARM::BI_InterlockedExchangeAdd_rel: |
1202 | case ARM::BI_InterlockedExchangeAdd64_rel: |
1203 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1204 | case ARM::BI_InterlockedExchangeAdd8_nf: |
1205 | case ARM::BI_InterlockedExchangeAdd16_nf: |
1206 | case ARM::BI_InterlockedExchangeAdd_nf: |
1207 | case ARM::BI_InterlockedExchangeAdd64_nf: |
1208 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1209 | case ARM::BI_InterlockedExchange8_acq: |
1210 | case ARM::BI_InterlockedExchange16_acq: |
1211 | case ARM::BI_InterlockedExchange_acq: |
1212 | case ARM::BI_InterlockedExchange64_acq: |
1213 | return MSVCIntrin::_InterlockedExchange_acq; |
1214 | case ARM::BI_InterlockedExchange8_rel: |
1215 | case ARM::BI_InterlockedExchange16_rel: |
1216 | case ARM::BI_InterlockedExchange_rel: |
1217 | case ARM::BI_InterlockedExchange64_rel: |
1218 | return MSVCIntrin::_InterlockedExchange_rel; |
1219 | case ARM::BI_InterlockedExchange8_nf: |
1220 | case ARM::BI_InterlockedExchange16_nf: |
1221 | case ARM::BI_InterlockedExchange_nf: |
1222 | case ARM::BI_InterlockedExchange64_nf: |
1223 | return MSVCIntrin::_InterlockedExchange_nf; |
1224 | case ARM::BI_InterlockedCompareExchange8_acq: |
1225 | case ARM::BI_InterlockedCompareExchange16_acq: |
1226 | case ARM::BI_InterlockedCompareExchange_acq: |
1227 | case ARM::BI_InterlockedCompareExchange64_acq: |
1228 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1229 | case ARM::BI_InterlockedCompareExchange8_rel: |
1230 | case ARM::BI_InterlockedCompareExchange16_rel: |
1231 | case ARM::BI_InterlockedCompareExchange_rel: |
1232 | case ARM::BI_InterlockedCompareExchange64_rel: |
1233 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1234 | case ARM::BI_InterlockedCompareExchange8_nf: |
1235 | case ARM::BI_InterlockedCompareExchange16_nf: |
1236 | case ARM::BI_InterlockedCompareExchange_nf: |
1237 | case ARM::BI_InterlockedCompareExchange64_nf: |
1238 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1239 | case ARM::BI_InterlockedOr8_acq: |
1240 | case ARM::BI_InterlockedOr16_acq: |
1241 | case ARM::BI_InterlockedOr_acq: |
1242 | case ARM::BI_InterlockedOr64_acq: |
1243 | return MSVCIntrin::_InterlockedOr_acq; |
1244 | case ARM::BI_InterlockedOr8_rel: |
1245 | case ARM::BI_InterlockedOr16_rel: |
1246 | case ARM::BI_InterlockedOr_rel: |
1247 | case ARM::BI_InterlockedOr64_rel: |
1248 | return MSVCIntrin::_InterlockedOr_rel; |
1249 | case ARM::BI_InterlockedOr8_nf: |
1250 | case ARM::BI_InterlockedOr16_nf: |
1251 | case ARM::BI_InterlockedOr_nf: |
1252 | case ARM::BI_InterlockedOr64_nf: |
1253 | return MSVCIntrin::_InterlockedOr_nf; |
1254 | case ARM::BI_InterlockedXor8_acq: |
1255 | case ARM::BI_InterlockedXor16_acq: |
1256 | case ARM::BI_InterlockedXor_acq: |
1257 | case ARM::BI_InterlockedXor64_acq: |
1258 | return MSVCIntrin::_InterlockedXor_acq; |
1259 | case ARM::BI_InterlockedXor8_rel: |
1260 | case ARM::BI_InterlockedXor16_rel: |
1261 | case ARM::BI_InterlockedXor_rel: |
1262 | case ARM::BI_InterlockedXor64_rel: |
1263 | return MSVCIntrin::_InterlockedXor_rel; |
1264 | case ARM::BI_InterlockedXor8_nf: |
1265 | case ARM::BI_InterlockedXor16_nf: |
1266 | case ARM::BI_InterlockedXor_nf: |
1267 | case ARM::BI_InterlockedXor64_nf: |
1268 | return MSVCIntrin::_InterlockedXor_nf; |
1269 | case ARM::BI_InterlockedAnd8_acq: |
1270 | case ARM::BI_InterlockedAnd16_acq: |
1271 | case ARM::BI_InterlockedAnd_acq: |
1272 | case ARM::BI_InterlockedAnd64_acq: |
1273 | return MSVCIntrin::_InterlockedAnd_acq; |
1274 | case ARM::BI_InterlockedAnd8_rel: |
1275 | case ARM::BI_InterlockedAnd16_rel: |
1276 | case ARM::BI_InterlockedAnd_rel: |
1277 | case ARM::BI_InterlockedAnd64_rel: |
1278 | return MSVCIntrin::_InterlockedAnd_rel; |
1279 | case ARM::BI_InterlockedAnd8_nf: |
1280 | case ARM::BI_InterlockedAnd16_nf: |
1281 | case ARM::BI_InterlockedAnd_nf: |
1282 | case ARM::BI_InterlockedAnd64_nf: |
1283 | return MSVCIntrin::_InterlockedAnd_nf; |
1284 | case ARM::BI_InterlockedIncrement16_acq: |
1285 | case ARM::BI_InterlockedIncrement_acq: |
1286 | case ARM::BI_InterlockedIncrement64_acq: |
1287 | return MSVCIntrin::_InterlockedIncrement_acq; |
1288 | case ARM::BI_InterlockedIncrement16_rel: |
1289 | case ARM::BI_InterlockedIncrement_rel: |
1290 | case ARM::BI_InterlockedIncrement64_rel: |
1291 | return MSVCIntrin::_InterlockedIncrement_rel; |
1292 | case ARM::BI_InterlockedIncrement16_nf: |
1293 | case ARM::BI_InterlockedIncrement_nf: |
1294 | case ARM::BI_InterlockedIncrement64_nf: |
1295 | return MSVCIntrin::_InterlockedIncrement_nf; |
1296 | case ARM::BI_InterlockedDecrement16_acq: |
1297 | case ARM::BI_InterlockedDecrement_acq: |
1298 | case ARM::BI_InterlockedDecrement64_acq: |
1299 | return MSVCIntrin::_InterlockedDecrement_acq; |
1300 | case ARM::BI_InterlockedDecrement16_rel: |
1301 | case ARM::BI_InterlockedDecrement_rel: |
1302 | case ARM::BI_InterlockedDecrement64_rel: |
1303 | return MSVCIntrin::_InterlockedDecrement_rel; |
1304 | case ARM::BI_InterlockedDecrement16_nf: |
1305 | case ARM::BI_InterlockedDecrement_nf: |
1306 | case ARM::BI_InterlockedDecrement64_nf: |
1307 | return MSVCIntrin::_InterlockedDecrement_nf; |
1308 | } |
1309 | llvm_unreachable("must return from switch");
1310 | } |
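A minimal sketch of the intended use site (the surrounding code here is hypothetical; only translateArmToMsvcIntrin above is from this file): a target-specific emitter tries the shared MSVC path before its own handling.

    if (Optional<CodeGenFunction::MSVCIntrin> MsvcId =
            translateArmToMsvcIntrin(BuiltinID))
      return CGF.EmitMSVCBuiltinExpr(*MsvcId, E); // shared emission below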
1311 | |
1312 | static Optional<CodeGenFunction::MSVCIntrin> |
1313 | translateAarch64ToMsvcIntrin(unsigned BuiltinID) { |
1314 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1315 | switch (BuiltinID) { |
1316 | default: |
1317 | return None; |
1318 | case AArch64::BI_BitScanForward: |
1319 | case AArch64::BI_BitScanForward64: |
1320 | return MSVCIntrin::_BitScanForward; |
1321 | case AArch64::BI_BitScanReverse: |
1322 | case AArch64::BI_BitScanReverse64: |
1323 | return MSVCIntrin::_BitScanReverse; |
1324 | case AArch64::BI_InterlockedAnd64: |
1325 | return MSVCIntrin::_InterlockedAnd; |
1326 | case AArch64::BI_InterlockedExchange64: |
1327 | return MSVCIntrin::_InterlockedExchange; |
1328 | case AArch64::BI_InterlockedExchangeAdd64: |
1329 | return MSVCIntrin::_InterlockedExchangeAdd; |
1330 | case AArch64::BI_InterlockedExchangeSub64: |
1331 | return MSVCIntrin::_InterlockedExchangeSub; |
1332 | case AArch64::BI_InterlockedOr64: |
1333 | return MSVCIntrin::_InterlockedOr; |
1334 | case AArch64::BI_InterlockedXor64: |
1335 | return MSVCIntrin::_InterlockedXor; |
1336 | case AArch64::BI_InterlockedDecrement64: |
1337 | return MSVCIntrin::_InterlockedDecrement; |
1338 | case AArch64::BI_InterlockedIncrement64: |
1339 | return MSVCIntrin::_InterlockedIncrement; |
1340 | case AArch64::BI_InterlockedExchangeAdd8_acq: |
1341 | case AArch64::BI_InterlockedExchangeAdd16_acq: |
1342 | case AArch64::BI_InterlockedExchangeAdd_acq: |
1343 | case AArch64::BI_InterlockedExchangeAdd64_acq: |
1344 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1345 | case AArch64::BI_InterlockedExchangeAdd8_rel: |
1346 | case AArch64::BI_InterlockedExchangeAdd16_rel: |
1347 | case AArch64::BI_InterlockedExchangeAdd_rel: |
1348 | case AArch64::BI_InterlockedExchangeAdd64_rel: |
1349 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1350 | case AArch64::BI_InterlockedExchangeAdd8_nf: |
1351 | case AArch64::BI_InterlockedExchangeAdd16_nf: |
1352 | case AArch64::BI_InterlockedExchangeAdd_nf: |
1353 | case AArch64::BI_InterlockedExchangeAdd64_nf: |
1354 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1355 | case AArch64::BI_InterlockedExchange8_acq: |
1356 | case AArch64::BI_InterlockedExchange16_acq: |
1357 | case AArch64::BI_InterlockedExchange_acq: |
1358 | case AArch64::BI_InterlockedExchange64_acq: |
1359 | return MSVCIntrin::_InterlockedExchange_acq; |
1360 | case AArch64::BI_InterlockedExchange8_rel: |
1361 | case AArch64::BI_InterlockedExchange16_rel: |
1362 | case AArch64::BI_InterlockedExchange_rel: |
1363 | case AArch64::BI_InterlockedExchange64_rel: |
1364 | return MSVCIntrin::_InterlockedExchange_rel; |
1365 | case AArch64::BI_InterlockedExchange8_nf: |
1366 | case AArch64::BI_InterlockedExchange16_nf: |
1367 | case AArch64::BI_InterlockedExchange_nf: |
1368 | case AArch64::BI_InterlockedExchange64_nf: |
1369 | return MSVCIntrin::_InterlockedExchange_nf; |
1370 | case AArch64::BI_InterlockedCompareExchange8_acq: |
1371 | case AArch64::BI_InterlockedCompareExchange16_acq: |
1372 | case AArch64::BI_InterlockedCompareExchange_acq: |
1373 | case AArch64::BI_InterlockedCompareExchange64_acq: |
1374 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1375 | case AArch64::BI_InterlockedCompareExchange8_rel: |
1376 | case AArch64::BI_InterlockedCompareExchange16_rel: |
1377 | case AArch64::BI_InterlockedCompareExchange_rel: |
1378 | case AArch64::BI_InterlockedCompareExchange64_rel: |
1379 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1380 | case AArch64::BI_InterlockedCompareExchange8_nf: |
1381 | case AArch64::BI_InterlockedCompareExchange16_nf: |
1382 | case AArch64::BI_InterlockedCompareExchange_nf: |
1383 | case AArch64::BI_InterlockedCompareExchange64_nf: |
1384 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1385 | case AArch64::BI_InterlockedCompareExchange128: |
1386 | return MSVCIntrin::_InterlockedCompareExchange128; |
1387 | case AArch64::BI_InterlockedCompareExchange128_acq: |
1388 | return MSVCIntrin::_InterlockedCompareExchange128_acq; |
1389 | case AArch64::BI_InterlockedCompareExchange128_nf: |
1390 | return MSVCIntrin::_InterlockedCompareExchange128_nf; |
1391 | case AArch64::BI_InterlockedCompareExchange128_rel: |
1392 | return MSVCIntrin::_InterlockedCompareExchange128_rel; |
1393 | case AArch64::BI_InterlockedOr8_acq: |
1394 | case AArch64::BI_InterlockedOr16_acq: |
1395 | case AArch64::BI_InterlockedOr_acq: |
1396 | case AArch64::BI_InterlockedOr64_acq: |
1397 | return MSVCIntrin::_InterlockedOr_acq; |
1398 | case AArch64::BI_InterlockedOr8_rel: |
1399 | case AArch64::BI_InterlockedOr16_rel: |
1400 | case AArch64::BI_InterlockedOr_rel: |
1401 | case AArch64::BI_InterlockedOr64_rel: |
1402 | return MSVCIntrin::_InterlockedOr_rel; |
1403 | case AArch64::BI_InterlockedOr8_nf: |
1404 | case AArch64::BI_InterlockedOr16_nf: |
1405 | case AArch64::BI_InterlockedOr_nf: |
1406 | case AArch64::BI_InterlockedOr64_nf: |
1407 | return MSVCIntrin::_InterlockedOr_nf; |
1408 | case AArch64::BI_InterlockedXor8_acq: |
1409 | case AArch64::BI_InterlockedXor16_acq: |
1410 | case AArch64::BI_InterlockedXor_acq: |
1411 | case AArch64::BI_InterlockedXor64_acq: |
1412 | return MSVCIntrin::_InterlockedXor_acq; |
1413 | case AArch64::BI_InterlockedXor8_rel: |
1414 | case AArch64::BI_InterlockedXor16_rel: |
1415 | case AArch64::BI_InterlockedXor_rel: |
1416 | case AArch64::BI_InterlockedXor64_rel: |
1417 | return MSVCIntrin::_InterlockedXor_rel; |
1418 | case AArch64::BI_InterlockedXor8_nf: |
1419 | case AArch64::BI_InterlockedXor16_nf: |
1420 | case AArch64::BI_InterlockedXor_nf: |
1421 | case AArch64::BI_InterlockedXor64_nf: |
1422 | return MSVCIntrin::_InterlockedXor_nf; |
1423 | case AArch64::BI_InterlockedAnd8_acq: |
1424 | case AArch64::BI_InterlockedAnd16_acq: |
1425 | case AArch64::BI_InterlockedAnd_acq: |
1426 | case AArch64::BI_InterlockedAnd64_acq: |
1427 | return MSVCIntrin::_InterlockedAnd_acq; |
1428 | case AArch64::BI_InterlockedAnd8_rel: |
1429 | case AArch64::BI_InterlockedAnd16_rel: |
1430 | case AArch64::BI_InterlockedAnd_rel: |
1431 | case AArch64::BI_InterlockedAnd64_rel: |
1432 | return MSVCIntrin::_InterlockedAnd_rel; |
1433 | case AArch64::BI_InterlockedAnd8_nf: |
1434 | case AArch64::BI_InterlockedAnd16_nf: |
1435 | case AArch64::BI_InterlockedAnd_nf: |
1436 | case AArch64::BI_InterlockedAnd64_nf: |
1437 | return MSVCIntrin::_InterlockedAnd_nf; |
1438 | case AArch64::BI_InterlockedIncrement16_acq: |
1439 | case AArch64::BI_InterlockedIncrement_acq: |
1440 | case AArch64::BI_InterlockedIncrement64_acq: |
1441 | return MSVCIntrin::_InterlockedIncrement_acq; |
1442 | case AArch64::BI_InterlockedIncrement16_rel: |
1443 | case AArch64::BI_InterlockedIncrement_rel: |
1444 | case AArch64::BI_InterlockedIncrement64_rel: |
1445 | return MSVCIntrin::_InterlockedIncrement_rel; |
1446 | case AArch64::BI_InterlockedIncrement16_nf: |
1447 | case AArch64::BI_InterlockedIncrement_nf: |
1448 | case AArch64::BI_InterlockedIncrement64_nf: |
1449 | return MSVCIntrin::_InterlockedIncrement_nf; |
1450 | case AArch64::BI_InterlockedDecrement16_acq: |
1451 | case AArch64::BI_InterlockedDecrement_acq: |
1452 | case AArch64::BI_InterlockedDecrement64_acq: |
1453 | return MSVCIntrin::_InterlockedDecrement_acq; |
1454 | case AArch64::BI_InterlockedDecrement16_rel: |
1455 | case AArch64::BI_InterlockedDecrement_rel: |
1456 | case AArch64::BI_InterlockedDecrement64_rel: |
1457 | return MSVCIntrin::_InterlockedDecrement_rel; |
1458 | case AArch64::BI_InterlockedDecrement16_nf: |
1459 | case AArch64::BI_InterlockedDecrement_nf: |
1460 | case AArch64::BI_InterlockedDecrement64_nf: |
1461 | return MSVCIntrin::_InterlockedDecrement_nf; |
1462 | } |
1463 | llvm_unreachable("must return from switch");
1464 | } |
1465 | |
1466 | static Optional<CodeGenFunction::MSVCIntrin> |
1467 | translateX86ToMsvcIntrin(unsigned BuiltinID) { |
1468 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1469 | switch (BuiltinID) { |
1470 | default: |
1471 | return None; |
1472 | case clang::X86::BI_BitScanForward: |
1473 | case clang::X86::BI_BitScanForward64: |
1474 | return MSVCIntrin::_BitScanForward; |
1475 | case clang::X86::BI_BitScanReverse: |
1476 | case clang::X86::BI_BitScanReverse64: |
1477 | return MSVCIntrin::_BitScanReverse; |
1478 | case clang::X86::BI_InterlockedAnd64: |
1479 | return MSVCIntrin::_InterlockedAnd; |
1480 | case clang::X86::BI_InterlockedCompareExchange128: |
1481 | return MSVCIntrin::_InterlockedCompareExchange128; |
1482 | case clang::X86::BI_InterlockedExchange64: |
1483 | return MSVCIntrin::_InterlockedExchange; |
1484 | case clang::X86::BI_InterlockedExchangeAdd64: |
1485 | return MSVCIntrin::_InterlockedExchangeAdd; |
1486 | case clang::X86::BI_InterlockedExchangeSub64: |
1487 | return MSVCIntrin::_InterlockedExchangeSub; |
1488 | case clang::X86::BI_InterlockedOr64: |
1489 | return MSVCIntrin::_InterlockedOr; |
1490 | case clang::X86::BI_InterlockedXor64: |
1491 | return MSVCIntrin::_InterlockedXor; |
1492 | case clang::X86::BI_InterlockedDecrement64: |
1493 | return MSVCIntrin::_InterlockedDecrement; |
1494 | case clang::X86::BI_InterlockedIncrement64: |
1495 | return MSVCIntrin::_InterlockedIncrement; |
1496 | } |
1497 | llvm_unreachable("must return from switch");
1498 | } |
1499 | |
1500 | // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated. |
1501 | Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
1502 | const CallExpr *E) { |
1503 | switch (BuiltinID) { |
1504 | case MSVCIntrin::_BitScanForward: |
1505 | case MSVCIntrin::_BitScanReverse: { |
1506 | Address IndexAddress(EmitPointerWithAlignment(E->getArg(0))); |
1507 | Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
1508 | |
1509 | llvm::Type *ArgType = ArgValue->getType(); |
1510 | llvm::Type *IndexType = IndexAddress.getElementType(); |
1511 | llvm::Type *ResultType = ConvertType(E->getType()); |
1512 | |
1513 | Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
1514 | Value *ResZero = llvm::Constant::getNullValue(ResultType); |
1515 | Value *ResOne = llvm::ConstantInt::get(ResultType, 1); |
1516 | |
1517 | BasicBlock *Begin = Builder.GetInsertBlock(); |
1518 | BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); |
1519 | Builder.SetInsertPoint(End); |
1520 | PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); |
1521 | |
1522 | Builder.SetInsertPoint(Begin); |
1523 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); |
1524 | BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); |
1525 | Builder.CreateCondBr(IsZero, End, NotZero); |
1526 | Result->addIncoming(ResZero, Begin); |
1527 | |
1528 | Builder.SetInsertPoint(NotZero); |
1529 | |
1530 | if (BuiltinID == MSVCIntrin::_BitScanForward) { |
1531 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
1532 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
1533 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
1534 | Builder.CreateStore(ZeroCount, IndexAddress, false); |
1535 | } else { |
1536 | unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
1537 | Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); |
1538 | |
1539 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
1540 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
1541 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
1542 | Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); |
1543 | Builder.CreateStore(Index, IndexAddress, false); |
1544 | } |
1545 | Builder.CreateBr(End); |
1546 | Result->addIncoming(ResOne, NotZero); |
1547 | |
1548 | Builder.SetInsertPoint(End); |
1549 | return Result; |
1550 | } |
1551 | case MSVCIntrin::_InterlockedAnd: |
1552 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); |
1553 | case MSVCIntrin::_InterlockedExchange: |
1554 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); |
1555 | case MSVCIntrin::_InterlockedExchangeAdd: |
1556 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); |
1557 | case MSVCIntrin::_InterlockedExchangeSub: |
1558 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); |
1559 | case MSVCIntrin::_InterlockedOr: |
1560 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); |
1561 | case MSVCIntrin::_InterlockedXor: |
1562 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); |
1563 | case MSVCIntrin::_InterlockedExchangeAdd_acq: |
1564 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1565 | AtomicOrdering::Acquire); |
1566 | case MSVCIntrin::_InterlockedExchangeAdd_rel: |
1567 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1568 | AtomicOrdering::Release); |
1569 | case MSVCIntrin::_InterlockedExchangeAdd_nf: |
1570 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1571 | AtomicOrdering::Monotonic); |
1572 | case MSVCIntrin::_InterlockedExchange_acq: |
1573 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1574 | AtomicOrdering::Acquire); |
1575 | case MSVCIntrin::_InterlockedExchange_rel: |
1576 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1577 | AtomicOrdering::Release); |
1578 | case MSVCIntrin::_InterlockedExchange_nf: |
1579 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1580 | AtomicOrdering::Monotonic); |
1581 | case MSVCIntrin::_InterlockedCompareExchange_acq: |
1582 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire); |
1583 | case MSVCIntrin::_InterlockedCompareExchange_rel: |
1584 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release); |
1585 | case MSVCIntrin::_InterlockedCompareExchange_nf: |
1586 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
1587 | case MSVCIntrin::_InterlockedCompareExchange128: |
1588 | return EmitAtomicCmpXchg128ForMSIntrin( |
1589 | *this, E, AtomicOrdering::SequentiallyConsistent); |
1590 | case MSVCIntrin::_InterlockedCompareExchange128_acq: |
1591 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire); |
1592 | case MSVCIntrin::_InterlockedCompareExchange128_rel: |
1593 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release); |
1594 | case MSVCIntrin::_InterlockedCompareExchange128_nf: |
1595 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
1596 | case MSVCIntrin::_InterlockedOr_acq: |
1597 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1598 | AtomicOrdering::Acquire); |
1599 | case MSVCIntrin::_InterlockedOr_rel: |
1600 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1601 | AtomicOrdering::Release); |
1602 | case MSVCIntrin::_InterlockedOr_nf: |
1603 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1604 | AtomicOrdering::Monotonic); |
1605 | case MSVCIntrin::_InterlockedXor_acq: |
1606 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1607 | AtomicOrdering::Acquire); |
1608 | case MSVCIntrin::_InterlockedXor_rel: |
1609 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1610 | AtomicOrdering::Release); |
1611 | case MSVCIntrin::_InterlockedXor_nf: |
1612 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1613 | AtomicOrdering::Monotonic); |
1614 | case MSVCIntrin::_InterlockedAnd_acq: |
1615 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1616 | AtomicOrdering::Acquire); |
1617 | case MSVCIntrin::_InterlockedAnd_rel: |
1618 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1619 | AtomicOrdering::Release); |
1620 | case MSVCIntrin::_InterlockedAnd_nf: |
1621 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1622 | AtomicOrdering::Monotonic); |
1623 | case MSVCIntrin::_InterlockedIncrement_acq: |
1624 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire); |
1625 | case MSVCIntrin::_InterlockedIncrement_rel: |
1626 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release); |
1627 | case MSVCIntrin::_InterlockedIncrement_nf: |
1628 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic); |
1629 | case MSVCIntrin::_InterlockedDecrement_acq: |
1630 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire); |
1631 | case MSVCIntrin::_InterlockedDecrement_rel: |
1632 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release); |
1633 | case MSVCIntrin::_InterlockedDecrement_nf: |
1634 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic); |
1635 | |
1636 | case MSVCIntrin::_InterlockedDecrement: |
1637 | return EmitAtomicDecrementValue(*this, E); |
1638 | case MSVCIntrin::_InterlockedIncrement: |
1639 | return EmitAtomicIncrementValue(*this, E); |
1640 | |
1641 | case MSVCIntrin::__fastfail: { |
1642 | // Request immediate process termination from the kernel. The instruction |
1643 | // sequences to do this are documented on MSDN: |
1644 | // https://msdn.microsoft.com/en-us/library/dn774154.aspx |
1645 | llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
1646 | StringRef Asm, Constraints; |
1647 | switch (ISA) { |
1648 | default: |
1649 | ErrorUnsupported(E, "__fastfail call for this architecture"); |
1650 | break; |
1651 | case llvm::Triple::x86: |
1652 | case llvm::Triple::x86_64: |
1653 | Asm = "int $$0x29"; |
1654 | Constraints = "{cx}"; |
1655 | break; |
1656 | case llvm::Triple::thumb: |
1657 | Asm = "udf #251"; |
1658 | Constraints = "{r0}"; |
1659 | break; |
1660 | case llvm::Triple::aarch64: |
1661 | Asm = "brk #0xF003"; |
1662 | Constraints = "{w0}"; |
1663 | } |
1664 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); |
1665 | llvm::InlineAsm *IA = |
1666 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
1667 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
1668 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1669 | llvm::Attribute::NoReturn); |
1670 | llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); |
1671 | CI->setAttributes(NoReturnAttr); |
1672 | return CI; |
1673 | } |
1674 | } |
1675 | llvm_unreachable("Incorrect MSVC intrinsic!");
1676 | } |
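In C terms, the _BitScanForward expansion above implements the documented MSVC contract (a sketch, not from this file):

    // unsigned char _BitScanForward(unsigned long *Index, unsigned long Mask)
    //   Mask == 0: return 0 and leave *Index unspecified (hence the PHI above);
    //   otherwise: *Index = cttz(Mask), return 1.
    // _BitScanReverse stores (BitWidth - 1) - ctlz(Mask) instead.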
1677 | |
1678 | namespace { |
1679 | // ARC cleanup for __builtin_os_log_format |
1680 | struct CallObjCArcUse final : EHScopeStack::Cleanup { |
1681 | CallObjCArcUse(llvm::Value *object) : object(object) {} |
1682 | llvm::Value *object; |
1683 | |
1684 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1685 | CGF.EmitARCIntrinsicUse(object); |
1686 | } |
1687 | }; |
1688 | } |
1689 | |
1690 | Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
1691 | BuiltinCheckKind Kind) { |
1692 | assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1693 | && "Unsupported builtin check kind");
1694 | |
1695 | Value *ArgValue = EmitScalarExpr(E); |
1696 | if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) |
1697 | return ArgValue; |
1698 | |
1699 | SanitizerScope SanScope(this); |
1700 | Value *Cond = Builder.CreateICmpNE( |
1701 | ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); |
1702 | EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), |
1703 | SanitizerHandler::InvalidBuiltin, |
1704 | {EmitCheckSourceLocation(E->getExprLoc()), |
1705 | llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, |
1706 | None); |
1707 | return ArgValue; |
1708 | } |
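What this guards, in user terms (a sketch; Val is illustrative, and the check is only emitted with -fsanitize=builtin on targets where clz/ctz of zero is undefined):

    unsigned Val = 0;
    int Lz = __builtin_clz(Val); // undefined input; the emitted check reports
                                 // "invalid builtin" instead of returning garbage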
1709 | |
1710 | /// Get the argument type for arguments to os_log_helper. |
1711 | static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
1712 | QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false); |
1713 | return C.getCanonicalType(UnsignedTy); |
1714 | } |
1715 | |
1716 | llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
1717 | const analyze_os_log::OSLogBufferLayout &Layout, |
1718 | CharUnits BufferAlignment) { |
1719 | ASTContext &Ctx = getContext(); |
1720 | |
1721 | llvm::SmallString<64> Name; |
1722 | { |
1723 | raw_svector_ostream OS(Name); |
1724 | OS << "__os_log_helper"; |
1725 | OS << "_" << BufferAlignment.getQuantity(); |
1726 | OS << "_" << int(Layout.getSummaryByte()); |
1727 | OS << "_" << int(Layout.getNumArgsByte()); |
1728 | for (const auto &Item : Layout.Items) |
1729 | OS << "_" << int(Item.getSizeByte()) << "_" |
1730 | << int(Item.getDescriptorByte()); |
1731 | } |
1732 | |
1733 | if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
1734 | return F; |
1735 | |
1736 | llvm::SmallVector<QualType, 4> ArgTys; |
1737 | FunctionArgList Args; |
1738 | Args.push_back(ImplicitParamDecl::Create( |
1739 | Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy, |
1740 | ImplicitParamDecl::Other)); |
1741 | ArgTys.emplace_back(Ctx.VoidPtrTy); |
1742 | |
1743 | for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
1744 | char Size = Layout.Items[I].getSizeByte(); |
1745 | if (!Size) |
1746 | continue; |
1747 | |
1748 | QualType ArgTy = getOSLogArgType(Ctx, Size); |
1749 | Args.push_back(ImplicitParamDecl::Create( |
1750 | Ctx, nullptr, SourceLocation(), |
1751 | &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy, |
1752 | ImplicitParamDecl::Other)); |
1753 | ArgTys.emplace_back(ArgTy); |
1754 | } |
1755 | |
1756 | QualType ReturnTy = Ctx.VoidTy; |
1757 | |
1758 | // The helper function has linkonce_odr linkage to enable the linker to merge |
1759 | // identical functions. To ensure the merging always happens, 'noinline' is |
1760 | // attached to the function when compiling with -Oz. |
1761 | const CGFunctionInfo &FI = |
1762 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args); |
1763 | llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); |
1764 | llvm::Function *Fn = llvm::Function::Create( |
1765 | FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); |
1766 | Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
1767 | CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false); |
1768 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); |
1769 | Fn->setDoesNotThrow(); |
1770 | |
1771 | // Attach 'noinline' at -Oz. |
1772 | if (CGM.getCodeGenOpts().OptimizeSize == 2) |
1773 | Fn->addFnAttr(llvm::Attribute::NoInline); |
1774 | |
1775 | auto NL = ApplyDebugLocation::CreateEmpty(*this); |
1776 | StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args); |
1777 | |
1778 | // Create a scope with an artificial location for the body of this function. |
1779 | auto AL = ApplyDebugLocation::CreateArtificial(*this); |
1780 | |
1781 | CharUnits Offset; |
1782 | Address BufAddr = |
1783 | Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty, |
1784 | BufferAlignment); |
1785 | Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), |
1786 | Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); |
1787 | Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), |
1788 | Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); |
1789 | |
1790 | unsigned I = 1; |
1791 | for (const auto &Item : Layout.Items) { |
1792 | Builder.CreateStore( |
1793 | Builder.getInt8(Item.getDescriptorByte()), |
1794 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); |
1795 | Builder.CreateStore( |
1796 | Builder.getInt8(Item.getSizeByte()), |
1797 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); |
1798 | |
1799 | CharUnits Size = Item.size(); |
1800 | if (!Size.getQuantity()) |
1801 | continue; |
1802 | |
1803 | Address Arg = GetAddrOfLocalVar(Args[I]); |
1804 | Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); |
1805 | Addr = |
1806 | Builder.CreateElementBitCast(Addr, Arg.getElementType(), "argDataCast"); |
1807 | Builder.CreateStore(Builder.CreateLoad(Arg), Addr); |
1808 | Offset += Size; |
1809 | ++I; |
1810 | } |
1811 | |
1812 | FinishFunction(); |
1813 | |
1814 | return Fn; |
1815 | } |
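A hedged illustration of the naming scheme built above: for buffer alignment 8, summary byte 2, and one argument of size 4 with descriptor 0, the helper would look like

    // void __os_log_helper_8_2_1_4_0(void *buffer, uint32_t arg0);

emitted linkonce_odr/hidden so identical helpers from different TUs merge at link time (the exact bytes depend on the format string).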
1816 | |
1817 | RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
1818 | assert(E.getNumArgs() >= 2 &&
1819 | "__builtin_os_log_format takes at least 2 arguments");
1820 | ASTContext &Ctx = getContext(); |
1821 | analyze_os_log::OSLogBufferLayout Layout; |
1822 | analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); |
1823 | Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); |
1824 | llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
1825 | |
1826 | // Ignore argument 1, the format string. It is not currently used. |
1827 | CallArgList Args; |
1828 | Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); |
1829 | |
1830 | for (const auto &Item : Layout.Items) { |
1831 | int Size = Item.getSizeByte(); |
1832 | if (!Size) |
1833 | continue; |
1834 | |
1835 | llvm::Value *ArgVal; |
1836 | |
1837 | if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) { |
1838 | uint64_t Val = 0; |
1839 | for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I) |
1840 | Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8; |
1841 | ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val)); |
1842 | } else if (const Expr *TheExpr = Item.getExpr()) { |
1843 | ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false); |
1844 | |
1845 | // If a temporary object that requires destruction after the full |
1846 | // expression is passed, push a lifetime-extended cleanup to extend its |
1847 | // lifetime to the end of the enclosing block scope. |
1848 | auto LifetimeExtendObject = [&](const Expr *E) { |
1849 | E = E->IgnoreParenCasts(); |
1850 | // Extend lifetimes of objects returned by function calls and message |
1851 | // sends. |
1852 | |
1853 | // FIXME: We should do this in other cases in which temporaries are |
1854 | // created including arguments of non-ARC types (e.g., C++ |
1855 | // temporaries). |
1856 | if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E)) |
1857 | return true; |
1858 | return false; |
1859 | }; |
1860 | |
1861 | if (TheExpr->getType()->isObjCRetainableType() && |
1862 | getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) { |
1863 | assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1864 | "Only scalar can be an ObjC retainable type");
1865 | if (!isa<Constant>(ArgVal)) { |
1866 | CleanupKind Cleanup = getARCCleanupKind(); |
1867 | QualType Ty = TheExpr->getType(); |
1868 | Address Alloca = Address::invalid(); |
1869 | Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca); |
1870 | ArgVal = EmitARCRetain(Ty, ArgVal); |
1871 | Builder.CreateStore(ArgVal, Addr); |
1872 | pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty, |
1873 | CodeGenFunction::destroyARCStrongPrecise, |
1874 | Cleanup & EHCleanup); |
1875 | |
1876 | // Push a clang.arc.use call to ensure ARC optimizer knows that the |
1877 | // argument has to be alive. |
1878 | if (CGM.getCodeGenOpts().OptimizationLevel != 0) |
1879 | pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal); |
1880 | } |
1881 | } |
1882 | } else { |
1883 | ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); |
1884 | } |
1885 | |
1886 | unsigned ArgValSize = |
1887 | CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); |
1888 | llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), |
1889 | ArgValSize); |
1890 | ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); |
1891 | CanQualType ArgTy = getOSLogArgType(Ctx, Size); |
1892 | // If ArgVal has type x86_fp80, zero-extend ArgVal. |
1893 | ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); |
1894 | Args.add(RValue::get(ArgVal), ArgTy); |
1895 | } |
1896 | |
1897 | const CGFunctionInfo &FI = |
1898 | CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); |
1899 | llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
1900 | Layout, BufAddr.getAlignment()); |
1901 | EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); |
1902 | return RValue::get(BufAddr.getPointer()); |
1903 | } |
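Caller-side sketch (the builtins are real; err and msg are illustrative): the sibling __builtin_os_log_format_buffer_size computes the layout size, and this function packs the buffer and returns its start.

    char Buf[__builtin_os_log_format_buffer_size("%d: %s", err, msg)];
    __builtin_os_log_format(Buf, "%d: %s", err, msg);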
1904 | |
1905 | static bool isSpecialUnsignedMultiplySignedResult( |
1906 | unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, |
1907 | WidthAndSignedness ResultInfo) { |
1908 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1909 | Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width && |
1910 | !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed; |
1911 | } |
1912 | |
1913 | static RValue EmitCheckedUnsignedMultiplySignedResult( |
1914 | CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, |
1915 | const clang::Expr *Op2, WidthAndSignedness Op2Info, |
1916 | const clang::Expr *ResultArg, QualType ResultQTy, |
1917 | WidthAndSignedness ResultInfo) { |
1918 | assert(isSpecialUnsignedMultiplySignedResult(
1919 | Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
1920 | "Cannot specialize this multiply");
1921 | |
1922 | llvm::Value *V1 = CGF.EmitScalarExpr(Op1); |
1923 | llvm::Value *V2 = CGF.EmitScalarExpr(Op2); |
1924 | |
1925 | llvm::Value *HasOverflow; |
1926 | llvm::Value *Result = EmitOverflowIntrinsic( |
1927 | CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow); |
1928 | |
1929 | // The intrinsic call will detect overflow when the value is > UINT_MAX;
1930 | // however, since the original builtin had a signed result, we also need to
1931 | // report an overflow when the result is greater than INT_MAX.
1932 | auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width); |
1933 | llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax); |
1934 | |
1935 | llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue); |
1936 | HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow); |
1937 | |
1938 | bool isVolatile = |
1939 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
1940 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1941 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
1942 | isVolatile); |
1943 | return RValue::get(HasOverflow); |
1944 | } |
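An illustrative trigger for this specialization: both operands unsigned, result signed and of the same width. umul.with.overflow alone only flags products above UINT_MAX, so the extra INT_MAX compare is what catches this case:

    unsigned A = 60000, B = 50000; // product 3,000,000,000 fits in unsigned
    int R;
    bool Ovf = __builtin_mul_overflow(A, B, &R); // true: exceeds INT_MAX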
1945 | |
1946 | /// Determine if a binop is a checked mixed-sign multiply we can specialize. |
1947 | static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
1948 | WidthAndSignedness Op1Info, |
1949 | WidthAndSignedness Op2Info, |
1950 | WidthAndSignedness ResultInfo) { |
1951 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1952 | std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width && |
1953 | Op1Info.Signed != Op2Info.Signed; |
1954 | } |
1955 | |
1956 | /// Emit a checked mixed-sign multiply. This is a cheaper specialization of |
1957 | /// the generic checked-binop irgen. |
1958 | static RValue |
1959 | EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
1960 | WidthAndSignedness Op1Info, const clang::Expr *Op2, |
1961 | WidthAndSignedness Op2Info, |
1962 | const clang::Expr *ResultArg, QualType ResultQTy, |
1963 | WidthAndSignedness ResultInfo) { |
1964 | assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1965 | Op2Info, ResultInfo) &&
1966 | "Not a mixed-sign multiplication we can specialize");
1967 | |
1968 | // Emit the signed and unsigned operands. |
1969 | const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
1970 | const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
1971 | llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); |
1972 | llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); |
1973 | unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width; |
1974 | unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width; |
1975 | |
1976 | // One of the operands may be smaller than the other. If so, [s|z]ext it. |
1977 | if (SignedOpWidth < UnsignedOpWidth) |
1978 | Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext"); |
1979 | if (UnsignedOpWidth < SignedOpWidth) |
1980 | Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext"); |
1981 | |
1982 | llvm::Type *OpTy = Signed->getType(); |
1983 | llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); |
1984 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1985 | llvm::Type *ResTy = ResultPtr.getElementType(); |
1986 | unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width); |
1987 | |
1988 | // Take the absolute value of the signed operand. |
1989 | llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); |
1990 | llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); |
1991 | llvm::Value *AbsSigned = |
1992 | CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); |
1993 | |
1994 | // Perform a checked unsigned multiplication. |
1995 | llvm::Value *UnsignedOverflow; |
1996 | llvm::Value *UnsignedResult = |
1997 | EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, |
1998 | Unsigned, UnsignedOverflow); |
1999 | |
2000 | llvm::Value *Overflow, *Result; |
2001 | if (ResultInfo.Signed) { |
2002 | // Signed overflow occurs if the result is greater than INT_MAX or less
2003 | // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2004 | auto IntMax = |
2005 | llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth); |
2006 | llvm::Value *MaxResult = |
2007 | CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), |
2008 | CGF.Builder.CreateZExt(IsNegative, OpTy)); |
2009 | llvm::Value *SignedOverflow = |
2010 | CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); |
2011 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); |
2012 | |
2013 | // Prepare the signed result (possibly by negating it). |
2014 | llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); |
2015 | llvm::Value *SignedResult = |
2016 | CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); |
2017 | Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); |
2018 | } else { |
2019 | // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. |
2020 | llvm::Value *Underflow = CGF.Builder.CreateAnd( |
2021 | IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); |
2022 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); |
2023 | if (ResultInfo.Width < OpWidth) { |
2024 | auto IntMax = |
2025 | llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); |
2026 | llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
2027 | UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); |
2028 | Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); |
2029 | } |
2030 | |
2031 | // Negate the product if it would be negative in infinite precision. |
2032 | Result = CGF.Builder.CreateSelect( |
2033 | IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); |
2034 | |
2035 | Result = CGF.Builder.CreateTrunc(Result, ResTy); |
2036 | } |
2037 | assert(Overflow && Result && "Missing overflow or result");
2038 | |
2039 | bool isVolatile = |
2040 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
2041 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
2042 | isVolatile); |
2043 | return RValue::get(Overflow); |
2044 | } |
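For example (a sketch), same-width mixed-sign calls handled by this path:

    int R;
    bool Ok  = !__builtin_mul_overflow(-3, 5u, &R);          // R = -15, no overflow
    bool Ovf = __builtin_mul_overflow(-2, 2000000000u, &R);  // true: -4e9 < INT_MIN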
2045 | |
2046 | static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType, |
2047 | LValue RecordLV, CharUnits Align, |
2048 | llvm::FunctionCallee Func, int Lvl) { |
2049 | ASTContext &Context = CGF.getContext(); |
2050 | RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition(); |
2051 | std::string Pad = std::string(Lvl * 4, ' '); |
2052 | std::string ElementPad = std::string((Lvl + 1) * 4, ' '); |
2053 | |
2054 | PrintingPolicy Policy(Context.getLangOpts()); |
2055 | Policy.AnonymousTagLocations = false; |
2056 | Value *GString = CGF.Builder.CreateGlobalStringPtr( |
2057 | llvm::Twine(Pad).concat(RType.getAsString(Policy)).concat(" {\n").str()); |
2058 | Value *Res = CGF.Builder.CreateCall(Func, {GString}); |
2059 | |
2060 | static llvm::DenseMap<QualType, const char *> Types; |
2061 | if (Types.empty()) { |
2062 | Types[Context.CharTy] = "%c"; |
2063 | Types[Context.BoolTy] = "%d"; |
2064 | Types[Context.SignedCharTy] = "%hhd"; |
2065 | Types[Context.UnsignedCharTy] = "%hhu"; |
2066 | Types[Context.IntTy] = "%d"; |
2067 | Types[Context.UnsignedIntTy] = "%u"; |
2068 | Types[Context.LongTy] = "%ld"; |
2069 | Types[Context.UnsignedLongTy] = "%lu"; |
2070 | Types[Context.LongLongTy] = "%lld"; |
2071 | Types[Context.UnsignedLongLongTy] = "%llu"; |
2072 | Types[Context.ShortTy] = "%hd"; |
2073 | Types[Context.UnsignedShortTy] = "%hu"; |
2074 | Types[Context.VoidPtrTy] = "%p"; |
2075 | Types[Context.FloatTy] = "%f"; |
2076 | Types[Context.DoubleTy] = "%f"; |
2077 | Types[Context.LongDoubleTy] = "%Lf"; |
2078 | Types[Context.getPointerType(Context.CharTy)] = "%s"; |
2079 | Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s"; |
2080 | } |
2081 | |
2082 | for (const auto *FD : RD->fields()) { |
2083 | Value *TmpRes = nullptr; |
2084 | |
2085 | std::string Format = llvm::Twine(ElementPad) |
2086 | .concat(FD->getType().getAsString()) |
2087 | .concat(llvm::Twine(' ')) |
2088 | .concat(FD->getNameAsString()) |
2089 | .str(); |
2090 | |
2091 | if (FD->isBitField()) { |
2092 | unsigned BitfieldWidth = FD->getBitWidthValue(CGF.getContext()); |
2093 | |
2094 | // If the current field is an unnamed bitfield, dump only one ' '
2095 | // between the type name and ':'.
2096 | if (!FD->getDeclName().isEmpty()) |
2097 | Format += ' '; |
2098 | Format += llvm::Twine(": ").concat(llvm::Twine(BitfieldWidth)).str(); |
2099 | |
2100 | // If the current field is a zero-width bitfield, just dump a string
2101 | // like 'type-name : 0'.
2102 | if (FD->isZeroSize(CGF.getContext())) { |
2103 | Format += "\n"; |
2104 | GString = CGF.Builder.CreateGlobalStringPtr(Format); |
2105 | TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
2106 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2107 | continue; |
2108 | } |
2109 | } |
2110 | |
2111 | LValue FieldLV = CGF.EmitLValueForField(RecordLV, FD); |
2112 | QualType CanonicalType = |
2113 | FD->getType().getUnqualifiedType().getCanonicalType(); |
2114 | |
2115 | // If the field is itself a record type, recurse into it.
2116 | if (CanonicalType->isRecordType()) { |
2117 | TmpRes = dumpRecord(CGF, CanonicalType, FieldLV, Align, Func, Lvl + 1); |
2118 | Res = CGF.Builder.CreateAdd(TmpRes, Res); |
2119 | continue; |
2120 | } |
2121 | |
2122 | // Determine the best printf-style format for the current field.
2123 | const char *TypeFormat = Types.find(CanonicalType) == Types.end() |
2124 | ? Types[Context.VoidPtrTy] |
2125 | : Types[CanonicalType]; |
2126 | |
2127 | GString = CGF.Builder.CreateGlobalStringPtr(llvm::Twine(Format) |
2128 | .concat(" = ") |
2129 | .concat(TypeFormat) |
2130 | .concat(llvm::Twine('\n')) |
2131 | .str()); |
2132 | |
2133 | RValue RV = FD->isBitField() |
2134 | ? CGF.EmitLoadOfBitfieldLValue(FieldLV, FD->getLocation()) |
2135 | : CGF.EmitLoadOfLValue(FieldLV, FD->getLocation()); |
2136 | TmpRes = CGF.Builder.CreateCall(Func, {GString, RV.getScalarVal()}); |
2137 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2138 | } |
2139 | |
2140 | GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n"); |
2141 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
2142 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2143 | return Res; |
2144 | } |
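This routine does the printing for __builtin_dump_struct; a hedged usage sketch (clang 14-era signature, taking the callback by address):

    struct Point { int x, y; };
    struct Point P = {1, 2};
    __builtin_dump_struct(&P, &printf);
    // prints roughly:  struct Point {
    //                      int x = 1
    //                      int y = 2
    //                  }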
2145 | |
2146 | static bool |
2147 | TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, |
2148 | llvm::SmallPtrSetImpl<const Decl *> &Seen) { |
2149 | if (const auto *Arr = Ctx.getAsArrayType(Ty)) |
2150 | Ty = Ctx.getBaseElementType(Arr); |
2151 | |
2152 | const auto *Record = Ty->getAsCXXRecordDecl(); |
2153 | if (!Record) |
2154 | return false; |
2155 | |
2156 | // We've already checked this type, or are in the process of checking it. |
2157 | if (!Seen.insert(Record).second) |
2158 | return false; |
2159 | |
2160 | assert(Record->hasDefinition() &&
2161 | "Incomplete types should already be diagnosed");
2162 | |
2163 | if (Record->isDynamicClass()) |
2164 | return true; |
2165 | |
2166 | for (FieldDecl *F : Record->fields()) { |
2167 | if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen)) |
2168 | return true; |
2169 | } |
2170 | return false; |
2171 | } |
2172 | |
2173 | /// Determine if the specified type requires laundering by checking if it is a |
2174 | /// dynamic class type or contains a subobject which is a dynamic class type. |
2175 | static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { |
2176 | if (!CGM.getCodeGenOpts().StrictVTablePointers) |
2177 | return false; |
2178 | llvm::SmallPtrSet<const Decl *, 16> Seen; |
2179 | return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen); |
2180 | } |
2181 | |
2182 | RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { |
2183 | llvm::Value *Src = EmitScalarExpr(E->getArg(0)); |
2184 | llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1)); |
2185 | |
2186 | // The builtin's shift arg may have a different type than the source arg and |
2187 | // result, but the LLVM intrinsic uses the same type for all values. |
2188 | llvm::Type *Ty = Src->getType(); |
2189 | ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false); |
2190 | |
2191 | // Rotate is a special case of LLVM funnel shift - the first two args are the same.
2192 | unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; |
2193 | Function *F = CGM.getIntrinsic(IID, Ty); |
2194 | return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt })); |
2195 | } |
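 | 
 | // NOTE (editorial sketch, not part of the original file): fshl(Src, Src, Amt)
 | // is a left rotate because the funnel shift concatenates its first two
 | // operands into one double-width value and shifts by Amt modulo the bit
 | // width. A minimal C++ model of the i32 lowering above:
 | #include <cstdint>
 | static inline uint32_t rotl32Model(uint32_t X, uint32_t N) {
 |   N &= 31;                                  // funnel-shift amount is mod 32
 |   return (X << N) | (X >> ((32 - N) & 31)); // == llvm.fshl.i32(X, X, N)
 | }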
2196 | |
2197 | // Map math builtins for long-double to their f128 versions.
2198 | static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) { |
2199 | switch (BuiltinID) { |
2200 | #define MUTATE_LDBL(func) \ |
2201 | case Builtin::BI__builtin_##func##l: \ |
2202 | return Builtin::BI__builtin_##func##f128; |
2203 | MUTATE_LDBL(sqrt) |
2204 | MUTATE_LDBL(cbrt) |
2205 | MUTATE_LDBL(fabs) |
2206 | MUTATE_LDBL(log) |
2207 | MUTATE_LDBL(log2) |
2208 | MUTATE_LDBL(log10) |
2209 | MUTATE_LDBL(log1p) |
2210 | MUTATE_LDBL(logb) |
2211 | MUTATE_LDBL(exp) |
2212 | MUTATE_LDBL(exp2) |
2213 | MUTATE_LDBL(expm1) |
2214 | MUTATE_LDBL(fdim) |
2215 | MUTATE_LDBL(hypot) |
2216 | MUTATE_LDBL(ilogb) |
2217 | MUTATE_LDBL(pow) |
2218 | MUTATE_LDBL(fmin) |
2219 | MUTATE_LDBL(fmax) |
2220 | MUTATE_LDBL(ceil) |
2221 | MUTATE_LDBL(trunc) |
2222 | MUTATE_LDBL(rint) |
2223 | MUTATE_LDBL(nearbyint) |
2224 | MUTATE_LDBL(round) |
2225 | MUTATE_LDBL(floor) |
2226 | MUTATE_LDBL(lround) |
2227 | MUTATE_LDBL(llround) |
2228 | MUTATE_LDBL(lrint) |
2229 | MUTATE_LDBL(llrint) |
2230 | MUTATE_LDBL(fmod) |
2231 | MUTATE_LDBL(modf) |
2232 | MUTATE_LDBL(nan) |
2233 | MUTATE_LDBL(nans) |
2234 | MUTATE_LDBL(inf) |
2235 | MUTATE_LDBL(fma) |
2236 | MUTATE_LDBL(sin) |
2237 | MUTATE_LDBL(cos) |
2238 | MUTATE_LDBL(tan) |
2239 | MUTATE_LDBL(sinh) |
2240 | MUTATE_LDBL(cosh) |
2241 | MUTATE_LDBL(tanh) |
2242 | MUTATE_LDBL(asin) |
2243 | MUTATE_LDBL(acos) |
2244 | MUTATE_LDBL(atan) |
2245 | MUTATE_LDBL(asinh) |
2246 | MUTATE_LDBL(acosh) |
2247 | MUTATE_LDBL(atanh) |
2248 | MUTATE_LDBL(atan2) |
2249 | MUTATE_LDBL(erf) |
2250 | MUTATE_LDBL(erfc) |
2251 | MUTATE_LDBL(ldexp) |
2252 | MUTATE_LDBL(frexp) |
2253 | MUTATE_LDBL(huge_val) |
2254 | MUTATE_LDBL(copysign) |
2255 | MUTATE_LDBL(nextafter) |
2256 | MUTATE_LDBL(nexttoward) |
2257 | MUTATE_LDBL(remainder) |
2258 | MUTATE_LDBL(remquo) |
2259 | MUTATE_LDBL(scalbln) |
2260 | MUTATE_LDBL(scalbn) |
2261 | MUTATE_LDBL(tgamma) |
2262 | MUTATE_LDBL(lgamma) |
2263 | #undef MUTATE_LDBL |
2264 | default: |
2265 | return BuiltinID; |
2266 | } |
2267 | } |
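 | 
 | // NOTE (editorial sketch, not part of the original file): MUTATE_LDBL relies
 | // on token pasting, so MUTATE_LDBL(sqrt) expands to the pair of enumerators
 | // BI__builtin_sqrtl and BI__builtin_sqrtf128. The same preprocessor trick in
 | // a standalone form, using string pasting instead of enumerator names:
 | #define DEMO_LDBL_NAME(func) ("__builtin_" #func "f128")
 | [[maybe_unused]] static constexpr const char *DemoSqrtF128Name =
 |     DEMO_LDBL_NAME(sqrt); // == "__builtin_sqrtf128"
 | #undef DEMO_LDBL_NAME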
2268 | |
2269 | RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, |
2270 | const CallExpr *E, |
2271 | ReturnValueSlot ReturnValue) { |
2272 | const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
2273 | // See if we can constant fold this builtin. If so, don't emit it at all. |
2274 | // TODO: Extend this handling to all builtin calls that we can constant-fold. |
2275 | Expr::EvalResult Result; |
2276 | if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) && |
2277 | !Result.hasSideEffects()) { |
2278 | if (Result.Val.isInt()) |
2279 | return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
2280 | Result.Val.getInt())); |
2281 | if (Result.Val.isFloat()) |
2282 | return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
2283 | Result.Val.getFloat())); |
2284 | } |
2285 | |
2286 | // If the current long-double semantics are IEEE 128-bit, replace math
2287 | // builtins of long-double with their f128 equivalents.
2288 | // TODO: This mutation should also be applied to targets other than PPC, once
2289 | // the backend supports IEEE 128-bit style libcalls.
2290 | if (getTarget().getTriple().isPPC64() && |
2291 | &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) |
2292 | BuiltinID = mutateLongDoubleBuiltin(BuiltinID); |
2293 | |
2294 | // If the builtin has been declared explicitly with an assembler label, |
2295 | // disable the specialized emitting below. Ideally we should communicate the |
2296 | // rename in IR, or at least avoid generating the intrinsic calls that are |
2297 | // likely to get lowered to the renamed library functions. |
2298 | const unsigned BuiltinIDIfNoAsmLabel = |
2299 | FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID; |
2300 | |
2301 | // There are LLVM math intrinsics/instructions corresponding to math library
2302 | // functions, except that the LLVM op will never set errno while the math
2303 | // library might. Also, math builtins have the same semantics as their math
2304 | // library twins. Thus, we can transform math library and builtin calls to
2305 | // their LLVM counterparts if the call is marked 'const' (known to never set errno).
2306 | if (FD->hasAttr<ConstAttr>()) { |
2307 | switch (BuiltinIDIfNoAsmLabel) { |
2308 | case Builtin::BIceil: |
2309 | case Builtin::BIceilf: |
2310 | case Builtin::BIceill: |
2311 | case Builtin::BI__builtin_ceil: |
2312 | case Builtin::BI__builtin_ceilf: |
2313 | case Builtin::BI__builtin_ceilf16: |
2314 | case Builtin::BI__builtin_ceill: |
2315 | case Builtin::BI__builtin_ceilf128: |
2316 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2317 | Intrinsic::ceil, |
2318 | Intrinsic::experimental_constrained_ceil)); |
2319 | |
2320 | case Builtin::BIcopysign: |
2321 | case Builtin::BIcopysignf: |
2322 | case Builtin::BIcopysignl: |
2323 | case Builtin::BI__builtin_copysign: |
2324 | case Builtin::BI__builtin_copysignf: |
2325 | case Builtin::BI__builtin_copysignf16: |
2326 | case Builtin::BI__builtin_copysignl: |
2327 | case Builtin::BI__builtin_copysignf128: |
2328 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
2329 | |
2330 | case Builtin::BIcos: |
2331 | case Builtin::BIcosf: |
2332 | case Builtin::BIcosl: |
2333 | case Builtin::BI__builtin_cos: |
2334 | case Builtin::BI__builtin_cosf: |
2335 | case Builtin::BI__builtin_cosf16: |
2336 | case Builtin::BI__builtin_cosl: |
2337 | case Builtin::BI__builtin_cosf128: |
2338 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2339 | Intrinsic::cos, |
2340 | Intrinsic::experimental_constrained_cos)); |
2341 | |
2342 | case Builtin::BIexp: |
2343 | case Builtin::BIexpf: |
2344 | case Builtin::BIexpl: |
2345 | case Builtin::BI__builtin_exp: |
2346 | case Builtin::BI__builtin_expf: |
2347 | case Builtin::BI__builtin_expf16: |
2348 | case Builtin::BI__builtin_expl: |
2349 | case Builtin::BI__builtin_expf128: |
2350 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2351 | Intrinsic::exp, |
2352 | Intrinsic::experimental_constrained_exp)); |
2353 | |
2354 | case Builtin::BIexp2: |
2355 | case Builtin::BIexp2f: |
2356 | case Builtin::BIexp2l: |
2357 | case Builtin::BI__builtin_exp2: |
2358 | case Builtin::BI__builtin_exp2f: |
2359 | case Builtin::BI__builtin_exp2f16: |
2360 | case Builtin::BI__builtin_exp2l: |
2361 | case Builtin::BI__builtin_exp2f128: |
2362 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2363 | Intrinsic::exp2, |
2364 | Intrinsic::experimental_constrained_exp2)); |
2365 | |
2366 | case Builtin::BIfabs: |
2367 | case Builtin::BIfabsf: |
2368 | case Builtin::BIfabsl: |
2369 | case Builtin::BI__builtin_fabs: |
2370 | case Builtin::BI__builtin_fabsf: |
2371 | case Builtin::BI__builtin_fabsf16: |
2372 | case Builtin::BI__builtin_fabsl: |
2373 | case Builtin::BI__builtin_fabsf128: |
2374 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
2375 | |
2376 | case Builtin::BIfloor: |
2377 | case Builtin::BIfloorf: |
2378 | case Builtin::BIfloorl: |
2379 | case Builtin::BI__builtin_floor: |
2380 | case Builtin::BI__builtin_floorf: |
2381 | case Builtin::BI__builtin_floorf16: |
2382 | case Builtin::BI__builtin_floorl: |
2383 | case Builtin::BI__builtin_floorf128: |
2384 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2385 | Intrinsic::floor, |
2386 | Intrinsic::experimental_constrained_floor)); |
2387 | |
2388 | case Builtin::BIfma: |
2389 | case Builtin::BIfmaf: |
2390 | case Builtin::BIfmal: |
2391 | case Builtin::BI__builtin_fma: |
2392 | case Builtin::BI__builtin_fmaf: |
2393 | case Builtin::BI__builtin_fmaf16: |
2394 | case Builtin::BI__builtin_fmal: |
2395 | case Builtin::BI__builtin_fmaf128: |
2396 | return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, |
2397 | Intrinsic::fma, |
2398 | Intrinsic::experimental_constrained_fma)); |
2399 | |
2400 | case Builtin::BIfmax: |
2401 | case Builtin::BIfmaxf: |
2402 | case Builtin::BIfmaxl: |
2403 | case Builtin::BI__builtin_fmax: |
2404 | case Builtin::BI__builtin_fmaxf: |
2405 | case Builtin::BI__builtin_fmaxf16: |
2406 | case Builtin::BI__builtin_fmaxl: |
2407 | case Builtin::BI__builtin_fmaxf128: |
2408 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2409 | Intrinsic::maxnum, |
2410 | Intrinsic::experimental_constrained_maxnum)); |
2411 | |
2412 | case Builtin::BIfmin: |
2413 | case Builtin::BIfminf: |
2414 | case Builtin::BIfminl: |
2415 | case Builtin::BI__builtin_fmin: |
2416 | case Builtin::BI__builtin_fminf: |
2417 | case Builtin::BI__builtin_fminf16: |
2418 | case Builtin::BI__builtin_fminl: |
2419 | case Builtin::BI__builtin_fminf128: |
2420 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2421 | Intrinsic::minnum, |
2422 | Intrinsic::experimental_constrained_minnum)); |
2423 | |
2424 | // fmod() is a special case. It maps to the frem instruction rather than an
2425 | // LLVM intrinsic.
2426 | case Builtin::BIfmod: |
2427 | case Builtin::BIfmodf: |
2428 | case Builtin::BIfmodl: |
2429 | case Builtin::BI__builtin_fmod: |
2430 | case Builtin::BI__builtin_fmodf: |
2431 | case Builtin::BI__builtin_fmodf16: |
2432 | case Builtin::BI__builtin_fmodl: |
2433 | case Builtin::BI__builtin_fmodf128: { |
2434 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2435 | Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
2436 | Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
2437 | return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); |
2438 | } |
2439 | |
2440 | case Builtin::BIlog: |
2441 | case Builtin::BIlogf: |
2442 | case Builtin::BIlogl: |
2443 | case Builtin::BI__builtin_log: |
2444 | case Builtin::BI__builtin_logf: |
2445 | case Builtin::BI__builtin_logf16: |
2446 | case Builtin::BI__builtin_logl: |
2447 | case Builtin::BI__builtin_logf128: |
2448 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2449 | Intrinsic::log, |
2450 | Intrinsic::experimental_constrained_log)); |
2451 | |
2452 | case Builtin::BIlog10: |
2453 | case Builtin::BIlog10f: |
2454 | case Builtin::BIlog10l: |
2455 | case Builtin::BI__builtin_log10: |
2456 | case Builtin::BI__builtin_log10f: |
2457 | case Builtin::BI__builtin_log10f16: |
2458 | case Builtin::BI__builtin_log10l: |
2459 | case Builtin::BI__builtin_log10f128: |
2460 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2461 | Intrinsic::log10, |
2462 | Intrinsic::experimental_constrained_log10)); |
2463 | |
2464 | case Builtin::BIlog2: |
2465 | case Builtin::BIlog2f: |
2466 | case Builtin::BIlog2l: |
2467 | case Builtin::BI__builtin_log2: |
2468 | case Builtin::BI__builtin_log2f: |
2469 | case Builtin::BI__builtin_log2f16: |
2470 | case Builtin::BI__builtin_log2l: |
2471 | case Builtin::BI__builtin_log2f128: |
2472 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2473 | Intrinsic::log2, |
2474 | Intrinsic::experimental_constrained_log2)); |
2475 | |
2476 | case Builtin::BInearbyint: |
2477 | case Builtin::BInearbyintf: |
2478 | case Builtin::BInearbyintl: |
2479 | case Builtin::BI__builtin_nearbyint: |
2480 | case Builtin::BI__builtin_nearbyintf: |
2481 | case Builtin::BI__builtin_nearbyintl: |
2482 | case Builtin::BI__builtin_nearbyintf128: |
2483 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2484 | Intrinsic::nearbyint, |
2485 | Intrinsic::experimental_constrained_nearbyint)); |
2486 | |
2487 | case Builtin::BIpow: |
2488 | case Builtin::BIpowf: |
2489 | case Builtin::BIpowl: |
2490 | case Builtin::BI__builtin_pow: |
2491 | case Builtin::BI__builtin_powf: |
2492 | case Builtin::BI__builtin_powf16: |
2493 | case Builtin::BI__builtin_powl: |
2494 | case Builtin::BI__builtin_powf128: |
2495 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2496 | Intrinsic::pow, |
2497 | Intrinsic::experimental_constrained_pow)); |
2498 | |
2499 | case Builtin::BIrint: |
2500 | case Builtin::BIrintf: |
2501 | case Builtin::BIrintl: |
2502 | case Builtin::BI__builtin_rint: |
2503 | case Builtin::BI__builtin_rintf: |
2504 | case Builtin::BI__builtin_rintf16: |
2505 | case Builtin::BI__builtin_rintl: |
2506 | case Builtin::BI__builtin_rintf128: |
2507 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2508 | Intrinsic::rint, |
2509 | Intrinsic::experimental_constrained_rint)); |
2510 | |
2511 | case Builtin::BIround: |
2512 | case Builtin::BIroundf: |
2513 | case Builtin::BIroundl: |
2514 | case Builtin::BI__builtin_round: |
2515 | case Builtin::BI__builtin_roundf: |
2516 | case Builtin::BI__builtin_roundf16: |
2517 | case Builtin::BI__builtin_roundl: |
2518 | case Builtin::BI__builtin_roundf128: |
2519 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2520 | Intrinsic::round, |
2521 | Intrinsic::experimental_constrained_round)); |
2522 | |
2523 | case Builtin::BIsin: |
2524 | case Builtin::BIsinf: |
2525 | case Builtin::BIsinl: |
2526 | case Builtin::BI__builtin_sin: |
2527 | case Builtin::BI__builtin_sinf: |
2528 | case Builtin::BI__builtin_sinf16: |
2529 | case Builtin::BI__builtin_sinl: |
2530 | case Builtin::BI__builtin_sinf128: |
2531 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2532 | Intrinsic::sin, |
2533 | Intrinsic::experimental_constrained_sin)); |
2534 | |
2535 | case Builtin::BIsqrt: |
2536 | case Builtin::BIsqrtf: |
2537 | case Builtin::BIsqrtl: |
2538 | case Builtin::BI__builtin_sqrt: |
2539 | case Builtin::BI__builtin_sqrtf: |
2540 | case Builtin::BI__builtin_sqrtf16: |
2541 | case Builtin::BI__builtin_sqrtl: |
2542 | case Builtin::BI__builtin_sqrtf128: |
2543 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2544 | Intrinsic::sqrt, |
2545 | Intrinsic::experimental_constrained_sqrt)); |
2546 | |
2547 | case Builtin::BItrunc: |
2548 | case Builtin::BItruncf: |
2549 | case Builtin::BItruncl: |
2550 | case Builtin::BI__builtin_trunc: |
2551 | case Builtin::BI__builtin_truncf: |
2552 | case Builtin::BI__builtin_truncf16: |
2553 | case Builtin::BI__builtin_truncl: |
2554 | case Builtin::BI__builtin_truncf128: |
2555 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2556 | Intrinsic::trunc, |
2557 | Intrinsic::experimental_constrained_trunc)); |
2558 | |
2559 | case Builtin::BIlround: |
2560 | case Builtin::BIlroundf: |
2561 | case Builtin::BIlroundl: |
2562 | case Builtin::BI__builtin_lround: |
2563 | case Builtin::BI__builtin_lroundf: |
2564 | case Builtin::BI__builtin_lroundl: |
2565 | case Builtin::BI__builtin_lroundf128: |
2566 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2567 | *this, E, Intrinsic::lround, |
2568 | Intrinsic::experimental_constrained_lround)); |
2569 | |
2570 | case Builtin::BIllround: |
2571 | case Builtin::BIllroundf: |
2572 | case Builtin::BIllroundl: |
2573 | case Builtin::BI__builtin_llround: |
2574 | case Builtin::BI__builtin_llroundf: |
2575 | case Builtin::BI__builtin_llroundl: |
2576 | case Builtin::BI__builtin_llroundf128: |
2577 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2578 | *this, E, Intrinsic::llround, |
2579 | Intrinsic::experimental_constrained_llround)); |
2580 | |
2581 | case Builtin::BIlrint: |
2582 | case Builtin::BIlrintf: |
2583 | case Builtin::BIlrintl: |
2584 | case Builtin::BI__builtin_lrint: |
2585 | case Builtin::BI__builtin_lrintf: |
2586 | case Builtin::BI__builtin_lrintl: |
2587 | case Builtin::BI__builtin_lrintf128: |
2588 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2589 | *this, E, Intrinsic::lrint, |
2590 | Intrinsic::experimental_constrained_lrint)); |
2591 | |
2592 | case Builtin::BIllrint: |
2593 | case Builtin::BIllrintf: |
2594 | case Builtin::BIllrintl: |
2595 | case Builtin::BI__builtin_llrint: |
2596 | case Builtin::BI__builtin_llrintf: |
2597 | case Builtin::BI__builtin_llrintl: |
2598 | case Builtin::BI__builtin_llrintf128: |
2599 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2600 | *this, E, Intrinsic::llrint, |
2601 | Intrinsic::experimental_constrained_llrint)); |
2602 | |
2603 | default: |
2604 | break; |
2605 | } |
2606 | } |
2607 | |
2608 | switch (BuiltinIDIfNoAsmLabel) { |
2609 | default: break; |
2610 | case Builtin::BI__builtin___CFStringMakeConstantString: |
2611 | case Builtin::BI__builtin___NSStringMakeConstantString: |
2612 | return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); |
2613 | case Builtin::BI__builtin_stdarg_start: |
2614 | case Builtin::BI__builtin_va_start: |
2615 | case Builtin::BI__va_start: |
2616 | case Builtin::BI__builtin_va_end: |
2617 | return RValue::get( |
2618 | EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
2619 | ? EmitScalarExpr(E->getArg(0)) |
2620 | : EmitVAListRef(E->getArg(0)).getPointer(), |
2621 | BuiltinID != Builtin::BI__builtin_va_end)); |
2622 | case Builtin::BI__builtin_va_copy: { |
2623 | Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
2624 | Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
2625 | |
2626 | llvm::Type *Type = Int8PtrTy; |
2627 | |
2628 | DstPtr = Builder.CreateBitCast(DstPtr, Type); |
2629 | SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
2630 | return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
2631 | {DstPtr, SrcPtr})); |
2632 | } |
2633 | case Builtin::BI__builtin_abs: |
2634 | case Builtin::BI__builtin_labs: |
2635 | case Builtin::BI__builtin_llabs: { |
2636 | // X < 0 ? -X : X |
2637 | // The negation has 'nsw' because abs of INT_MIN is undefined. |
2638 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2639 | Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg"); |
2640 | Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType()); |
2641 | Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond"); |
2642 | Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs"); |
2643 | return RValue::get(Result); |
2644 | } |
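 | // NOTE (editorial sketch, not in the original source): the IR built above is
 | // the branchless select form of abs; in C++ terms (kept in a comment so the
 | // listing stays valid inside this switch):
 | //   int32_t absModel(int32_t X) {
 | //     return X < 0 ? -X : X; // -X overflows on INT_MIN, matching 'nsw'
 | //   }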
2645 | case Builtin::BI__builtin_complex: { |
2646 | Value *Real = EmitScalarExpr(E->getArg(0)); |
2647 | Value *Imag = EmitScalarExpr(E->getArg(1)); |
2648 | return RValue::getComplex({Real, Imag}); |
2649 | } |
2650 | case Builtin::BI__builtin_conj: |
2651 | case Builtin::BI__builtin_conjf: |
2652 | case Builtin::BI__builtin_conjl: |
2653 | case Builtin::BIconj: |
2654 | case Builtin::BIconjf: |
2655 | case Builtin::BIconjl: { |
2656 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2657 | Value *Real = ComplexVal.first; |
2658 | Value *Imag = ComplexVal.second; |
2659 | Imag = Builder.CreateFNeg(Imag, "neg"); |
2660 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2661 | } |
2662 | case Builtin::BI__builtin_creal: |
2663 | case Builtin::BI__builtin_crealf: |
2664 | case Builtin::BI__builtin_creall: |
2665 | case Builtin::BIcreal: |
2666 | case Builtin::BIcrealf: |
2667 | case Builtin::BIcreall: { |
2668 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2669 | return RValue::get(ComplexVal.first); |
2670 | } |
2671 | |
2672 | case Builtin::BI__builtin_dump_struct: { |
2673 | llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy); |
2674 | llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get( |
2675 | LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true); |
2676 | |
2677 | Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts()); |
2678 | CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment(); |
2679 | |
2680 | const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts(); |
2681 | QualType Arg0Type = Arg0->getType()->getPointeeType(); |
2682 | |
2683 | Value *RecordPtr = EmitScalarExpr(Arg0); |
2684 | LValue RecordLV = MakeAddrLValue(RecordPtr, Arg0Type, Arg0Align); |
2685 | Value *Res = dumpRecord(*this, Arg0Type, RecordLV, Arg0Align, |
2686 | {LLVMFuncType, Func}, 0); |
2687 | return RValue::get(Res); |
2688 | } |
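 | // NOTE (editorial sketch, not in the original source): a typical call site
 | // for the builtin handled above, per the clang documentation:
 | //   struct Point { int X, Y; };
 | //   void Dump(const Point *P) { __builtin_dump_struct(P, &printf); }
 | // Each field is printed through the supplied printf-like callback.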
2689 | |
2690 | case Builtin::BI__builtin_preserve_access_index: { |
2691 | // Only enable the preserved access index region when debuginfo
2692 | // is available, as debuginfo is needed to preserve the user-level
2693 | // access pattern.
2694 | if (!getDebugInfo()) { |
2695 | CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g"); |
2696 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2697 | } |
2698 | |
2699 | // Nested builtin_preserve_access_index() is not supported.
2700 | if (IsInPreservedAIRegion) { |
2701 | CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported"); |
2702 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2703 | } |
2704 | |
2705 | IsInPreservedAIRegion = true; |
2706 | Value *Res = EmitScalarExpr(E->getArg(0)); |
2707 | IsInPreservedAIRegion = false; |
2708 | return RValue::get(Res); |
2709 | } |
2710 | |
2711 | case Builtin::BI__builtin_cimag: |
2712 | case Builtin::BI__builtin_cimagf: |
2713 | case Builtin::BI__builtin_cimagl: |
2714 | case Builtin::BIcimag: |
2715 | case Builtin::BIcimagf: |
2716 | case Builtin::BIcimagl: { |
2717 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2718 | return RValue::get(ComplexVal.second); |
2719 | } |
2720 | |
2721 | case Builtin::BI__builtin_clrsb: |
2722 | case Builtin::BI__builtin_clrsbl: |
2723 | case Builtin::BI__builtin_clrsbll: { |
2724 | // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2725 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2726 | |
2727 | llvm::Type *ArgType = ArgValue->getType(); |
2728 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2729 | |
2730 | llvm::Type *ResultType = ConvertType(E->getType()); |
2731 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
2732 | Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg"); |
2733 | Value *Inverse = Builder.CreateNot(ArgValue, "not"); |
2734 | Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue); |
2735 | Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()}); |
2736 | Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1)); |
2737 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2738 | "cast"); |
2739 | return RValue::get(Result); |
2740 | } |
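 | // NOTE (editorial sketch, not in the original source): clrsb counts the
 | // redundant sign bits that follow the sign bit. Worked i32 examples of the
 | // select-then-ctlz sequence above:
 | //   clrsb(3)  = ctlz(3)     - 1 = 30 - 1 = 29
 | //   clrsb(-4) = ctlz(~(-4)) - 1 = ctlz(3) - 1 = 29
 | //   clrsb(0)  = ctlz(0)     - 1 = 32 - 1 = 31 (ctlz is defined at zero here)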
2741 | case Builtin::BI__builtin_ctzs: |
2742 | case Builtin::BI__builtin_ctz: |
2743 | case Builtin::BI__builtin_ctzl: |
2744 | case Builtin::BI__builtin_ctzll: { |
2745 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); |
2746 | |
2747 | llvm::Type *ArgType = ArgValue->getType(); |
2748 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
2749 | |
2750 | llvm::Type *ResultType = ConvertType(E->getType()); |
2751 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
2752 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
2753 | if (Result->getType() != ResultType) |
2754 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2755 | "cast"); |
2756 | return RValue::get(Result); |
2757 | } |
2758 | case Builtin::BI__builtin_clzs: |
2759 | case Builtin::BI__builtin_clz: |
2760 | case Builtin::BI__builtin_clzl: |
2761 | case Builtin::BI__builtin_clzll: { |
2762 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); |
2763 | |
2764 | llvm::Type *ArgType = ArgValue->getType(); |
2765 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2766 | |
2767 | llvm::Type *ResultType = ConvertType(E->getType()); |
2768 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
2769 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
2770 | if (Result->getType() != ResultType) |
2771 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2772 | "cast"); |
2773 | return RValue::get(Result); |
2774 | } |
2775 | case Builtin::BI__builtin_ffs: |
2776 | case Builtin::BI__builtin_ffsl: |
2777 | case Builtin::BI__builtin_ffsll: { |
2778 | // ffs(x) -> x ? cttz(x) + 1 : 0 |
2779 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2780 | |
2781 | llvm::Type *ArgType = ArgValue->getType(); |
2782 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
2783 | |
2784 | llvm::Type *ResultType = ConvertType(E->getType()); |
2785 | Value *Tmp = |
2786 | Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
2787 | llvm::ConstantInt::get(ArgType, 1)); |
2788 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
2789 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
2790 | Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
2791 | if (Result->getType() != ResultType) |
2792 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2793 | "cast"); |
2794 | return RValue::get(Result); |
2795 | } |
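 | // NOTE (editorial sketch, not in the original source): the select above
 | // implements the POSIX ffs contract; a C++ model, kept in a comment:
 | //   int ffsModel(uint32_t X) {
 | //     return X ? __builtin_ctz(X) + 1 : 0; // e.g. ffsModel(8) == 3 + 1 == 4
 | //   }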
2796 | case Builtin::BI__builtin_parity: |
2797 | case Builtin::BI__builtin_parityl: |
2798 | case Builtin::BI__builtin_parityll: { |
2799 | // parity(x) -> ctpop(x) & 1 |
2800 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2801 | |
2802 | llvm::Type *ArgType = ArgValue->getType(); |
2803 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
2804 | |
2805 | llvm::Type *ResultType = ConvertType(E->getType()); |
2806 | Value *Tmp = Builder.CreateCall(F, ArgValue); |
2807 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
2808 | if (Result->getType() != ResultType) |
2809 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2810 | "cast"); |
2811 | return RValue::get(Result); |
2812 | } |
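 | // NOTE (editorial sketch, not in the original source): parity is the low bit
 | // of the population count; a C++ model, kept in a comment:
 | //   unsigned parityModel(uint32_t X) {
 | //     return __builtin_popcount(X) & 1u; // parityModel(0b1011u) == 1
 | //   }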
2813 | case Builtin::BI__lzcnt16: |
2814 | case Builtin::BI__lzcnt: |
2815 | case Builtin::BI__lzcnt64: { |
2816 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2817 | |
2818 | llvm::Type *ArgType = ArgValue->getType(); |
2819 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2820 | |
2821 | llvm::Type *ResultType = ConvertType(E->getType()); |
2822 | Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()}); |
2823 | if (Result->getType() != ResultType) |
2824 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2825 | "cast"); |
2826 | return RValue::get(Result); |
2827 | } |
2828 | case Builtin::BI__popcnt16: |
2829 | case Builtin::BI__popcnt: |
2830 | case Builtin::BI__popcnt64: |
2831 | case Builtin::BI__builtin_popcount: |
2832 | case Builtin::BI__builtin_popcountl: |
2833 | case Builtin::BI__builtin_popcountll: { |
2834 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2835 | |
2836 | llvm::Type *ArgType = ArgValue->getType(); |
2837 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
2838 | |
2839 | llvm::Type *ResultType = ConvertType(E->getType()); |
2840 | Value *Result = Builder.CreateCall(F, ArgValue); |
2841 | if (Result->getType() != ResultType) |
2842 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2843 | "cast"); |
2844 | return RValue::get(Result); |
2845 | } |
2846 | case Builtin::BI__builtin_unpredictable: { |
2847 | // Always return the argument of __builtin_unpredictable. LLVM does not |
2848 | // handle this builtin. Metadata for this builtin should be added directly |
2849 | // to instructions such as branches or switches that use it. |
2850 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2851 | } |
2852 | case Builtin::BI__builtin_expect: { |
2853 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2854 | llvm::Type *ArgType = ArgValue->getType(); |
2855 | |
2856 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
2857 | // Don't generate llvm.expect on -O0 as the backend won't use it for |
2858 | // anything. |
2859 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
2860 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2861 | return RValue::get(ArgValue); |
2862 | |
2863 | Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
2864 | Value *Result = |
2865 | Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
2866 | return RValue::get(Result); |
2867 | } |
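 | // NOTE (editorial sketch, not in the original source): a typical call site
 | // for the builtin handled above; at -O1 and higher it becomes llvm.expect,
 | // which feeds branch-weight metadata:
 | //   if (__builtin_expect(Ptr == nullptr, 0))
 | //     HandleRareError(); // hint: this branch is almost never taken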
2868 | case Builtin::BI__builtin_expect_with_probability: { |
2869 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2870 | llvm::Type *ArgType = ArgValue->getType(); |
2871 | |
2872 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
2873 | llvm::APFloat Probability(0.0); |
2874 | const Expr *ProbArg = E->getArg(2); |
2875 | bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext()); |
2876 | assert(EvalSucceed && "probability should be able to evaluate as float");
2877 | (void)EvalSucceed; |
2878 | bool LoseInfo = false; |
2879 | Probability.convert(llvm::APFloat::IEEEdouble(), |
2880 | llvm::RoundingMode::Dynamic, &LoseInfo); |
2881 | llvm::Type *Ty = ConvertType(ProbArg->getType()); |
2882 | Constant *Confidence = ConstantFP::get(Ty, Probability); |
2883 | // Don't generate llvm.expect.with.probability on -O0 as the backend |
2884 | // won't use it for anything. |
2885 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
2886 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2887 | return RValue::get(ArgValue); |
2888 | |
2889 | Function *FnExpect = |
2890 | CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType); |
2891 | Value *Result = Builder.CreateCall( |
2892 | FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval"); |
2893 | return RValue::get(Result); |
2894 | } |
2895 | case Builtin::BI__builtin_assume_aligned: { |
2896 | const Expr *Ptr = E->getArg(0); |
2897 | Value *PtrValue = EmitScalarExpr(Ptr); |
2898 | Value *OffsetValue = |
2899 | (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
2900 | |
2901 | Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
2902 | ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
2903 | if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
2904 | AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
2905 | llvm::Value::MaximumAlignment); |
2906 | |
2907 | emitAlignmentAssumption(PtrValue, Ptr, |
2908 | /*The expr loc is sufficient.*/ SourceLocation(), |
2909 | AlignmentCI, OffsetValue); |
2910 | return RValue::get(PtrValue); |
2911 | } |
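 | // NOTE (editorial sketch, not in the original source): a typical call site;
 | // the assumption emitted above becomes an llvm.assume with an "align"
 | // operand bundle that later optimizations can exploit:
 | //   float *Aligned = (float *)__builtin_assume_aligned(P, 64);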
2912 | case Builtin::BI__assume: |
2913 | case Builtin::BI__builtin_assume: { |
2914 | if (E->getArg(0)->HasSideEffects(getContext())) |
2915 | return RValue::get(nullptr); |
2916 | |
2917 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2918 | Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
2919 | return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
2920 | } |
2921 | case Builtin::BI__arithmetic_fence: { |
2922 | // Create the builtin call if FastMath is selected and the target
2923 | // supports the builtin; otherwise just return the argument.
2924 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2925 | llvm::FastMathFlags FMF = Builder.getFastMathFlags(); |
2926 | bool isArithmeticFenceEnabled = |
2927 | FMF.allowReassoc() && |
2928 | getContext().getTargetInfo().checkArithmeticFenceSupported(); |
2929 | QualType ArgType = E->getArg(0)->getType(); |
2930 | if (ArgType->isComplexType()) { |
2931 | if (isArithmeticFenceEnabled) { |
2932 | QualType ElementType = ArgType->castAs<ComplexType>()->getElementType(); |
2933 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2934 | Value *Real = Builder.CreateArithmeticFence(ComplexVal.first, |
2935 | ConvertType(ElementType)); |
2936 | Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second, |
2937 | ConvertType(ElementType)); |
2938 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2939 | } |
2940 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2941 | Value *Real = ComplexVal.first; |
2942 | Value *Imag = ComplexVal.second; |
2943 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2944 | } |
2945 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2946 | if (isArithmeticFenceEnabled) |
2947 | return RValue::get( |
2948 | Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType))); |
2949 | return RValue::get(ArgValue); |
2950 | } |
2951 | case Builtin::BI__builtin_bswap16: |
2952 | case Builtin::BI__builtin_bswap32: |
2953 | case Builtin::BI__builtin_bswap64: |
2954 | case Builtin::BI_byteswap_ushort: |
2955 | case Builtin::BI_byteswap_ulong: |
2956 | case Builtin::BI_byteswap_uint64: { |
2957 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
2958 | } |
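 | // NOTE (editorial sketch, not in the original source): all of these lower to
 | // llvm.bswap; for example __builtin_bswap16(0x1234) == 0x3412.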
2959 | case Builtin::BI__builtin_bitreverse8: |
2960 | case Builtin::BI__builtin_bitreverse16: |
2961 | case Builtin::BI__builtin_bitreverse32: |
2962 | case Builtin::BI__builtin_bitreverse64: { |
2963 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
2964 | } |
2965 | case Builtin::BI__builtin_rotateleft8: |
2966 | case Builtin::BI__builtin_rotateleft16: |
2967 | case Builtin::BI__builtin_rotateleft32: |
2968 | case Builtin::BI__builtin_rotateleft64: |
2969 | case Builtin::BI_rotl8: // Microsoft variants of rotate left |
2970 | case Builtin::BI_rotl16: |
2971 | case Builtin::BI_rotl: |
2972 | case Builtin::BI_lrotl: |
2973 | case Builtin::BI_rotl64: |
2974 | return emitRotate(E, false); |
2975 | |
2976 | case Builtin::BI__builtin_rotateright8: |
2977 | case Builtin::BI__builtin_rotateright16: |
2978 | case Builtin::BI__builtin_rotateright32: |
2979 | case Builtin::BI__builtin_rotateright64: |
2980 | case Builtin::BI_rotr8: // Microsoft variants of rotate right |
2981 | case Builtin::BI_rotr16: |
2982 | case Builtin::BI_rotr: |
2983 | case Builtin::BI_lrotr: |
2984 | case Builtin::BI_rotr64: |
2985 | return emitRotate(E, true); |
2986 | |
2987 | case Builtin::BI__builtin_constant_p: { |
2988 | llvm::Type *ResultType = ConvertType(E->getType()); |
2989 | |
2990 | const Expr *Arg = E->getArg(0); |
2991 | QualType ArgType = Arg->getType(); |
2992 | // FIXME: The allowance for Obj-C pointers and block pointers is historical |
2993 | // and likely a mistake. |
2994 | if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && |
2995 | !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) |
2996 | // Per the GCC documentation, only numeric constants are recognized after |
2997 | // inlining. |
2998 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2999 | |
3000 | if (Arg->HasSideEffects(getContext())) |
3001 | // The argument is unevaluated, so be conservative if it might have |
3002 | // side-effects. |
3003 | return RValue::get(ConstantInt::get(ResultType, 0)); |
3004 | |
3005 | Value *ArgValue = EmitScalarExpr(Arg); |
3006 | if (ArgType->isObjCObjectPointerType()) { |
3007 | // Convert Objective-C objects to id because we cannot distinguish between |
3008 | // LLVM types for Obj-C classes as they are opaque. |
3009 | ArgType = CGM.getContext().getObjCIdType(); |
3010 | ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType)); |
3011 | } |
3012 | Function *F = |
3013 | CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType)); |
3014 | Value *Result = Builder.CreateCall(F, ArgValue); |
3015 | if (Result->getType() != ResultType) |
3016 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false); |
3017 | return RValue::get(Result); |
3018 | } |
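 | // NOTE (editorial sketch, not in the original source): because the answer is
 | // deferred to the optimizer via llvm.is.constant, a call such as
 | //   int Known = __builtin_constant_p(X * 2);
 | // folds to 1 only if X becomes a numeric constant after inlining, else to 0.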
3019 | case Builtin::BI__builtin_dynamic_object_size: |
3020 | case Builtin::BI__builtin_object_size: { |
3021 | unsigned Type = |
3022 | E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
3023 | auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
3024 | |
3025 | // We pass this builtin onto the optimizer so that it can figure out the |
3026 | // object size in more complex cases. |
3027 | bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; |
3028 | return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, |
3029 | /*EmittedE=*/nullptr, IsDynamic)); |
3030 | } |
3031 | case Builtin::BI__builtin_prefetch: { |
3032 | Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
3033 | // FIXME: Technically these constants should be of type 'int', yes?
3034 | RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
3035 | llvm::ConstantInt::get(Int32Ty, 0); |
3036 | Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
3037 | llvm::ConstantInt::get(Int32Ty, 3); |
3038 | Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
3039 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
3040 | return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
3041 | } |
3042 | case Builtin::BI__builtin_readcyclecounter: { |
3043 | Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
3044 | return RValue::get(Builder.CreateCall(F)); |
3045 | } |
3046 | case Builtin::BI__builtin___clear_cache: { |
3047 | Value *Begin = EmitScalarExpr(E->getArg(0)); |
3048 | Value *End = EmitScalarExpr(E->getArg(1)); |
3049 | Function *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
3050 | return RValue::get(Builder.CreateCall(F, {Begin, End})); |
3051 | } |
3052 | case Builtin::BI__builtin_trap: |
3053 | return RValue::get(EmitTrapCall(Intrinsic::trap)); |
3054 | case Builtin::BI__debugbreak: |
3055 | return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
3056 | case Builtin::BI__builtin_unreachable: { |
3057 | EmitUnreachable(E->getExprLoc()); |
3058 | |
3059 | // We do need to preserve an insertion point. |
3060 | EmitBlock(createBasicBlock("unreachable.cont")); |
3061 | |
3062 | return RValue::get(nullptr); |
3063 | } |
3064 | |
3065 | case Builtin::BI__builtin_powi: |
3066 | case Builtin::BI__builtin_powif: |
3067 | case Builtin::BI__builtin_powil: { |
3068 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
3069 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
3070 | |
3071 | if (Builder.getIsFPConstrained()) { |
3072 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3073 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi, |
3074 | Src0->getType()); |
3075 | return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 })); |
3076 | } |
3077 | |
3078 | Function *F = CGM.getIntrinsic(Intrinsic::powi, |
3079 | { Src0->getType(), Src1->getType() }); |
3080 | return RValue::get(Builder.CreateCall(F, { Src0, Src1 })); |
3081 | } |
3082 | case Builtin::BI__builtin_isgreater: |
3083 | case Builtin::BI__builtin_isgreaterequal: |
3084 | case Builtin::BI__builtin_isless: |
3085 | case Builtin::BI__builtin_islessequal: |
3086 | case Builtin::BI__builtin_islessgreater: |
3087 | case Builtin::BI__builtin_isunordered: { |
3088 | // Ordered comparisons: we know the arguments to these are matching scalar |
3089 | // floating-point values.
3090 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3091 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3092 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
3093 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
3094 | |
3095 | switch (BuiltinID) { |
3096 | default: llvm_unreachable("Unknown ordered comparison");
3097 | case Builtin::BI__builtin_isgreater: |
3098 | LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
3099 | break; |
3100 | case Builtin::BI__builtin_isgreaterequal: |
3101 | LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
3102 | break; |
3103 | case Builtin::BI__builtin_isless: |
3104 | LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
3105 | break; |
3106 | case Builtin::BI__builtin_islessequal: |
3107 | LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
3108 | break; |
3109 | case Builtin::BI__builtin_islessgreater: |
3110 | LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
3111 | break; |
3112 | case Builtin::BI__builtin_isunordered: |
3113 | LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
3114 | break; |
3115 | } |
3116 | // ZExt bool to int type. |
3117 | return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
3118 | } |
3119 | case Builtin::BI__builtin_isnan: { |
3120 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3121 | Value *V = EmitScalarExpr(E->getArg(0)); |
3122 | llvm::Type *Ty = V->getType(); |
3123 | const llvm::fltSemantics &Semantics = Ty->getFltSemantics(); |
3124 | if (!Builder.getIsFPConstrained() || |
3125 | Builder.getDefaultConstrainedExcept() == fp::ebIgnore || |
3126 | !Ty->isIEEE()) { |
3127 | V = Builder.CreateFCmpUNO(V, V, "cmp"); |
3128 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3129 | } |
3130 | |
3131 | if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM)) |
3132 | return RValue::get(Result); |
3133 | |
3134 | // NaN has all exp bits set and a non-zero significand. Therefore:
3135 | // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0) |
3136 | unsigned bitsize = Ty->getScalarSizeInBits(); |
3137 | llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize); |
3138 | Value *IntV = Builder.CreateBitCast(V, IntTy); |
3139 | APInt AndMask = APInt::getSignedMaxValue(bitsize); |
3140 | Value *AbsV = |
3141 | Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask)); |
3142 | APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt(); |
3143 | Value *Sub = |
3144 | Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV); |
3145 | // V = sign bit (Sub) <=> V = (Sub < 0) |
3146 | V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1)); |
3147 | if (bitsize > 32) |
3148 | V = Builder.CreateTrunc(V, ConvertType(E->getType())); |
3149 | return RValue::get(V); |
3150 | } |
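 | // NOTE (editorial sketch, not in the original source): the integer sequence
 | // above, modeled on binary32 in C++ (kept in a comment):
 | //   bool isnan32Model(float F) {
 | //     uint32_t Bits;
 | //     std::memcpy(&Bits, &F, sizeof(Bits));    // the bitcast
 | //     uint32_t Abs = Bits & 0x7fffffffu;       // clear the sign bit
 | //     return (int32_t)(0x7f800000u - Abs) < 0; // 0x7f800000 = +inf bits
 | //   }
 | // Only NaNs have Abs strictly above the exponent mask, so only they make the
 | // subtraction negative.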
3151 | |
3152 | case Builtin::BI__builtin_elementwise_abs: { |
3153 | Value *Result; |
3154 | QualType QT = E->getArg(0)->getType(); |
3155 | |
3156 | if (auto *VecTy = QT->getAs<VectorType>()) |
3157 | QT = VecTy->getElementType(); |
3158 | if (QT->isIntegerType()) |
3159 | Result = Builder.CreateBinaryIntrinsic( |
3160 | llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)), |
3161 | Builder.getFalse(), nullptr, "elt.abs"); |
3162 | else |
3163 | Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs"); |
3164 | |
3165 | return RValue::get(Result); |
3166 | } |
3167 | |
3168 | case Builtin::BI__builtin_elementwise_ceil: |
3169 | return RValue::get( |
3170 | emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil")); |
3171 | case Builtin::BI__builtin_elementwise_floor: |
3172 | return RValue::get( |
3173 | emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor")); |
3174 | case Builtin::BI__builtin_elementwise_roundeven: |
3175 | return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven, |
3176 | "elt.roundeven")); |
3177 | case Builtin::BI__builtin_elementwise_trunc: |
3178 | return RValue::get( |
3179 | emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc")); |
3180 | |
3181 | case Builtin::BI__builtin_elementwise_add_sat: |
3182 | case Builtin::BI__builtin_elementwise_sub_sat: { |
3183 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
3184 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
3185 | Value *Result; |
3186 | assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
3187 | QualType Ty = E->getArg(0)->getType(); |
3188 | if (auto *VecTy = Ty->getAs<VectorType>()) |
3189 | Ty = VecTy->getElementType(); |
3190 | bool IsSigned = Ty->isSignedIntegerType(); |
3191 | unsigned Opc; |
3192 | if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat) |
3193 | Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat; |
3194 | else |
3195 | Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat; |
3196 | Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat"); |
3197 | return RValue::get(Result); |
3198 | } |
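 | // NOTE (editorial sketch, not in the original source): what sadd.sat computes
 | // for a scalar i32, in C++ (kept in a comment):
 | //   int32_t saddSatModel(int32_t A, int32_t B) {
 | //     int64_t S = (int64_t)A + B;          // widen so the sum cannot wrap
 | //     if (S > INT32_MAX) return INT32_MAX; // clamp instead of wrapping
 | //     if (S < INT32_MIN) return INT32_MIN;
 | //     return (int32_t)S;
 | //   }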
3199 | |
3200 | case Builtin::BI__builtin_elementwise_max: { |
3201 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
3202 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
3203 | Value *Result; |
3204 | if (Op0->getType()->isIntOrIntVectorTy()) { |
3205 | QualType Ty = E->getArg(0)->getType(); |
3206 | if (auto *VecTy = Ty->getAs<VectorType>()) |
3207 | Ty = VecTy->getElementType(); |
3208 | Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType() |
3209 | ? llvm::Intrinsic::smax |
3210 | : llvm::Intrinsic::umax, |
3211 | Op0, Op1, nullptr, "elt.max"); |
3212 | } else |
3213 | Result = Builder.CreateMaxNum(Op0, Op1, "elt.max"); |
3214 | return RValue::get(Result); |
3215 | } |
3216 | case Builtin::BI__builtin_elementwise_min: { |
3217 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
3218 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
3219 | Value *Result; |
3220 | if (Op0->getType()->isIntOrIntVectorTy()) { |
3221 | QualType Ty = E->getArg(0)->getType(); |
3222 | if (auto *VecTy = Ty->getAs<VectorType>()) |
3223 | Ty = VecTy->getElementType(); |
3224 | Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType() |
3225 | ? llvm::Intrinsic::smin |
3226 | : llvm::Intrinsic::umin, |
3227 | Op0, Op1, nullptr, "elt.min"); |
3228 | } else |
3229 | Result = Builder.CreateMinNum(Op0, Op1, "elt.min"); |
3230 | return RValue::get(Result); |
3231 | } |
3232 | |
3233 | case Builtin::BI__builtin_reduce_max: { |
3234 | auto GetIntrinsicID = [](QualType QT) { |
3235 | if (auto *VecTy = QT->getAs<VectorType>()) |
3236 | QT = VecTy->getElementType(); |
3237 | if (QT->isSignedIntegerType()) |
3238 | return llvm::Intrinsic::vector_reduce_smax; |
3239 | if (QT->isUnsignedIntegerType()) |
3240 | return llvm::Intrinsic::vector_reduce_umax; |
3241 | assert(QT->isFloatingType() && "must have a float here");
3242 | return llvm::Intrinsic::vector_reduce_fmax; |
3243 | }; |
3244 | return RValue::get(emitUnaryBuiltin( |
3245 | *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.max"));
3246 | } |
3247 | |
3248 | case Builtin::BI__builtin_reduce_min: { |
3249 | auto GetIntrinsicID = [](QualType QT) { |
3250 | if (auto *VecTy = QT->getAs<VectorType>()) |
3251 | QT = VecTy->getElementType(); |
3252 | if (QT->isSignedIntegerType()) |
3253 | return llvm::Intrinsic::vector_reduce_smin; |
3254 | if (QT->isUnsignedIntegerType()) |
3255 | return llvm::Intrinsic::vector_reduce_umin; |
3256 | assert(QT->isFloatingType() && "must have a float here");
3257 | return llvm::Intrinsic::vector_reduce_fmin; |
3258 | }; |
3259 | |
3260 | return RValue::get(emitUnaryBuiltin( |
3261 | *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min")); |
3262 | } |
3263 | |
3264 | case Builtin::BI__builtin_reduce_xor: |
3265 | return RValue::get(emitUnaryBuiltin( |
3266 | *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor")); |
3267 | case Builtin::BI__builtin_reduce_or: |
3268 | return RValue::get(emitUnaryBuiltin( |
3269 | *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or")); |
3270 | case Builtin::BI__builtin_reduce_and: |
3271 | return RValue::get(emitUnaryBuiltin( |
3272 | *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and")); |
3273 | |
3274 | case Builtin::BI__builtin_matrix_transpose: { |
3275 | auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>(); |
3276 | Value *MatValue = EmitScalarExpr(E->getArg(0)); |
3277 | MatrixBuilder MB(Builder); |
3278 | Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(), |
3279 | MatrixTy->getNumColumns()); |
3280 | return RValue::get(Result); |
3281 | } |
3282 | |
3283 | case Builtin::BI__builtin_matrix_column_major_load: { |
3284 | MatrixBuilder MB(Builder); |
3285 | // Emit everything that isn't dependent on the first parameter type |
3286 | Value *Stride = EmitScalarExpr(E->getArg(3)); |
3287 | const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>(); |
3288 | auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>(); |
3289 | assert(PtrTy && "arg0 must be of pointer type");
3290 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
3291 | |
3292 | Address Src = EmitPointerWithAlignment(E->getArg(0)); |
3293 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(), |
3294 | E->getArg(0)->getExprLoc(), FD, 0); |
3295 | Value *Result = MB.CreateColumnMajorLoad( |
3296 | Src.getElementType(), Src.getPointer(), |
3297 | Align(Src.getAlignment().getQuantity()), Stride, IsVolatile, |
3298 | ResultTy->getNumRows(), ResultTy->getNumColumns(), |
3299 | "matrix"); |
3300 | return RValue::get(Result); |
3301 | } |
3302 | |
3303 | case Builtin::BI__builtin_matrix_column_major_store: { |
3304 | MatrixBuilder MB(Builder); |
3305 | Value *Matrix = EmitScalarExpr(E->getArg(0)); |
3306 | Address Dst = EmitPointerWithAlignment(E->getArg(1)); |
3307 | Value *Stride = EmitScalarExpr(E->getArg(2)); |
3308 | |
3309 | const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); |
3310 | auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>(); |
3311 | assert(PtrTy && "arg1 must be of pointer type");
3312 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
3313 | |
3314 | EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(), |
3315 | E->getArg(1)->getExprLoc(), FD, 0); |
3316 | Value *Result = MB.CreateColumnMajorStore( |
3317 | Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()), |
3318 | Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns()); |
3319 | return RValue::get(Result); |
3320 | } |
3321 | |
3322 | case Builtin::BIfinite: |
3323 | case Builtin::BI__finite: |
3324 | case Builtin::BIfinitef: |
3325 | case Builtin::BI__finitef: |
3326 | case Builtin::BIfinitel: |
3327 | case Builtin::BI__finitel: |
3328 | case Builtin::BI__builtin_isinf: |
3329 | case Builtin::BI__builtin_isfinite: { |
3330 | // isinf(x) --> fabs(x) == infinity |
3331 | // isfinite(x) --> fabs(x) != infinity |
3332 | // x != NaN via the ordered compare in either case. |
3333 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3334 | Value *V = EmitScalarExpr(E->getArg(0)); |
3335 | llvm::Type *Ty = V->getType(); |
3336 | if (!Builder.getIsFPConstrained() || |
3337 | Builder.getDefaultConstrainedExcept() == fp::ebIgnore || |
3338 | !Ty->isIEEE()) { |
3339 | Value *Fabs = EmitFAbs(*this, V); |
3340 | Constant *Infinity = ConstantFP::getInfinity(V->getType()); |
3341 | CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) |
3342 | ? CmpInst::FCMP_OEQ |
3343 | : CmpInst::FCMP_ONE; |
3344 | Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); |
3345 | return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); |
3346 | } |
3347 | |
3348 | if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM)) |
3349 | return RValue::get(Result); |
3350 | |
3351 | // Inf values have all exp bits set and a zero significand. Therefore: |
3352 | // isinf(V) == ((V << 1) == ((exp mask) << 1)) |
3353 | // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison |
3354 | unsigned bitsize = Ty->getScalarSizeInBits(); |
3355 | llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize); |
3356 | Value *IntV = Builder.CreateBitCast(V, IntTy); |
3357 | Value *Shl1 = Builder.CreateShl(IntV, 1); |
3358 | const llvm::fltSemantics &Semantics = Ty->getFltSemantics(); |
3359 | APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt(); |
3360 | Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1)); |
3361 | if (BuiltinID == Builtin::BI__builtin_isinf) |
3362 | V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1); |
3363 | else |
3364 | V = Builder.CreateICmpULT(Shl1, ExpMaskShl1); |
3365 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3366 | } |
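 | // NOTE (editorial sketch, not in the original source): the shift-by-one trick
 | // above on binary32, in C++ (kept in a comment):
 | //   bool isinf32Model(float F) {
 | //     uint32_t Bits;
 | //     std::memcpy(&Bits, &F, sizeof(Bits));
 | //     return (Bits << 1) == (0x7f800000u << 1); // shl discards the sign bit
 | //   }
 | // isfinite uses an unsigned '<' against the same shifted exponent mask.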
3367 | |
3368 | case Builtin::BI__builtin_isinf_sign: { |
3369 | // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 |
3370 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3371 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3372 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
3373 | Value *AbsArg = EmitFAbs(*this, Arg); |
3374 | Value *IsInf = Builder.CreateFCmpOEQ( |
3375 | AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); |
3376 | Value *IsNeg = EmitSignBit(*this, Arg); |
3377 | |
3378 | llvm::Type *IntTy = ConvertType(E->getType()); |
3379 | Value *Zero = Constant::getNullValue(IntTy); |
3380 | Value *One = ConstantInt::get(IntTy, 1); |
3381 | Value *NegativeOne = ConstantInt::get(IntTy, -1); |
3382 | Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); |
3383 | Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); |
3384 | return RValue::get(Result); |
3385 | } |
3386 | |
3387 | case Builtin::BI__builtin_isnormal: { |
3388 | // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min |
3389 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3390 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3391 | Value *V = EmitScalarExpr(E->getArg(0)); |
3392 | Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); |
3393 | |
3394 | Value *Abs = EmitFAbs(*this, V); |
3395 | Value *IsLessThanInf = |
3396 | Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); |
3397 | APFloat Smallest = APFloat::getSmallestNormalized( |
3398 | getContext().getFloatTypeSemantics(E->getArg(0)->getType())); |
3399 | Value *IsNormal = |
3400 | Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), |
3401 | "isnormal"); |
3402 | V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); |
3403 | V = Builder.CreateAnd(V, IsNormal, "and"); |
3404 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3405 | } |
3406 | |
3407 | case Builtin::BI__builtin_flt_rounds: { |
3408 | Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds); |
3409 | |
3410 | llvm::Type *ResultType = ConvertType(E->getType()); |
3411 | Value *Result = Builder.CreateCall(F); |
3412 | if (Result->getType() != ResultType) |
3413 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
3414 | "cast"); |
3415 | return RValue::get(Result); |
3416 | } |
3417 | |
3418 | case Builtin::BI__builtin_fpclassify: { |
3419 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3420 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3421 | Value *V = EmitScalarExpr(E->getArg(5)); |
3422 | llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); |
3423 | |
3424 | // Create Result |
3425 | BasicBlock *Begin = Builder.GetInsertBlock(); |
3426 | BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); |
3427 | Builder.SetInsertPoint(End); |
3428 | PHINode *Result = |
3429 | Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, |
3430 | "fpclassify_result"); |
3431 | |
3432 | // if (V==0) return FP_ZERO |
3433 | Builder.SetInsertPoint(Begin); |
3434 | Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), |
3435 | "iszero"); |
3436 | Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); |
3437 | BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); |
3438 | Builder.CreateCondBr(IsZero, End, NotZero); |
3439 | Result->addIncoming(ZeroLiteral, Begin); |
3440 | |
3441 | // if (V != V) return FP_NAN |
3442 | Builder.SetInsertPoint(NotZero); |
3443 | Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); |
3444 | Value *NanLiteral = EmitScalarExpr(E->getArg(0)); |
3445 | BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); |
3446 | Builder.CreateCondBr(IsNan, End, NotNan); |
3447 | Result->addIncoming(NanLiteral, NotZero); |
3448 | |
3449 | // if (fabs(V) == infinity) return FP_INFINITY |
3450 | Builder.SetInsertPoint(NotNan); |
3451 | Value *VAbs = EmitFAbs(*this, V); |
3452 | Value *IsInf = |
3453 | Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), |
3454 | "isinf"); |
3455 | Value *InfLiteral = EmitScalarExpr(E->getArg(1)); |
3456 | BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); |
3457 | Builder.CreateCondBr(IsInf, End, NotInf); |
3458 | Result->addIncoming(InfLiteral, NotNan); |
3459 | |
3460 | // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL |
3461 | Builder.SetInsertPoint(NotInf); |
3462 | APFloat Smallest = APFloat::getSmallestNormalized( |
3463 | getContext().getFloatTypeSemantics(E->getArg(5)->getType())); |
3464 | Value *IsNormal = |
3465 | Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), |
3466 | "isnormal"); |
3467 | Value *NormalResult = |
3468 | Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), |
3469 | EmitScalarExpr(E->getArg(3))); |
3470 | Builder.CreateBr(End); |
3471 | Result->addIncoming(NormalResult, NotInf); |
3472 | |
3473 | // return Result |
3474 | Builder.SetInsertPoint(End); |
3475 | return RValue::get(Result); |
3476 | } |
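     | // NOTE (annotation, not in the original file): the argument order relied on
     | // above matches the GCC builtin, e.g.
     | //   int k = __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
     | //                                FP_SUBNORMAL, FP_ZERO, x);
     | // Arg 5 is the value under test and args 0-4 are the class literals, which
     | // is why the zero check reads getArg(4) and the NaN check getArg(0).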
3477 | |
3478 | case Builtin::BIalloca: |
3479 | case Builtin::BI_alloca: |
3480 | case Builtin::BI__builtin_alloca_uninitialized: |
3481 | case Builtin::BI__builtin_alloca: { |
3482 | Value *Size = EmitScalarExpr(E->getArg(0)); |
3483 | const TargetInfo &TI = getContext().getTargetInfo(); |
3484 | // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. |
3485 | const Align SuitableAlignmentInBytes = |
3486 | CGM.getContext() |
3487 | .toCharUnitsFromBits(TI.getSuitableAlign()) |
3488 | .getAsAlign(); |
3489 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
3490 | AI->setAlignment(SuitableAlignmentInBytes); |
3491 | if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized) |
3492 | initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes); |
3493 | return RValue::get(AI); |
3494 | } |
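     | // NOTE (annotation, not in the original file): a usage sketch. The result is
     | // an i8 alloca of Size bytes aligned to __BIGGEST_ALIGNMENT__ (16 bytes on
     | // typical x86-64 targets), e.g.
     | //   char *p = (char *)__builtin_alloca(n);  // 16-byte aligned
     | // and it is zero- or pattern-filled under -ftrivial-auto-var-init unless the
     | // _uninitialized variant is called.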
3495 | |
3496 | case Builtin::BI__builtin_alloca_with_align_uninitialized: |
3497 | case Builtin::BI__builtin_alloca_with_align: { |
3498 | Value *Size = EmitScalarExpr(E->getArg(0)); |
3499 | Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); |
3500 | auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue); |
3501 | unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); |
3502 | const Align AlignmentInBytes = |
3503 | CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign(); |
3504 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
3505 | AI->setAlignment(AlignmentInBytes); |
3506 | if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized) |
3507 | initializeAlloca(*this, AI, Size, AlignmentInBytes); |
3508 | return RValue::get(AI); |
3509 | } |
3510 | |
3511 | case Builtin::BIbzero: |
3512 | case Builtin::BI__builtin_bzero: { |
3513 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3514 | Value *SizeVal = EmitScalarExpr(E->getArg(1)); |
3515 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3516 | E->getArg(0)->getExprLoc(), FD, 0); |
3517 | Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); |
3518 | return RValue::get(nullptr); |
3519 | } |
3520 | case Builtin::BImemcpy: |
3521 | case Builtin::BI__builtin_memcpy: |
3522 | case Builtin::BImempcpy: |
3523 | case Builtin::BI__builtin_mempcpy: { |
3524 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3525 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3526 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3527 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3528 | E->getArg(0)->getExprLoc(), FD, 0); |
3529 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3530 | E->getArg(1)->getExprLoc(), FD, 1); |
3531 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
3532 | if (BuiltinID == Builtin::BImempcpy || |
3533 | BuiltinID == Builtin::BI__builtin_mempcpy) |
3534 | return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(), |
3535 | Dest.getPointer(), SizeVal)); |
3536 | else |
3537 | return RValue::get(Dest.getPointer()); |
3538 | } |
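     | // NOTE (annotation, not in the original file): the two families differ only
     | // in the return value computed above:
     | //   memcpy(d, s, n)  returns d
     | //   mempcpy(d, s, n) returns (char *)d + n   // the inbounds GEP by SizeVal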
3539 | |
3540 | case Builtin::BI__builtin_memcpy_inline: { |
3541 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3542 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3543 | uint64_t Size = |
3544 | E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
3545 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3546 | E->getArg(0)->getExprLoc(), FD, 0); |
3547 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3548 | E->getArg(1)->getExprLoc(), FD, 1); |
3549 | Builder.CreateMemCpyInline(Dest, Src, Size); |
3550 | return RValue::get(nullptr); |
3551 | } |
3552 | |
3553 | case Builtin::BI__builtin_char_memchr: |
3554 | BuiltinID = Builtin::BI__builtin_memchr; |
3555 | break; |
3556 | |
3557 | case Builtin::BI__builtin___memcpy_chk: { |
3558 | // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. |
3559 | Expr::EvalResult SizeResult, DstSizeResult; |
3560 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3561 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3562 | break; |
3563 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3564 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3565 | if (Size.ugt(DstSize)) |
3566 | break; |
3567 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3568 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3569 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3570 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
3571 | return RValue::get(Dest.getPointer()); |
3572 | } |
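     | // NOTE (annotation, not in the original file): the fold only fires when both
     | // size arguments are compile-time constants and the copy provably fits, e.g.
     | //   __builtin___memcpy_chk(d, s, n, __builtin_object_size(d, 0));
     | // lowers to a plain memcpy when n <= the destination size; otherwise the
     | // case breaks out and the checked runtime call is emitted instead.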
3573 | |
3574 | case Builtin::BI__builtin_objc_memmove_collectable: { |
3575 | Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); |
3576 | Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); |
3577 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3578 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, |
3579 | DestAddr, SrcAddr, SizeVal); |
3580 | return RValue::get(DestAddr.getPointer()); |
3581 | } |
3582 | |
3583 | case Builtin::BI__builtin___memmove_chk: { |
3584 | // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2. |
3585 | Expr::EvalResult SizeResult, DstSizeResult; |
3586 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3587 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3588 | break; |
3589 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3590 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3591 | if (Size.ugt(DstSize)) |
3592 | break; |
3593 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3594 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3595 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3596 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
3597 | return RValue::get(Dest.getPointer()); |
3598 | } |
3599 | |
3600 | case Builtin::BImemmove: |
3601 | case Builtin::BI__builtin_memmove: { |
3602 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3603 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3604 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3605 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3606 | E->getArg(0)->getExprLoc(), FD, 0); |
3607 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3608 | E->getArg(1)->getExprLoc(), FD, 1); |
3609 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
3610 | return RValue::get(Dest.getPointer()); |
3611 | } |
3612 | case Builtin::BImemset: |
3613 | case Builtin::BI__builtin_memset: { |
3614 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3615 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
3616 | Builder.getInt8Ty()); |
3617 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3618 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3619 | E->getArg(0)->getExprLoc(), FD, 0); |
3620 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
3621 | return RValue::get(Dest.getPointer()); |
3622 | } |
3623 | case Builtin::BI__builtin___memset_chk: { |
3624 | // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. |
3625 | Expr::EvalResult SizeResult, DstSizeResult; |
3626 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3627 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3628 | break; |
3629 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3630 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3631 | if (Size.ugt(DstSize)) |
3632 | break; |
3633 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3634 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
3635 | Builder.getInt8Ty()); |
3636 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3637 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
3638 | return RValue::get(Dest.getPointer()); |
3639 | } |
3640 | case Builtin::BI__builtin_wmemchr: { |
3641 | // The MSVC runtime library does not provide a definition of wmemchr, so we |
3642 | // need an inline implementation. |
3643 | if (!getTarget().getTriple().isOSMSVCRT()) |
3644 | break; |
3645 | |
3646 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
3647 | Value *Str = EmitScalarExpr(E->getArg(0)); |
3648 | Value *Chr = EmitScalarExpr(E->getArg(1)); |
3649 | Value *Size = EmitScalarExpr(E->getArg(2)); |
3650 | |
3651 | BasicBlock *Entry = Builder.GetInsertBlock(); |
3652 | BasicBlock *CmpEq = createBasicBlock("wmemchr.eq"); |
3653 | BasicBlock *Next = createBasicBlock("wmemchr.next"); |
3654 | BasicBlock *Exit = createBasicBlock("wmemchr.exit"); |
3655 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
3656 | Builder.CreateCondBr(SizeEq0, Exit, CmpEq); |
3657 | |
3658 | EmitBlock(CmpEq); |
3659 | PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2); |
3660 | StrPhi->addIncoming(Str, Entry); |
3661 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
3662 | SizePhi->addIncoming(Size, Entry); |
3663 | CharUnits WCharAlign = |
3664 | getContext().getTypeAlignInChars(getContext().WCharTy); |
3665 | Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign); |
3666 | Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0); |
3667 | Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr); |
3668 | Builder.CreateCondBr(StrEqChr, Exit, Next); |
3669 | |
3670 | EmitBlock(Next); |
3671 | Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1); |
3672 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
3673 | Value *NextSizeEq0 = |
3674 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
3675 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq); |
3676 | StrPhi->addIncoming(NextStr, Next); |
3677 | SizePhi->addIncoming(NextSize, Next); |
3678 | |
3679 | EmitBlock(Exit); |
3680 | PHINode *Ret = Builder.CreatePHI(Str->getType(), 3); |
3681 | Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry); |
3682 | Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next); |
3683 | Ret->addIncoming(FoundChr, CmpEq); |
3684 | return RValue::get(Ret); |
3685 | } |
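     | // NOTE (annotation, not in the original file): a C-level sketch of the loop
     | // the blocks above emit, assuming MSVC's 16-bit wchar_t:
     | //   while (size--) {
     | //     if (*str == chr) return (wchar_t *)str;
     | //     ++str;
     | //   }
     | //   return 0;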
3686 | case Builtin::BI__builtin_wmemcmp: { |
3687 | // The MSVC runtime library does not provide a definition of wmemcmp, so we |
3688 | // need an inline implementation. |
3689 | if (!getTarget().getTriple().isOSMSVCRT()) |
3690 | break; |
3691 | |
3692 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
3693 | |
3694 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
3695 | Value *Src = EmitScalarExpr(E->getArg(1)); |
3696 | Value *Size = EmitScalarExpr(E->getArg(2)); |
3697 | |
3698 | BasicBlock *Entry = Builder.GetInsertBlock(); |
3699 | BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt"); |
3700 | BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt"); |
3701 | BasicBlock *Next = createBasicBlock("wmemcmp.next"); |
3702 | BasicBlock *Exit = createBasicBlock("wmemcmp.exit"); |
3703 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
3704 | Builder.CreateCondBr(SizeEq0, Exit, CmpGT); |
3705 | |
3706 | EmitBlock(CmpGT); |
3707 | PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2); |
3708 | DstPhi->addIncoming(Dst, Entry); |
3709 | PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2); |
3710 | SrcPhi->addIncoming(Src, Entry); |
3711 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
3712 | SizePhi->addIncoming(Size, Entry); |
3713 | CharUnits WCharAlign = |
3714 | getContext().getTypeAlignInChars(getContext().WCharTy); |
3715 | Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign); |
3716 | Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign); |
3717 | Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh); |
3718 | Builder.CreateCondBr(DstGtSrc, Exit, CmpLT); |
3719 | |
3720 | EmitBlock(CmpLT); |
3721 | Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh); |
3722 | Builder.CreateCondBr(DstLtSrc, Exit, Next); |
3723 | |
3724 | EmitBlock(Next); |
3725 | Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1); |
3726 | Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1); |
3727 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
3728 | Value *NextSizeEq0 = |
3729 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
3730 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT); |
3731 | DstPhi->addIncoming(NextDst, Next); |
3732 | SrcPhi->addIncoming(NextSrc, Next); |
3733 | SizePhi->addIncoming(NextSize, Next); |
3734 | |
3735 | EmitBlock(Exit); |
3736 | PHINode *Ret = Builder.CreatePHI(IntTy, 4); |
3737 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry); |
3738 | Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT); |
3739 | Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT); |
3740 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Next); |
3741 | return RValue::get(Ret); |
3742 | } |
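     | // NOTE (annotation, not in the original file): the element compares above
     | // are deliberately unsigned (ICmpUGT/ICmpULT) because wchar_t is an unsigned
     | // 16-bit type on Windows, the only target this expansion is emitted for; the
     | // PHI merges the 1 / -1 / 0 results wmemcmp specifies.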
3743 | case Builtin::BI__builtin_dwarf_cfa: { |
3744 | // The offset in bytes from the first argument to the CFA. |
3745 | // |
3746 | // Why on earth is this in the frontend? Is there any reason at |
3747 | // all that the backend can't reasonably determine this while |
3748 | // lowering llvm.eh.dwarf.cfa()? |
3749 | // |
3750 | // TODO: If there's a satisfactory reason, add a target hook for |
3751 | // this instead of hard-coding 0, which is correct for most targets. |
3752 | int32_t Offset = 0; |
3753 | |
3754 | Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); |
3755 | return RValue::get(Builder.CreateCall(F, |
3756 | llvm::ConstantInt::get(Int32Ty, Offset))); |
3757 | } |
3758 | case Builtin::BI__builtin_return_address: { |
3759 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
3760 | getContext().UnsignedIntTy); |
3761 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
3762 | return RValue::get(Builder.CreateCall(F, Depth)); |
3763 | } |
3764 | case Builtin::BI_ReturnAddress: { |
3765 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
3766 | return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); |
3767 | } |
3768 | case Builtin::BI__builtin_frame_address: { |
3769 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
3770 | getContext().UnsignedIntTy); |
3771 | Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy); |
3772 | return RValue::get(Builder.CreateCall(F, Depth)); |
3773 | } |
3774 | case Builtin::BI__builtin_extract_return_addr: { |
3775 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3776 | Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); |
3777 | return RValue::get(Result); |
3778 | } |
3779 | case Builtin::BI__builtin_frob_return_addr: { |
3780 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3781 | Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); |
3782 | return RValue::get(Result); |
3783 | } |
3784 | case Builtin::BI__builtin_dwarf_sp_column: { |
3785 | llvm::IntegerType *Ty |
3786 | = cast<llvm::IntegerType>(ConvertType(E->getType())); |
3787 | int Column = getTargetHooks().getDwarfEHStackPointer(CGM); |
3788 | if (Column == -1) { |
3789 | CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); |
3790 | return RValue::get(llvm::UndefValue::get(Ty)); |
3791 | } |
3792 | return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); |
3793 | } |
3794 | case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
3795 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3796 | if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) |
3797 | CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); |
3798 | return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); |
3799 | } |
3800 | case Builtin::BI__builtin_eh_return: { |
3801 | Value *Int = EmitScalarExpr(E->getArg(0)); |
3802 | Value *Ptr = EmitScalarExpr(E->getArg(1)); |
3803 | |
3804 | llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); |
3805 | assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
3806 | "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3807 | Function *F = |
3808 | CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 |
3809 | : Intrinsic::eh_return_i64); |
3810 | Builder.CreateCall(F, {Int, Ptr}); |
3811 | Builder.CreateUnreachable(); |
3812 | |
3813 | // We do need to preserve an insertion point. |
3814 | EmitBlock(createBasicBlock("builtin_eh_return.cont")); |
3815 | |
3816 | return RValue::get(nullptr); |
3817 | } |
3818 | case Builtin::BI__builtin_unwind_init: { |
3819 | Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); |
3820 | return RValue::get(Builder.CreateCall(F)); |
3821 | } |
3822 | case Builtin::BI__builtin_extend_pointer: { |
3823 | // Extends a pointer to the size of an _Unwind_Word, which is |
3824 | // uint64_t on all platforms. Generally this gets poked into a |
3825 | // register and eventually used as an address, so if the |
3826 | // addressing registers are wider than pointers and the platform |
3827 | // doesn't implicitly ignore high-order bits when doing |
3828 | // addressing, we need to make sure we zext / sext based on |
3829 | // the platform's expectations. |
3830 | // |
3831 | // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html |
3832 | |
3833 | // Cast the pointer to intptr_t. |
3834 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3835 | Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); |
3836 | |
3837 | // If that's 64 bits, we're done. |
3838 | if (IntPtrTy->getBitWidth() == 64) |
3839 | return RValue::get(Result); |
3840 | |
3841 | // Otherwise, ask the target hooks whether to sign- or zero-extend.
3842 | if (getTargetHooks().extendPointerWithSExt()) |
3843 | return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); |
3844 | else |
3845 | return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); |
3846 | } |
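     | // NOTE (annotation, not in the original file): a sketch of the three
     | // outcomes above for a pointer value P:
     | //   64-bit target:            (uint64_t)P              // already wide
     | //   32-bit, hook says sext:   (int64_t)(intptr_t)P
     | //   32-bit, otherwise:        (uint64_t)(uintptr_t)P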
3847 | case Builtin::BI__builtin_setjmp: { |
3848 | // Buffer is a void**. |
3849 | Address Buf = EmitPointerWithAlignment(E->getArg(0)); |
3850 | |
3851 | // Store the frame pointer to the setjmp buffer. |
3852 | Value *FrameAddr = Builder.CreateCall( |
3853 | CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy), |
3854 | ConstantInt::get(Int32Ty, 0)); |
3855 | Builder.CreateStore(FrameAddr, Buf); |
3856 | |
3857 | // Store the stack pointer to the setjmp buffer. |
3858 | Value *StackAddr = |
3859 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); |
3860 | Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); |
3861 | Builder.CreateStore(StackAddr, StackSaveSlot); |
3862 | |
3863 | // Call LLVM's EH setjmp, which is lightweight. |
3864 | Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); |
3865 | Buf = Builder.CreateElementBitCast(Buf, Int8Ty); |
3866 | return RValue::get(Builder.CreateCall(F, Buf.getPointer())); |
3867 | } |
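     | // NOTE (annotation, not in the original file; layout per the
     | // llvm.eh.sjlj.setjmp convention): buf[0] receives the frame pointer and
     | // buf[2] the saved stack pointer above, while buf[1] is left for the
     | // intrinsic's lowering to fill in with the resume address.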
3868 | case Builtin::BI__builtin_longjmp: { |
3869 | Value *Buf = EmitScalarExpr(E->getArg(0)); |
3870 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
3871 | |
3872 | // Call LLVM's EH longjmp, which is lightweight. |
3873 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); |
3874 | |
3875 | // longjmp doesn't return; mark this as unreachable. |
3876 | Builder.CreateUnreachable(); |
3877 | |
3878 | // We do need to preserve an insertion point. |
3879 | EmitBlock(createBasicBlock("longjmp.cont")); |
3880 | |
3881 | return RValue::get(nullptr); |
3882 | } |
3883 | case Builtin::BI__builtin_launder: { |
3884 | const Expr *Arg = E->getArg(0); |
3885 | QualType ArgTy = Arg->getType()->getPointeeType(); |
3886 | Value *Ptr = EmitScalarExpr(Arg); |
3887 | if (TypeRequiresBuiltinLaunder(CGM, ArgTy)) |
3888 | Ptr = Builder.CreateLaunderInvariantGroup(Ptr); |
3889 | |
3890 | return RValue::get(Ptr); |
3891 | } |
3892 | case Builtin::BI__sync_fetch_and_add: |
3893 | case Builtin::BI__sync_fetch_and_sub: |
3894 | case Builtin::BI__sync_fetch_and_or: |
3895 | case Builtin::BI__sync_fetch_and_and: |
3896 | case Builtin::BI__sync_fetch_and_xor: |
3897 | case Builtin::BI__sync_fetch_and_nand: |
3898 | case Builtin::BI__sync_add_and_fetch: |
3899 | case Builtin::BI__sync_sub_and_fetch: |
3900 | case Builtin::BI__sync_and_and_fetch: |
3901 | case Builtin::BI__sync_or_and_fetch: |
3902 | case Builtin::BI__sync_xor_and_fetch: |
3903 | case Builtin::BI__sync_nand_and_fetch: |
3904 | case Builtin::BI__sync_val_compare_and_swap: |
3905 | case Builtin::BI__sync_bool_compare_and_swap: |
3906 | case Builtin::BI__sync_lock_test_and_set: |
3907 | case Builtin::BI__sync_lock_release: |
3908 | case Builtin::BI__sync_swap: |
3909 | llvm_unreachable("Shouldn't make it through sema");
3910 | case Builtin::BI__sync_fetch_and_add_1: |
3911 | case Builtin::BI__sync_fetch_and_add_2: |
3912 | case Builtin::BI__sync_fetch_and_add_4: |
3913 | case Builtin::BI__sync_fetch_and_add_8: |
3914 | case Builtin::BI__sync_fetch_and_add_16: |
3915 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); |
3916 | case Builtin::BI__sync_fetch_and_sub_1: |
3917 | case Builtin::BI__sync_fetch_and_sub_2: |
3918 | case Builtin::BI__sync_fetch_and_sub_4: |
3919 | case Builtin::BI__sync_fetch_and_sub_8: |
3920 | case Builtin::BI__sync_fetch_and_sub_16: |
3921 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); |
3922 | case Builtin::BI__sync_fetch_and_or_1: |
3923 | case Builtin::BI__sync_fetch_and_or_2: |
3924 | case Builtin::BI__sync_fetch_and_or_4: |
3925 | case Builtin::BI__sync_fetch_and_or_8: |
3926 | case Builtin::BI__sync_fetch_and_or_16: |
3927 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); |
3928 | case Builtin::BI__sync_fetch_and_and_1: |
3929 | case Builtin::BI__sync_fetch_and_and_2: |
3930 | case Builtin::BI__sync_fetch_and_and_4: |
3931 | case Builtin::BI__sync_fetch_and_and_8: |
3932 | case Builtin::BI__sync_fetch_and_and_16: |
3933 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); |
3934 | case Builtin::BI__sync_fetch_and_xor_1: |
3935 | case Builtin::BI__sync_fetch_and_xor_2: |
3936 | case Builtin::BI__sync_fetch_and_xor_4: |
3937 | case Builtin::BI__sync_fetch_and_xor_8: |
3938 | case Builtin::BI__sync_fetch_and_xor_16: |
3939 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); |
3940 | case Builtin::BI__sync_fetch_and_nand_1: |
3941 | case Builtin::BI__sync_fetch_and_nand_2: |
3942 | case Builtin::BI__sync_fetch_and_nand_4: |
3943 | case Builtin::BI__sync_fetch_and_nand_8: |
3944 | case Builtin::BI__sync_fetch_and_nand_16: |
3945 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); |
3946 | |
3947 | // Clang extensions: not overloaded yet. |
3948 | case Builtin::BI__sync_fetch_and_min: |
3949 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); |
3950 | case Builtin::BI__sync_fetch_and_max: |
3951 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); |
3952 | case Builtin::BI__sync_fetch_and_umin: |
3953 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); |
3954 | case Builtin::BI__sync_fetch_and_umax: |
3955 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); |
3956 | |
3957 | case Builtin::BI__sync_add_and_fetch_1: |
3958 | case Builtin::BI__sync_add_and_fetch_2: |
3959 | case Builtin::BI__sync_add_and_fetch_4: |
3960 | case Builtin::BI__sync_add_and_fetch_8: |
3961 | case Builtin::BI__sync_add_and_fetch_16: |
3962 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, |
3963 | llvm::Instruction::Add); |
3964 | case Builtin::BI__sync_sub_and_fetch_1: |
3965 | case Builtin::BI__sync_sub_and_fetch_2: |
3966 | case Builtin::BI__sync_sub_and_fetch_4: |
3967 | case Builtin::BI__sync_sub_and_fetch_8: |
3968 | case Builtin::BI__sync_sub_and_fetch_16: |
3969 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, |
3970 | llvm::Instruction::Sub); |
3971 | case Builtin::BI__sync_and_and_fetch_1: |
3972 | case Builtin::BI__sync_and_and_fetch_2: |
3973 | case Builtin::BI__sync_and_and_fetch_4: |
3974 | case Builtin::BI__sync_and_and_fetch_8: |
3975 | case Builtin::BI__sync_and_and_fetch_16: |
3976 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, |
3977 | llvm::Instruction::And); |
3978 | case Builtin::BI__sync_or_and_fetch_1: |
3979 | case Builtin::BI__sync_or_and_fetch_2: |
3980 | case Builtin::BI__sync_or_and_fetch_4: |
3981 | case Builtin::BI__sync_or_and_fetch_8: |
3982 | case Builtin::BI__sync_or_and_fetch_16: |
3983 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, |
3984 | llvm::Instruction::Or); |
3985 | case Builtin::BI__sync_xor_and_fetch_1: |
3986 | case Builtin::BI__sync_xor_and_fetch_2: |
3987 | case Builtin::BI__sync_xor_and_fetch_4: |
3988 | case Builtin::BI__sync_xor_and_fetch_8: |
3989 | case Builtin::BI__sync_xor_and_fetch_16: |
3990 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, |
3991 | llvm::Instruction::Xor); |
3992 | case Builtin::BI__sync_nand_and_fetch_1: |
3993 | case Builtin::BI__sync_nand_and_fetch_2: |
3994 | case Builtin::BI__sync_nand_and_fetch_4: |
3995 | case Builtin::BI__sync_nand_and_fetch_8: |
3996 | case Builtin::BI__sync_nand_and_fetch_16: |
3997 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, |
3998 | llvm::Instruction::And, true); |
3999 | |
4000 | case Builtin::BI__sync_val_compare_and_swap_1: |
4001 | case Builtin::BI__sync_val_compare_and_swap_2: |
4002 | case Builtin::BI__sync_val_compare_and_swap_4: |
4003 | case Builtin::BI__sync_val_compare_and_swap_8: |
4004 | case Builtin::BI__sync_val_compare_and_swap_16: |
4005 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); |
4006 | |
4007 | case Builtin::BI__sync_bool_compare_and_swap_1: |
4008 | case Builtin::BI__sync_bool_compare_and_swap_2: |
4009 | case Builtin::BI__sync_bool_compare_and_swap_4: |
4010 | case Builtin::BI__sync_bool_compare_and_swap_8: |
4011 | case Builtin::BI__sync_bool_compare_and_swap_16: |
4012 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); |
4013 | |
4014 | case Builtin::BI__sync_swap_1: |
4015 | case Builtin::BI__sync_swap_2: |
4016 | case Builtin::BI__sync_swap_4: |
4017 | case Builtin::BI__sync_swap_8: |
4018 | case Builtin::BI__sync_swap_16: |
4019 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
4020 | |
4021 | case Builtin::BI__sync_lock_test_and_set_1: |
4022 | case Builtin::BI__sync_lock_test_and_set_2: |
4023 | case Builtin::BI__sync_lock_test_and_set_4: |
4024 | case Builtin::BI__sync_lock_test_and_set_8: |
4025 | case Builtin::BI__sync_lock_test_and_set_16: |
4026 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
4027 | |
4028 | case Builtin::BI__sync_lock_release_1: |
4029 | case Builtin::BI__sync_lock_release_2: |
4030 | case Builtin::BI__sync_lock_release_4: |
4031 | case Builtin::BI__sync_lock_release_8: |
4032 | case Builtin::BI__sync_lock_release_16: { |
4033 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
4034 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
4035 | CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); |
4036 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), |
4037 | StoreSize.getQuantity() * 8); |
4038 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
4039 | llvm::StoreInst *Store = |
4040 | Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, |
4041 | StoreSize); |
4042 | Store->setAtomic(llvm::AtomicOrdering::Release); |
4043 | return RValue::get(nullptr); |
4044 | } |
4045 | |
4046 | case Builtin::BI__sync_synchronize: { |
4047 | // We assume this is supposed to correspond to a C++0x-style |
4048 | // sequentially-consistent fence (i.e. this is only usable for |
4049 | // synchronization, not device I/O or anything like that). This intrinsic |
4050 | // is really badly designed in the sense that in theory, there isn't |
4051 | // any way to safely use it... but in practice, it mostly works |
4052 | // to use it with non-atomic loads and stores to get acquire/release |
4053 | // semantics. |
4054 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); |
4055 | return RValue::get(nullptr); |
4056 | } |
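     | // NOTE (annotation, not in the original file): the emitted IR is a single
     | //   fence seq_cst
     | // instruction; no library call is involved.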
4057 | |
4058 | case Builtin::BI__builtin_nontemporal_load: |
4059 | return RValue::get(EmitNontemporalLoad(*this, E)); |
4060 | case Builtin::BI__builtin_nontemporal_store: |
4061 | return RValue::get(EmitNontemporalStore(*this, E)); |
4062 | case Builtin::BI__c11_atomic_is_lock_free: |
4063 | case Builtin::BI__atomic_is_lock_free: { |
4064 | // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the |
4065 | // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since |
4066 | // _Atomic(T) is always properly-aligned. |
4067 | const char *LibCallName = "__atomic_is_lock_free"; |
4068 | CallArgList Args; |
4069 | Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), |
4070 | getContext().getSizeType()); |
4071 | if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
4072 | Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), |
4073 | getContext().VoidPtrTy); |
4074 | else |
4075 | Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), |
4076 | getContext().VoidPtrTy); |
4077 | const CGFunctionInfo &FuncInfo = |
4078 | CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); |
4079 | llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); |
4080 | llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName); |
4081 | return EmitCall(FuncInfo, CGCallee::forDirect(Func), |
4082 | ReturnValueSlot(), Args); |
4083 | } |
4084 | |
4085 | case Builtin::BI__atomic_test_and_set: { |
4086 | // Look at the argument type to determine whether this is a volatile |
4087 | // operation. The parameter type is always volatile. |
4088 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
4089 | bool Volatile = |
4090 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
4091 | |
4092 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
4093 | unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); |
4094 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
4095 | Value *NewVal = Builder.getInt8(1); |
4096 | Value *Order = EmitScalarExpr(E->getArg(1)); |
4097 | if (isa<llvm::ConstantInt>(Order)) { |
4098 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
4099 | AtomicRMWInst *Result = nullptr; |
4100 | switch (ord) { |
4101 | case 0: // memory_order_relaxed |
4102 | default: // invalid order |
4103 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
4104 | llvm::AtomicOrdering::Monotonic); |
4105 | break; |
4106 | case 1: // memory_order_consume |
4107 | case 2: // memory_order_acquire |
4108 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
4109 | llvm::AtomicOrdering::Acquire); |
4110 | break; |
4111 | case 3: // memory_order_release |
4112 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
4113 | llvm::AtomicOrdering::Release); |
4114 | break; |
4115 | case 4: // memory_order_acq_rel |
4116 | |
4117 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
4118 | llvm::AtomicOrdering::AcquireRelease); |
4119 | break; |
4120 | case 5: // memory_order_seq_cst |
4121 | Result = Builder.CreateAtomicRMW( |
4122 | llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
4123 | llvm::AtomicOrdering::SequentiallyConsistent); |
4124 | break; |
4125 | } |
4126 | Result->setVolatile(Volatile); |
4127 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
4128 | } |
4129 | |
4130 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4131 | |
4132 | llvm::BasicBlock *BBs[5] = { |
4133 | createBasicBlock("monotonic", CurFn), |
4134 | createBasicBlock("acquire", CurFn), |
4135 | createBasicBlock("release", CurFn), |
4136 | createBasicBlock("acqrel", CurFn), |
4137 | createBasicBlock("seqcst", CurFn) |
4138 | }; |
4139 | llvm::AtomicOrdering Orders[5] = { |
4140 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
4141 | llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
4142 | llvm::AtomicOrdering::SequentiallyConsistent}; |
4143 | |
4144 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4145 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
4146 | |
4147 | Builder.SetInsertPoint(ContBB); |
4148 | PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); |
4149 | |
4150 | for (unsigned i = 0; i < 5; ++i) { |
4151 | Builder.SetInsertPoint(BBs[i]); |
4152 | AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, |
4153 | Ptr, NewVal, Orders[i]); |
4154 | RMW->setVolatile(Volatile); |
4155 | Result->addIncoming(RMW, BBs[i]); |
4156 | Builder.CreateBr(ContBB); |
4157 | } |
4158 | |
4159 | SI->addCase(Builder.getInt32(0), BBs[0]); |
4160 | SI->addCase(Builder.getInt32(1), BBs[1]); |
4161 | SI->addCase(Builder.getInt32(2), BBs[1]); |
4162 | SI->addCase(Builder.getInt32(3), BBs[2]); |
4163 | SI->addCase(Builder.getInt32(4), BBs[3]); |
4164 | SI->addCase(Builder.getInt32(5), BBs[4]); |
4165 | |
4166 | Builder.SetInsertPoint(ContBB); |
4167 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
4168 | } |
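     | // NOTE (annotation, not in the original file): both paths above lower to
     | // the same exchange, e.g. for memory_order_seq_cst:
     | //   %old    = atomicrmw xchg i8* %ptr, i8 1 seq_cst
     | //   %tobool = icmp ne i8 %old, 0
     | // The runtime-order path merely picks the ordering through the switch.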
4169 | |
4170 | case Builtin::BI__atomic_clear: { |
4171 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
4172 | bool Volatile = |
4173 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
4174 | |
4175 | Address Ptr = EmitPointerWithAlignment(E->getArg(0)); |
4176 | Ptr = Builder.CreateElementBitCast(Ptr, Int8Ty); |
4177 | Value *NewVal = Builder.getInt8(0); |
4178 | Value *Order = EmitScalarExpr(E->getArg(1)); |
4179 | if (isa<llvm::ConstantInt>(Order)) { |
4180 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
4181 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
4182 | switch (ord) { |
4183 | case 0: // memory_order_relaxed |
4184 | default: // invalid order |
4185 | Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
4186 | break; |
4187 | case 3: // memory_order_release |
4188 | Store->setOrdering(llvm::AtomicOrdering::Release); |
4189 | break; |
4190 | case 5: // memory_order_seq_cst |
4191 | Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
4192 | break; |
4193 | } |
4194 | return RValue::get(nullptr); |
4195 | } |
4196 | |
4197 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4198 | |
4199 | llvm::BasicBlock *BBs[3] = { |
4200 | createBasicBlock("monotonic", CurFn), |
4201 | createBasicBlock("release", CurFn), |
4202 | createBasicBlock("seqcst", CurFn) |
4203 | }; |
4204 | llvm::AtomicOrdering Orders[3] = { |
4205 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
4206 | llvm::AtomicOrdering::SequentiallyConsistent}; |
4207 | |
4208 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4209 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
4210 | |
4211 | for (unsigned i = 0; i < 3; ++i) { |
4212 | Builder.SetInsertPoint(BBs[i]); |
4213 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
4214 | Store->setOrdering(Orders[i]); |
4215 | Builder.CreateBr(ContBB); |
4216 | } |
4217 | |
4218 | SI->addCase(Builder.getInt32(0), BBs[0]); |
4219 | SI->addCase(Builder.getInt32(3), BBs[1]); |
4220 | SI->addCase(Builder.getInt32(5), BBs[2]); |
4221 | |
4222 | Builder.SetInsertPoint(ContBB); |
4223 | return RValue::get(nullptr); |
4224 | } |
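     | // NOTE (annotation, not in the original file): only relaxed (0), release (3)
     | // and seq_cst (5) appear in the switch above because __atomic_clear is a
     | // plain atomic store, for which acquire-flavored orderings are invalid.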
4225 | |
4226 | case Builtin::BI__atomic_thread_fence: |
4227 | case Builtin::BI__atomic_signal_fence: |
4228 | case Builtin::BI__c11_atomic_thread_fence: |
4229 | case Builtin::BI__c11_atomic_signal_fence: { |
4230 | llvm::SyncScope::ID SSID; |
4231 | if (BuiltinID == Builtin::BI__atomic_signal_fence || |
4232 | BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
4233 | SSID = llvm::SyncScope::SingleThread; |
4234 | else |
4235 | SSID = llvm::SyncScope::System; |
4236 | Value *Order = EmitScalarExpr(E->getArg(0)); |
4237 | if (isa<llvm::ConstantInt>(Order)) { |
4238 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
4239 | switch (ord) { |
4240 | case 0: // memory_order_relaxed |
4241 | default: // invalid order |
4242 | break; |
4243 | case 1: // memory_order_consume |
4244 | case 2: // memory_order_acquire |
4245 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
4246 | break; |
4247 | case 3: // memory_order_release |
4248 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
4249 | break; |
4250 | case 4: // memory_order_acq_rel |
4251 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
4252 | break; |
4253 | case 5: // memory_order_seq_cst |
4254 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4255 | break; |
4256 | } |
4257 | return RValue::get(nullptr); |
4258 | } |
4259 | |
4260 | llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
4261 | AcquireBB = createBasicBlock("acquire", CurFn); |
4262 | ReleaseBB = createBasicBlock("release", CurFn); |
4263 | AcqRelBB = createBasicBlock("acqrel", CurFn); |
4264 | SeqCstBB = createBasicBlock("seqcst", CurFn); |
4265 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4266 | |
4267 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4268 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); |
4269 | |
4270 | Builder.SetInsertPoint(AcquireBB); |
4271 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
4272 | Builder.CreateBr(ContBB); |
4273 | SI->addCase(Builder.getInt32(1), AcquireBB); |
4274 | SI->addCase(Builder.getInt32(2), AcquireBB); |
4275 | |
4276 | Builder.SetInsertPoint(ReleaseBB); |
4277 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
4278 | Builder.CreateBr(ContBB); |
4279 | SI->addCase(Builder.getInt32(3), ReleaseBB); |
4280 | |
4281 | Builder.SetInsertPoint(AcqRelBB); |
4282 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
4283 | Builder.CreateBr(ContBB); |
4284 | SI->addCase(Builder.getInt32(4), AcqRelBB); |
4285 | |
4286 | Builder.SetInsertPoint(SeqCstBB); |
4287 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4288 | Builder.CreateBr(ContBB); |
4289 | SI->addCase(Builder.getInt32(5), SeqCstBB); |
4290 | |
4291 | Builder.SetInsertPoint(ContBB); |
4292 | return RValue::get(nullptr); |
4293 | } |
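     | // NOTE (annotation, not in the original file): the mapping implemented
     | // above is
     | //   relaxed (0)            -> no fence
     | //   consume/acquire (1, 2) -> fence acquire
     | //   release (3)            -> fence release
     | //   acq_rel (4)            -> fence acq_rel
     | //   seq_cst (5)            -> fence seq_cst
     | // with signal fences additionally scoped to syncscope("singlethread").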
4294 | |
4295 | case Builtin::BI__builtin_signbit: |
4296 | case Builtin::BI__builtin_signbitf: |
4297 | case Builtin::BI__builtin_signbitl: { |
4298 | return RValue::get( |
4299 | Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), |
4300 | ConvertType(E->getType()))); |
4301 | } |
4302 | case Builtin::BI__warn_memset_zero_len: |
4303 | return RValue::getIgnored(); |
4304 | case Builtin::BI__annotation: { |
4305 | // Re-encode each wide string to UTF8 and make an MDString. |
4306 | SmallVector<Metadata *, 1> Strings; |
4307 | for (const Expr *Arg : E->arguments()) { |
4308 | const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); |
4309 | assert(Str->getCharByteWidth() == 2);
4310 | StringRef WideBytes = Str->getBytes(); |
4311 | std::string StrUtf8; |
4312 | if (!convertUTF16ToUTF8String( |
4313 | makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) { |
4314 | CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); |
4315 | continue; |
4316 | } |
4317 | Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8)); |
4318 | } |
4319 | |
4320 | // Build an MDTuple of MDStrings and emit the intrinsic call.
4321 | llvm::Function *F = |
4322 | CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {}); |
4323 | MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings); |
4324 | Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple)); |
4325 | return RValue::getIgnored(); |
4326 | } |
4327 | case Builtin::BI__builtin_annotation: { |
4328 | llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); |
4329 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, |
4330 | AnnVal->getType()); |
4331 | |
4332 | // Get the annotation string, looking through casts. Sema requires this to
4333 | // be a non-wide string literal, potentially cast, so the cast<> is safe.
4334 | const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); |
4335 | StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); |
4336 | return RValue::get( |
4337 | EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr)); |
4338 | } |
4339 | case Builtin::BI__builtin_addcb: |
4340 | case Builtin::BI__builtin_addcs: |
4341 | case Builtin::BI__builtin_addc: |
4342 | case Builtin::BI__builtin_addcl: |
4343 | case Builtin::BI__builtin_addcll: |
4344 | case Builtin::BI__builtin_subcb: |
4345 | case Builtin::BI__builtin_subcs: |
4346 | case Builtin::BI__builtin_subc: |
4347 | case Builtin::BI__builtin_subcl: |
4348 | case Builtin::BI__builtin_subcll: { |
4349 | |
4350 | // We translate all of these builtins from expressions of the form: |
4351 | // int x = ..., y = ..., carryin = ..., carryout, result; |
4352 | // result = __builtin_addc(x, y, carryin, &carryout); |
4353 | // |
4354 | // to LLVM IR of the form: |
4355 | // |
4356 | // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) |
4357 | // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 |
4358 | // %carry1 = extractvalue {i32, i1} %tmp1, 1 |
4359 | // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, |
4360 | // i32 %carryin) |
4361 | // %result = extractvalue {i32, i1} %tmp2, 0 |
4362 | // %carry2 = extractvalue {i32, i1} %tmp2, 1 |
4363 | // %tmp3 = or i1 %carry1, %carry2 |
4364 | // %tmp4 = zext i1 %tmp3 to i32 |
4365 | // store i32 %tmp4, i32* %carryout |
4366 | |
4367 | // Scalarize our inputs. |
4368 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
4369 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
4370 | llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); |
4371 | Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
4372 | |
4373 | // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. |
4374 | llvm::Intrinsic::ID IntrinsicId; |
4375 | switch (BuiltinID) { |
4376 | default: llvm_unreachable("Unknown multiprecision builtin id.");
4377 | case Builtin::BI__builtin_addcb: |
4378 | case Builtin::BI__builtin_addcs: |
4379 | case Builtin::BI__builtin_addc: |
4380 | case Builtin::BI__builtin_addcl: |
4381 | case Builtin::BI__builtin_addcll: |
4382 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
4383 | break; |
4384 | case Builtin::BI__builtin_subcb: |
4385 | case Builtin::BI__builtin_subcs: |
4386 | case Builtin::BI__builtin_subc: |
4387 | case Builtin::BI__builtin_subcl: |
4388 | case Builtin::BI__builtin_subcll: |
4389 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
4390 | break; |
4391 | } |
4392 | |
4393 | // Construct our resulting LLVM IR expression. |
4394 | llvm::Value *Carry1; |
4395 | llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, |
4396 | X, Y, Carry1); |
4397 | llvm::Value *Carry2; |
4398 | llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, |
4399 | Sum1, Carryin, Carry2); |
4400 | llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), |
4401 | X->getType()); |
4402 | Builder.CreateStore(CarryOut, CarryOutPtr); |
4403 | return RValue::get(Sum2); |
4404 | } |
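     | // NOTE (annotation, not in the original file): a typical use is chaining
     | // limbs of a multiprecision addition, e.g. a 64-bit add from 32-bit limbs:
     | //   unsigned lo, hi, c0, c1;
     | //   lo = __builtin_addc(a_lo, b_lo, 0,  &c0);
     | //   hi = __builtin_addc(a_hi, b_hi, c0, &c1);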
4405 | |
4406 | case Builtin::BI__builtin_add_overflow: |
4407 | case Builtin::BI__builtin_sub_overflow: |
4408 | case Builtin::BI__builtin_mul_overflow: { |
4409 | const clang::Expr *LeftArg = E->getArg(0); |
4410 | const clang::Expr *RightArg = E->getArg(1); |
4411 | const clang::Expr *ResultArg = E->getArg(2); |
4412 | |
4413 | clang::QualType ResultQTy = |
4414 | ResultArg->getType()->castAs<PointerType>()->getPointeeType(); |
4415 | |
4416 | WidthAndSignedness LeftInfo = |
4417 | getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); |
4418 | WidthAndSignedness RightInfo = |
4419 | getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); |
4420 | WidthAndSignedness ResultInfo = |
4421 | getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy); |
4422 | |
4423 | // Handle mixed-sign multiplication as a special case, because adding |
4424 | // runtime or backend support for our generic irgen would be too expensive. |
4425 | if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo)) |
4426 | return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg, |
4427 | RightInfo, ResultArg, ResultQTy, |
4428 | ResultInfo); |
4429 | |
4430 | if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo, |
4431 | ResultInfo)) |
4432 | return EmitCheckedUnsignedMultiplySignedResult( |
4433 | *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy, |
4434 | ResultInfo); |
4435 | |
4436 | WidthAndSignedness EncompassingInfo = |
4437 | EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); |
4438 | |
4439 | llvm::Type *EncompassingLLVMTy = |
4440 | llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); |
4441 | |
4442 | llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy); |
4443 | |
4444 | llvm::Intrinsic::ID IntrinsicId; |
4445 | switch (BuiltinID) { |
4446 | default: |
4447 | llvm_unreachable("Unknown overflow builtin id.");
4448 | case Builtin::BI__builtin_add_overflow: |
4449 | IntrinsicId = EncompassingInfo.Signed |
4450 | ? llvm::Intrinsic::sadd_with_overflow |
4451 | : llvm::Intrinsic::uadd_with_overflow; |
4452 | break; |
4453 | case Builtin::BI__builtin_sub_overflow: |
4454 | IntrinsicId = EncompassingInfo.Signed |
4455 | ? llvm::Intrinsic::ssub_with_overflow |
4456 | : llvm::Intrinsic::usub_with_overflow; |
4457 | break; |
4458 | case Builtin::BI__builtin_mul_overflow: |
4459 | IntrinsicId = EncompassingInfo.Signed |
4460 | ? llvm::Intrinsic::smul_with_overflow |
4461 | : llvm::Intrinsic::umul_with_overflow; |
4462 | break; |
4463 | } |
4464 | |
4465 | llvm::Value *Left = EmitScalarExpr(LeftArg); |
4466 | llvm::Value *Right = EmitScalarExpr(RightArg); |
4467 | Address ResultPtr = EmitPointerWithAlignment(ResultArg); |
4468 | |
4469 | // Extend each operand to the encompassing type. |
4470 | Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed); |
4471 | Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed); |
4472 | |
4473 | // Perform the operation on the extended values. |
4474 | llvm::Value *Overflow, *Result; |
4475 | Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow); |
4476 | |
4477 | if (EncompassingInfo.Width > ResultInfo.Width) { |
4478 | // The encompassing type is wider than the result type, so we need to |
4479 | // truncate it. |
4480 | llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy); |
4481 | |
4482 | // To see if the truncation caused an overflow, we will extend |
4483 | // the result and then compare it to the original result. |
4484 | llvm::Value *ResultTruncExt = Builder.CreateIntCast( |
4485 | ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed); |
4486 | llvm::Value *TruncationOverflow = |
4487 | Builder.CreateICmpNE(Result, ResultTruncExt); |
4488 | |
4489 | Overflow = Builder.CreateOr(Overflow, TruncationOverflow); |
4490 | Result = ResultTrunc; |
4491 | } |
4492 | |
4493 | // Finally, store the result using the pointer. |
4494 | bool isVolatile = |
4495 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
4496 | Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); |
4497 | |
4498 | return RValue::get(Overflow); |
4499 | } |
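     | // NOTE (annotation, not in the original file): widening to the encompassing
     | // type lets mixed signs and widths share one code path, e.g.
     | //   int r;
     | //   bool ovf = __builtin_add_overflow(UINT_MAX, 1L, &r);
     | // computes in a 64-bit signed type and then reports overflow from either
     | // the arithmetic or the final truncation back to int.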
4500 | |
4501 | case Builtin::BI__builtin_uadd_overflow: |
4502 | case Builtin::BI__builtin_uaddl_overflow: |
4503 | case Builtin::BI__builtin_uaddll_overflow: |
4504 | case Builtin::BI__builtin_usub_overflow: |
4505 | case Builtin::BI__builtin_usubl_overflow: |
4506 | case Builtin::BI__builtin_usubll_overflow: |
4507 | case Builtin::BI__builtin_umul_overflow: |
4508 | case Builtin::BI__builtin_umull_overflow: |
4509 | case Builtin::BI__builtin_umulll_overflow: |
4510 | case Builtin::BI__builtin_sadd_overflow: |
4511 | case Builtin::BI__builtin_saddl_overflow: |
4512 | case Builtin::BI__builtin_saddll_overflow: |
4513 | case Builtin::BI__builtin_ssub_overflow: |
4514 | case Builtin::BI__builtin_ssubl_overflow: |
4515 | case Builtin::BI__builtin_ssubll_overflow: |
4516 | case Builtin::BI__builtin_smul_overflow: |
4517 | case Builtin::BI__builtin_smull_overflow: |
4518 | case Builtin::BI__builtin_smulll_overflow: { |
4519 | |
4520 | // We translate all of these builtins directly to the relevant llvm IR node. |
4521 | |
4522 | // Scalarize our inputs. |
4523 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
4524 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
4525 | Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
4526 | |
4527 | // Decide which of the overflow intrinsics we are lowering to: |
4528 | llvm::Intrinsic::ID IntrinsicId; |
4529 | switch (BuiltinID) { |
4530 | default: llvm_unreachable("Unknown overflow builtin id.");
4531 | case Builtin::BI__builtin_uadd_overflow: |
4532 | case Builtin::BI__builtin_uaddl_overflow: |
4533 | case Builtin::BI__builtin_uaddll_overflow: |
4534 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
4535 | break; |
4536 | case Builtin::BI__builtin_usub_overflow: |
4537 | case Builtin::BI__builtin_usubl_overflow: |
4538 | case Builtin::BI__builtin_usubll_overflow: |
4539 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
4540 | break; |
4541 | case Builtin::BI__builtin_umul_overflow: |
4542 | case Builtin::BI__builtin_umull_overflow: |
4543 | case Builtin::BI__builtin_umulll_overflow: |
4544 | IntrinsicId = llvm::Intrinsic::umul_with_overflow; |
4545 | break; |
4546 | case Builtin::BI__builtin_sadd_overflow: |
4547 | case Builtin::BI__builtin_saddl_overflow: |
4548 | case Builtin::BI__builtin_saddll_overflow: |
4549 | IntrinsicId = llvm::Intrinsic::sadd_with_overflow; |
4550 | break; |
4551 | case Builtin::BI__builtin_ssub_overflow: |
4552 | case Builtin::BI__builtin_ssubl_overflow: |
4553 | case Builtin::BI__builtin_ssubll_overflow: |
4554 | IntrinsicId = llvm::Intrinsic::ssub_with_overflow; |
4555 | break; |
4556 | case Builtin::BI__builtin_smul_overflow: |
4557 | case Builtin::BI__builtin_smull_overflow: |
4558 | case Builtin::BI__builtin_smulll_overflow: |
4559 | IntrinsicId = llvm::Intrinsic::smul_with_overflow; |
4560 | break; |
4561 | } |
4562 | |
4563 | |
4564 | llvm::Value *Carry; |
4565 | llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); |
4566 | Builder.CreateStore(Sum, SumOutPtr); |
4567 | |
4568 | return RValue::get(Carry); |
4569 | } |
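     | // NOTE (annotation, not in the original file): unlike the generic forms
     | // above, these fixed-type variants map 1:1 onto a single
     | // llvm.*.with.overflow intrinsic, e.g.
     | //   __builtin_uadd_overflow -> @llvm.uadd.with.overflow.i32
     | // with the sum stored through the out-pointer and the carry bit returned.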
4570 | case Builtin::BIaddressof: |
4571 | case Builtin::BI__addressof: |
4572 | case Builtin::BI__builtin_addressof: |
4573 | return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); |
4574 | case Builtin::BI__builtin_function_start: |
4575 | return RValue::get(CGM.GetFunctionStart( |
4576 | E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()))); |
4577 | case Builtin::BI__builtin_operator_new: |
4578 | return EmitBuiltinNewDeleteCall( |
4579 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); |
4580 | case Builtin::BI__builtin_operator_delete: |
4581 | return EmitBuiltinNewDeleteCall( |
4582 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true); |
4583 | |
4584 | case Builtin::BI__builtin_is_aligned: |
4585 | return EmitBuiltinIsAligned(E); |
4586 | case Builtin::BI__builtin_align_up: |
4587 | return EmitBuiltinAlignTo(E, true); |
4588 | case Builtin::BI__builtin_align_down: |
4589 | return EmitBuiltinAlignTo(E, false); |
4590 | |
4591 | case Builtin::BI__noop: |
4592 | // __noop always evaluates to an integer literal zero. |
4593 | return RValue::get(ConstantInt::get(IntTy, 0)); |
4594 | case Builtin::BI__builtin_call_with_static_chain: { |
4595 | const CallExpr *Call = cast<CallExpr>(E->getArg(0)); |
4596 | const Expr *Chain = E->getArg(1); |
4597 | return EmitCall(Call->getCallee()->getType(), |
4598 | EmitCallee(Call->getCallee()), Call, ReturnValue, |
4599 | EmitScalarExpr(Chain)); |
4600 | } |
4601 | case Builtin::BI_InterlockedExchange8: |
4602 | case Builtin::BI_InterlockedExchange16: |
4603 | case Builtin::BI_InterlockedExchange: |
4604 | case Builtin::BI_InterlockedExchangePointer: |
4605 | return RValue::get( |
4606 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E)); |
4607 | case Builtin::BI_InterlockedCompareExchangePointer: |
4608 | case Builtin::BI_InterlockedCompareExchangePointer_nf: { |
4609 | llvm::Type *RTy; |
4610 | llvm::IntegerType *IntType = |
4611 | IntegerType::get(getLLVMContext(), |
4612 | getContext().getTypeSize(E->getType())); |
4613 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
4614 | |
4615 | llvm::Value *Destination = |
4616 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); |
4617 | |
4618 | llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); |
4619 | RTy = Exchange->getType(); |
4620 | Exchange = Builder.CreatePtrToInt(Exchange, IntType); |
4621 | |
4622 | llvm::Value *Comparand = |
4623 | Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); |
4624 | |
4625 | auto Ordering = |
4626 | BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ? |
4627 | AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent; |
4628 | |
4629 | auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
4630 | Ordering, Ordering); |
4631 | Result->setVolatile(true); |
4632 | |
4633 | return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, |
4634 | 0), |
4635 | RTy)); |
4636 | } |
4637 | case Builtin::BI_InterlockedCompareExchange8: |
4638 | case Builtin::BI_InterlockedCompareExchange16: |
4639 | case Builtin::BI_InterlockedCompareExchange: |
4640 | case Builtin::BI_InterlockedCompareExchange64: |
4641 | return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E)); |
4642 | case Builtin::BI_InterlockedIncrement16: |
4643 | case Builtin::BI_InterlockedIncrement: |
4644 | return RValue::get( |
4645 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E)); |
4646 | case Builtin::BI_InterlockedDecrement16: |
4647 | case Builtin::BI_InterlockedDecrement: |
4648 | return RValue::get( |
4649 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E)); |
4650 | case Builtin::BI_InterlockedAnd8: |
4651 | case Builtin::BI_InterlockedAnd16: |
4652 | case Builtin::BI_InterlockedAnd: |
4653 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E)); |
4654 | case Builtin::BI_InterlockedExchangeAdd8: |
4655 | case Builtin::BI_InterlockedExchangeAdd16: |
4656 | case Builtin::BI_InterlockedExchangeAdd: |
4657 | return RValue::get( |
4658 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E)); |
4659 | case Builtin::BI_InterlockedExchangeSub8: |
4660 | case Builtin::BI_InterlockedExchangeSub16: |
4661 | case Builtin::BI_InterlockedExchangeSub: |
4662 | return RValue::get( |
4663 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E)); |
4664 | case Builtin::BI_InterlockedOr8: |
4665 | case Builtin::BI_InterlockedOr16: |
4666 | case Builtin::BI_InterlockedOr: |
4667 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E)); |
4668 | case Builtin::BI_InterlockedXor8: |
4669 | case Builtin::BI_InterlockedXor16: |
4670 | case Builtin::BI_InterlockedXor: |
4671 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E)); |
4672 | |
4673 | case Builtin::BI_bittest64: |
4674 | case Builtin::BI_bittest: |
4675 | case Builtin::BI_bittestandcomplement64: |
4676 | case Builtin::BI_bittestandcomplement: |
4677 | case Builtin::BI_bittestandreset64: |
4678 | case Builtin::BI_bittestandreset: |
4679 | case Builtin::BI_bittestandset64: |
4680 | case Builtin::BI_bittestandset: |
4681 | case Builtin::BI_interlockedbittestandreset: |
4682 | case Builtin::BI_interlockedbittestandreset64: |
4683 | case Builtin::BI_interlockedbittestandset64: |
4684 | case Builtin::BI_interlockedbittestandset: |
4685 | case Builtin::BI_interlockedbittestandset_acq: |
4686 | case Builtin::BI_interlockedbittestandset_rel: |
4687 | case Builtin::BI_interlockedbittestandset_nf: |
4688 | case Builtin::BI_interlockedbittestandreset_acq: |
4689 | case Builtin::BI_interlockedbittestandreset_rel: |
4690 | case Builtin::BI_interlockedbittestandreset_nf: |
4691 | return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E)); |
4692 | |
4693 | // These builtins exist to emit regular volatile loads and stores not |
4694 | // affected by the -fms-volatile setting. |
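| // e.g. __iso_volatile_load32(p) always lowers to a plain volatile i32
| // load, even under /volatile:ms, which would otherwise give volatile
| // accesses acquire/release semantics.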
4695 | case Builtin::BI__iso_volatile_load8: |
4696 | case Builtin::BI__iso_volatile_load16: |
4697 | case Builtin::BI__iso_volatile_load32: |
4698 | case Builtin::BI__iso_volatile_load64: |
4699 | return RValue::get(EmitISOVolatileLoad(*this, E)); |
4700 | case Builtin::BI__iso_volatile_store8: |
4701 | case Builtin::BI__iso_volatile_store16: |
4702 | case Builtin::BI__iso_volatile_store32: |
4703 | case Builtin::BI__iso_volatile_store64: |
4704 | return RValue::get(EmitISOVolatileStore(*this, E)); |
4705 | |
4706 | case Builtin::BI__exception_code: |
4707 | case Builtin::BI_exception_code: |
4708 | return RValue::get(EmitSEHExceptionCode()); |
4709 | case Builtin::BI__exception_info: |
4710 | case Builtin::BI_exception_info: |
4711 | return RValue::get(EmitSEHExceptionInfo()); |
4712 | case Builtin::BI__abnormal_termination: |
4713 | case Builtin::BI_abnormal_termination: |
4714 | return RValue::get(EmitSEHAbnormalTermination()); |
4715 | case Builtin::BI_setjmpex: |
4716 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
4717 | E->getArg(0)->getType()->isPointerType()) |
4718 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
4719 | break; |
4720 | case Builtin::BI_setjmp: |
4721 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
4722 | E->getArg(0)->getType()->isPointerType()) { |
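| // The MSVC CRT exposes different setjmp entry points per target:
| // _setjmp3 on x86, _setjmpex on AArch64, and plain _setjmp otherwise.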
4723 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) |
4724 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E); |
4725 | else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64) |
4726 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
4727 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E); |
4728 | } |
4729 | break; |
4730 | |
4731 | // C++ std:: builtins. |
4732 | case Builtin::BImove: |
4733 | case Builtin::BImove_if_noexcept: |
4734 | case Builtin::BIforward: |
4735 | case Builtin::BIas_const: |
4736 | return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); |
4737 | case Builtin::BI__GetExceptionInfo: { |
4738 | if (llvm::GlobalVariable *GV = |
4739 | CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) |
4740 | return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); |
4741 | break; |
4742 | } |
4743 | |
4744 | case Builtin::BI__fastfail: |
4745 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E)); |
4746 | |
4747 | case Builtin::BI__builtin_coro_size: { |
4748 | auto &Context = getContext();
4749 | auto SizeTy = Context.getSizeType(); |
4750 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
4751 | Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T); |
4752 | return RValue::get(Builder.CreateCall(F)); |
4753 | } |
4754 | |
4755 | case Builtin::BI__builtin_coro_id: |
4756 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_id); |
4757 | case Builtin::BI__builtin_coro_promise: |
4758 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise); |
4759 | case Builtin::BI__builtin_coro_resume: |
4760 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume); |
4761 | case Builtin::BI__builtin_coro_frame: |
4762 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame); |
4763 | case Builtin::BI__builtin_coro_noop: |
4764 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop); |
4765 | case Builtin::BI__builtin_coro_free: |
4766 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_free); |
4767 | case Builtin::BI__builtin_coro_destroy: |
4768 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy); |
4769 | case Builtin::BI__builtin_coro_done: |
4770 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_done); |
4771 | case Builtin::BI__builtin_coro_alloc: |
4772 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc); |
4773 | case Builtin::BI__builtin_coro_begin: |
4774 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin); |
4775 | case Builtin::BI__builtin_coro_end: |
4776 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_end); |
4777 | case Builtin::BI__builtin_coro_suspend: |
4778 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend); |
4779 | |
4780 | // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions |
4781 | case Builtin::BIread_pipe: |
4782 | case Builtin::BIwrite_pipe: { |
4783 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4784 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4785 | CGOpenCLRuntime OpenCLRT(CGM); |
4786 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4787 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4788 | |
4789 | // Type of the generic packet parameter. |
4790 | unsigned GenericAS = |
4791 | getContext().getTargetAddressSpace(LangAS::opencl_generic); |
4792 | llvm::Type *I8PTy = llvm::PointerType::get( |
4793 | llvm::Type::getInt8Ty(getLLVMContext()), GenericAS); |
4794 | |
4795 | // Determine which overloaded version we should generate the call for.
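| // Illustrative only: the two-argument `read_pipe(p, &v)` maps to
| //   i32 @__read_pipe_2(pipe, i8 addrspace(4)*, i32 size, i32 align),
| // while the four-argument form (reserve id + index) maps to __read_pipe_4.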
4796 | if (2U == E->getNumArgs()) { |
4797 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" |
4798 | : "__write_pipe_2"; |
4799 | // Create a generic function type so the call works with any builtin or
4800 | // user-defined type.
4801 | llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; |
4802 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4803 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4804 | Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy); |
4805 | return RValue::get( |
4806 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4807 | {Arg0, BCast, PacketSize, PacketAlign})); |
4808 | } else { |
4809 | assert(4 == E->getNumArgs() &&
4810 | "Illegal number of parameters to pipe function");
4811 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" |
4812 | : "__write_pipe_4"; |
4813 | |
4814 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, |
4815 | Int32Ty, Int32Ty}; |
4816 | Value *Arg2 = EmitScalarExpr(E->getArg(2)), |
4817 | *Arg3 = EmitScalarExpr(E->getArg(3)); |
4818 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4819 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4820 | Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy); |
4821 | // We know the third argument is an integer type, but we may need to cast |
4822 | // it to i32. |
4823 | if (Arg2->getType() != Int32Ty) |
4824 | Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty); |
4825 | return RValue::get( |
4826 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4827 | {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign})); |
4828 | } |
4829 | } |
4830 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
4831 | // functions |
4832 | case Builtin::BIreserve_read_pipe: |
4833 | case Builtin::BIreserve_write_pipe: |
4834 | case Builtin::BIwork_group_reserve_read_pipe: |
4835 | case Builtin::BIwork_group_reserve_write_pipe: |
4836 | case Builtin::BIsub_group_reserve_read_pipe: |
4837 | case Builtin::BIsub_group_reserve_write_pipe: { |
4838 | // Composing the mangled name for the function. |
4839 | const char *Name; |
4840 | if (BuiltinID == Builtin::BIreserve_read_pipe) |
4841 | Name = "__reserve_read_pipe"; |
4842 | else if (BuiltinID == Builtin::BIreserve_write_pipe) |
4843 | Name = "__reserve_write_pipe"; |
4844 | else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) |
4845 | Name = "__work_group_reserve_read_pipe"; |
4846 | else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) |
4847 | Name = "__work_group_reserve_write_pipe"; |
4848 | else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) |
4849 | Name = "__sub_group_reserve_read_pipe"; |
4850 | else |
4851 | Name = "__sub_group_reserve_write_pipe"; |
4852 | |
4853 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4854 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4855 | llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy); |
4856 | CGOpenCLRuntime OpenCLRT(CGM); |
4857 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4858 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4859 | |
4860 | // Building the generic function prototype. |
4861 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; |
4862 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4863 | ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4864 | // We know the second argument is an integer type, but we may need to cast |
4865 | // it to i32. |
4866 | if (Arg1->getType() != Int32Ty) |
4867 | Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty); |
4868 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4869 | {Arg0, Arg1, PacketSize, PacketAlign})); |
4870 | } |
4871 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write |
4872 | // functions |
4873 | case Builtin::BIcommit_read_pipe: |
4874 | case Builtin::BIcommit_write_pipe: |
4875 | case Builtin::BIwork_group_commit_read_pipe: |
4876 | case Builtin::BIwork_group_commit_write_pipe: |
4877 | case Builtin::BIsub_group_commit_read_pipe: |
4878 | case Builtin::BIsub_group_commit_write_pipe: { |
4879 | const char *Name; |
4880 | if (BuiltinID == Builtin::BIcommit_read_pipe) |
4881 | Name = "__commit_read_pipe"; |
4882 | else if (BuiltinID == Builtin::BIcommit_write_pipe) |
4883 | Name = "__commit_write_pipe"; |
4884 | else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) |
4885 | Name = "__work_group_commit_read_pipe"; |
4886 | else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) |
4887 | Name = "__work_group_commit_write_pipe"; |
4888 | else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) |
4889 | Name = "__sub_group_commit_read_pipe"; |
4890 | else |
4891 | Name = "__sub_group_commit_write_pipe"; |
4892 | |
4893 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4894 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4895 | CGOpenCLRuntime OpenCLRT(CGM); |
4896 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4897 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4898 | |
4899 | // Building the generic function prototype. |
4900 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; |
4901 | llvm::FunctionType *FTy = |
4902 | llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), |
4903 | llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4904 | |
4905 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4906 | {Arg0, Arg1, PacketSize, PacketAlign})); |
4907 | } |
4908 | // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions |
4909 | case Builtin::BIget_pipe_num_packets: |
4910 | case Builtin::BIget_pipe_max_packets: { |
4911 | const char *BaseName; |
4912 | const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>(); |
4913 | if (BuiltinID == Builtin::BIget_pipe_num_packets) |
4914 | BaseName = "__get_pipe_num_packets"; |
4915 | else |
4916 | BaseName = "__get_pipe_max_packets"; |
4917 | std::string Name = std::string(BaseName) + |
4918 | std::string(PipeTy->isReadOnly() ? "_ro" : "_wo"); |
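| // e.g. get_pipe_num_packets on a read_only pipe calls
| // __get_pipe_num_packets_ro; on a write_only pipe, the _wo variant.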
4919 | |
4920 | // Building the generic function prototype. |
4921 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
4922 | CGOpenCLRuntime OpenCLRT(CGM); |
4923 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4924 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4925 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; |
4926 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4927 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4928 | |
4929 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4930 | {Arg0, PacketSize, PacketAlign})); |
4931 | } |
4932 | |
4933 | // OpenCL v2.0 s6.13.9 - Address space qualifier functions. |
4934 | case Builtin::BIto_global: |
4935 | case Builtin::BIto_local: |
4936 | case Builtin::BIto_private: { |
4937 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
4938 | auto NewArgT = llvm::PointerType::get(Int8Ty, |
4939 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4940 | auto NewRetT = llvm::PointerType::get(Int8Ty, |
4941 | CGM.getContext().getTargetAddressSpace( |
4942 | E->getType()->getPointeeType().getAddressSpace())); |
4943 | auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); |
4944 | llvm::Value *NewArg; |
4945 | if (Arg0->getType()->getPointerAddressSpace() != |
4946 | NewArgT->getPointerAddressSpace()) |
4947 | NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT); |
4948 | else |
4949 | NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT); |
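| // e.g. `to_global(p)` becomes a call to the runtime helper `__to_global`,
| // derived below by prefixing the callee's name with "__".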
4950 | auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); |
4951 | auto NewCall = |
4952 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); |
4953 | return RValue::get(Builder.CreateBitOrPointerCast(NewCall, |
4954 | ConvertType(E->getType()))); |
4955 | } |
4956 | |
4957 | // OpenCL v2.0, s6.13.17 - Enqueue kernel function. |
4958 | // It contains four different overload formats specified in Table 6.13.17.1. |
4959 | case Builtin::BIenqueue_kernel: { |
4960 | StringRef Name; // Generated function call name |
4961 | unsigned NumArgs = E->getNumArgs(); |
4962 | |
4963 | llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy); |
4964 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4965 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4966 | |
4967 | llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); |
4968 | llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); |
4969 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); |
4970 | llvm::Value *Range = NDRangeL.getAddress(*this).getPointer(); |
4971 | llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType(); |
4972 | |
4973 | if (NumArgs == 4) { |
4974 | // The most basic form of the call with parameters: |
4975 | // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void) |
4976 | Name = "__enqueue_kernel_basic"; |
4977 | llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy, |
4978 | GenericVoidPtrTy}; |
4979 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4980 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4981 | |
4982 | auto Info = |
4983 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
4984 | llvm::Value *Kernel = |
4985 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4986 | llvm::Value *Block = |
4987 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4988 | |
4989 | AttrBuilder B(Builder.getContext()); |
4990 | B.addByValAttr(NDRangeL.getAddress(*this).getElementType()); |
4991 | llvm::AttributeList ByValAttrSet = |
4992 | llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B); |
4993 | |
4994 | auto RTCall = |
4995 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet), |
4996 | {Queue, Flags, Range, Kernel, Block}); |
4997 | RTCall->setAttributes(ByValAttrSet); |
4998 | return RValue::get(RTCall); |
4999 | } |
5000 | assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
5001 | |
5002 | // Create a temporary array to hold the sizes of local pointer arguments |
5003 | // for the block. \p First is the position of the first size argument. |
5004 | auto CreateArrayForSizeVar = [=](unsigned First) |
5005 | -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> { |
5006 | llvm::APInt ArraySize(32, NumArgs - First); |
5007 | QualType SizeArrayTy = getContext().getConstantArrayType( |
5008 | getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal, |
5009 | /*IndexTypeQuals=*/0); |
5010 | auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes"); |
5011 | llvm::Value *TmpPtr = Tmp.getPointer(); |
5012 | llvm::Value *TmpSize = EmitLifetimeStart( |
5013 | CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr); |
5014 | llvm::Value *ElemPtr = nullptr;
5015 | // Each of the following arguments specifies the size of the corresponding |
5016 | // argument passed to the enqueued block. |
5017 | auto *Zero = llvm::ConstantInt::get(IntTy, 0); |
5018 | for (unsigned I = First; I < NumArgs; ++I) { |
5019 | auto *Index = llvm::ConstantInt::get(IntTy, I - First); |
5020 | auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr, |
5021 | {Zero, Index}); |
5022 | if (I == First) |
5023 | ElemPtr = GEP; |
5024 | auto *V = |
5025 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); |
5026 | Builder.CreateAlignedStore( |
5027 | V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy)); |
5028 | } |
5029 | return std::tie(ElemPtr, TmpSize, TmpPtr); |
5030 | }; |
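| // Sketch: for enqueue_kernel(q, flags, nd, block, 8, 16) the helper above
| // builds a size_t stack array {8, 16} and returns a pointer to its first
| // element, plus the values needed to emit the matching lifetime.end.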
5031 | |
5032 | // Could have events and/or varargs. |
5033 | if (E->getArg(3)->getType()->isBlockPointerType()) { |
5034 | // No events passed, but has variadic arguments. |
5035 | Name = "__enqueue_kernel_varargs"; |
5036 | auto Info = |
5037 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
5038 | llvm::Value *Kernel = |
5039 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
5040 | auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
5041 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
5042 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4); |
5043 | |
5044 | // Create a vector of the arguments, as well as a constant value to |
5045 | // express to the runtime the number of variadic arguments. |
5046 | llvm::Value *const Args[] = {Queue, Flags, |
5047 | Range, Kernel, |
5048 | Block, ConstantInt::get(IntTy, NumArgs - 4), |
5049 | ElemPtr}; |
5050 | llvm::Type *const ArgTys[] = { |
5051 | QueueTy, IntTy, RangeTy, GenericVoidPtrTy, |
5052 | GenericVoidPtrTy, IntTy, ElemPtr->getType()}; |
5053 | |
5054 | llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false); |
5055 | auto Call = RValue::get( |
5056 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args)); |
5057 | if (TmpSize) |
5058 | EmitLifetimeEnd(TmpSize, TmpPtr); |
5059 | return Call; |
5060 | } |
5061 | // All remaining overloads take event arguments.
5062 | if (NumArgs >= 7) { |
5063 | llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy); |
5064 | llvm::PointerType *EventPtrTy = EventTy->getPointerTo( |
5065 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
5066 | |
5067 | llvm::Value *NumEvents = |
5068 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty); |
5069 | |
5070 | // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
5071 | // arguments to be a null pointer constant (including a `0` literal), we
5072 | // detect that case and emit a null pointer directly.
5073 | llvm::Value *EventWaitList = nullptr; |
5074 | if (E->getArg(4)->isNullPointerConstant( |
5075 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
5076 | EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy); |
5077 | } else { |
5078 | EventWaitList = E->getArg(4)->getType()->isArrayType() |
5079 | ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() |
5080 | : EmitScalarExpr(E->getArg(4)); |
5081 | // Convert to generic address space. |
5082 | EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy); |
5083 | } |
5084 | llvm::Value *EventRet = nullptr; |
5085 | if (E->getArg(5)->isNullPointerConstant( |
5086 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
5087 | EventRet = llvm::ConstantPointerNull::get(EventPtrTy); |
5088 | } else { |
5089 | EventRet = |
5090 | Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy); |
5091 | } |
5092 | |
5093 | auto Info = |
5094 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6)); |
5095 | llvm::Value *Kernel = |
5096 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
5097 | llvm::Value *Block = |
5098 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
5099 | |
5100 | std::vector<llvm::Type *> ArgTys = { |
5101 | QueueTy, Int32Ty, RangeTy, Int32Ty, |
5102 | EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy}; |
5103 | |
5104 | std::vector<llvm::Value *> Args = {Queue, Flags, Range, |
5105 | NumEvents, EventWaitList, EventRet, |
5106 | Kernel, Block}; |
5107 | |
5108 | if (NumArgs == 7) { |
5109 | // Has events but no variadics. |
5110 | Name = "__enqueue_kernel_basic_events"; |
5111 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5112 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
5113 | return RValue::get( |
5114 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
5115 | llvm::ArrayRef<llvm::Value *>(Args))); |
5116 | } |
5117 | // Has event info and variadics |
5118 | // Pass the number of variadics to the runtime function too. |
5119 | Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); |
5120 | ArgTys.push_back(Int32Ty); |
5121 | Name = "__enqueue_kernel_events_varargs"; |
5122 | |
5123 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
5124 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7); |
5125 | Args.push_back(ElemPtr); |
5126 | ArgTys.push_back(ElemPtr->getType()); |
5127 | |
5128 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
5129 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
5130 | auto Call = |
5131 | RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
5132 | llvm::ArrayRef<llvm::Value *>(Args))); |
5133 | if (TmpSize) |
5134 | EmitLifetimeEnd(TmpSize, TmpPtr); |
5135 | return Call; |
5136 | } |
5137 | LLVM_FALLTHROUGH;
5138 | } |
5139 | // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block |
5140 | // parameter. |
5141 | case Builtin::BIget_kernel_work_group_size: { |
5142 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
5143 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
5144 | auto Info = |
5145 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
5146 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
5147 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
5148 | return RValue::get(EmitRuntimeCall( |
5149 | CGM.CreateRuntimeFunction( |
5150 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
5151 | false), |
5152 | "__get_kernel_work_group_size_impl"), |
5153 | {Kernel, Arg})); |
5154 | } |
5155 | case Builtin::BIget_kernel_preferred_work_group_size_multiple: { |
5156 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
5157 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
5158 | auto Info = |
5159 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
5160 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
5161 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
5162 | return RValue::get(EmitRuntimeCall( |
5163 | CGM.CreateRuntimeFunction( |
5164 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
5165 | false), |
5166 | "__get_kernel_preferred_work_group_size_multiple_impl"), |
5167 | {Kernel, Arg})); |
5168 | } |
5169 | case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: |
5170 | case Builtin::BIget_kernel_sub_group_count_for_ndrange: { |
5171 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
5172 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
5173 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); |
5174 | llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer(); |
5175 | auto Info = |
5176 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); |
5177 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
5178 | Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
5179 | const char *Name = |
5180 | BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange |
5181 | ? "__get_kernel_max_sub_group_size_for_ndrange_impl" |
5182 | : "__get_kernel_sub_group_count_for_ndrange_impl"; |
5183 | return RValue::get(EmitRuntimeCall( |
5184 | CGM.CreateRuntimeFunction( |
5185 | llvm::FunctionType::get( |
5186 | IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, |
5187 | false), |
5188 | Name), |
5189 | {NDRange, Kernel, Block})); |
5190 | } |
5191 | |
5192 | case Builtin::BI__builtin_store_half: |
5193 | case Builtin::BI__builtin_store_halff: { |
5194 | Value *Val = EmitScalarExpr(E->getArg(0)); |
5195 | Address Address = EmitPointerWithAlignment(E->getArg(1)); |
5196 | Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy()); |
5197 | return RValue::get(Builder.CreateStore(HalfVal, Address)); |
5198 | } |
5199 | case Builtin::BI__builtin_load_half: { |
5200 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
5201 | Value *HalfVal = Builder.CreateLoad(Address); |
5202 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy())); |
5203 | } |
5204 | case Builtin::BI__builtin_load_halff: { |
5205 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
5206 | Value *HalfVal = Builder.CreateLoad(Address); |
5207 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy())); |
5208 | } |
5209 | case Builtin::BIprintf: |
5210 | if (getTarget().getTriple().isNVPTX() || |
5211 | getTarget().getTriple().isAMDGCN()) { |
5212 | if (getLangOpts().OpenMPIsDevice) |
5213 | return EmitOpenMPDevicePrintfCallExpr(E); |
5214 | if (getTarget().getTriple().isNVPTX()) |
5215 | return EmitNVPTXDevicePrintfCallExpr(E); |
5216 | if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP) |
5217 | return EmitAMDGPUDevicePrintfCallExpr(E); |
5218 | } |
5219 | |
5220 | break; |
5221 | case Builtin::BI__builtin_canonicalize: |
5222 | case Builtin::BI__builtin_canonicalizef: |
5223 | case Builtin::BI__builtin_canonicalizef16: |
5224 | case Builtin::BI__builtin_canonicalizel: |
5225 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize)); |
5226 | |
5227 | case Builtin::BI__builtin_thread_pointer: { |
5228 | if (!getContext().getTargetInfo().isTLSSupported()) |
5229 | CGM.ErrorUnsupported(E, "__builtin_thread_pointer"); |
5230 | // Fall through - it's already mapped to the intrinsic by GCCBuiltin. |
5231 | break; |
5232 | } |
5233 | case Builtin::BI__builtin_os_log_format: |
5234 | return emitBuiltinOSLogFormat(*E); |
5235 | |
5236 | case Builtin::BI__xray_customevent: { |
5237 | if (!ShouldXRayInstrumentFunction()) |
5238 | return RValue::getIgnored(); |
5239 | |
5240 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
5241 | XRayInstrKind::Custom)) |
5242 | return RValue::getIgnored(); |
5243 | |
5244 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
5245 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) |
5246 | return RValue::getIgnored(); |
5247 | |
5248 | Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent); |
5249 | auto FTy = F->getFunctionType(); |
5250 | auto Arg0 = E->getArg(0); |
5251 | auto Arg0Val = EmitScalarExpr(Arg0); |
5252 | auto Arg0Ty = Arg0->getType(); |
5253 | auto PTy0 = FTy->getParamType(0); |
5254 | if (PTy0 != Arg0Val->getType()) { |
5255 | if (Arg0Ty->isArrayType()) |
5256 | Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer(); |
5257 | else |
5258 | Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0); |
5259 | } |
5260 | auto Arg1 = EmitScalarExpr(E->getArg(1)); |
5261 | auto PTy1 = FTy->getParamType(1); |
5262 | if (PTy1 != Arg1->getType()) |
5263 | Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1); |
5264 | return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1})); |
5265 | } |
5266 | |
5267 | case Builtin::BI__xray_typedevent: { |
5268 | // TODO: There should be a way to always emit events even if the current |
5269 | // function is not instrumented. Losing events in a stream can cripple |
5270 | // a trace. |
5271 | if (!ShouldXRayInstrumentFunction()) |
5272 | return RValue::getIgnored(); |
5273 | |
5274 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
5275 | XRayInstrKind::Typed)) |
5276 | return RValue::getIgnored(); |
5277 | |
5278 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
5279 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) |
5280 | return RValue::getIgnored(); |
5281 | |
5282 | Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent); |
5283 | auto FTy = F->getFunctionType(); |
5284 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
5285 | auto PTy0 = FTy->getParamType(0); |
5286 | if (PTy0 != Arg0->getType()) |
5287 | Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0); |
5288 | auto Arg1 = E->getArg(1); |
5289 | auto Arg1Val = EmitScalarExpr(Arg1); |
5290 | auto Arg1Ty = Arg1->getType(); |
5291 | auto PTy1 = FTy->getParamType(1); |
5292 | if (PTy1 != Arg1Val->getType()) { |
5293 | if (Arg1Ty->isArrayType()) |
5294 | Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer(); |
5295 | else |
5296 | Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1); |
5297 | } |
5298 | auto Arg2 = EmitScalarExpr(E->getArg(2)); |
5299 | auto PTy2 = FTy->getParamType(2); |
5300 | if (PTy2 != Arg2->getType()) |
5301 | Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2); |
5302 | return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2})); |
5303 | } |
5304 | |
5305 | case Builtin::BI__builtin_ms_va_start: |
5306 | case Builtin::BI__builtin_ms_va_end: |
5307 | return RValue::get( |
5308 | EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), |
5309 | BuiltinID == Builtin::BI__builtin_ms_va_start)); |
5310 | |
5311 | case Builtin::BI__builtin_ms_va_copy: { |
5312 | // Lower this manually. We can't reliably determine whether or not any |
5313 | // given va_copy() is for a Win64 va_list from the calling convention |
5314 | // alone, because it's legal to do this from a System V ABI function. |
5315 | // With opaque pointer types, we won't have enough information in LLVM |
5316 | // IR to determine this from the argument types, either. Best to do it |
5317 | // now, while we have enough information. |
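| // A Win64 va_list is a single pointer into the argument area, so the copy
| // is just a load of that pointer from the source followed by a store to
| // the destination.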
5318 | Address DestAddr = EmitMSVAListRef(E->getArg(0)); |
5319 | Address SrcAddr = EmitMSVAListRef(E->getArg(1)); |
5320 | |
5321 | llvm::Type *BPP = Int8PtrPtrTy; |
5322 | |
5323 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"), |
5324 | Int8PtrTy, DestAddr.getAlignment()); |
5325 | SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"), |
5326 | Int8PtrTy, SrcAddr.getAlignment()); |
5327 | |
5328 | Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val"); |
5329 | return RValue::get(Builder.CreateStore(ArgPtr, DestAddr)); |
5330 | } |
5331 | |
5332 | case Builtin::BI__builtin_get_device_side_mangled_name: { |
5333 | auto Name = CGM.getCUDARuntime().getDeviceSideName( |
5334 | cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl()); |
5335 | auto Str = CGM.GetAddrOfConstantCString(Name, ""); |
5336 | llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0), |
5337 | llvm::ConstantInt::get(SizeTy, 0)}; |
5338 | auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(), |
5339 | Str.getPointer(), Zeros); |
5340 | return RValue::get(Ptr); |
5341 | } |
5342 | } |
5343 | |
5344 | // If this is an alias for a lib function (e.g. __builtin_sin), emit |
5345 | // the call using the normal call path, but using the unmangled |
5346 | // version of the function name. |
5347 | if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) |
5348 | return emitLibraryCall(*this, FD, E, |
5349 | CGM.getBuiltinLibFunction(FD, BuiltinID)); |
5350 | |
5351 | // If this is a predefined lib function (e.g. malloc), emit the call |
5352 | // using exactly the normal call path. |
5353 | if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) |
5354 | return emitLibraryCall(*this, FD, E, |
5355 | cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); |
5356 | |
5357 | // Check that a call to a target specific builtin has the correct target |
5358 | // features. |
5359 | // This is down here to avoid non-target specific builtins, however, if |
5360 | // generic builtins start to require generic target features then we |
5361 | // can move this up to the beginning of the function. |
5362 | checkTargetFeatures(E, FD); |
5363 | |
5364 | if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) |
5365 | LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth); |
5366 | |
5367 | // See if we have a target specific intrinsic. |
5368 | const char *Name = getContext().BuiltinInfo.getName(BuiltinID); |
5369 | Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; |
5370 | StringRef Prefix = |
5371 | llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); |
5372 | if (!Prefix.empty()) { |
5373 | IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name); |
5374 | // NOTE: we don't need a compatibility flag check here since these
5375 | // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters
5376 | // the MS builtins via ALL_MS_LANGUAGES, so they are filtered out earlier.
5377 | if (IntrinsicID == Intrinsic::not_intrinsic) |
5378 | IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); |
5379 | } |
5380 | |
5381 | if (IntrinsicID != Intrinsic::not_intrinsic) { |
5382 | SmallVector<Value*, 16> Args; |
5383 | |
5384 | // Find out if any arguments are required to be integer constant |
5385 | // expressions. |
5386 | unsigned ICEArguments = 0; |
5387 | ASTContext::GetBuiltinTypeError Error; |
5388 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
5389 | assert(Error == ASTContext::GE_None && "Should not codegen an error");
5390 | |
5391 | Function *F = CGM.getIntrinsic(IntrinsicID); |
5392 | llvm::FunctionType *FTy = F->getFunctionType(); |
5393 | |
5394 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { |
5395 | Value *ArgValue; |
5396 | // If this is a normal argument, just emit it as a scalar. |
5397 | if ((ICEArguments & (1 << i)) == 0) { |
5398 | ArgValue = EmitScalarExpr(E->getArg(i)); |
5399 | } else { |
5400 | // If this is required to be a constant, constant fold it so that we |
5401 | // know that the generated intrinsic gets a ConstantInt. |
5402 | ArgValue = llvm::ConstantInt::get( |
5403 | getLLVMContext(), |
5404 | *E->getArg(i)->getIntegerConstantExpr(getContext())); |
5405 | } |
5406 | |
5407 | // If the intrinsic arg type is different from the builtin arg type |
5408 | // we need to do a bit cast. |
5409 | llvm::Type *PTy = FTy->getParamType(i); |
5410 | if (PTy != ArgValue->getType()) { |
5411 | // XXX - vector of pointers? |
5412 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) { |
5413 | if (PtrTy->getAddressSpace() != |
5414 | ArgValue->getType()->getPointerAddressSpace()) { |
5415 | ArgValue = Builder.CreateAddrSpaceCast( |
5416 | ArgValue, |
5417 | ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace())); |
5418 | } |
5419 | } |
5420 | |
5421 | assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
5422 | "Must be able to losslessly bit cast to param");
5423 | // Cast vector type (e.g., v256i32) to x86_amx; this only happens
5424 | // in AMX intrinsics.
5425 | if (PTy->isX86_AMXTy()) |
5426 | ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile, |
5427 | {ArgValue->getType()}, {ArgValue}); |
5428 | else |
5429 | ArgValue = Builder.CreateBitCast(ArgValue, PTy); |
5430 | } |
5431 | |
5432 | Args.push_back(ArgValue); |
5433 | } |
5434 | |
5435 | Value *V = Builder.CreateCall(F, Args); |
5436 | QualType BuiltinRetType = E->getType(); |
5437 | |
5438 | llvm::Type *RetTy = VoidTy; |
5439 | if (!BuiltinRetType->isVoidType()) |
5440 | RetTy = ConvertType(BuiltinRetType); |
5441 | |
5442 | if (RetTy != V->getType()) { |
5443 | // XXX - vector of pointers? |
5444 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) { |
5445 | if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { |
5446 | V = Builder.CreateAddrSpaceCast( |
5447 | V, V->getType()->getPointerTo(PtrTy->getAddressSpace())); |
5448 | } |
5449 | } |
5450 | |
5451 | assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5452 | "Must be able to losslessly bit cast result type");
5453 | // Cast x86_amx to vector type (e.g., v256i32); this only happens
5454 | // in AMX intrinsics.
5455 | if (V->getType()->isX86_AMXTy()) |
5456 | V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy}, |
5457 | {V}); |
5458 | else |
5459 | V = Builder.CreateBitCast(V, RetTy); |
5460 | } |
5461 | |
5462 | return RValue::get(V); |
5463 | } |
5464 | |
5465 | // Some target-specific builtins can have aggregate return values, e.g. |
5466 | // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force |
5467 | // ReturnValue to be non-null, so that the target-specific emission code can |
5468 | // always just emit into it. |
5469 | TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); |
5470 | if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { |
5471 | Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp"); |
5472 | ReturnValue = ReturnValueSlot(DestPtr, false); |
5473 | } |
5474 | |
5475 | // Now see if we can emit a target-specific builtin. |
5476 | if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { |
5477 | switch (EvalKind) { |
5478 | case TEK_Scalar: |
5479 | return RValue::get(V); |
5480 | case TEK_Aggregate: |
5481 | return RValue::getAggregate(ReturnValue.getValue(), |
5482 | ReturnValue.isVolatile()); |
5483 | case TEK_Complex: |
5484 | llvm_unreachable("No current target builtin returns complex")::llvm::llvm_unreachable_internal("No current target builtin returns complex" , "clang/lib/CodeGen/CGBuiltin.cpp", 5484); |
5485 | } |
5486 | llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr")::llvm::llvm_unreachable_internal("Bad evaluation kind in EmitBuiltinExpr" , "clang/lib/CodeGen/CGBuiltin.cpp", 5486); |
5487 | } |
5488 | |
5489 | ErrorUnsupported(E, "builtin function"); |
5490 | |
5491 | // Unknown builtin, for now just dump it out and return undef. |
5492 | return GetUndefRValue(E->getType()); |
5493 | } |
5494 | |
5495 | static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, |
5496 | unsigned BuiltinID, const CallExpr *E, |
5497 | ReturnValueSlot ReturnValue, |
5498 | llvm::Triple::ArchType Arch) { |
5499 | switch (Arch) { |
5500 | case llvm::Triple::arm: |
5501 | case llvm::Triple::armeb: |
5502 | case llvm::Triple::thumb: |
5503 | case llvm::Triple::thumbeb: |
5504 | return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch); |
5505 | case llvm::Triple::aarch64: |
5506 | case llvm::Triple::aarch64_32: |
5507 | case llvm::Triple::aarch64_be: |
5508 | return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); |
5509 | case llvm::Triple::bpfeb: |
5510 | case llvm::Triple::bpfel: |
5511 | return CGF->EmitBPFBuiltinExpr(BuiltinID, E); |
5512 | case llvm::Triple::x86: |
5513 | case llvm::Triple::x86_64: |
5514 | return CGF->EmitX86BuiltinExpr(BuiltinID, E); |
5515 | case llvm::Triple::ppc: |
5516 | case llvm::Triple::ppcle: |
5517 | case llvm::Triple::ppc64: |
5518 | case llvm::Triple::ppc64le: |
5519 | return CGF->EmitPPCBuiltinExpr(BuiltinID, E); |
5520 | case llvm::Triple::r600: |
5521 | case llvm::Triple::amdgcn: |
5522 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); |
5523 | case llvm::Triple::systemz: |
5524 | return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); |
5525 | case llvm::Triple::nvptx: |
5526 | case llvm::Triple::nvptx64: |
5527 | return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); |
5528 | case llvm::Triple::wasm32: |
5529 | case llvm::Triple::wasm64: |
5530 | return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); |
5531 | case llvm::Triple::hexagon: |
5532 | return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); |
5533 | case llvm::Triple::riscv32: |
5534 | case llvm::Triple::riscv64: |
5535 | return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue); |
5536 | default: |
5537 | return nullptr; |
5538 | } |
5539 | } |
5540 | |
5541 | Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, |
5542 | const CallExpr *E, |
5543 | ReturnValueSlot ReturnValue) { |
5544 | if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { |
5545 | assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5546 | return EmitTargetArchBuiltinExpr( |
5547 | this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, |
5548 | ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); |
5549 | } |
5550 | |
5551 | return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, |
5552 | getTarget().getTriple().getArch()); |
5553 | } |
5554 | |
5555 | static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF, |
5556 | NeonTypeFlags TypeFlags, |
5557 | bool HasLegalHalfType = true, |
5558 | bool V1Ty = false, |
5559 | bool AllowBFloatArgsAndRet = true) { |
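| // e.g. Int8 maps to <8 x i8> for a 64-bit D register and <16 x i8> for a
| // 128-bit Q register; V1Ty forces a single-element vector instead.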
5560 | int IsQuad = TypeFlags.isQuad(); |
5561 | switch (TypeFlags.getEltType()) { |
5562 | case NeonTypeFlags::Int8: |
5563 | case NeonTypeFlags::Poly8: |
5564 | return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); |
5565 | case NeonTypeFlags::Int16: |
5566 | case NeonTypeFlags::Poly16: |
5567 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5568 | case NeonTypeFlags::BFloat16: |
5569 | if (AllowBFloatArgsAndRet) |
5570 | return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad)); |
5571 | else |
5572 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5573 | case NeonTypeFlags::Float16: |
5574 | if (HasLegalHalfType) |
5575 | return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad)); |
5576 | else |
5577 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5578 | case NeonTypeFlags::Int32: |
5579 | return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); |
5580 | case NeonTypeFlags::Int64: |
5581 | case NeonTypeFlags::Poly64: |
5582 | return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); |
5583 | case NeonTypeFlags::Poly128: |
5584 | // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
5585 | // much of the i128/f128 API is missing, so we use v16i8 to represent
5586 | // poly128 and rely on pattern matching.
5587 | return llvm::FixedVectorType::get(CGF->Int8Ty, 16); |
5588 | case NeonTypeFlags::Float32: |
5589 | return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); |
5590 | case NeonTypeFlags::Float64: |
5591 | return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); |
5592 | } |
5593 | llvm_unreachable("Unknown vector element type!")::llvm::llvm_unreachable_internal("Unknown vector element type!" , "clang/lib/CodeGen/CGBuiltin.cpp", 5593); |
5594 | } |
5595 | |
5596 | static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF, |
5597 | NeonTypeFlags IntTypeFlags) { |
5598 | int IsQuad = IntTypeFlags.isQuad(); |
5599 | switch (IntTypeFlags.getEltType()) { |
5600 | case NeonTypeFlags::Int16: |
5601 | return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad)); |
5602 | case NeonTypeFlags::Int32: |
5603 | return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad)); |
5604 | case NeonTypeFlags::Int64: |
5605 | return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad)); |
5606 | default: |
5607 | llvm_unreachable("Type can't be converted to floating-point!")::llvm::llvm_unreachable_internal("Type can't be converted to floating-point!" , "clang/lib/CodeGen/CGBuiltin.cpp", 5607); |
5608 | } |
5609 | } |
5610 | |
5611 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, |
5612 | const ElementCount &Count) { |
5613 | Value *SV = llvm::ConstantVector::getSplat(Count, C); |
5614 | return Builder.CreateShuffleVector(V, V, SV, "lane"); |
5615 | } |
5616 | |
5617 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { |
5618 | ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount(); |
5619 | return EmitNeonSplat(V, C, EC); |
5620 | } |
5621 | |
5622 | Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, |
5623 | const char *name, |
5624 | unsigned shift, bool rightshift) { |
5625 | unsigned j = 0; |
5626 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
5627 | ai != ae; ++ai, ++j) { |
5628 | if (F->isConstrainedFPIntrinsic()) |
5629 | if (ai->getType()->isMetadataTy()) |
5630 | continue; |
5631 | if (shift > 0 && shift == j) |
5632 | Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); |
5633 | else |
5634 | Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); |
5635 | } |
5636 | |
5637 | if (F->isConstrainedFPIntrinsic()) |
5638 | return Builder.CreateConstrainedFPCall(F, Ops, name); |
5639 | else |
5640 | return Builder.CreateCall(F, Ops, name); |
5641 | } |
5642 | |
5643 | Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, |
5644 | bool neg) { |
5645 | int SV = cast<ConstantInt>(V)->getSExtValue(); |
5646 | return ConstantInt::get(Ty, neg ? -SV : SV); |
5647 | } |
5648 | |
5649 | // Right-shift a vector by a constant. |
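| // e.g. for <4 x i32> lanes: an unsigned shift by 32 folds to zero, while
| // a signed shift by 32 is emitted as an arithmetic shift by 31.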
5650 | Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift, |
5651 | llvm::Type *Ty, bool usgn, |
5652 | const char *name) { |
5653 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); |
5654 | |
5655 | int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); |
5656 | int EltSize = VTy->getScalarSizeInBits(); |
5657 | |
5658 | Vec = Builder.CreateBitCast(Vec, Ty); |
5659 | |
5660 | // lshr/ashr are undefined when the shift amount is equal to the vector |
5661 | // element size. |
5662 | if (ShiftAmt == EltSize) { |
5663 | if (usgn) { |
5664 | // Right-shifting an unsigned value by its size yields 0. |
5665 | return llvm::ConstantAggregateZero::get(VTy); |
5666 | } else { |
5667 | // Right-shifting a signed value by its size is equivalent |
5668 | // to a shift of size-1. |
5669 | --ShiftAmt; |
5670 | Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); |
5671 | } |
5672 | } |
5673 | |
5674 | Shift = EmitNeonShiftVector(Shift, Ty, false); |
5675 | if (usgn) |
5676 | return Builder.CreateLShr(Vec, Shift, name); |
5677 | else |
5678 | return Builder.CreateAShr(Vec, Shift, name); |
5679 | } |
5680 | |
5681 | enum { |
5682 | AddRetType = (1 << 0), |
5683 | Add1ArgType = (1 << 1), |
5684 | Add2ArgTypes = (1 << 2), |
5685 | |
5686 | VectorizeRetType = (1 << 3), |
5687 | VectorizeArgTypes = (1 << 4), |
5688 | |
5689 | InventFloatType = (1 << 5), |
5690 | UnsignedAlts = (1 << 6), |
5691 | |
5692 | Use64BitVectors = (1 << 7), |
5693 | Use128BitVectors = (1 << 8), |
5694 | |
5695 | Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, |
5696 | VectorRet = AddRetType | VectorizeRetType, |
5697 | VectorRetGetArgs01 = |
5698 | AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, |
5699 | FpCmpzModifiers = |
5700 | AddRetType | VectorizeRetType | Add1ArgType | InventFloatType |
5701 | }; |
5702 | |
5703 | namespace { |
5704 | struct ARMVectorIntrinsicInfo { |
5705 | const char *NameHint; |
5706 | unsigned BuiltinID; |
5707 | unsigned LLVMIntrinsic; |
5708 | unsigned AltLLVMIntrinsic; |
5709 | uint64_t TypeModifier; |
5710 | |
5711 | bool operator<(unsigned RHSBuiltinID) const { |
5712 | return BuiltinID < RHSBuiltinID; |
5713 | } |
5714 | bool operator<(const ARMVectorIntrinsicInfo &TE) const { |
5715 | return BuiltinID < TE.BuiltinID; |
5716 | } |
5717 | }; |
5718 | } // end anonymous namespace |
5719 | |
5720 | #define NEONMAP0(NameBase) \ |
5721 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } |
5722 | |
5723 | #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
5724 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ |
5725 | Intrinsic::LLVMIntrinsic, 0, TypeModifier } |
5726 | |
5727 | #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ |
5728 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ |
5729 | Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ |
5730 | TypeModifier } |
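| // Each NEONMAP* entry pairs one NEON builtin with up to two LLVM
| // intrinsics (e.g. unsigned/signed alternatives) plus type-modifier
| // flags; the tables are kept sorted by BuiltinID so the operator<
| // overloads above can drive a binary search (std::lower_bound).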
5731 | |
5732 | static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = { |
5733 | NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0), |
5734 | NEONMAP0(splat_lane_v), |
5735 | NEONMAP0(splat_laneq_v), |
5736 | NEONMAP0(splatq_lane_v), |
5737 | NEONMAP0(splatq_laneq_v), |
5738 | NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
5739 | NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
5740 | NEONMAP1(vabs_v, arm_neon_vabs, 0), |
5741 | NEONMAP1(vabsq_v, arm_neon_vabs, 0), |
5742 | NEONMAP0(vadd_v), |
5743 | NEONMAP0(vaddhn_v), |
5744 | NEONMAP0(vaddq_v), |
5745 | NEONMAP1(vaesdq_v, arm_neon_aesd, 0), |
5746 | NEONMAP1(vaeseq_v, arm_neon_aese, 0), |
5747 | NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), |
5748 | NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), |
5749 | NEONMAP1(vbfdot_v, arm_neon_bfdot, 0), |
5750 | NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0), |
5751 | NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0), |
5752 | NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0), |
5753 | NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0), |
5754 | NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), |
5755 | NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), |
5756 | NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), |
5757 | NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), |
5758 | NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), |
5759 | NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), |
5760 | NEONMAP1(vcage_v, arm_neon_vacge, 0), |
5761 | NEONMAP1(vcageq_v, arm_neon_vacge, 0), |
5762 | NEONMAP1(vcagt_v, arm_neon_vacgt, 0), |
5763 | NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), |
5764 | NEONMAP1(vcale_v, arm_neon_vacge, 0), |
5765 | NEONMAP1(vcaleq_v, arm_neon_vacge, 0), |
5766 | NEONMAP1(vcalt_v, arm_neon_vacgt, 0), |
5767 | NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), |
5768 | NEONMAP0(vceqz_v), |
5769 | NEONMAP0(vceqzq_v), |
5770 | NEONMAP0(vcgez_v), |
5771 | NEONMAP0(vcgezq_v), |
5772 | NEONMAP0(vcgtz_v), |
5773 | NEONMAP0(vcgtzq_v), |
5774 | NEONMAP0(vclez_v), |
5775 | NEONMAP0(vclezq_v), |
5776 | NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), |
5777 | NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), |
5778 | NEONMAP0(vcltz_v), |
5779 | NEONMAP0(vcltzq_v), |
5780 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
5781 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
5782 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
5783 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
5784 | NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), |
5785 | NEONMAP0(vcvt_f16_v), |
5786 | NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), |
5787 | NEONMAP0(vcvt_f32_v), |
5788 | NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5789 | NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5790 | NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
5791 | NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
5792 | NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
5793 | NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
5794 | NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
5795 | NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
5796 | NEONMAP0(vcvt_s16_v), |
5797 | NEONMAP0(vcvt_s32_v), |
5798 | NEONMAP0(vcvt_s64_v), |
5799 | NEONMAP0(vcvt_u16_v), |
5800 | NEONMAP0(vcvt_u32_v), |
5801 | NEONMAP0(vcvt_u64_v), |
5802 | NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), |
5803 | NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), |
5804 | NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), |
5805 | NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), |
5806 | NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), |
5807 | NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), |
5808 | NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), |
5809 | NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), |
5810 | NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), |
5811 | NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), |
5812 | NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), |
5813 | NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), |
5814 | NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0), |
5815 | NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), |
5816 | NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), |
5817 | NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), |
5818 | NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), |
5819 | NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), |
5820 | NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), |
5821 | NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), |
5822 | NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), |
5823 | NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), |
5824 | NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), |
5825 | NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), |
5826 | NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), |
5827 | NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), |
5828 | NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), |
5829 | NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), |
5830 | NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), |
5831 | NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), |
5832 | NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), |
5833 | NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), |
5834 | NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), |
5835 | NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), |
5836 | NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), |
5837 | NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), |
5838 | NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), |
5839 | NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), |
5840 | NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), |
5841 | NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), |
5842 | NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), |
5843 | NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), |
5844 | NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), |
5845 | NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), |
5846 | NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), |
5847 | NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), |
5848 | NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), |
5849 | NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), |
5850 | NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), |
5851 | NEONMAP0(vcvtq_f16_v), |
5852 | NEONMAP0(vcvtq_f32_v), |
5853 | NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5854 | NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5855 | NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
5856 | NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
5857 | NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
5858 | NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
5859 | NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
5860 | NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
5861 | NEONMAP0(vcvtq_s16_v), |
5862 | NEONMAP0(vcvtq_s32_v), |
5863 | NEONMAP0(vcvtq_s64_v), |
5864 | NEONMAP0(vcvtq_u16_v), |
5865 | NEONMAP0(vcvtq_u32_v), |
5866 | NEONMAP0(vcvtq_u64_v), |
5867 | NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), |
5868 | NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), |
5869 | NEONMAP0(vext_v), |
5870 | NEONMAP0(vextq_v), |
5871 | NEONMAP0(vfma_v), |
5872 | NEONMAP0(vfmaq_v), |
5873 | NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
5874 | NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
5875 | NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
5876 | NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
5877 | NEONMAP0(vld1_dup_v), |
5878 | NEONMAP1(vld1_v, arm_neon_vld1, 0), |
5879 | NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), |
5880 | NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), |
5881 | NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), |
5882 | NEONMAP0(vld1q_dup_v), |
5883 | NEONMAP1(vld1q_v, arm_neon_vld1, 0), |
5884 | NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), |
5885 | NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), |
5886 | NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), |
5887 | NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), |
5888 | NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), |
5889 | NEONMAP1(vld2_v, arm_neon_vld2, 0), |
5890 | NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), |
5891 | NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), |
5892 | NEONMAP1(vld2q_v, arm_neon_vld2, 0), |
5893 | NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), |
5894 | NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), |
5895 | NEONMAP1(vld3_v, arm_neon_vld3, 0), |
5896 | NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), |
5897 | NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), |
5898 | NEONMAP1(vld3q_v, arm_neon_vld3, 0), |
5899 | NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), |
5900 | NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), |
5901 | NEONMAP1(vld4_v, arm_neon_vld4, 0), |
5902 | NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), |
5903 | NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), |
5904 | NEONMAP1(vld4q_v, arm_neon_vld4, 0), |
5905 | NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
5906 | NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), |
5907 | NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), |
5908 | NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
5909 | NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
5910 | NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), |
5911 | NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), |
5912 | NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
5913 | NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0), |
5914 | NEONMAP0(vmovl_v), |
5915 | NEONMAP0(vmovn_v), |
5916 | NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), |
5917 | NEONMAP0(vmull_v), |
5918 | NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), |
5919 | NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
5920 | NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
5921 | NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), |
5922 | NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
5923 | NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
5924 | NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), |
5925 | NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), |
5926 | NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), |
5927 | NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), |
5928 | NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), |
5929 | NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
5930 | NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
5931 | NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0), |
5932 | NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0), |
5933 | NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), |
5934 | NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), |
5935 | NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), |
5936 | NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), |
5937 | NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), |
5938 | NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), |
5939 | NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), |
5940 | NEONMAP1(vqrdmlah_v, arm_neon_vqrdmlah, Add1ArgType), |
5941 | NEONMAP1(vqrdmlahq_v, arm_neon_vqrdmlah, Add1ArgType), |
5942 | NEONMAP1(vqrdmlsh_v, arm_neon_vqrdmlsh, Add1ArgType), |
5943 | NEONMAP1(vqrdmlshq_v, arm_neon_vqrdmlsh, Add1ArgType), |
5944 | NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), |
5945 | NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), |
5946 | NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
5947 | NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
5948 | NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
5949 | NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
5950 | NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
5951 | NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
5952 | NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), |
5953 | NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), |
5954 | NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
5955 | NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
5956 | NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), |
5957 | NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
5958 | NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
5959 | NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), |
5960 | NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), |
5961 | NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
5962 | NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
5963 | NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), |
5964 | NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), |
5965 | NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), |
5966 | NEONMAP0(vrndi_v), |
5967 | NEONMAP0(vrndiq_v), |
5968 | NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), |
5969 | NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), |
5970 | NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), |
5971 | NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), |
5972 | NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), |
5973 | NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), |
5974 | NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), |
5975 | NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), |
5976 | NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), |
5977 | NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
5978 | NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
5979 | NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
5980 | NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
5981 | NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
5982 | NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
5983 | NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), |
5984 | NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), |
5985 | NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), |
5986 | NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), |
5987 | NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), |
5988 | NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), |
5989 | NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), |
5990 | NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), |
5991 | NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), |
5992 | NEONMAP0(vshl_n_v), |
5993 | NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
5994 | NEONMAP0(vshll_n_v), |
5995 | NEONMAP0(vshlq_n_v), |
5996 | NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
5997 | NEONMAP0(vshr_n_v), |
5998 | NEONMAP0(vshrn_n_v), |
5999 | NEONMAP0(vshrq_n_v), |
6000 | NEONMAP1(vst1_v, arm_neon_vst1, 0), |
6001 | NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), |
6002 | NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), |
6003 | NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), |
6004 | NEONMAP1(vst1q_v, arm_neon_vst1, 0), |
6005 | NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), |
6006 | NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), |
6007 | NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), |
6008 | NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), |
6009 | NEONMAP1(vst2_v, arm_neon_vst2, 0), |
6010 | NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), |
6011 | NEONMAP1(vst2q_v, arm_neon_vst2, 0), |
6012 | NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), |
6013 | NEONMAP1(vst3_v, arm_neon_vst3, 0), |
6014 | NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), |
6015 | NEONMAP1(vst3q_v, arm_neon_vst3, 0), |
6016 | NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), |
6017 | NEONMAP1(vst4_v, arm_neon_vst4, 0), |
6018 | NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), |
6019 | NEONMAP1(vst4q_v, arm_neon_vst4, 0), |
6020 | NEONMAP0(vsubhn_v), |
6021 | NEONMAP0(vtrn_v), |
6022 | NEONMAP0(vtrnq_v), |
6023 | NEONMAP0(vtst_v), |
6024 | NEONMAP0(vtstq_v), |
6025 | NEONMAP1(vusdot_v, arm_neon_usdot, 0), |
6026 | NEONMAP1(vusdotq_v, arm_neon_usdot, 0), |
6027 | NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0), |
6028 | NEONMAP0(vuzp_v), |
6029 | NEONMAP0(vuzpq_v), |
6030 | NEONMAP0(vzip_v), |
6031 | NEONMAP0(vzipq_v) |
6032 | }; |
6033 | |
6034 | static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { |
6035 | NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0), |
6036 | NEONMAP0(splat_lane_v), |
6037 | NEONMAP0(splat_laneq_v), |
6038 | NEONMAP0(splatq_lane_v), |
6039 | NEONMAP0(splatq_laneq_v), |
6040 | NEONMAP1(vabs_v, aarch64_neon_abs, 0), |
6041 | NEONMAP1(vabsq_v, aarch64_neon_abs, 0), |
6042 | NEONMAP0(vadd_v), |
6043 | NEONMAP0(vaddhn_v), |
6044 | NEONMAP0(vaddq_p128), |
6045 | NEONMAP0(vaddq_v), |
6046 | NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), |
6047 | NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), |
6048 | NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), |
6049 | NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), |
6050 | NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
6051 | NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0), |
6052 | NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0), |
6053 | NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0), |
6054 | NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0), |
6055 | NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0), |
6056 | NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), |
6057 | NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), |
6058 | NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), |
6059 | NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), |
6060 | NEONMAP1(vcage_v, aarch64_neon_facge, 0), |
6061 | NEONMAP1(vcageq_v, aarch64_neon_facge, 0), |
6062 | NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), |
6063 | NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), |
6064 | NEONMAP1(vcale_v, aarch64_neon_facge, 0), |
6065 | NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), |
6066 | NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), |
6067 | NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), |
6068 | NEONMAP0(vceqz_v), |
6069 | NEONMAP0(vceqzq_v), |
6070 | NEONMAP0(vcgez_v), |
6071 | NEONMAP0(vcgezq_v), |
6072 | NEONMAP0(vcgtz_v), |
6073 | NEONMAP0(vcgtzq_v), |
6074 | NEONMAP0(vclez_v), |
6075 | NEONMAP0(vclezq_v), |
6076 | NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), |
6077 | NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), |
6078 | NEONMAP0(vcltz_v), |
6079 | NEONMAP0(vcltzq_v), |
6080 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
6081 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
6082 | NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), |
6083 | NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), |
6084 | NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), |
6085 | NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType), |
6086 | NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), |
6087 | NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), |
6088 | NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), |
6089 | NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType), |
6090 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
6091 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
6092 | NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), |
6093 | NEONMAP0(vcvt_f16_v), |
6094 | NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), |
6095 | NEONMAP0(vcvt_f32_v), |
6096 | NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6097 | NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6098 | NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6099 | NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
6100 | NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
6101 | NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
6102 | NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
6103 | NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
6104 | NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
6105 | NEONMAP0(vcvtq_f16_v), |
6106 | NEONMAP0(vcvtq_f32_v), |
6107 | NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0), |
6108 | NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6109 | NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6110 | NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
6111 | NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
6112 | NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
6113 | NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
6114 | NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
6115 | NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
6116 | NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
6117 | NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), |
6118 | NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
6119 | NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
6120 | NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
6121 | NEONMAP0(vext_v), |
6122 | NEONMAP0(vextq_v), |
6123 | NEONMAP0(vfma_v), |
6124 | NEONMAP0(vfmaq_v), |
6125 | NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0), |
6126 | NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0), |
6127 | NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0), |
6128 | NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0), |
6129 | NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0), |
6130 | NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0), |
6131 | NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0), |
6132 | NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0), |
6133 | NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
6134 | NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
6135 | NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
6136 | NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
6137 | NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), |
6138 | NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), |
6139 | NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), |
6140 | NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), |
6141 | NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), |
6142 | NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), |
6143 | NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0), |
6144 | NEONMAP0(vmovl_v), |
6145 | NEONMAP0(vmovn_v), |
6146 | NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), |
6147 | NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), |
6148 | NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), |
6149 | NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
6150 | NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
6151 | NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), |
6152 | NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), |
6153 | NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), |
6154 | NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
6155 | NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
6156 | NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), |
6157 | NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), |
6158 | NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), |
6159 | NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
6160 | NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), |
6161 | NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), |
6162 | NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
6163 | NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), |
6164 | NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), |
6165 | NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), |
6166 | NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), |
6167 | NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), |
6168 | NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), |
6169 | NEONMAP1(vqrdmlah_v, aarch64_neon_sqrdmlah, Add1ArgType), |
6170 | NEONMAP1(vqrdmlahq_v, aarch64_neon_sqrdmlah, Add1ArgType), |
6171 | NEONMAP1(vqrdmlsh_v, aarch64_neon_sqrdmlsh, Add1ArgType), |
6172 | NEONMAP1(vqrdmlshq_v, aarch64_neon_sqrdmlsh, Add1ArgType), |
6173 | NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
6174 | NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
6175 | NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), |
6176 | NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
6177 | NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
6178 | NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), |
6179 | NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
6180 | NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
6181 | NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
6182 | NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
6183 | NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6184 | NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
6185 | NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), |
6186 | NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), |
6187 | NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
6188 | NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
6189 | NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), |
6190 | NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0), |
6191 | NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
6192 | NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
6193 | NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), |
6194 | NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), |
6195 | NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
6196 | NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
6197 | NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType), |
6198 | NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType), |
6199 | NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType), |
6200 | NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType), |
6201 | NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType), |
6202 | NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType), |
6203 | NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType), |
6204 | NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType), |
6205 | NEONMAP0(vrndi_v), |
6206 | NEONMAP0(vrndiq_v), |
6207 | NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
6208 | NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
6209 | NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
6210 | NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
6211 | NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
6212 | NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
6213 | NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), |
6214 | NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), |
6215 | NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), |
6216 | NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), |
6217 | NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), |
6218 | NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), |
6219 | NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), |
6220 | NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), |
6221 | NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), |
6222 | NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0), |
6223 | NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0), |
6224 | NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0), |
6225 | NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0), |
6226 | NEONMAP0(vshl_n_v), |
6227 | NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
6228 | NEONMAP0(vshll_n_v), |
6229 | NEONMAP0(vshlq_n_v), |
6230 | NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
6231 | NEONMAP0(vshr_n_v), |
6232 | NEONMAP0(vshrn_n_v), |
6233 | NEONMAP0(vshrq_n_v), |
6234 | NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0), |
6235 | NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0), |
6236 | NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0), |
6237 | NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0), |
6238 | NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0), |
6239 | NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0), |
6240 | NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0), |
6241 | NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0), |
6242 | NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0), |
6243 | NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), |
6244 | NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), |
6245 | NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), |
6246 | NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), |
6247 | NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), |
6248 | NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), |
6249 | NEONMAP0(vsubhn_v), |
6250 | NEONMAP0(vtst_v), |
6251 | NEONMAP0(vtstq_v), |
6252 | NEONMAP1(vusdot_v, aarch64_neon_usdot, 0), |
6253 | NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0), |
6254 | NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0), |
6255 | NEONMAP1(vxarq_v, aarch64_crypto_xar, 0), |
6256 | }; |
6257 | |
6258 | static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { |
6259 | NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), |
6260 | NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), |
6261 | NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), |
6262 | NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
6263 | NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
6264 | NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
6265 | NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
6266 | NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6267 | NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6268 | NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6269 | NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6270 | NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6271 | NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6272 | NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6273 | NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6274 | NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6275 | NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
6276 | NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
6277 | NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6278 | NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6279 | NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
6280 | NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
6281 | NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6282 | NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6283 | NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6284 | NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6285 | NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6286 | NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6287 | NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6288 | NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6289 | NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6290 | NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6291 | NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6292 | NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6293 | NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0), |
6294 | NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6295 | NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6296 | NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6297 | NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6298 | NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6299 | NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6300 | NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6301 | NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6302 | NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6303 | NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6304 | NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6305 | NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6306 | NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6307 | NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6308 | NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6309 | NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6310 | NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6311 | NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6312 | NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), |
6313 | NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6314 | NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6315 | NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6316 | NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6317 | NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
6318 | NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
6319 | NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6320 | NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6321 | NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
6322 | NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
6323 | NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6324 | NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6325 | NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6326 | NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6327 | NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
6328 | NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
6329 | NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6330 | NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6331 | NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
6332 | NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
6333 | NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), |
6334 | NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), |
6335 | NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), |
6336 | NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6337 | NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6338 | NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6339 | NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6340 | NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6341 | NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6342 | NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6343 | NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6344 | NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6345 | NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6346 | NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
6347 | NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), |
6348 | NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
6349 | NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), |
6350 | NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
6351 | NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
6352 | NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), |
6353 | NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), |
6354 | NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
6355 | NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
6356 | NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), |
6357 | NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), |
6358 | NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), |
6359 | NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), |
6360 | NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), |
6361 | NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), |
6362 | NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), |
6363 | NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), |
6364 | NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
6365 | NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
6366 | NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
6367 | NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
6368 | NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), |
6369 | NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
6370 | NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
6371 | NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
6372 | NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), |
6373 | NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
6374 | NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), |
6375 | NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors), |
6376 | NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType), |
6377 | NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors), |
6378 | NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType), |
6379 | NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), |
6380 | NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), |
6381 | NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
6382 | NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
6383 | NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), |
6384 | NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), |
6385 | NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
6386 | NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
6387 | NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), |
6388 | NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), |
6389 | NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), |
6390 | NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), |
6391 | NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
6392 | NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
6393 | NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
6394 | NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
6395 | NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), |
6396 | NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
6397 | NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
6398 | NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6399 | NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6400 | NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6401 | NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6402 | NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), |
6403 | NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), |
6404 | NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6405 | NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6406 | NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6407 | NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6408 | NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), |
6409 | NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), |
6410 | NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), |
6411 | NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), |
6412 | NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
6413 | NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
6414 | NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), |
6415 | NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), |
6416 | NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), |
6417 | NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
6418 | NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
6419 | NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
6420 | NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
6421 | NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), |
6422 | NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
6423 | NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
6424 | NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
6425 | NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
6426 | NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), |
6427 | NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), |
6428 | NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
6429 | NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
6430 | NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), |
6431 | NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), |
6432 | NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), |
6433 | NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), |
6434 | NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), |
6435 | NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), |
6436 | NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), |
6437 | NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), |
6438 | NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), |
6439 | NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), |
6440 | NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), |
6441 | NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), |
6442 | NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), |
6443 | NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), |
6444 | NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), |
6445 | NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), |
6446 | NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), |
6447 | NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), |
6448 | NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), |
6449 | NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), |
6450 | NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
6451 | NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), |
6452 | NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
6453 | NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), |
6454 | NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), |
6455 | NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), |
6456 | NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
6457 | NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), |
6458 | NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
6459 | NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), |
6460 | // FP16 scalar intrinsics go here.
6461 | NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), |
6462 | NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6463 | NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6464 | NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6465 | NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6466 | NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6467 | NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6468 | NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6469 | NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6470 | NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6471 | NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6472 | NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6473 | NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6474 | NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6475 | NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6476 | NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6477 | NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6478 | NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6479 | NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6480 | NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6481 | NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6482 | NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6483 | NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6484 | NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6485 | NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6486 | NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6487 | NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6488 | NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6489 | NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6490 | NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), |
6491 | NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), |
6492 | NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), |
6493 | NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), |
6494 | NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), |
6495 | }; |
6496 | |
6497 | #undef NEONMAP0 |
6498 | #undef NEONMAP1 |
6499 | #undef NEONMAP2 |
6500 | |
6501 | #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
6502 | { \ |
6503 | #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ |
6504 | TypeModifier \ |
6505 | } |
6506 | |
6507 | #define SVEMAP2(NameBase, TypeModifier) \ |
6508 | { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } |
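// Illustrative expansion (hypothetical entry, for exposition only):
//   SVEMAP1(svfoo, aarch64_sve_foo, SomeTypeModifier)
// would become
//   { "svfoo", SVE::BI__builtin_sve_svfoo, Intrinsic::aarch64_sve_foo, 0,
//     SomeTypeModifier }
// while SVEMAP2 entries carry no LLVM intrinsic (both intrinsic fields are 0)
// and are lowered by dedicated code rather than by the generic table path.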
6509 | static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { |
6510 | #define GET_SVE_LLVM_INTRINSIC_MAP |
6511 | #include "clang/Basic/arm_sve_builtin_cg.inc" |
6512 | #include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def" |
6513 | #undef GET_SVE_LLVM_INTRINSIC_MAP |
6514 | }; |
6515 | |
6516 | #undef SVEMAP1 |
6517 | #undef SVEMAP2 |
6518 | |
6519 | static bool NEONSIMDIntrinsicsProvenSorted = false; |
6520 | |
6521 | static bool AArch64SIMDIntrinsicsProvenSorted = false; |
6522 | static bool AArch64SISDIntrinsicsProvenSorted = false; |
6523 | static bool AArch64SVEIntrinsicsProvenSorted = false; |
6524 | |
6525 | static const ARMVectorIntrinsicInfo * |
6526 | findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap, |
6527 | unsigned BuiltinID, bool &MapProvenSorted) { |
6528 | |
6529 | #ifndef NDEBUG |
6530 | if (!MapProvenSorted) { |
6531 | assert(llvm::is_sorted(IntrinsicMap));
6532 | MapProvenSorted = true; |
6533 | } |
6534 | #endif |
6535 | |
6536 | const ARMVectorIntrinsicInfo *Builtin = |
6537 | llvm::lower_bound(IntrinsicMap, BuiltinID); |
6538 | |
6539 | if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) |
6540 | return Builtin; |
6541 | |
6542 | return nullptr; |
6543 | } |
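// Minimal usage sketch (call shape only; the actual call sites live in the
// per-target builtin emitters later in this file):
//   const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
//       AArch64SIMDIntrinsicMap, BuiltinID, AArch64SIMDIntrinsicsProvenSorted);
//   if (!Builtin)
//     ... // not a table-driven builtin; fall back to bespoke handling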
6544 | |
6545 | Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, |
6546 | unsigned Modifier, |
6547 | llvm::Type *ArgType, |
6548 | const CallExpr *E) { |
6549 | int VectorSize = 0; |
6550 | if (Modifier & Use64BitVectors) |
6551 | VectorSize = 64; |
6552 | else if (Modifier & Use128BitVectors) |
6553 | VectorSize = 128; |
6554 | |
6555 | // Return type. |
6556 | SmallVector<llvm::Type *, 3> Tys; |
6557 | if (Modifier & AddRetType) { |
6558 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
6559 | if (Modifier & VectorizeRetType) |
6560 | Ty = llvm::FixedVectorType::get( |
6561 | Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); |
6562 | |
6563 | Tys.push_back(Ty); |
6564 | } |
6565 | |
6566 | // Arguments. |
6567 | if (Modifier & VectorizeArgTypes) { |
6568 | int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; |
6569 | ArgType = llvm::FixedVectorType::get(ArgType, Elts); |
6570 | } |
6571 | |
6572 | if (Modifier & (Add1ArgType | Add2ArgTypes)) |
6573 | Tys.push_back(ArgType); |
6574 | |
6575 | if (Modifier & Add2ArgTypes) |
6576 | Tys.push_back(ArgType); |
6577 | |
6578 | if (Modifier & InventFloatType) |
6579 | Tys.push_back(FloatTy); |
6580 | |
6581 | return CGM.getIntrinsic(IntrinsicID, Tys); |
6582 | } |
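// Worked example (a sketch, assuming Vectorize1ArgType means
// Add1ArgType | VectorizeArgTypes): vqaddb_s8 is mapped with
// Vectorize1ArgType | Use64BitVectors, so VectorSize is 64 and the scalar i8
// argument type is widened to <8 x i8> (64 / 8 bits) and pushed, giving
// Tys == { <8 x i8> }. The lookup then yields the overload
// @llvm.aarch64.neon.sqadd.v8i8: the byte-wide saturating add is performed in
// a 64-bit vector and the caller extracts the scalar lane from the result.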
6583 | |
6584 | static Value *EmitCommonNeonSISDBuiltinExpr( |
6585 | CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo, |
6586 | SmallVectorImpl<Value *> &Ops, const CallExpr *E) { |
6587 | unsigned BuiltinID = SISDInfo.BuiltinID; |
6588 | unsigned Int = SISDInfo.LLVMIntrinsic;
6589 | unsigned Modifier = SISDInfo.TypeModifier; |
6590 | const char *s = SISDInfo.NameHint; |
6591 | |
6592 | switch (BuiltinID) { |
6593 | case NEON::BI__builtin_neon_vcled_s64: |
6594 | case NEON::BI__builtin_neon_vcled_u64: |
6595 | case NEON::BI__builtin_neon_vcles_f32: |
6596 | case NEON::BI__builtin_neon_vcled_f64: |
6597 | case NEON::BI__builtin_neon_vcltd_s64: |
6598 | case NEON::BI__builtin_neon_vcltd_u64: |
6599 | case NEON::BI__builtin_neon_vclts_f32: |
6600 | case NEON::BI__builtin_neon_vcltd_f64: |
6601 | case NEON::BI__builtin_neon_vcales_f32: |
6602 | case NEON::BI__builtin_neon_vcaled_f64: |
6603 | case NEON::BI__builtin_neon_vcalts_f32: |
6604 | case NEON::BI__builtin_neon_vcaltd_f64: |
6605 | // Only one direction of comparisons actually exists: cmle is actually a cmge
6606 | // with swapped operands. The table gives us the right intrinsic, but we
6607 | // still need to do the swap.
6608 | std::swap(Ops[0], Ops[1]); |
6609 | break; |
6610 | } |
6611 | |
6612 | assert(Int && "Generic code assumes a valid intrinsic");
6613 | |
6614 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
6615 | const Expr *Arg = E->getArg(0); |
6616 | llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); |
6617 | Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E); |
6618 | |
6619 | int j = 0; |
6620 | ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0); |
6621 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
6622 | ai != ae; ++ai, ++j) { |
6623 | llvm::Type *ArgTy = ai->getType(); |
6624 | if (Ops[j]->getType()->getPrimitiveSizeInBits() == |
6625 | ArgTy->getPrimitiveSizeInBits()) |
6626 | continue; |
6627 | |
6628 | assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6629 | // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate |
6630 | // it before inserting. |
6631 | Ops[j] = CGF.Builder.CreateTruncOrBitCast( |
6632 | Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType()); |
6633 | Ops[j] = |
6634 | CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0); |
6635 | } |
6636 | |
6637 | Value *Result = CGF.EmitNeonCall(F, Ops, s); |
6638 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
6639 | if (ResultType->getPrimitiveSizeInBits().getFixedSize() < |
6640 | Result->getType()->getPrimitiveSizeInBits().getFixedSize()) |
6641 | return CGF.Builder.CreateExtractElement(Result, C0); |
6642 | |
6643 | return CGF.Builder.CreateBitCast(Result, ResultType, s); |
6644 | } |
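// Illustrative instance of the swap above: vcales_f32(a, b) computes
// |a| <= |b|, but the table maps it to facge, so the operands are swapped
// and the call is emitted as facge(b, a), i.e. |b| >= |a|.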
6645 | |
6646 | Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( |
6647 | unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, |
6648 | const char *NameHint, unsigned Modifier, const CallExpr *E, |
6649 | SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1, |
6650 | llvm::Triple::ArchType Arch) { |
6651 | // Get the last argument, which specifies the vector type. |
6652 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
6653 | Optional<llvm::APSInt> NeonTypeConst = |
6654 | Arg->getIntegerConstantExpr(getContext()); |
6655 | if (!NeonTypeConst) |
6656 | return nullptr; |
6657 | |
6658 | // Determine the type of this overloaded NEON intrinsic. |
6659 | NeonTypeFlags Type(NeonTypeConst->getZExtValue()); |
6660 | bool Usgn = Type.isUnsigned(); |
6661 | bool Quad = Type.isQuad(); |
6662 | const bool HasLegalHalfType = getTarget().hasLegalHalfType(); |
6663 | const bool AllowBFloatArgsAndRet = |
6664 | getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); |
6665 | |
6666 | llvm::FixedVectorType *VTy = |
6667 | GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet); |
6668 | llvm::Type *Ty = VTy; |
6669 | if (!Ty) |
6670 | return nullptr; |
6671 | |
6672 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
6673 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
6674 | }; |
6675 | |
6676 | unsigned Int = LLVMIntrinsic; |
6677 | if ((Modifier & UnsignedAlts) && !Usgn) |
6678 | Int = AltLLVMIntrinsic; |
6679 | |
6680 | switch (BuiltinID) { |
6681 | default: break; |
6682 | case NEON::BI__builtin_neon_splat_lane_v: |
6683 | case NEON::BI__builtin_neon_splat_laneq_v: |
6684 | case NEON::BI__builtin_neon_splatq_lane_v: |
6685 | case NEON::BI__builtin_neon_splatq_laneq_v: { |
6686 | auto NumElements = VTy->getElementCount(); |
6687 | if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v) |
6688 | NumElements = NumElements * 2; |
6689 | if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v) |
6690 | NumElements = NumElements.divideCoefficientBy(2); |
6691 | |
6692 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
6693 | return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements); |
6694 | } |
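// (Illustration: splatq_lane_v splats a lane of a 64-bit vector into a
// 128-bit result, hence the doubled element count; splat_laneq_v selects a
// lane of a 128-bit vector for a 64-bit result, hence the halving.)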
6695 | case NEON::BI__builtin_neon_vpadd_v: |
6696 | case NEON::BI__builtin_neon_vpaddq_v: |
6697 | // We don't allow fp/int overloading of intrinsics. |
6698 | if (VTy->getElementType()->isFloatingPointTy() && |
6699 | Int == Intrinsic::aarch64_neon_addp) |
6700 | Int = Intrinsic::aarch64_neon_faddp; |
6701 | break; |
6702 | case NEON::BI__builtin_neon_vabs_v: |
6703 | case NEON::BI__builtin_neon_vabsq_v: |
6704 | if (VTy->getElementType()->isFloatingPointTy()) |
6705 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs"); |
6706 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs"); |
6707 | case NEON::BI__builtin_neon_vadd_v: |
6708 | case NEON::BI__builtin_neon_vaddq_v: { |
6709 | llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8); |
6710 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
6711 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
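// Polynomial (carry-less) vector addition over GF(2) is bitwise XOR.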
6712 | Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); |
6713 | return Builder.CreateBitCast(Ops[0], Ty); |
6714 | } |
6715 | case NEON::BI__builtin_neon_vaddhn_v: { |
6716 | llvm::FixedVectorType *SrcTy = |
6717 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6718 | |
6719 | // %sum = add <4 x i32> %lhs, %rhs |
6720 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6721 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
6722 | Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn"); |
6723 | |
6724 | // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> |
6725 | Constant *ShiftAmt = |
6726 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
6727 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn"); |
6728 | |
6729 | // %res = trunc <4 x i32> %high to <4 x i16> |
6730 | return Builder.CreateTrunc(Ops[0], VTy, "vaddhn"); |
6731 | } |
6732 | case NEON::BI__builtin_neon_vcale_v: |
6733 | case NEON::BI__builtin_neon_vcaleq_v: |
6734 | case NEON::BI__builtin_neon_vcalt_v: |
6735 | case NEON::BI__builtin_neon_vcaltq_v: |
6736 | std::swap(Ops[0], Ops[1]); |
6737 | LLVM_FALLTHROUGH;
6738 | case NEON::BI__builtin_neon_vcage_v: |
6739 | case NEON::BI__builtin_neon_vcageq_v: |
6740 | case NEON::BI__builtin_neon_vcagt_v: |
6741 | case NEON::BI__builtin_neon_vcagtq_v: { |
6742 | llvm::Type *Ty; |
6743 | switch (VTy->getScalarSizeInBits()) { |
6744 | default: llvm_unreachable("unexpected type");
6745 | case 32: |
6746 | Ty = FloatTy; |
6747 | break; |
6748 | case 64: |
6749 | Ty = DoubleTy; |
6750 | break; |
6751 | case 16: |
6752 | Ty = HalfTy; |
6753 | break; |
6754 | } |
6755 | auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements()); |
6756 | llvm::Type *Tys[] = { VTy, VecFlt }; |
6757 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6758 | return EmitNeonCall(F, Ops, NameHint); |
6759 | } |
6760 | case NEON::BI__builtin_neon_vceqz_v: |
6761 | case NEON::BI__builtin_neon_vceqzq_v: |
6762 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ, |
6763 | ICmpInst::ICMP_EQ, "vceqz"); |
6764 | case NEON::BI__builtin_neon_vcgez_v: |
6765 | case NEON::BI__builtin_neon_vcgezq_v: |
6766 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE, |
6767 | ICmpInst::ICMP_SGE, "vcgez"); |
6768 | case NEON::BI__builtin_neon_vclez_v: |
6769 | case NEON::BI__builtin_neon_vclezq_v: |
6770 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE, |
6771 | ICmpInst::ICMP_SLE, "vclez"); |
6772 | case NEON::BI__builtin_neon_vcgtz_v: |
6773 | case NEON::BI__builtin_neon_vcgtzq_v: |
6774 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT, |
6775 | ICmpInst::ICMP_SGT, "vcgtz"); |
6776 | case NEON::BI__builtin_neon_vcltz_v: |
6777 | case NEON::BI__builtin_neon_vcltzq_v: |
6778 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT, |
6779 | ICmpInst::ICMP_SLT, "vcltz"); |
6780 | case NEON::BI__builtin_neon_vclz_v: |
6781 | case NEON::BI__builtin_neon_vclzq_v: |
6782 |     // We generate a target-independent intrinsic, which needs a second argument
6783 |     // indicating whether or not clz of zero is undefined; on ARM it isn't.
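     |     // Illustrative result (editor's sketch, not from the source): vclzq_s32
     |     // becomes
     |     //   %vclz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
     |     // where the trailing i1 encodes whether clz(0) is undefined.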
6784 | Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef())); |
6785 | break; |
6786 | case NEON::BI__builtin_neon_vcvt_f32_v: |
6787 | case NEON::BI__builtin_neon_vcvtq_f32_v: |
6788 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6789 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), |
6790 | HasLegalHalfType); |
6791 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
6792 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
6793 | case NEON::BI__builtin_neon_vcvt_f16_v: |
6794 | case NEON::BI__builtin_neon_vcvtq_f16_v: |
6795 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6796 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), |
6797 | HasLegalHalfType); |
6798 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
6799 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
6800 | case NEON::BI__builtin_neon_vcvt_n_f16_v: |
6801 | case NEON::BI__builtin_neon_vcvt_n_f32_v: |
6802 | case NEON::BI__builtin_neon_vcvt_n_f64_v: |
6803 | case NEON::BI__builtin_neon_vcvtq_n_f16_v: |
6804 | case NEON::BI__builtin_neon_vcvtq_n_f32_v: |
6805 | case NEON::BI__builtin_neon_vcvtq_n_f64_v: { |
6806 | llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty }; |
6807 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
6808 | Function *F = CGM.getIntrinsic(Int, Tys); |
6809 | return EmitNeonCall(F, Ops, "vcvt_n"); |
6810 | } |
6811 | case NEON::BI__builtin_neon_vcvt_n_s16_v: |
6812 | case NEON::BI__builtin_neon_vcvt_n_s32_v: |
6813 | case NEON::BI__builtin_neon_vcvt_n_u16_v: |
6814 | case NEON::BI__builtin_neon_vcvt_n_u32_v: |
6815 | case NEON::BI__builtin_neon_vcvt_n_s64_v: |
6816 | case NEON::BI__builtin_neon_vcvt_n_u64_v: |
6817 | case NEON::BI__builtin_neon_vcvtq_n_s16_v: |
6818 | case NEON::BI__builtin_neon_vcvtq_n_s32_v: |
6819 | case NEON::BI__builtin_neon_vcvtq_n_u16_v: |
6820 | case NEON::BI__builtin_neon_vcvtq_n_u32_v: |
6821 | case NEON::BI__builtin_neon_vcvtq_n_s64_v: |
6822 | case NEON::BI__builtin_neon_vcvtq_n_u64_v: { |
6823 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
6824 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6825 | return EmitNeonCall(F, Ops, "vcvt_n"); |
6826 | } |
6827 | case NEON::BI__builtin_neon_vcvt_s32_v: |
6828 | case NEON::BI__builtin_neon_vcvt_u32_v: |
6829 | case NEON::BI__builtin_neon_vcvt_s64_v: |
6830 | case NEON::BI__builtin_neon_vcvt_u64_v: |
6831 | case NEON::BI__builtin_neon_vcvt_s16_v: |
6832 | case NEON::BI__builtin_neon_vcvt_u16_v: |
6833 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
6834 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
6835 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
6836 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
6837 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
6838 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
6839 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); |
6840 | return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") |
6841 | : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); |
6842 | } |
6843 | case NEON::BI__builtin_neon_vcvta_s16_v: |
6844 | case NEON::BI__builtin_neon_vcvta_s32_v: |
6845 | case NEON::BI__builtin_neon_vcvta_s64_v: |
6846 | case NEON::BI__builtin_neon_vcvta_u16_v: |
6847 | case NEON::BI__builtin_neon_vcvta_u32_v: |
6848 | case NEON::BI__builtin_neon_vcvta_u64_v: |
6849 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
6850 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
6851 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
6852 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
6853 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
6854 | case NEON::BI__builtin_neon_vcvtaq_u64_v: |
6855 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
6856 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
6857 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
6858 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
6859 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
6860 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
6861 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
6862 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
6863 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
6864 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
6865 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
6866 | case NEON::BI__builtin_neon_vcvtnq_u64_v: |
6867 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
6868 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
6869 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
6870 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
6871 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
6872 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
6873 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
6874 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
6875 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
6876 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
6877 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
6878 | case NEON::BI__builtin_neon_vcvtpq_u64_v: |
6879 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
6880 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
6881 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
6882 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
6883 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
6884 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
6885 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
6886 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
6887 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
6888 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
6889 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
6890 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
6891 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
6892 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
6893 | } |
6894 | case NEON::BI__builtin_neon_vcvtx_f32_v: { |
6895 | llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty}; |
6896 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
6897 | |
6898 | } |
6899 | case NEON::BI__builtin_neon_vext_v: |
6900 | case NEON::BI__builtin_neon_vextq_v: { |
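     |     // Worked example (editor's sketch): with CV == 2 and <8 x i8> operands,
     |     // Indices becomes {2,3,...,9}, so the shuffle selects the tail of
     |     // Ops[0] followed by the head of Ops[1].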
6901 | int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); |
6902 | SmallVector<int, 16> Indices; |
6903 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
6904 | Indices.push_back(i+CV); |
6905 | |
6906 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6907 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6908 | return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext"); |
6909 | } |
6910 | case NEON::BI__builtin_neon_vfma_v: |
6911 | case NEON::BI__builtin_neon_vfmaq_v: { |
6912 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6913 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6914 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
6915 | |
6916 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
6917 | return emitCallMaybeConstrainedFPBuiltin( |
6918 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
6919 | {Ops[1], Ops[2], Ops[0]}); |
6920 | } |
6921 | case NEON::BI__builtin_neon_vld1_v: |
6922 | case NEON::BI__builtin_neon_vld1q_v: { |
6923 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6924 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
6925 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1"); |
6926 | } |
6927 | case NEON::BI__builtin_neon_vld1_x2_v: |
6928 | case NEON::BI__builtin_neon_vld1q_x2_v: |
6929 | case NEON::BI__builtin_neon_vld1_x3_v: |
6930 | case NEON::BI__builtin_neon_vld1q_x3_v: |
6931 | case NEON::BI__builtin_neon_vld1_x4_v: |
6932 | case NEON::BI__builtin_neon_vld1q_x4_v: { |
6933 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
6934 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
6935 | llvm::Type *Tys[2] = { VTy, PTy }; |
6936 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6937 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN"); |
6938 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6939 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6940 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6941 | } |
6942 | case NEON::BI__builtin_neon_vld2_v: |
6943 | case NEON::BI__builtin_neon_vld2q_v: |
6944 | case NEON::BI__builtin_neon_vld3_v: |
6945 | case NEON::BI__builtin_neon_vld3q_v: |
6946 | case NEON::BI__builtin_neon_vld4_v: |
6947 | case NEON::BI__builtin_neon_vld4q_v: |
6948 | case NEON::BI__builtin_neon_vld2_dup_v: |
6949 | case NEON::BI__builtin_neon_vld2q_dup_v: |
6950 | case NEON::BI__builtin_neon_vld3_dup_v: |
6951 | case NEON::BI__builtin_neon_vld3q_dup_v: |
6952 | case NEON::BI__builtin_neon_vld4_dup_v: |
6953 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
6954 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6955 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6956 | Value *Align = getAlignmentValue32(PtrOp1); |
6957 | Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint); |
6958 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6959 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6960 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6961 | } |
6962 | case NEON::BI__builtin_neon_vld1_dup_v: |
6963 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
6964 | Value *V = UndefValue::get(Ty); |
6965 | PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); |
6966 | LoadInst *Ld = Builder.CreateLoad(PtrOp0); |
6967 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
6968 | Ops[0] = Builder.CreateInsertElement(V, Ld, CI); |
6969 | return EmitNeonSplat(Ops[0], CI); |
6970 | } |
6971 | case NEON::BI__builtin_neon_vld2_lane_v: |
6972 | case NEON::BI__builtin_neon_vld2q_lane_v: |
6973 | case NEON::BI__builtin_neon_vld3_lane_v: |
6974 | case NEON::BI__builtin_neon_vld3q_lane_v: |
6975 | case NEON::BI__builtin_neon_vld4_lane_v: |
6976 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
6977 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6978 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6979 | for (unsigned I = 2; I < Ops.size() - 1; ++I) |
6980 | Ops[I] = Builder.CreateBitCast(Ops[I], Ty); |
6981 | Ops.push_back(getAlignmentValue32(PtrOp1)); |
6982 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint); |
6983 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6984 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6985 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6986 | } |
6987 | case NEON::BI__builtin_neon_vmovl_v: { |
6988 | llvm::FixedVectorType *DTy = |
6989 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
6990 | Ops[0] = Builder.CreateBitCast(Ops[0], DTy); |
6991 | if (Usgn) |
6992 | return Builder.CreateZExt(Ops[0], Ty, "vmovl"); |
6993 | return Builder.CreateSExt(Ops[0], Ty, "vmovl"); |
6994 | } |
6995 | case NEON::BI__builtin_neon_vmovn_v: { |
6996 | llvm::FixedVectorType *QTy = |
6997 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6998 | Ops[0] = Builder.CreateBitCast(Ops[0], QTy); |
6999 | return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); |
7000 | } |
7001 | case NEON::BI__builtin_neon_vmull_v: |
7002 | // FIXME: the integer vmull operations could be emitted in terms of pure |
7003 | // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of |
7004 | // hoisting the exts outside loops. Until global ISel comes along that can |
7005 | // see through such movement this leads to bad CodeGen. So we need an |
7006 | // intrinsic for now. |
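     |     // The pure-IR form the FIXME refers to would be, roughly (sext for the
     |     // signed variants, zext for the unsigned ones):
     |     //   %l = sext <8 x i8> %lhs to <8 x i16>
     |     //   %r = sext <8 x i8> %rhs to <8 x i16>
     |     //   %p = mul <8 x i16> %l, %r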
7007 | Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; |
7008 | Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; |
7009 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
7010 | case NEON::BI__builtin_neon_vpadal_v: |
7011 | case NEON::BI__builtin_neon_vpadalq_v: { |
7012 | // The source operand type has twice as many elements of half the size. |
7013 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
7014 | llvm::Type *EltTy = |
7015 | llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
7016 | auto *NarrowTy = |
7017 | llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); |
7018 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
7019 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
7020 | } |
7021 | case NEON::BI__builtin_neon_vpaddl_v: |
7022 | case NEON::BI__builtin_neon_vpaddlq_v: { |
7023 | // The source operand type has twice as many elements of half the size. |
7024 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
7025 | llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
7026 | auto *NarrowTy = |
7027 | llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); |
7028 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
7029 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); |
7030 | } |
7031 | case NEON::BI__builtin_neon_vqdmlal_v: |
7032 | case NEON::BI__builtin_neon_vqdmlsl_v: { |
7033 | SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end()); |
7034 | Ops[1] = |
7035 | EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal"); |
7036 | Ops.resize(2); |
7037 | return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint); |
7038 | } |
7039 | case NEON::BI__builtin_neon_vqdmulhq_lane_v: |
7040 | case NEON::BI__builtin_neon_vqdmulh_lane_v: |
7041 | case NEON::BI__builtin_neon_vqrdmulhq_lane_v: |
7042 | case NEON::BI__builtin_neon_vqrdmulh_lane_v: { |
7043 | auto *RTy = cast<llvm::FixedVectorType>(Ty); |
7044 | if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || |
7045 | BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) |
7046 | RTy = llvm::FixedVectorType::get(RTy->getElementType(), |
7047 | RTy->getNumElements() * 2); |
7048 | llvm::Type *Tys[2] = { |
7049 | RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false, |
7050 | /*isQuad*/ false))}; |
7051 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
7052 | } |
7053 | case NEON::BI__builtin_neon_vqdmulhq_laneq_v: |
7054 | case NEON::BI__builtin_neon_vqdmulh_laneq_v: |
7055 | case NEON::BI__builtin_neon_vqrdmulhq_laneq_v: |
7056 | case NEON::BI__builtin_neon_vqrdmulh_laneq_v: { |
7057 | llvm::Type *Tys[2] = { |
7058 | Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false, |
7059 | /*isQuad*/ true))}; |
7060 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
7061 | } |
7062 | case NEON::BI__builtin_neon_vqshl_n_v: |
7063 | case NEON::BI__builtin_neon_vqshlq_n_v: |
7064 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", |
7065 | 1, false); |
7066 | case NEON::BI__builtin_neon_vqshlu_n_v: |
7067 | case NEON::BI__builtin_neon_vqshluq_n_v: |
7068 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", |
7069 | 1, false); |
7070 | case NEON::BI__builtin_neon_vrecpe_v: |
7071 | case NEON::BI__builtin_neon_vrecpeq_v: |
7072 | case NEON::BI__builtin_neon_vrsqrte_v: |
7073 | case NEON::BI__builtin_neon_vrsqrteq_v: |
7074 | Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; |
7075 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
7076 | case NEON::BI__builtin_neon_vrndi_v: |
7077 | case NEON::BI__builtin_neon_vrndiq_v: |
7078 | Int = Builder.getIsFPConstrained() |
7079 | ? Intrinsic::experimental_constrained_nearbyint |
7080 | : Intrinsic::nearbyint; |
7081 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
7082 | case NEON::BI__builtin_neon_vrshr_n_v: |
7083 | case NEON::BI__builtin_neon_vrshrq_n_v: |
7084 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", |
7085 | 1, true); |
7086 | case NEON::BI__builtin_neon_vsha512hq_v: |
7087 | case NEON::BI__builtin_neon_vsha512h2q_v: |
7088 | case NEON::BI__builtin_neon_vsha512su0q_v: |
7089 | case NEON::BI__builtin_neon_vsha512su1q_v: { |
7090 | Function *F = CGM.getIntrinsic(Int); |
7091 | return EmitNeonCall(F, Ops, ""); |
7092 | } |
7093 | case NEON::BI__builtin_neon_vshl_n_v: |
7094 | case NEON::BI__builtin_neon_vshlq_n_v: |
7095 | Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); |
7096 | return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], |
7097 | "vshl_n"); |
7098 | case NEON::BI__builtin_neon_vshll_n_v: { |
7099 | llvm::FixedVectorType *SrcTy = |
7100 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
7101 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
7102 | if (Usgn) |
7103 | Ops[0] = Builder.CreateZExt(Ops[0], VTy); |
7104 | else |
7105 | Ops[0] = Builder.CreateSExt(Ops[0], VTy); |
7106 | Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false); |
7107 | return Builder.CreateShl(Ops[0], Ops[1], "vshll_n"); |
7108 | } |
7109 | case NEON::BI__builtin_neon_vshrn_n_v: { |
7110 | llvm::FixedVectorType *SrcTy = |
7111 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
7112 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
7113 | Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); |
7114 | if (Usgn) |
7115 | Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]); |
7116 | else |
7117 | Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]); |
7118 | return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n"); |
7119 | } |
7120 | case NEON::BI__builtin_neon_vshr_n_v: |
7121 | case NEON::BI__builtin_neon_vshrq_n_v: |
7122 | return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n"); |
7123 | case NEON::BI__builtin_neon_vst1_v: |
7124 | case NEON::BI__builtin_neon_vst1q_v: |
7125 | case NEON::BI__builtin_neon_vst2_v: |
7126 | case NEON::BI__builtin_neon_vst2q_v: |
7127 | case NEON::BI__builtin_neon_vst3_v: |
7128 | case NEON::BI__builtin_neon_vst3q_v: |
7129 | case NEON::BI__builtin_neon_vst4_v: |
7130 | case NEON::BI__builtin_neon_vst4q_v: |
7131 | case NEON::BI__builtin_neon_vst2_lane_v: |
7132 | case NEON::BI__builtin_neon_vst2q_lane_v: |
7133 | case NEON::BI__builtin_neon_vst3_lane_v: |
7134 | case NEON::BI__builtin_neon_vst3q_lane_v: |
7135 | case NEON::BI__builtin_neon_vst4_lane_v: |
7136 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
7137 | llvm::Type *Tys[] = {Int8PtrTy, Ty}; |
7138 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
7139 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, ""); |
7140 | } |
7141 | case NEON::BI__builtin_neon_vsm3partw1q_v: |
7142 | case NEON::BI__builtin_neon_vsm3partw2q_v: |
7143 | case NEON::BI__builtin_neon_vsm3ss1q_v: |
7144 | case NEON::BI__builtin_neon_vsm4ekeyq_v: |
7145 | case NEON::BI__builtin_neon_vsm4eq_v: { |
7146 | Function *F = CGM.getIntrinsic(Int); |
7147 | return EmitNeonCall(F, Ops, ""); |
7148 | } |
7149 | case NEON::BI__builtin_neon_vsm3tt1aq_v: |
7150 | case NEON::BI__builtin_neon_vsm3tt1bq_v: |
7151 | case NEON::BI__builtin_neon_vsm3tt2aq_v: |
7152 | case NEON::BI__builtin_neon_vsm3tt2bq_v: { |
7153 | Function *F = CGM.getIntrinsic(Int); |
7154 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
7155 | return EmitNeonCall(F, Ops, ""); |
7156 | } |
7157 | case NEON::BI__builtin_neon_vst1_x2_v: |
7158 | case NEON::BI__builtin_neon_vst1q_x2_v: |
7159 | case NEON::BI__builtin_neon_vst1_x3_v: |
7160 | case NEON::BI__builtin_neon_vst1q_x3_v: |
7161 | case NEON::BI__builtin_neon_vst1_x4_v: |
7162 | case NEON::BI__builtin_neon_vst1q_x4_v: { |
7163 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
7164 |     // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
7165 |     // in AArch64 it comes last. We may want to standardize on one or the other.
7166 | if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be || |
7167 | Arch == llvm::Triple::aarch64_32) { |
7168 | llvm::Type *Tys[2] = { VTy, PTy }; |
7169 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
7170 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
7171 | } |
7172 | llvm::Type *Tys[2] = { PTy, VTy }; |
7173 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
7174 | } |
7175 | case NEON::BI__builtin_neon_vsubhn_v: { |
7176 | llvm::FixedVectorType *SrcTy = |
7177 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
7178 | |
7179 |     // %diff = sub <4 x i32> %lhs, %rhs
7180 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
7181 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
7182 | Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn"); |
7183 | |
7184 |     // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
7185 | Constant *ShiftAmt = |
7186 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
7187 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn"); |
7188 | |
7189 | // %res = trunc <4 x i32> %high to <4 x i16> |
7190 | return Builder.CreateTrunc(Ops[0], VTy, "vsubhn"); |
7191 | } |
7192 | case NEON::BI__builtin_neon_vtrn_v: |
7193 | case NEON::BI__builtin_neon_vtrnq_v: { |
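     |     // Worked example (editor's sketch): for <4 x i16> operands the two
     |     // shuffles below use indices {0,4,2,6} and {1,5,3,7}, the even and odd
     |     // transposed halves, each stored to the vi-th result slot.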
7194 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
7195 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7196 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
7197 | Value *SV = nullptr; |
7198 | |
7199 | for (unsigned vi = 0; vi != 2; ++vi) { |
7200 | SmallVector<int, 16> Indices; |
7201 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
7202 | Indices.push_back(i+vi); |
7203 | Indices.push_back(i+e+vi); |
7204 | } |
7205 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7206 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
7207 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7208 | } |
7209 | return SV; |
7210 | } |
7211 | case NEON::BI__builtin_neon_vtst_v: |
7212 | case NEON::BI__builtin_neon_vtstq_v: { |
7213 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
7214 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7215 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
7216 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
7217 | ConstantAggregateZero::get(Ty)); |
7218 | return Builder.CreateSExt(Ops[0], Ty, "vtst"); |
7219 | } |
7220 | case NEON::BI__builtin_neon_vuzp_v: |
7221 | case NEON::BI__builtin_neon_vuzpq_v: { |
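     |     // Worked example (editor's sketch): for <4 x i16> operands the shuffles
     |     // use indices {0,2,4,6} and {1,3,5,7}, i.e. the even- and odd-indexed
     |     // elements of the concatenated pair.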
7222 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
7223 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7224 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
7225 | Value *SV = nullptr; |
7226 | |
7227 | for (unsigned vi = 0; vi != 2; ++vi) { |
7228 | SmallVector<int, 16> Indices; |
7229 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
7230 | Indices.push_back(2*i+vi); |
7231 | |
7232 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7233 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
7234 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7235 | } |
7236 | return SV; |
7237 | } |
7238 | case NEON::BI__builtin_neon_vxarq_v: { |
7239 | Function *F = CGM.getIntrinsic(Int); |
7240 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
7241 | return EmitNeonCall(F, Ops, ""); |
7242 | } |
7243 | case NEON::BI__builtin_neon_vzip_v: |
7244 | case NEON::BI__builtin_neon_vzipq_v: { |
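     |     // Worked example (editor's sketch): for <4 x i16> operands the shuffles
     |     // use indices {0,4,1,5} and {2,6,3,7}, interleaving the low halves and
     |     // then the high halves of the two sources.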
7245 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
7246 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7247 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
7248 | Value *SV = nullptr; |
7249 | |
7250 | for (unsigned vi = 0; vi != 2; ++vi) { |
7251 | SmallVector<int, 16> Indices; |
7252 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
7253 | Indices.push_back((i + vi*e) >> 1); |
7254 | Indices.push_back(((i + vi*e) >> 1)+e); |
7255 | } |
7256 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7257 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
7258 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7259 | } |
7260 | return SV; |
7261 | } |
7262 | case NEON::BI__builtin_neon_vdot_v: |
7263 | case NEON::BI__builtin_neon_vdotq_v: { |
7264 | auto *InputTy = |
7265 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7266 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7267 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7268 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot"); |
7269 | } |
7270 | case NEON::BI__builtin_neon_vfmlal_low_v: |
7271 | case NEON::BI__builtin_neon_vfmlalq_low_v: { |
7272 | auto *InputTy = |
7273 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7274 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7275 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low"); |
7276 | } |
7277 | case NEON::BI__builtin_neon_vfmlsl_low_v: |
7278 | case NEON::BI__builtin_neon_vfmlslq_low_v: { |
7279 | auto *InputTy = |
7280 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7281 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7282 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low"); |
7283 | } |
7284 | case NEON::BI__builtin_neon_vfmlal_high_v: |
7285 | case NEON::BI__builtin_neon_vfmlalq_high_v: { |
7286 | auto *InputTy = |
7287 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7288 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7289 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high"); |
7290 | } |
7291 | case NEON::BI__builtin_neon_vfmlsl_high_v: |
7292 | case NEON::BI__builtin_neon_vfmlslq_high_v: { |
7293 | auto *InputTy = |
7294 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7295 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7296 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high"); |
7297 | } |
7298 | case NEON::BI__builtin_neon_vmmlaq_v: { |
7299 | auto *InputTy = |
7300 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7301 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7302 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7303 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla"); |
7304 | } |
7305 | case NEON::BI__builtin_neon_vusmmlaq_v: { |
7306 | auto *InputTy = |
7307 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7308 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7309 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla"); |
7310 | } |
7311 | case NEON::BI__builtin_neon_vusdot_v: |
7312 | case NEON::BI__builtin_neon_vusdotq_v: { |
7313 | auto *InputTy = |
7314 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7315 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7316 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot"); |
7317 | } |
7318 | case NEON::BI__builtin_neon_vbfdot_v: |
7319 | case NEON::BI__builtin_neon_vbfdotq_v: { |
7320 | llvm::Type *InputTy = |
7321 | llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16); |
7322 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7323 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot"); |
7324 | } |
7325 | case NEON::BI__builtin_neon___a32_vcvt_bf16_v: { |
7326 | llvm::Type *Tys[1] = { Ty }; |
7327 | Function *F = CGM.getIntrinsic(Int, Tys); |
7328 | return EmitNeonCall(F, Ops, "vcvtfp2bf"); |
7329 | } |
7330 | |
7331 | } |
7332 | |
7333 |   assert(Int && "Expected valid intrinsic number");
7334 | |
7335 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
7336 | Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E); |
7337 | |
7338 | Value *Result = EmitNeonCall(F, Ops, NameHint); |
7339 | llvm::Type *ResultType = ConvertType(E->getType()); |
7340 |   // The AArch64 intrinsic may return a one-element vector; cast it to the
7341 |   // scalar type expected by the builtin.
7342 | return Builder.CreateBitCast(Result, ResultType, NameHint); |
7343 | } |
7344 | |
7345 | Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr( |
7346 | Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp, |
7347 | const CmpInst::Predicate Ip, const Twine &Name) { |
7348 | llvm::Type *OTy = Op->getType(); |
7349 | |
7350 | // FIXME: this is utterly horrific. We should not be looking at previous |
7351 | // codegen context to find out what needs doing. Unfortunately TableGen |
7352 | // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32 |
7353 | // (etc). |
7354 | if (BitCastInst *BI = dyn_cast<BitCastInst>(Op)) |
7355 | OTy = BI->getOperand(0)->getType(); |
7356 | |
7357 | Op = Builder.CreateBitCast(Op, OTy); |
7358 | if (OTy->getScalarType()->isFloatingPointTy()) { |
7359 | if (Fp == CmpInst::FCMP_OEQ) |
7360 | Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy)); |
7361 | else |
7362 | Op = Builder.CreateFCmpS(Fp, Op, Constant::getNullValue(OTy)); |
7363 | } else { |
7364 | Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy)); |
7365 | } |
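     |   // Illustrative output (editor's sketch): vcgtzq_s32 yields
     |   //   %cmp = icmp sgt <4 x i32> %a, zeroinitializer
     |   //   %res = sext <4 x i1> %cmp to <4 x i32>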
7366 | return Builder.CreateSExt(Op, Ty, Name); |
7367 | } |
7368 | |
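     | // Editor's note: this helper packs 64-bit NEON table operands into
     | // 128-bit tables by shuffling adjacent pairs together; e.g. three
     | // <8 x i8> tables become two <16 x i8> values, the last padded with a
     | // zero table.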
7369 | static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
7370 | Value *ExtOp, Value *IndexOp, |
7371 | llvm::Type *ResTy, unsigned IntID, |
7372 | const char *Name) { |
7373 | SmallVector<Value *, 2> TblOps; |
7374 | if (ExtOp) |
7375 | TblOps.push_back(ExtOp); |
7376 | |
7377 |   // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
7378 | SmallVector<int, 16> Indices; |
7379 | auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
7380 | for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { |
7381 | Indices.push_back(2*i); |
7382 | Indices.push_back(2*i+1); |
7383 | } |
7384 | |
7385 | int PairPos = 0, End = Ops.size() - 1; |
7386 | while (PairPos < End) { |
7387 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
7388 | Ops[PairPos+1], Indices, |
7389 | Name)); |
7390 | PairPos += 2; |
7391 | } |
7392 | |
7393 |   // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
7394 |   // of the last 128-bit lookup table with zeros.
7395 | if (PairPos == End) { |
7396 | Value *ZeroTbl = ConstantAggregateZero::get(TblTy); |
7397 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
7398 | ZeroTbl, Indices, Name)); |
7399 | } |
7400 | |
7401 | Function *TblF; |
7402 | TblOps.push_back(IndexOp); |
7403 | TblF = CGF.CGM.getIntrinsic(IntID, ResTy); |
7404 | |
7405 | return CGF.EmitNeonCall(TblF, TblOps, Name); |
7406 | } |
7407 | |
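     | // Editor's note: maps the ARM hint builtins to the immediate operand of
     | // llvm.arm.hint; e.g. __builtin_arm_wfi() lowers (illustratively) to
     | //   call void @llvm.arm.hint(i32 3)
     | // and nullptr is returned for builtins that are not hints.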
7408 | Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { |
7409 | unsigned Value; |
7410 | switch (BuiltinID) { |
7411 | default: |
7412 | return nullptr; |
7413 | case ARM::BI__builtin_arm_nop: |
7414 | Value = 0; |
7415 | break; |
7416 | case ARM::BI__builtin_arm_yield: |
7417 | case ARM::BI__yield: |
7418 | Value = 1; |
7419 | break; |
7420 | case ARM::BI__builtin_arm_wfe: |
7421 | case ARM::BI__wfe: |
7422 | Value = 2; |
7423 | break; |
7424 | case ARM::BI__builtin_arm_wfi: |
7425 | case ARM::BI__wfi: |
7426 | Value = 3; |
7427 | break; |
7428 | case ARM::BI__builtin_arm_sev: |
7429 | case ARM::BI__sev: |
7430 | Value = 4; |
7431 | break; |
7432 | case ARM::BI__builtin_arm_sevl: |
7433 | case ARM::BI__sevl: |
7434 | Value = 5; |
7435 | break; |
7436 | } |
7437 | |
7438 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint), |
7439 | llvm::ConstantInt::get(Int32Ty, Value)); |
7440 | } |
7441 | |
7442 | enum SpecialRegisterAccessKind { |
7443 | NormalRead, |
7444 | VolatileRead, |
7445 | Write, |
7446 | }; |
7447 | |
7448 | // Generates the IR for the read/write special register builtin.
7449 | // ValueType is the type of the value that is to be written or read;
7450 | // RegisterType is the type of the register being written to or read from.
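     | // Usage sketch (editor's addition; "sysreg" is a hypothetical name):
     | //   __builtin_arm_rsr("sysreg")    -> call i32 @llvm.read_volatile_register.i32(metadata !"sysreg")
     | //   __builtin_arm_wsr("sysreg", v) -> call void @llvm.write_register.i32(metadata !"sysreg", i32 %v)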
7451 | static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF, |
7452 | const CallExpr *E, |
7453 | llvm::Type *RegisterType, |
7454 | llvm::Type *ValueType, |
7455 | SpecialRegisterAccessKind AccessKind, |
7456 | StringRef SysReg = "") { |
7457 |   // The read/write register intrinsics only support 32- and 64-bit operations.
7458 |   assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
7459 |          && "Unsupported size for register.");
7460 | |
7461 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
7462 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
7463 | LLVMContext &Context = CGM.getLLVMContext(); |
7464 | |
7465 | if (SysReg.empty()) { |
7466 | const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); |
7467 | SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString(); |
7468 | } |
7469 | |
7470 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; |
7471 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
7472 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
7473 | |
7474 | llvm::Type *Types[] = { RegisterType }; |
7475 | |
7476 | bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); |
7477 |   assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7478 |          && "Can't fit 64-bit value in 32-bit register");
7479 | |
7480 | if (AccessKind != Write) { |
7481 |     assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7482 | llvm::Function *F = CGM.getIntrinsic( |
7483 | AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register |
7484 | : llvm::Intrinsic::read_register, |
7485 | Types); |
7486 | llvm::Value *Call = Builder.CreateCall(F, Metadata); |
7487 | |
7488 | if (MixedTypes) |
7489 |       // Read into a 64-bit register, then truncate the result to 32 bits.
7490 | return Builder.CreateTrunc(Call, ValueType); |
7491 | |
7492 | if (ValueType->isPointerTy()) |
7493 | // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*). |
7494 | return Builder.CreateIntToPtr(Call, ValueType); |
7495 | |
7496 | return Call; |
7497 | } |
7498 | |
7499 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
7500 | llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); |
7501 | if (MixedTypes) { |
7502 |     // Extend the 32-bit write value to 64 bits before passing it to the write.
7503 | ArgValue = Builder.CreateZExt(ArgValue, RegisterType); |
7504 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7505 | } |
7506 | |
7507 | if (ValueType->isPointerTy()) { |
7508 | // Have VoidPtrTy ArgValue but want to return an i32/i64. |
7509 | ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType); |
7510 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7511 | } |
7512 | |
7513 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7514 | } |
7515 | |
7516 | /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra |
7517 | /// argument that specifies the vector type. |
7518 | static bool HasExtraNeonArgument(unsigned BuiltinID) { |
7519 | switch (BuiltinID) { |
7520 | default: break; |
7521 | case NEON::BI__builtin_neon_vget_lane_i8: |
7522 | case NEON::BI__builtin_neon_vget_lane_i16: |
7523 | case NEON::BI__builtin_neon_vget_lane_bf16: |
7524 | case NEON::BI__builtin_neon_vget_lane_i32: |
7525 | case NEON::BI__builtin_neon_vget_lane_i64: |
7526 | case NEON::BI__builtin_neon_vget_lane_f32: |
7527 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
7528 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
7529 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
7530 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
7531 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
7532 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
7533 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
7534 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
7535 | case NEON::BI__builtin_neon_vset_lane_i8: |
7536 | case NEON::BI__builtin_neon_vset_lane_i16: |
7537 | case NEON::BI__builtin_neon_vset_lane_bf16: |
7538 | case NEON::BI__builtin_neon_vset_lane_i32: |
7539 | case NEON::BI__builtin_neon_vset_lane_i64: |
7540 | case NEON::BI__builtin_neon_vset_lane_f32: |
7541 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
7542 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
7543 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
7544 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
7545 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
7546 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
7547 | case NEON::BI__builtin_neon_vsha1h_u32: |
7548 | case NEON::BI__builtin_neon_vsha1cq_u32: |
7549 | case NEON::BI__builtin_neon_vsha1pq_u32: |
7550 | case NEON::BI__builtin_neon_vsha1mq_u32: |
7551 | case NEON::BI__builtin_neon_vcvth_bf16_f32: |
7552 | case clang::ARM::BI_MoveToCoprocessor: |
7553 | case clang::ARM::BI_MoveToCoprocessor2: |
7554 | return false; |
7555 | } |
7556 | return true; |
7557 | } |
7558 | |
7559 | Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, |
7560 | const CallExpr *E, |
7561 | ReturnValueSlot ReturnValue, |
7562 | llvm::Triple::ArchType Arch) { |
7563 | if (auto Hint = GetValueForARMHint(BuiltinID)) |
7564 | return Hint; |
7565 | |
7566 | if (BuiltinID == ARM::BI__emit) { |
7567 | bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb; |
7568 | llvm::FunctionType *FTy = |
7569 | llvm::FunctionType::get(VoidTy, /*Variadic=*/false); |
7570 | |
7571 | Expr::EvalResult Result; |
7572 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
7573 | llvm_unreachable("Sema will ensure that the parameter is constant")::llvm::llvm_unreachable_internal("Sema will ensure that the parameter is constant" , "clang/lib/CodeGen/CGBuiltin.cpp", 7573); |
7574 | |
7575 | llvm::APSInt Value = Result.Val.getInt(); |
7576 | uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue(); |
7577 | |
7578 | llvm::InlineAsm *Emit = |
7579 | IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "", |
7580 | /*hasSideEffects=*/true) |
7581 | : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", |
7582 | /*hasSideEffects=*/true); |
7583 | |
7584 | return Builder.CreateCall(Emit); |
7585 | } |
7586 | |
7587 | if (BuiltinID == ARM::BI__builtin_arm_dbg) { |
7588 | Value *Option = EmitScalarExpr(E->getArg(0)); |
7589 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option); |
7590 | } |
7591 | |
7592 | if (BuiltinID == ARM::BI__builtin_arm_prefetch) { |
7593 | Value *Address = EmitScalarExpr(E->getArg(0)); |
7594 | Value *RW = EmitScalarExpr(E->getArg(1)); |
7595 | Value *IsData = EmitScalarExpr(E->getArg(2)); |
7596 | |
7597 |     // Locality is not supported on the ARM target.
7598 | Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); |
7599 | |
7600 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
7601 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
7602 | } |
7603 | |
7604 | if (BuiltinID == ARM::BI__builtin_arm_rbit) { |
7605 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7606 | return Builder.CreateCall( |
7607 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
7608 | } |
7609 | |
7610 | if (BuiltinID == ARM::BI__builtin_arm_cls) { |
7611 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7612 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls"); |
7613 | } |
7614 | if (BuiltinID == ARM::BI__builtin_arm_cls64) { |
7615 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7616 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg, |
7617 | "cls"); |
7618 | } |
7619 | |
7620 | if (BuiltinID == ARM::BI__clear_cache) { |
7621 |     assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7622 | const FunctionDecl *FD = E->getDirectCallee(); |
7623 | Value *Ops[2]; |
7624 | for (unsigned i = 0; i < 2; i++) |
7625 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
7626 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
7627 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
7628 | StringRef Name = FD->getName(); |
7629 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
7630 | } |
7631 | |
7632 | if (BuiltinID == ARM::BI__builtin_arm_mcrr || |
7633 | BuiltinID == ARM::BI__builtin_arm_mcrr2) { |
7634 | Function *F; |
7635 | |
7636 | switch (BuiltinID) { |
7637 |     default: llvm_unreachable("unexpected builtin");
7638 | case ARM::BI__builtin_arm_mcrr: |
7639 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr); |
7640 | break; |
7641 | case ARM::BI__builtin_arm_mcrr2: |
7642 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); |
7643 | break; |
7644 | } |
7645 | |
7646 |     // The MCRR{2} instruction has 5 operands, but
7647 |     // the intrinsic has only 4 because Rt and Rt2
7648 |     // are represented as a single unsigned 64-bit
7649 |     // integer in the intrinsic definition, even
7650 |     // though the instruction treats them as two
7651 |     // separate 32-bit integers.
7652 | |
7653 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
7654 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
7655 | Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); |
7656 | Value *CRm = EmitScalarExpr(E->getArg(3)); |
7657 | |
7658 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
7659 | Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty); |
7660 | Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1); |
7661 | Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty); |
7662 | |
7663 | return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm}); |
7664 | } |
7665 | |
7666 | if (BuiltinID == ARM::BI__builtin_arm_mrrc || |
7667 | BuiltinID == ARM::BI__builtin_arm_mrrc2) { |
7668 | Function *F; |
7669 | |
7670 | switch (BuiltinID) { |
7671 |     default: llvm_unreachable("unexpected builtin");
7672 | case ARM::BI__builtin_arm_mrrc: |
7673 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc); |
7674 | break; |
7675 | case ARM::BI__builtin_arm_mrrc2: |
7676 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); |
7677 | break; |
7678 | } |
7679 | |
7680 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
7681 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
7682 | Value *CRm = EmitScalarExpr(E->getArg(2)); |
7683 | Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm}); |
7684 | |
7685 |     // Returns an unsigned 64-bit integer, represented
7686 |     // as two 32-bit integers.
7687 | |
7688 | Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1); |
7689 | Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0); |
7690 | Rt = Builder.CreateZExt(Rt, Int64Ty); |
7691 | Rt1 = Builder.CreateZExt(Rt1, Int64Ty); |
7692 | |
7693 | Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32); |
7694 | RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true); |
7695 | RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1); |
7696 | |
7697 | return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); |
7698 | } |
7699 | |
7700 | if (BuiltinID == ARM::BI__builtin_arm_ldrexd || |
7701 | ((BuiltinID == ARM::BI__builtin_arm_ldrex || |
7702 | BuiltinID == ARM::BI__builtin_arm_ldaex) && |
7703 | getContext().getTypeSize(E->getType()) == 64) || |
7704 | BuiltinID == ARM::BI__ldrexd) { |
7705 | Function *F; |
7706 | |
7707 | switch (BuiltinID) { |
7708 |     default: llvm_unreachable("unexpected builtin");
7709 | case ARM::BI__builtin_arm_ldaex: |
7710 | F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); |
7711 | break; |
7712 | case ARM::BI__builtin_arm_ldrexd: |
7713 | case ARM::BI__builtin_arm_ldrex: |
7714 | case ARM::BI__ldrexd: |
7715 | F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); |
7716 | break; |
7717 | } |
7718 | |
7719 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
7720 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
7721 | "ldrexd"); |
7722 | |
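     |     // Editor's note: the exclusive load returns { i32, i32 }; the halves
     |     // are recombined below as (zext(hi) << 32) | zext(lo) to form the
     |     // 64-bit result.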
7723 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
7724 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
7725 | Val0 = Builder.CreateZExt(Val0, Int64Ty); |
7726 | Val1 = Builder.CreateZExt(Val1, Int64Ty); |
7727 | |
7728 | Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); |
7729 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); |
7730 | Val = Builder.CreateOr(Val, Val1); |
7731 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
7732 | } |
7733 | |
7734 | if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
7735 | BuiltinID == ARM::BI__builtin_arm_ldaex) { |
7736 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
7737 | |
7738 | QualType Ty = E->getType(); |
7739 | llvm::Type *RealResTy = ConvertType(Ty); |
7740 | llvm::Type *IntTy = |
7741 | llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty)); |
7742 | llvm::Type *PtrTy = IntTy->getPointerTo(); |
7743 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
7744 | |
7745 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex |
7746 | ? Intrinsic::arm_ldaex |
7747 | : Intrinsic::arm_ldrex, |
7748 | PtrTy); |
7749 | CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex"); |
7750 | Val->addParamAttr( |
7751 | 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy)); |
7752 | |
7753 | if (RealResTy->isPointerTy()) |
7754 | return Builder.CreateIntToPtr(Val, RealResTy); |
7755 | else { |
7756 | llvm::Type *IntResTy = llvm::IntegerType::get( |
7757 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
7758 | return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy), |
7759 | RealResTy); |
7760 | } |
7761 | } |
7762 | |
7763 | if (BuiltinID == ARM::BI__builtin_arm_strexd || |
7764 | ((BuiltinID == ARM::BI__builtin_arm_stlex || |
7765 | BuiltinID == ARM::BI__builtin_arm_strex) && |
7766 | getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { |
7767 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
7768 | ? Intrinsic::arm_stlexd |
7769 | : Intrinsic::arm_strexd); |
7770 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty); |
7771 | |
7772 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
7773 | Value *Val = EmitScalarExpr(E->getArg(0)); |
7774 | Builder.CreateStore(Val, Tmp); |
7775 | |
7776 | Address LdPtr = Builder.CreateElementBitCast(Tmp, STy); |
7777 | Val = Builder.CreateLoad(LdPtr); |
7778 | |
7779 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
7780 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
7781 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); |
7782 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); |
7783 | } |
7784 | |
7785 | if (BuiltinID == ARM::BI__builtin_arm_strex || |
7786 | BuiltinID == ARM::BI__builtin_arm_stlex) { |
7787 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
7788 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
7789 | |
7790 | QualType Ty = E->getArg(0)->getType(); |
7791 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
7792 | getContext().getTypeSize(Ty)); |
7793 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
7794 | |
7795 | if (StoreVal->getType()->isPointerTy()) |
7796 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty); |
7797 | else { |
7798 | llvm::Type *IntTy = llvm::IntegerType::get( |
7799 | getLLVMContext(), |
7800 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
7801 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
7802 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty); |
7803 | } |
7804 | |
7805 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
7806 | ? Intrinsic::arm_stlex |
7807 | : Intrinsic::arm_strex, |
7808 | StoreAddr->getType()); |
7809 | |
7810 | CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex"); |
7811 | CI->addParamAttr( |
7812 | 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy)); |
7813 | return CI; |
7814 | } |
7815 | |
7816 | if (BuiltinID == ARM::BI__builtin_arm_clrex) { |
7817 | Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); |
7818 | return Builder.CreateCall(F); |
7819 | } |
7820 | |
7821 | // CRC32 |
7822 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
7823 | switch (BuiltinID) { |
7824 | case ARM::BI__builtin_arm_crc32b: |
7825 | CRCIntrinsicID = Intrinsic::arm_crc32b; break; |
7826 | case ARM::BI__builtin_arm_crc32cb: |
7827 | CRCIntrinsicID = Intrinsic::arm_crc32cb; break; |
7828 | case ARM::BI__builtin_arm_crc32h: |
7829 | CRCIntrinsicID = Intrinsic::arm_crc32h; break; |
7830 | case ARM::BI__builtin_arm_crc32ch: |
7831 | CRCIntrinsicID = Intrinsic::arm_crc32ch; break; |
7832 | case ARM::BI__builtin_arm_crc32w: |
7833 | case ARM::BI__builtin_arm_crc32d: |
7834 | CRCIntrinsicID = Intrinsic::arm_crc32w; break; |
7835 | case ARM::BI__builtin_arm_crc32cw: |
7836 | case ARM::BI__builtin_arm_crc32cd: |
7837 | CRCIntrinsicID = Intrinsic::arm_crc32cw; break; |
7838 | } |
7839 | |
7840 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
7841 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
7842 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
7843 | |
7844 |     // The crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
7845 |     // intrinsics, hence we need different codegen for these cases.
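     |     // Illustrative expansion (editor's sketch): __builtin_arm_crc32d(a, b)
     |     // becomes roughly
     |     //   %t   = call i32 @llvm.arm.crc32w(i32 %a, i32 trunc(%b))
     |     //   %res = call i32 @llvm.arm.crc32w(i32 %t, i32 trunc(lshr(%b, 32)))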
7846 | if (BuiltinID == ARM::BI__builtin_arm_crc32d || |
7847 | BuiltinID == ARM::BI__builtin_arm_crc32cd) { |
7848 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
7849 | Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty); |
7850 | Value *Arg1b = Builder.CreateLShr(Arg1, C1); |
7851 | Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); |
7852 | |
7853 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7854 | Value *Res = Builder.CreateCall(F, {Arg0, Arg1a}); |
7855 | return Builder.CreateCall(F, {Res, Arg1b}); |
7856 | } else { |
7857 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); |
7858 | |
7859 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7860 | return Builder.CreateCall(F, {Arg0, Arg1}); |
7861 | } |
7862 | } |
7863 | |
7864 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
7865 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7866 | BuiltinID == ARM::BI__builtin_arm_rsrp || |
7867 | BuiltinID == ARM::BI__builtin_arm_wsr || |
7868 | BuiltinID == ARM::BI__builtin_arm_wsr64 || |
7869 | BuiltinID == ARM::BI__builtin_arm_wsrp) { |
7870 | |
7871 | SpecialRegisterAccessKind AccessKind = Write; |
7872 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
7873 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7874 | BuiltinID == ARM::BI__builtin_arm_rsrp) |
7875 | AccessKind = VolatileRead; |
7876 | |
7877 | bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp || |
7878 | BuiltinID == ARM::BI__builtin_arm_wsrp; |
7879 | |
7880 | bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7881 | BuiltinID == ARM::BI__builtin_arm_wsr64; |
7882 | |
7883 | llvm::Type *ValueType; |
7884 | llvm::Type *RegisterType; |
7885 | if (IsPointerBuiltin) { |
7886 | ValueType = VoidPtrTy; |
7887 | RegisterType = Int32Ty; |
7888 | } else if (Is64Bit) { |
7889 | ValueType = RegisterType = Int64Ty; |
7890 | } else { |
7891 | ValueType = RegisterType = Int32Ty; |
7892 | } |
7893 | |
7894 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, |
7895 | AccessKind); |
7896 | } |
7897 | |
7898 | // Handle MSVC intrinsics before argument evaluation to prevent double |
7899 | // evaluation. |
7900 | if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID)) |
7901 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
7902 | |
7903 | // Deal with MVE builtins |
7904 | if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
7905 | return Result; |
7906 | // Handle CDE builtins |
7907 | if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
7908 | return Result; |
7909 | |
7910 | // Find out if any arguments are required to be integer constant |
7911 | // expressions. |
7912 | unsigned ICEArguments = 0; |
7913 | ASTContext::GetBuiltinTypeError Error; |
7914 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
7915 |   assert(Error == ASTContext::GE_None && "Should not codegen an error");
7916 | |
7917 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
7918 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
7919 | }; |
7920 | |
7921 | Address PtrOp0 = Address::invalid(); |
7922 | Address PtrOp1 = Address::invalid(); |
7923 | SmallVector<Value*, 4> Ops; |
7924 | bool HasExtraArg = HasExtraNeonArgument(BuiltinID); |
7925 | unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); |
7926 | for (unsigned i = 0, e = NumArgs; i != e; i++) { |
7927 | if (i == 0) { |
7928 | switch (BuiltinID) { |
7929 | case NEON::BI__builtin_neon_vld1_v: |
7930 | case NEON::BI__builtin_neon_vld1q_v: |
7931 | case NEON::BI__builtin_neon_vld1q_lane_v: |
7932 | case NEON::BI__builtin_neon_vld1_lane_v: |
7933 | case NEON::BI__builtin_neon_vld1_dup_v: |
7934 | case NEON::BI__builtin_neon_vld1q_dup_v: |
7935 | case NEON::BI__builtin_neon_vst1_v: |
7936 | case NEON::BI__builtin_neon_vst1q_v: |
7937 | case NEON::BI__builtin_neon_vst1q_lane_v: |
7938 | case NEON::BI__builtin_neon_vst1_lane_v: |
7939 | case NEON::BI__builtin_neon_vst2_v: |
7940 | case NEON::BI__builtin_neon_vst2q_v: |
7941 | case NEON::BI__builtin_neon_vst2_lane_v: |
7942 | case NEON::BI__builtin_neon_vst2q_lane_v: |
7943 | case NEON::BI__builtin_neon_vst3_v: |
7944 | case NEON::BI__builtin_neon_vst3q_v: |
7945 | case NEON::BI__builtin_neon_vst3_lane_v: |
7946 | case NEON::BI__builtin_neon_vst3q_lane_v: |
7947 | case NEON::BI__builtin_neon_vst4_v: |
7948 | case NEON::BI__builtin_neon_vst4q_v: |
7949 | case NEON::BI__builtin_neon_vst4_lane_v: |
7950 | case NEON::BI__builtin_neon_vst4q_lane_v: |
7951 | // Get the alignment for the argument in addition to the value; |
7952 | // we'll use it later. |
7953 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
7954 | Ops.push_back(PtrOp0.getPointer()); |
7955 | continue; |
7956 | } |
7957 | } |
7958 | if (i == 1) { |
7959 | switch (BuiltinID) { |
7960 | case NEON::BI__builtin_neon_vld2_v: |
7961 | case NEON::BI__builtin_neon_vld2q_v: |
7962 | case NEON::BI__builtin_neon_vld3_v: |
7963 | case NEON::BI__builtin_neon_vld3q_v: |
7964 | case NEON::BI__builtin_neon_vld4_v: |
7965 | case NEON::BI__builtin_neon_vld4q_v: |
7966 | case NEON::BI__builtin_neon_vld2_lane_v: |
7967 | case NEON::BI__builtin_neon_vld2q_lane_v: |
7968 | case NEON::BI__builtin_neon_vld3_lane_v: |
7969 | case NEON::BI__builtin_neon_vld3q_lane_v: |
7970 | case NEON::BI__builtin_neon_vld4_lane_v: |
7971 | case NEON::BI__builtin_neon_vld4q_lane_v: |
7972 | case NEON::BI__builtin_neon_vld2_dup_v: |
7973 | case NEON::BI__builtin_neon_vld2q_dup_v: |
7974 | case NEON::BI__builtin_neon_vld3_dup_v: |
7975 | case NEON::BI__builtin_neon_vld3q_dup_v: |
7976 | case NEON::BI__builtin_neon_vld4_dup_v: |
7977 | case NEON::BI__builtin_neon_vld4q_dup_v: |
7978 | // Get the alignment for the argument in addition to the value; |
7979 | // we'll use it later. |
7980 | PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); |
7981 | Ops.push_back(PtrOp1.getPointer()); |
7982 | continue; |
7983 | } |
7984 | } |
7985 | |
7986 | if ((ICEArguments & (1 << i)) == 0) { |
7987 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
7988 | } else { |
7989 | // If this is required to be a constant, constant fold it so that we know |
7990 | // that the generated intrinsic gets a ConstantInt. |
7991 | Ops.push_back(llvm::ConstantInt::get( |
7992 | getLLVMContext(), |
7993 | *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
7994 | } |
7995 | } |
7996 | |
7997 | switch (BuiltinID) { |
7998 | default: break; |
7999 | |
8000 | case NEON::BI__builtin_neon_vget_lane_i8: |
8001 | case NEON::BI__builtin_neon_vget_lane_i16: |
8002 | case NEON::BI__builtin_neon_vget_lane_i32: |
8003 | case NEON::BI__builtin_neon_vget_lane_i64: |
8004 | case NEON::BI__builtin_neon_vget_lane_bf16: |
8005 | case NEON::BI__builtin_neon_vget_lane_f32: |
8006 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
8007 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
8008 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
8009 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
8010 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
8011 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
8012 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
8013 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
8014 | return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane"); |
8015 | |
8016 | case NEON::BI__builtin_neon_vrndns_f32: { |
8017 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
8018 | llvm::Type *Tys[] = {Arg->getType()}; |
8019 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys); |
8020 | return Builder.CreateCall(F, {Arg}, "vrndn");
8021 | }
8022 | case NEON::BI__builtin_neon_vset_lane_i8: |
8023 | case NEON::BI__builtin_neon_vset_lane_i16: |
8024 | case NEON::BI__builtin_neon_vset_lane_i32: |
8025 | case NEON::BI__builtin_neon_vset_lane_i64: |
8026 | case NEON::BI__builtin_neon_vset_lane_bf16: |
8027 | case NEON::BI__builtin_neon_vset_lane_f32: |
8028 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
8029 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
8030 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
8031 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
8032 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
8033 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
8034 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
8035 | |
8036 | case NEON::BI__builtin_neon_vsha1h_u32: |
8037 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops, |
8038 | "vsha1h"); |
8039 | case NEON::BI__builtin_neon_vsha1cq_u32:
8040 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
8041 | "vsha1c");
8042 | case NEON::BI__builtin_neon_vsha1pq_u32:
8043 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
8044 | "vsha1p");
8045 | case NEON::BI__builtin_neon_vsha1mq_u32:
8046 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
8047 | "vsha1m");
8048 | |
8049 | case NEON::BI__builtin_neon_vcvth_bf16_f32: { |
8050 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops, |
8051 | "vcvtbfp2bf"); |
8052 | } |
8053 | |
8054 | // The ARM _MoveToCoprocessor builtins put the input register value as |
8055 | // the first argument, but the LLVM intrinsic expects it as the third one. |
8056 | case ARM::BI_MoveToCoprocessor: |
8057 | case ARM::BI_MoveToCoprocessor2: { |
8058 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ? |
8059 | Intrinsic::arm_mcr : Intrinsic::arm_mcr2); |
8060 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0], |
8061 | Ops[3], Ops[4], Ops[5]}); |
8062 | } |
8063 | } |
8064 | |
8065 | // Get the last argument, which specifies the vector type. |
8066 | assert(HasExtraArg);
8067 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
8068 | Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()); |
8069 | if (!Result) |
8070 | return nullptr; |
8071 | |
8072 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || |
8073 | BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { |
8074 | // Determine the overloaded type of this builtin. |
8075 | llvm::Type *Ty; |
8076 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) |
8077 | Ty = FloatTy; |
8078 | else |
8079 | Ty = DoubleTy; |
8080 | |
8081 | // Determine whether this is an unsigned conversion or not. |
8082 | bool usgn = Result->getZExtValue() == 1; |
8083 | unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; |
8084 | |
8085 | // Call the appropriate intrinsic. |
8086 | Function *F = CGM.getIntrinsic(Int, Ty); |
8087 | return Builder.CreateCall(F, Ops, "vcvtr"); |
8088 | } |
8089 | |
8090 | // Determine the type of this overloaded NEON intrinsic. |
8091 | NeonTypeFlags Type = Result->getZExtValue(); |
8092 | bool usgn = Type.isUnsigned(); |
8093 | bool rightShift = false; |
8094 | |
8095 | llvm::FixedVectorType *VTy = |
8096 | GetNeonType(this, Type, getTarget().hasLegalHalfType(), false, |
8097 | getTarget().hasBFloat16Type()); |
8098 | llvm::Type *Ty = VTy; |
8099 | if (!Ty) |
8100 | return nullptr; |
8101 | |
8102 | // Many NEON builtins have identical semantics and uses in ARM and |
8103 | // AArch64. Emit these in a single function. |
8104 | auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap); |
8105 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
8106 | IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted); |
8107 | if (Builtin) |
8108 | return EmitCommonNeonBuiltinExpr( |
8109 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
8110 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); |
8111 | |
8112 | unsigned Int; |
8113 | switch (BuiltinID) { |
8114 | default: return nullptr; |
8115 | case NEON::BI__builtin_neon_vld1q_lane_v: |
8116 | // Handle 64-bit integer elements as a special case. Use shuffles of |
8117 | // one-element vectors to avoid poor code for i64 in the backend. |
8118 | if (VTy->getElementType()->isIntegerTy(64)) { |
8119 | // Extract the other lane. |
8120 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8121 | int Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); |
8122 | Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); |
8123 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
8124 | // Load the value as a one-element vector. |
8125 | Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1); |
8126 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
8127 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys); |
8128 | Value *Align = getAlignmentValue32(PtrOp0); |
8129 | Value *Ld = Builder.CreateCall(F, {Ops[0], Align}); |
8130 | // Combine them. |
8131 | int Indices[] = {1 - Lane, Lane}; |
8132 | return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane"); |
8133 | } |
8134 | LLVM_FALLTHROUGH;
8135 | case NEON::BI__builtin_neon_vld1_lane_v: { |
8136 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8137 | PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); |
8138 | Value *Ld = Builder.CreateLoad(PtrOp0); |
8139 | return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane"); |
8140 | } |
8141 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
8142 | Int = |
8143 | usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; |
8144 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", |
8145 | 1, true); |
8146 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
8147 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), |
8148 | Ops, "vqrshrun_n", 1, true); |
8149 | case NEON::BI__builtin_neon_vqshrn_n_v: |
8150 | Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; |
8151 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", |
8152 | 1, true); |
8153 | case NEON::BI__builtin_neon_vqshrun_n_v: |
8154 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), |
8155 | Ops, "vqshrun_n", 1, true); |
8156 | case NEON::BI__builtin_neon_vrecpe_v: |
8157 | case NEON::BI__builtin_neon_vrecpeq_v: |
8158 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), |
8159 | Ops, "vrecpe"); |
8160 | case NEON::BI__builtin_neon_vrshrn_n_v: |
8161 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), |
8162 | Ops, "vrshrn_n", 1, true); |
8163 | case NEON::BI__builtin_neon_vrsra_n_v: |
8164 | case NEON::BI__builtin_neon_vrsraq_n_v: |
8165 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8166 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8167 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); |
8168 | Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; |
8169 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]}); |
8170 | return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); |
8171 | case NEON::BI__builtin_neon_vsri_n_v: |
8172 | case NEON::BI__builtin_neon_vsriq_n_v: |
8173 | rightShift = true; |
8174 | LLVM_FALLTHROUGH;
8175 | case NEON::BI__builtin_neon_vsli_n_v: |
8176 | case NEON::BI__builtin_neon_vsliq_n_v: |
8177 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); |
8178 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), |
8179 | Ops, "vsli_n"); |
8180 | case NEON::BI__builtin_neon_vsra_n_v: |
8181 | case NEON::BI__builtin_neon_vsraq_n_v: |
8182 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8183 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
8184 | return Builder.CreateAdd(Ops[0], Ops[1]); |
8185 | case NEON::BI__builtin_neon_vst1q_lane_v: |
8186 | // Handle 64-bit integer elements as a special case. Use a shuffle to get |
8187 | // a one-element vector and avoid poor code for i64 in the backend. |
8188 | if (VTy->getElementType()->isIntegerTy(64)) { |
8189 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8190 | Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2])); |
8191 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
8192 | Ops[2] = getAlignmentValue32(PtrOp0); |
8193 | llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; |
8194 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, |
8195 | Tys), Ops); |
8196 | } |
8197 | LLVM_FALLTHROUGH;
8198 | case NEON::BI__builtin_neon_vst1_lane_v: { |
8199 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8200 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
8201 | auto St = Builder.CreateStore( |
8202 | Ops[1], Builder.CreateElementBitCast(PtrOp0, Ops[1]->getType())); |
8203 | return St; |
8204 | } |
8205 | case NEON::BI__builtin_neon_vtbl1_v: |
8206 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), |
8207 | Ops, "vtbl1"); |
8208 | case NEON::BI__builtin_neon_vtbl2_v: |
8209 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), |
8210 | Ops, "vtbl2"); |
8211 | case NEON::BI__builtin_neon_vtbl3_v: |
8212 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), |
8213 | Ops, "vtbl3"); |
8214 | case NEON::BI__builtin_neon_vtbl4_v: |
8215 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), |
8216 | Ops, "vtbl4"); |
8217 | case NEON::BI__builtin_neon_vtbx1_v: |
8218 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), |
8219 | Ops, "vtbx1"); |
8220 | case NEON::BI__builtin_neon_vtbx2_v: |
8221 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), |
8222 | Ops, "vtbx2"); |
8223 | case NEON::BI__builtin_neon_vtbx3_v: |
8224 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), |
8225 | Ops, "vtbx3"); |
8226 | case NEON::BI__builtin_neon_vtbx4_v: |
8227 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), |
8228 | Ops, "vtbx4"); |
8229 | } |
8230 | } |
8231 | |
8232 | template<typename Integer> |
8233 | static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) { |
8234 | return E->getIntegerConstantExpr(Context)->getExtValue(); |
8235 | } |
8236 | |
8237 | static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V, |
8238 | llvm::Type *T, bool Unsigned) { |
8239 | // Helper function called by Tablegen-constructed ARM MVE builtin codegen, |
8240 | // which finds it convenient to specify signed/unsigned as a boolean flag. |
8241 | return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T); |
8242 | } |
8243 | |
8244 | static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V, |
8245 | uint32_t Shift, bool Unsigned) { |
8246 | // MVE helper function for integer shift right. This must handle signed vs |
8247 | // unsigned, and also deal specially with the case where the shift count is |
8248 | // equal to the lane size. In LLVM IR, an LShr with that parameter would be |
8249 | // undefined behavior, but in MVE it's legal, so we must convert it to code |
8250 | // that is not undefined in IR. |
8251 | unsigned LaneBits = cast<llvm::VectorType>(V->getType()) |
8252 | ->getElementType() |
8253 | ->getPrimitiveSizeInBits(); |
8254 | if (Shift == LaneBits) { |
8255 | // An unsigned shift of the full lane size always generates zero, so we can |
8256 | // simply emit a zero vector. A signed shift of the full lane size does the |
8257 | // same thing as shifting by one bit fewer. |
8258 | if (Unsigned) |
8259 | return llvm::Constant::getNullValue(V->getType()); |
8260 | else |
8261 | --Shift; |
8262 | } |
8263 | return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift); |
8264 | } |
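     | // Worked example (assuming a <8 x i16> input and Shift == 16): the
     | // unsigned case returns <8 x i16> zeroinitializer directly, while the
     | // signed case decrements Shift and emits an AShr by 15, which fills
     | // every lane with its sign bit, the MVE semantics for a full-width
     | // arithmetic shift.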
8265 | |
8266 | static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) { |
8267 | // MVE-specific helper function for a vector splat, which infers the element |
8268 | // count of the output vector by knowing that MVE vectors are all 128 bits |
8269 | // wide. |
8270 | unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits(); |
8271 | return Builder.CreateVectorSplat(Elements, V); |
8272 | } |
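     | // For example, an i16 scalar gives Elements == 128/16 == 8, so the
     | // splat produced is a <8 x i16> vector.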
8273 | |
8274 | static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder, |
8275 | CodeGenFunction *CGF, |
8276 | llvm::Value *V, |
8277 | llvm::Type *DestType) { |
8278 | // Convert one MVE vector type into another by reinterpreting its in-register |
8279 | // format. |
8280 | // |
8281 | // Little-endian, this is identical to a bitcast (which reinterprets the |
8282 | // memory format). But big-endian, they're not necessarily the same, because |
8283 | // the register and memory formats map to each other differently depending on |
8284 | // the lane size. |
8285 | // |
8286 | // We generate a bitcast whenever we can (if we're little-endian, or if the |
8287 | // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic |
8288 | // that performs the different kind of reinterpretation. |
8289 | if (CGF->getTarget().isBigEndian() && |
8290 | V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) { |
8291 | return Builder.CreateCall( |
8292 | CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq, |
8293 | {DestType, V->getType()}), |
8294 | V); |
8295 | } else { |
8296 | return Builder.CreateBitCast(V, DestType); |
8297 | } |
8298 | } |
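     | // Illustrative case (assumed types): reinterpreting <8 x i16> as
     | // <4 x i32> is a plain bitcast on little-endian targets, but on
     | // big-endian the differing lane sizes (16 vs 32 bits) route through
     | // the arm.mve.vreinterpretq intrinsic instead.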
8299 | |
8300 | static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { |
8301 | // Make a shufflevector that extracts every other element of a vector (evens |
8302 | // or odds, as desired). |
8303 | SmallVector<int, 16> Indices; |
8304 | unsigned InputElements = |
8305 | cast<llvm::FixedVectorType>(V->getType())->getNumElements(); |
8306 | for (unsigned i = 0; i < InputElements; i += 2) |
8307 | Indices.push_back(i + Odd); |
8308 | return Builder.CreateShuffleVector(V, Indices); |
8309 | } |
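     | // For example, with an 8-element input, Odd == false yields the mask
     | // {0,2,4,6} and Odd == true yields {1,3,5,7}.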
8310 | |
8311 | static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0, |
8312 | llvm::Value *V1) { |
8313 | // Make a shufflevector that interleaves two vectors element by element. |
8314 | assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8315 | SmallVector<int, 16> Indices; |
8316 | unsigned InputElements = |
8317 | cast<llvm::FixedVectorType>(V0->getType())->getNumElements(); |
8318 | for (unsigned i = 0; i < InputElements; i++) { |
8319 | Indices.push_back(i); |
8320 | Indices.push_back(i + InputElements); |
8321 | } |
8322 | return Builder.CreateShuffleVector(V0, V1, Indices); |
8323 | } |
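     | // For example, zipping two 4-element vectors yields the shuffle mask
     | // {0,4,1,5,2,6,3,7}, i.e. V0[0],V1[0],V0[1],V1[1],...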
8324 | |
8325 | template<unsigned HighBit, unsigned OtherBits> |
8326 | static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { |
8327 | // MVE-specific helper function to make a vector splat of a constant such as |
8328 | // UINT_MAX or INT_MIN, in which all bits below the highest one are equal. |
8329 | llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType(); |
8330 | unsigned LaneBits = T->getPrimitiveSizeInBits(); |
8331 | uint32_t Value = HighBit << (LaneBits - 1); |
8332 | if (OtherBits) |
8333 | Value |= (1UL << (LaneBits - 1)) - 1; |
8334 | llvm::Value *Lane = llvm::ConstantInt::get(T, Value); |
8335 | return ARMMVEVectorSplat(Builder, Lane); |
8336 | } |
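     | // Worked instantiations, assuming 16-bit lanes:
     | //   ARMMVEConstantSplat<1,0> splats 0x8000 (INT16_MIN)
     | //   ARMMVEConstantSplat<0,1> splats 0x7fff (INT16_MAX)
     | //   ARMMVEConstantSplat<1,1> splats 0xffff (UINT16_MAX)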
8337 | |
8338 | static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder, |
8339 | llvm::Value *V, |
8340 | unsigned ReverseWidth) { |
8341 | // MVE-specific helper function which reverses the elements of a |
8342 | // vector within every (ReverseWidth)-bit collection of lanes. |
8343 | SmallVector<int, 16> Indices; |
8344 | unsigned LaneSize = V->getType()->getScalarSizeInBits(); |
8345 | unsigned Elements = 128 / LaneSize; |
8346 | unsigned Mask = ReverseWidth / LaneSize - 1; |
8347 | for (unsigned i = 0; i < Elements; i++) |
8348 | Indices.push_back(i ^ Mask); |
8349 | return Builder.CreateShuffleVector(V, Indices); |
8350 | } |
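     | // For example, with 8-bit lanes and ReverseWidth == 32, Mask == 3 and
     | // the indices become {3,2,1,0,7,6,5,4,...}, reversing each group of
     | // four byte lanes.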
8351 | |
8352 | Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID, |
8353 | const CallExpr *E, |
8354 | ReturnValueSlot ReturnValue, |
8355 | llvm::Triple::ArchType Arch) { |
8356 | enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType; |
8357 | Intrinsic::ID IRIntr; |
8358 | unsigned NumVectors; |
8359 | |
8360 | // Code autogenerated by Tablegen will handle all the simple builtins. |
8361 | switch (BuiltinID) { |
8362 | #include "clang/Basic/arm_mve_builtin_cg.inc" |
8363 | |
8364 | // If we didn't match an MVE builtin id at all, go back to the |
8365 | // main EmitARMBuiltinExpr. |
8366 | default: |
8367 | return nullptr; |
8368 | } |
8369 | |
8370 | // Anything that breaks from that switch is an MVE builtin that |
8371 | // needs handwritten code to generate. |
8372 | |
8373 | switch (CustomCodeGenType) { |
8374 | |
8375 | case CustomCodeGen::VLD24: { |
8376 | llvm::SmallVector<Value *, 4> Ops; |
8377 | llvm::SmallVector<llvm::Type *, 4> Tys; |
8378 | |
8379 | auto MvecCType = E->getType(); |
8380 | auto MvecLType = ConvertType(MvecCType); |
8381 | assert(MvecLType->isStructTy() &&
8382 | "Return type for vld[24]q should be a struct");
8383 | assert(MvecLType->getStructNumElements() == 1 &&
8384 | "Return-type struct for vld[24]q should have one element");
8385 | auto MvecLTypeInner = MvecLType->getStructElementType(0); |
8386 | assert(MvecLTypeInner->isArrayTy() &&
8387 | "Return-type struct for vld[24]q should contain an array");
8388 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8389 | "Array member of return-type struct vld[24]q has wrong length");
8390 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
8391 | |
8392 | Tys.push_back(VecLType); |
8393 | |
8394 | auto Addr = E->getArg(0); |
8395 | Ops.push_back(EmitScalarExpr(Addr)); |
8396 | Tys.push_back(ConvertType(Addr->getType())); |
8397 | |
8398 | Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys)); |
8399 | Value *LoadResult = Builder.CreateCall(F, Ops); |
8400 | Value *MvecOut = UndefValue::get(MvecLType); |
8401 | for (unsigned i = 0; i < NumVectors; ++i) { |
8402 | Value *Vec = Builder.CreateExtractValue(LoadResult, i); |
8403 | MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i}); |
8404 | } |
8405 | |
8406 | if (ReturnValue.isNull()) |
8407 | return MvecOut; |
8408 | else |
8409 | return Builder.CreateStore(MvecOut, ReturnValue.getValue()); |
8410 | } |
8411 | |
8412 | case CustomCodeGen::VST24: { |
8413 | llvm::SmallVector<Value *, 4> Ops; |
8414 | llvm::SmallVector<llvm::Type *, 4> Tys; |
8415 | |
8416 | auto Addr = E->getArg(0); |
8417 | Ops.push_back(EmitScalarExpr(Addr)); |
8418 | Tys.push_back(ConvertType(Addr->getType())); |
8419 | |
8420 | auto MvecCType = E->getArg(1)->getType(); |
8421 | auto MvecLType = ConvertType(MvecCType); |
8422 | assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
8423 | assert(MvecLType->getStructNumElements() == 1 &&
8424 | "Data-type struct for vst2q should have one element");
8425 | auto MvecLTypeInner = MvecLType->getStructElementType(0); |
8426 | assert(MvecLTypeInner->isArrayTy() &&
8427 | "Data-type struct for vst2q should contain an array");
8428 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8429 | "Array member of data-type struct for vst2q has wrong length");
8430 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
8431 | |
8432 | Tys.push_back(VecLType); |
8433 | |
8434 | AggValueSlot MvecSlot = CreateAggTemp(MvecCType); |
8435 | EmitAggExpr(E->getArg(1), MvecSlot); |
8436 | auto Mvec = Builder.CreateLoad(MvecSlot.getAddress()); |
8437 | for (unsigned i = 0; i < NumVectors; i++) |
8438 | Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i})); |
8439 | |
8440 | Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys)); |
8441 | Value *ToReturn = nullptr; |
8442 | for (unsigned i = 0; i < NumVectors; i++) { |
8443 | Ops.push_back(llvm::ConstantInt::get(Int32Ty, i)); |
8444 | ToReturn = Builder.CreateCall(F, Ops); |
8445 | Ops.pop_back(); |
8446 | } |
8447 | return ToReturn; |
8448 | } |
8449 | } |
8450 | llvm_unreachable("unknown custom codegen type.")::llvm::llvm_unreachable_internal("unknown custom codegen type." , "clang/lib/CodeGen/CGBuiltin.cpp", 8450); |
8451 | } |
8452 | |
8453 | Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID, |
8454 | const CallExpr *E, |
8455 | ReturnValueSlot ReturnValue, |
8456 | llvm::Triple::ArchType Arch) { |
8457 | switch (BuiltinID) { |
8458 | default: |
8459 | return nullptr; |
8460 | #include "clang/Basic/arm_cde_builtin_cg.inc" |
8461 | } |
8462 | } |
8463 | |
8464 | static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, |
8465 | const CallExpr *E, |
8466 | SmallVectorImpl<Value *> &Ops, |
8467 | llvm::Triple::ArchType Arch) { |
8468 | unsigned int Int = 0; |
8469 | const char *s = nullptr; |
8470 | |
8471 | switch (BuiltinID) { |
8472 | default: |
8473 | return nullptr; |
8474 | case NEON::BI__builtin_neon_vtbl1_v: |
8475 | case NEON::BI__builtin_neon_vqtbl1_v: |
8476 | case NEON::BI__builtin_neon_vqtbl1q_v: |
8477 | case NEON::BI__builtin_neon_vtbl2_v: |
8478 | case NEON::BI__builtin_neon_vqtbl2_v: |
8479 | case NEON::BI__builtin_neon_vqtbl2q_v: |
8480 | case NEON::BI__builtin_neon_vtbl3_v: |
8481 | case NEON::BI__builtin_neon_vqtbl3_v: |
8482 | case NEON::BI__builtin_neon_vqtbl3q_v: |
8483 | case NEON::BI__builtin_neon_vtbl4_v: |
8484 | case NEON::BI__builtin_neon_vqtbl4_v: |
8485 | case NEON::BI__builtin_neon_vqtbl4q_v: |
8486 | break; |
8487 | case NEON::BI__builtin_neon_vtbx1_v: |
8488 | case NEON::BI__builtin_neon_vqtbx1_v: |
8489 | case NEON::BI__builtin_neon_vqtbx1q_v: |
8490 | case NEON::BI__builtin_neon_vtbx2_v: |
8491 | case NEON::BI__builtin_neon_vqtbx2_v: |
8492 | case NEON::BI__builtin_neon_vqtbx2q_v: |
8493 | case NEON::BI__builtin_neon_vtbx3_v: |
8494 | case NEON::BI__builtin_neon_vqtbx3_v: |
8495 | case NEON::BI__builtin_neon_vqtbx3q_v: |
8496 | case NEON::BI__builtin_neon_vtbx4_v: |
8497 | case NEON::BI__builtin_neon_vqtbx4_v: |
8498 | case NEON::BI__builtin_neon_vqtbx4q_v: |
8499 | break; |
8500 | } |
8501 | |
8502 | assert(E->getNumArgs() >= 3);
8503 | |
8504 | // Get the last argument, which specifies the vector type. |
8505 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
8506 | Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext()); |
8507 | if (!Result) |
8508 | return nullptr; |
8509 | |
8510 | // Determine the type of this overloaded NEON intrinsic. |
8511 | NeonTypeFlags Type = Result->getZExtValue(); |
8512 | llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type); |
8513 | if (!Ty) |
8514 | return nullptr; |
8515 | |
8516 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
8517 | |
8518 | // AArch64 scalar builtins are not overloaded: they do not have an extra
8519 | // argument that specifies the vector type, so we need to handle each case.
8520 | switch (BuiltinID) { |
8521 | case NEON::BI__builtin_neon_vtbl1_v: { |
8522 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr, |
8523 | Ops[1], Ty, Intrinsic::aarch64_neon_tbl1, |
8524 | "vtbl1"); |
8525 | } |
8526 | case NEON::BI__builtin_neon_vtbl2_v: { |
8527 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr, |
8528 | Ops[2], Ty, Intrinsic::aarch64_neon_tbl1, |
8529 | "vtbl1"); |
8530 | } |
8531 | case NEON::BI__builtin_neon_vtbl3_v: { |
8532 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr, |
8533 | Ops[3], Ty, Intrinsic::aarch64_neon_tbl2, |
8534 | "vtbl2"); |
8535 | } |
8536 | case NEON::BI__builtin_neon_vtbl4_v: { |
8537 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr, |
8538 | Ops[4], Ty, Intrinsic::aarch64_neon_tbl2, |
8539 | "vtbl2"); |
8540 | } |
8541 | case NEON::BI__builtin_neon_vtbx1_v: { |
8542 | Value *TblRes = |
8543 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2], |
8544 | Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1"); |
8545 | |
8546 | llvm::Constant *EightV = ConstantInt::get(Ty, 8); |
8547 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV); |
8548 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
8549 | |
8550 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
8551 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
8552 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
8553 | } |
8554 | case NEON::BI__builtin_neon_vtbx2_v: { |
8555 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0], |
8556 | Ops[3], Ty, Intrinsic::aarch64_neon_tbx1, |
8557 | "vtbx1"); |
8558 | } |
8559 | case NEON::BI__builtin_neon_vtbx3_v: { |
8560 | Value *TblRes = |
8561 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4], |
8562 | Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); |
8563 | |
8564 | llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); |
8565 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], |
8566 | TwentyFourV); |
8567 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
8568 | |
8569 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
8570 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
8571 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
8572 | } |
8573 | case NEON::BI__builtin_neon_vtbx4_v: { |
8574 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0], |
8575 | Ops[5], Ty, Intrinsic::aarch64_neon_tbx2, |
8576 | "vtbx2"); |
8577 | } |
8578 | case NEON::BI__builtin_neon_vqtbl1_v: |
8579 | case NEON::BI__builtin_neon_vqtbl1q_v: |
8580 | Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; |
8581 | case NEON::BI__builtin_neon_vqtbl2_v: |
8582 | case NEON::BI__builtin_neon_vqtbl2q_v:
8583 | Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; |
8584 | case NEON::BI__builtin_neon_vqtbl3_v: |
8585 | case NEON::BI__builtin_neon_vqtbl3q_v: |
8586 | Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; |
8587 | case NEON::BI__builtin_neon_vqtbl4_v: |
8588 | case NEON::BI__builtin_neon_vqtbl4q_v: |
8589 | Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; |
8590 | case NEON::BI__builtin_neon_vqtbx1_v: |
8591 | case NEON::BI__builtin_neon_vqtbx1q_v: |
8592 | Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; |
8593 | case NEON::BI__builtin_neon_vqtbx2_v: |
8594 | case NEON::BI__builtin_neon_vqtbx2q_v: |
8595 | Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; |
8596 | case NEON::BI__builtin_neon_vqtbx3_v: |
8597 | case NEON::BI__builtin_neon_vqtbx3q_v: |
8598 | Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; |
8599 | case NEON::BI__builtin_neon_vqtbx4_v: |
8600 | case NEON::BI__builtin_neon_vqtbx4q_v: |
8601 | Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; |
8602 | }
8604 | |
8605 | if (!Int) |
8606 | return nullptr; |
8607 | |
8608 | Function *F = CGF.CGM.getIntrinsic(Int, Ty); |
8609 | return CGF.EmitNeonCall(F, Ops, s); |
8610 | } |
8611 | |
8612 | Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { |
8613 | auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
8614 | Op = Builder.CreateBitCast(Op, Int16Ty); |
8615 | Value *V = UndefValue::get(VTy); |
8616 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
8617 | Op = Builder.CreateInsertElement(V, Op, CI); |
8618 | return Op; |
8619 | } |
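     | // Sketch of the emitted IR (names illustrative):
     | //   %v = insertelement <4 x i16> undef, i16 %op, i64 0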
8620 | |
8621 | /// SVEBuiltinMemEltTy - Returns the memory element type for this memory |
8622 | /// access builtin. Only required if it can't be inferred from the base pointer |
8623 | /// operand. |
8624 | llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) { |
8625 | switch (TypeFlags.getMemEltType()) { |
8626 | case SVETypeFlags::MemEltTyDefault: |
8627 | return getEltType(TypeFlags); |
8628 | case SVETypeFlags::MemEltTyInt8: |
8629 | return Builder.getInt8Ty(); |
8630 | case SVETypeFlags::MemEltTyInt16: |
8631 | return Builder.getInt16Ty(); |
8632 | case SVETypeFlags::MemEltTyInt32: |
8633 | return Builder.getInt32Ty(); |
8634 | case SVETypeFlags::MemEltTyInt64: |
8635 | return Builder.getInt64Ty(); |
8636 | } |
8637 | llvm_unreachable("Unknown MemEltType")::llvm::llvm_unreachable_internal("Unknown MemEltType", "clang/lib/CodeGen/CGBuiltin.cpp" , 8637); |
8638 | } |
8639 | |
8640 | llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) { |
8641 | switch (TypeFlags.getEltType()) { |
8642 | default: |
8643 | llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "clang/lib/CodeGen/CGBuiltin.cpp" , 8643); |
8644 | |
8645 | case SVETypeFlags::EltTyInt8: |
8646 | return Builder.getInt8Ty(); |
8647 | case SVETypeFlags::EltTyInt16: |
8648 | return Builder.getInt16Ty(); |
8649 | case SVETypeFlags::EltTyInt32: |
8650 | return Builder.getInt32Ty(); |
8651 | case SVETypeFlags::EltTyInt64: |
8652 | return Builder.getInt64Ty(); |
8653 | |
8654 | case SVETypeFlags::EltTyFloat16: |
8655 | return Builder.getHalfTy(); |
8656 | case SVETypeFlags::EltTyFloat32: |
8657 | return Builder.getFloatTy(); |
8658 | case SVETypeFlags::EltTyFloat64: |
8659 | return Builder.getDoubleTy(); |
8660 | |
8661 | case SVETypeFlags::EltTyBFloat16: |
8662 | return Builder.getBFloatTy(); |
8663 | |
8664 | case SVETypeFlags::EltTyBool8: |
8665 | case SVETypeFlags::EltTyBool16: |
8666 | case SVETypeFlags::EltTyBool32: |
8667 | case SVETypeFlags::EltTyBool64: |
8668 | return Builder.getInt1Ty(); |
8669 | } |
8670 | } |
8671 | |
8672 | // Return the llvm predicate vector type corresponding to the specified element |
8673 | // TypeFlags. |
8674 | llvm::ScalableVectorType * |
8675 | CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) { |
8676 | switch (TypeFlags.getEltType()) { |
8677 | default: llvm_unreachable("Unhandled SVETypeFlag!");
8678 | |
8679 | case SVETypeFlags::EltTyInt8: |
8680 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8681 | case SVETypeFlags::EltTyInt16: |
8682 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8683 | case SVETypeFlags::EltTyInt32: |
8684 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8685 | case SVETypeFlags::EltTyInt64: |
8686 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8687 | |
8688 | case SVETypeFlags::EltTyBFloat16: |
8689 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8690 | case SVETypeFlags::EltTyFloat16: |
8691 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8692 | case SVETypeFlags::EltTyFloat32: |
8693 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8694 | case SVETypeFlags::EltTyFloat64: |
8695 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8696 | |
8697 | case SVETypeFlags::EltTyBool8: |
8698 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8699 | case SVETypeFlags::EltTyBool16: |
8700 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8701 | case SVETypeFlags::EltTyBool32: |
8702 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8703 | case SVETypeFlags::EltTyBool64: |
8704 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8705 | } |
8706 | } |
8707 | |
8708 | // Return the llvm vector type corresponding to the specified element TypeFlags. |
8709 | llvm::ScalableVectorType * |
8710 | CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) { |
8711 | switch (TypeFlags.getEltType()) { |
8712 | default: |
8713 | llvm_unreachable("Invalid SVETypeFlag!");
8714 | |
8715 | case SVETypeFlags::EltTyInt8: |
8716 | return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16); |
8717 | case SVETypeFlags::EltTyInt16: |
8718 | return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8); |
8719 | case SVETypeFlags::EltTyInt32: |
8720 | return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4); |
8721 | case SVETypeFlags::EltTyInt64: |
8722 | return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2); |
8723 | |
8724 | case SVETypeFlags::EltTyFloat16: |
8725 | return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8); |
8726 | case SVETypeFlags::EltTyBFloat16: |
8727 | return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8); |
8728 | case SVETypeFlags::EltTyFloat32: |
8729 | return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4); |
8730 | case SVETypeFlags::EltTyFloat64: |
8731 | return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2); |
8732 | |
8733 | case SVETypeFlags::EltTyBool8: |
8734 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8735 | case SVETypeFlags::EltTyBool16: |
8736 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8737 | case SVETypeFlags::EltTyBool32: |
8738 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8739 | case SVETypeFlags::EltTyBool64: |
8740 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8741 | } |
8742 | } |
8743 | |
8744 | llvm::Value * |
8745 | CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) { |
8746 | Function *Ptrue = |
8747 | CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags)); |
8748 | return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)}); |
8749 | } |
8750 | |
8751 | constexpr unsigned SVEBitsPerBlock = 128; |
8752 | |
8753 | static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) { |
8754 | unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits(); |
8755 | return llvm::ScalableVectorType::get(EltTy, NumElts); |
8756 | } |
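     | // For example, an i32 element type gives NumElts == 128/32 == 4,
     | // i.e. <vscale x 4 x i32>.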
8757 | |
8758 | // Reinterpret the input predicate so that it can be used to correctly isolate |
8759 | // the elements of the specified datatype. |
8760 | Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred, |
8761 | llvm::ScalableVectorType *VTy) { |
8762 | auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy); |
8763 | if (Pred->getType() == RTy) |
8764 | return Pred; |
8765 | |
8766 | unsigned IntID; |
8767 | llvm::Type *IntrinsicTy; |
8768 | switch (VTy->getMinNumElements()) { |
8769 | default: |
8770 | llvm_unreachable("unsupported element count!")::llvm::llvm_unreachable_internal("unsupported element count!" , "clang/lib/CodeGen/CGBuiltin.cpp", 8770); |
8771 | case 2: |
8772 | case 4: |
8773 | case 8: |
8774 | IntID = Intrinsic::aarch64_sve_convert_from_svbool; |
8775 | IntrinsicTy = RTy; |
8776 | break; |
8777 | case 16: |
8778 | IntID = Intrinsic::aarch64_sve_convert_to_svbool; |
8779 | IntrinsicTy = Pred->getType(); |
8780 | break; |
8781 | } |
8782 | |
8783 | Function *F = CGM.getIntrinsic(IntID, IntrinsicTy); |
8784 | Value *C = Builder.CreateCall(F, Pred); |
8785 | assert(C->getType() == RTy && "Unexpected return type!");
8786 | return C; |
8787 | } |
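     | // Illustrative case (assumed types): for Pred of svbool_t type
     | // <vscale x 16 x i1> and VTy == <vscale x 2 x i64>, this emits
     | //   call <vscale x 2 x i1>
     | //     @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %p)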
8788 | |
8789 | Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags, |
8790 | SmallVectorImpl<Value *> &Ops, |
8791 | unsigned IntID) { |
8792 | auto *ResultTy = getSVEType(TypeFlags); |
8793 | auto *OverloadedTy = |
8794 | llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy); |
8795 | |
8796 | // At the ACLE level there's only one predicate type, svbool_t, which is |
8797 | // mapped to <n x 16 x i1>. However, this might be incompatible with the |
8798 | // actual type being loaded. For example, when loading doubles (i64) the |
8799 | // predicate should be <n x 2 x i1> instead. At the IR level the type of
8800 | // the predicate and the data being loaded must match. Cast accordingly. |
8801 | Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy); |
8802 | |
8803 | Function *F = nullptr; |
8804 | if (Ops[1]->getType()->isVectorTy()) |
8805 | // This is the "vector base, scalar offset" case. In order to uniquely |
8806 | // map this built-in to an LLVM IR intrinsic, we need both the return type |
8807 | // and the type of the vector base. |
8808 | F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()}); |
8809 | else |
8810 | // This is the "scalar base, vector offset case". The type of the offset |
8811 | // is encoded in the name of the intrinsic. We only need to specify the |
8812 | // return type in order to uniquely map this built-in to an LLVM IR |
8813 | // intrinsic. |
8814 | F = CGM.getIntrinsic(IntID, OverloadedTy); |
8815 | |
8816 | // Pass 0 when the offset is missing. This can only be applied when using |
8817 | // the "vector base" addressing mode for which ACLE allows no offset. The |
8818 | // corresponding LLVM IR always requires an offset. |
8819 | if (Ops.size() == 2) { |
8820 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8821 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8822 | } |
8823 | |
8824 | // For "vector base, scalar index" scale the index so that it becomes a |
8825 | // scalar offset. |
8826 | if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) { |
8827 | unsigned BytesPerElt = |
8828 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
8829 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8830 | Ops[2] = Builder.CreateMul(Ops[2], Scale); |
8831 | } |
8832 | |
8833 | Value *Call = Builder.CreateCall(F, Ops); |
8834 | |
8835 | // The following sext/zext is only needed when ResultTy != OverloadedTy. In |
8836 | // other cases it's folded into a nop. |
8837 | return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy) |
8838 | : Builder.CreateSExt(Call, ResultTy); |
8839 | } |
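     | // Illustrative case of the index scaling above: for a "vector base,
     | // scalar index" gather of 64-bit elements, the index is multiplied by
     | // BytesPerElt == 8, turning an element index into the byte offset that
     | // the LLVM intrinsic expects.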
8840 | |
8841 | Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags, |
8842 | SmallVectorImpl<Value *> &Ops, |
8843 | unsigned IntID) { |
8844 | auto *SrcDataTy = getSVEType(TypeFlags); |
8845 | auto *OverloadedTy = |
8846 | llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy); |
8847 | |
8848 | // In ACLE the source data is passed in the last argument, whereas in LLVM IR |
8849 | // it's the first argument. Move it accordingly. |
8850 | Ops.insert(Ops.begin(), Ops.pop_back_val()); |
8851 | |
8852 | Function *F = nullptr; |
8853 | if (Ops[2]->getType()->isVectorTy()) |
8854 | // This is the "vector base, scalar offset" case. In order to uniquely |
8855 | // map this built-in to an LLVM IR intrinsic, we need both the return type |
8856 | // and the type of the vector base. |
8857 | F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()}); |
8858 | else |
8859 | // This is the "scalar base, vector offset case". The type of the offset |
8860 | // is encoded in the name of the intrinsic. We only need to specify the |
8861 | // return type in order to uniquely map this built-in to an LLVM IR |
8862 | // intrinsic. |
8863 | F = CGM.getIntrinsic(IntID, OverloadedTy); |
8864 | |
8865 | // Pass 0 when the offset is missing. This can only be applied when using |
8866 | // the "vector base" addressing mode for which ACLE allows no offset. The |
8867 | // corresponding LLVM IR always requires an offset. |
8868 | if (Ops.size() == 3) { |
8869 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8870 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8871 | } |
8872 | |
8873 | // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's |
8874 | // folded into a nop. |
8875 | Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy); |
8876 | |
8877 | // At the ACLE level there's only one predicate type, svbool_t, which is |
8878 | // mapped to <n x 16 x i1>. However, this might be incompatible with the |
8879 | // actual type being stored. For example, when storing doubles (i64) the |
8880 | // predicate should be <n x 2 x i1> instead. At the IR level the type of
8881 | // the predicate and the data being stored must match. Cast accordingly. |
8882 | Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy); |
8883 | |
8884 | // For "vector base, scalar index" scale the index so that it becomes a |
8885 | // scalar offset. |
8886 | if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) { |
8887 | unsigned BytesPerElt = |
8888 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
8889 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8890 | Ops[3] = Builder.CreateMul(Ops[3], Scale); |
8891 | } |
8892 | |
8893 | return Builder.CreateCall(F, Ops); |
8894 | } |
8895 | |
8896 | Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags, |
8897 | SmallVectorImpl<Value *> &Ops, |
8898 | unsigned IntID) { |
8899 | // The gather prefetches are overloaded on the vector input - this can either |
8900 | // be the vector of base addresses or vector of offsets. |
8901 | auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType()); |
8902 | if (!OverloadedTy) |
8903 | OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType()); |
8904 | |
8905 | // Cast the predicate from svbool_t to the right number of elements. |
8906 | Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy); |
8907 | |
8908 | // vector + imm addressing modes |
8909 | if (Ops[1]->getType()->isVectorTy()) { |
8910 | if (Ops.size() == 3) { |
8911 | // Pass 0 for 'vector+imm' when the index is omitted. |
8912 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8913 | |
8914 | // The sv_prfop is the last operand in the builtin and IR intrinsic. |
8915 | std::swap(Ops[2], Ops[3]); |
8916 | } else { |
8917 | // Index needs to be passed as scaled offset. |
8918 | llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
8919 | unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8; |
8920 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8921 | Ops[2] = Builder.CreateMul(Ops[2], Scale); |
8922 | } |
8923 | } |
8924 | |
8925 | Function *F = CGM.getIntrinsic(IntID, OverloadedTy); |
8926 | return Builder.CreateCall(F, Ops); |
8927 | } |
8928 | |
8929 | Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags, |
8930 | SmallVectorImpl<Value*> &Ops, |
8931 | unsigned IntID) { |
8932 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
8933 | auto VecPtrTy = llvm::PointerType::getUnqual(VTy); |
8934 | auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
8935 | |
8936 | unsigned N; |
8937 | switch (IntID) { |
8938 | case Intrinsic::aarch64_sve_ld2: |
8939 | N = 2; |
8940 | break; |
8941 | case Intrinsic::aarch64_sve_ld3: |
8942 | N = 3; |
8943 | break; |
8944 | case Intrinsic::aarch64_sve_ld4: |
8945 | N = 4; |
8946 | break; |
8947 | default: |
8948 | llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "clang/lib/CodeGen/CGBuiltin.cpp" , 8948); |
8949 | } |
8950 | auto RetTy = llvm::VectorType::get(VTy->getElementType(), |
8951 | VTy->getElementCount() * N); |
8952 | |
8953 | Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); |
8954 | Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8955 | Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); |
8956 | BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); |
8957 | BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); |
8958 | |
8959 | Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()}); |
8960 | return Builder.CreateCall(F, { Predicate, BasePtr }); |
8961 | } |
8962 | |
8963 | Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags, |
8964 | SmallVectorImpl<Value*> &Ops, |
8965 | unsigned IntID) { |
8966 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
8967 | auto VecPtrTy = llvm::PointerType::getUnqual(VTy); |
8968 | auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
8969 | |
8970 | unsigned N; |
8971 | switch (IntID) { |
8972 | case Intrinsic::aarch64_sve_st2: |
8973 | N = 2; |
8974 | break; |
8975 | case Intrinsic::aarch64_sve_st3: |
8976 | N = 3; |
8977 | break; |
8978 | case Intrinsic::aarch64_sve_st4: |
8979 | N = 4; |
8980 | break; |
8981 | default: |
8982 | llvm_unreachable("unknown intrinsic!");
8983 | } |
8984 | auto TupleTy = |
8985 | llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N); |
8986 | |
8987 | Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); |
8988 | Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy); |
8989 | Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0); |
8990 | Value *Val = Ops.back(); |
8991 | BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); |
8992 | BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); |
8993 | |
8994 | // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we |
8995 | // need to break up the tuple vector. |
8996 | SmallVector<llvm::Value*, 5> Operands; |
8997 | Function *FExtr = |
8998 | CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy}); |
8999 | for (unsigned I = 0; I < N; ++I) |
9000 | Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)})); |
9001 | Operands.append({Predicate, BasePtr}); |
9002 | |
9003 | Function *F = CGM.getIntrinsic(IntID, { VTy }); |
9004 | return Builder.CreateCall(F, Operands); |
9005 | } |
9006 | |
9007 | // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and |
9008 | // svpmullt_pair intrinsics, with the exception that their results are bitcast |
9009 | // to a wider type. |
9010 | Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags, |
9011 | SmallVectorImpl<Value *> &Ops, |
9012 | unsigned BuiltinID) { |
9013 | // Splat scalar operand to vector (intrinsics with _n infix) |
9014 | if (TypeFlags.hasSplatOperand()) { |
9015 | unsigned OpNo = TypeFlags.getSplatOperand(); |
9016 | Ops[OpNo] = EmitSVEDupX(Ops[OpNo]); |
9017 | } |
9018 | |
9019 | // The pair-wise function has a narrower overloaded type. |
9020 | Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType()); |
9021 | Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]}); |
9022 | |
9023 | // Now bitcast to the wider result type. |
9024 | llvm::ScalableVectorType *Ty = getSVEType(TypeFlags); |
9025 | return EmitSVEReinterpret(Call, Ty); |
9026 | } |
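     | // Illustrative case (assumed types): for an svpmullb producing 64-bit
     | // results, the pair intrinsic is emitted at the narrower type
     | // <vscale x 4 x i32> and the call is then reinterpreted as
     | // <vscale x 2 x i64>.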
9027 | |
9028 | Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags, |
9029 | ArrayRef<Value *> Ops, unsigned BuiltinID) { |
9030 | llvm::Type *OverloadedTy = getSVEType(TypeFlags); |
9031 | Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy); |
9032 | return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)}); |
9033 | } |
9034 | |
9035 | Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags, |
9036 | SmallVectorImpl<Value *> &Ops, |
9037 | unsigned BuiltinID) { |
9038 | auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
9039 | auto *VectorTy = getSVEVectorForElementType(MemEltTy); |
9040 | auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
9041 | |
9042 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
9043 | Value *BasePtr = Ops[1]; |
9044 | |
9045 | // Apply the index operand, if present, to the base pointer.
9046 | if (Ops.size() > 3) { |
9047 | BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo()); |
9048 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]); |
9049 | } |
9050 | |
9051 | // Prefetch intrinsics always expect an i8*.
9052 | BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty)); |
9053 | Value *PrfOp = Ops.back(); |
9054 | |
9055 | Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType()); |
9056 | return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp}); |
9057 | } |
9058 | |
9059 | Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E, |
9060 | llvm::Type *ReturnTy, |
9061 | SmallVectorImpl<Value *> &Ops, |
9062 | unsigned BuiltinID, |
9063 | bool IsZExtReturn) { |
9064 | QualType LangPTy = E->getArg(1)->getType(); |
9065 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
9066 | LangPTy->castAs<PointerType>()->getPointeeType()); |
9067 | |
9068 | // The vector type that is returned may be different from the |
9069 | // eventual type loaded from memory. |
9070 | auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy); |
9071 | auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
9072 | |
9073 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
9074 | Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo()); |
9075 | Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); |
9076 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset); |
9077 | |
9078 | BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo()); |
9079 | Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy); |
9080 | auto *Load = |
9081 | cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr})); |
9082 | auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType()); |
9083 | CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); |
9084 | |
9085 | return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy) |
9086 | : Builder.CreateSExt(Load, VectorTy); |
9087 | } |
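     | |
     | // Example (illustrative): svld1sb_s32(pg, base) loads nxv4i8 and |
     | // sign-extends it to the return type: |
     | //   %v = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base) |
     | //   %r = sext <vscale x 4 x i8> %v to <vscale x 4 x i32> |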
9088 | |
9089 | Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E, |
9090 | SmallVectorImpl<Value *> &Ops, |
9091 | unsigned BuiltinID) { |
9092 | QualType LangPTy = E->getArg(1)->getType(); |
9093 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
9094 | LangPTy->castAs<PointerType>()->getPointeeType()); |
9095 | |
9096 | // The vector type that is stored may be different from the |
9097 | // eventual type stored to memory. |
9098 | auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType()); |
9099 | auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
9100 | |
9101 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
9102 | Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo()); |
9103 | Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0); |
9104 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset); |
9105 | |
9106 | // Last value is always the data |
9107 | llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy); |
9108 | |
9109 | BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo()); |
9110 | Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy); |
9111 | auto *Store = |
9112 | cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr})); |
9113 | auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType()); |
9114 | CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); |
9115 | return Store; |
9116 | } |
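     | |
     | // Example (illustrative): svst1b_s32(pg, base, data) truncates before |
     | // storing: |
     | //   %v = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8> |
     | //   call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %v, <vscale x 4 x i1> %pg, i8* %base) |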
9117 | |
9118 | // Limit the usage of scalable llvm IR generated by the ACLE by using the |
9119 | // sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat. |
9120 | Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) { |
9121 | auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty); |
9122 | return Builder.CreateCall(F, Scalar); |
9123 | } |
9124 | |
9125 | Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) { |
9126 | return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType())); |
9127 | } |
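     | |
     | // Example (illustrative): svdup_n_s32(x) emits |
     | //   call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %x) |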
9128 | |
9129 | Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) { |
9130 | // FIXME: For big endian this needs an additional REV, or needs a separate |
9131 | // intrinsic that is code-generated as a no-op, because the LLVM bitcast |
9132 | // instruction is defined as 'bitwise' equivalent from memory point of |
9133 | // view (when storing/reloading), whereas the svreinterpret builtin |
9134 | // implements bitwise equivalent cast from register point of view. |
9135 | // LLVM CodeGen for a bitcast must add an explicit REV for big-endian. |
9136 | return Builder.CreateBitCast(Val, Ty); |
9137 | } |
9138 | |
9139 | static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
9140 | SmallVectorImpl<Value *> &Ops) { |
9141 | auto *SplatZero = Constant::getNullValue(Ty); |
9142 | Ops.insert(Ops.begin(), SplatZero); |
9143 | } |
9144 | |
9145 | static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
9146 | SmallVectorImpl<Value *> &Ops) { |
9147 | auto *SplatUndef = UndefValue::get(Ty); |
9148 | Ops.insert(Ops.begin(), SplatUndef); |
9149 | } |
9150 | |
9151 | SmallVector<llvm::Type *, 2> |
9152 | CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags, |
9153 | llvm::Type *ResultType, |
9154 | ArrayRef<Value *> Ops) { |
9155 | if (TypeFlags.isOverloadNone()) |
9156 | return {}; |
9157 | |
9158 | llvm::Type *DefaultType = getSVEType(TypeFlags); |
9159 | |
9160 | if (TypeFlags.isOverloadWhile()) |
9161 | return {DefaultType, Ops[1]->getType()}; |
9162 | |
9163 | if (TypeFlags.isOverloadWhileRW()) |
9164 | return {getSVEPredType(TypeFlags), Ops[0]->getType()}; |
9165 | |
9166 | if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet()) |
9167 | return {Ops[0]->getType(), Ops.back()->getType()}; |
9168 | |
9169 | if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet()) |
9170 | return {ResultType, Ops[0]->getType()}; |
9171 | |
9172 | assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads"); |
9173 | return {DefaultType}; |
9174 | } |
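     | |
     | // For instance (editorial note), the isOverloadWhile case returns |
     | // {nxv4i1, i64} for svwhilelt_b32 with 64-bit bounds, which selects |
     | // @llvm.aarch64.sve.whilelt.nxv4i1.i64. |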
9175 | |
9176 | Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, |
9177 | const CallExpr *E) { |
9178 | // Find out if any arguments are required to be integer constant expressions. |
9179 | unsigned ICEArguments = 0; |
9180 | ASTContext::GetBuiltinTypeError Error; |
9181 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
9182 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
9183 | |
9184 | llvm::Type *Ty = ConvertType(E->getType()); |
9185 | if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && |
9186 | BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) { |
9187 | Value *Val = EmitScalarExpr(E->getArg(0)); |
9188 | return EmitSVEReinterpret(Val, Ty); |
9189 | } |
9190 | |
9191 | llvm::SmallVector<Value *, 4> Ops; |
9192 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
9193 | if ((ICEArguments & (1 << i)) == 0) |
9194 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
9195 | else { |
9196 | // If this is required to be a constant, constant fold it so that we know |
9197 | // that the generated intrinsic gets a ConstantInt. |
9198 | Optional<llvm::APSInt> Result = |
9199 | E->getArg(i)->getIntegerConstantExpr(getContext()); |
9200 | assert(Result && "Expected argument to be a constant"); |
9201 | |
9202 | // Immediates for SVE llvm intrinsics are always 32bit. We can safely |
9203 | // truncate because the immediate has been range checked and no valid |
9204 | // immediate requires more than a handful of bits. |
9205 | *Result = Result->extOrTrunc(32); |
9206 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result)); |
9207 | } |
9208 | } |
9209 | |
9210 | auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID, |
9211 | AArch64SVEIntrinsicsProvenSorted); |
9212 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9213 | if (TypeFlags.isLoad()) |
9214 | return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic, |
9215 | TypeFlags.isZExtReturn()); |
9216 | else if (TypeFlags.isStore()) |
9217 | return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic); |
9218 | else if (TypeFlags.isGatherLoad()) |
9219 | return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
9220 | else if (TypeFlags.isScatterStore()) |
9221 | return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
9222 | else if (TypeFlags.isPrefetch()) |
9223 | return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
9224 | else if (TypeFlags.isGatherPrefetch()) |
9225 | return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
9226 | else if (TypeFlags.isStructLoad()) |
9227 | return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
9228 | else if (TypeFlags.isStructStore()) |
9229 | return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
9230 | else if (TypeFlags.isUndef()) |
9231 | return UndefValue::get(Ty); |
9232 | else if (Builtin->LLVMIntrinsic != 0) { |
9233 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp) |
9234 | InsertExplicitZeroOperand(Builder, Ty, Ops); |
9235 | |
9236 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp) |
9237 | InsertExplicitUndefOperand(Builder, Ty, Ops); |
9238 | |
9239 | // Some ACLE builtins leave out the argument to specify the predicate |
9240 | // pattern, which is expected to be expanded to an SV_ALL pattern. |
9241 | if (TypeFlags.isAppendSVALL()) |
9242 | Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31)); |
9243 | if (TypeFlags.isInsertOp1SVALL()) |
9244 | Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31)); |
9245 | |
9246 | // Predicates must match the main datatype. |
9247 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
9248 | if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType())) |
9249 | if (PredTy->getElementType()->isIntegerTy(1)) |
9250 | Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags)); |
9251 | |
9252 | // Splat scalar operand to vector (intrinsics with _n infix) |
9253 | if (TypeFlags.hasSplatOperand()) { |
9254 | unsigned OpNo = TypeFlags.getSplatOperand(); |
9255 | Ops[OpNo] = EmitSVEDupX(Ops[OpNo]); |
9256 | } |
9257 | |
9258 | if (TypeFlags.isReverseCompare()) |
9259 | std::swap(Ops[1], Ops[2]); |
9260 | |
9261 | if (TypeFlags.isReverseUSDOT()) |
9262 | std::swap(Ops[1], Ops[2]); |
9263 | |
9264 | // Predicated intrinsics with _z suffix need a select w/ zeroinitializer. |
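     | // E.g. (illustrative) for svadd_s32_z(pg, a, b) this emits |
     | //   %a0 = call @llvm.aarch64.sve.sel.nxv4i32(%pg, %a, zeroinitializer) |
     | // and the add intrinsic then consumes %a0 in place of %a. |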
9265 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) { |
9266 | llvm::Type *OpndTy = Ops[1]->getType(); |
9267 | auto *SplatZero = Constant::getNullValue(OpndTy); |
9268 | Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy); |
9269 | Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero}); |
9270 | } |
9271 | |
9272 | Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, |
9273 | getSVEOverloadTypes(TypeFlags, Ty, Ops)); |
9274 | Value *Call = Builder.CreateCall(F, Ops); |
9275 | |
9276 | // Predicate results must be converted to svbool_t. |
9277 | if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType())) |
9278 | if (PredTy->getScalarType()->isIntegerTy(1)) |
9279 | Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty)); |
9280 | |
9281 | return Call; |
9282 | } |
9283 | |
9284 | switch (BuiltinID) { |
9285 | default: |
9286 | return nullptr; |
9287 | |
9288 | case SVE::BI__builtin_sve_svmov_b_z: { |
9289 | // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op) |
9290 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9291 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
9292 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy); |
9293 | return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]}); |
9294 | } |
9295 | |
9296 | case SVE::BI__builtin_sve_svnot_b_z: { |
9297 | // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg) |
9298 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9299 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
9300 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy); |
9301 | return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]}); |
9302 | } |
9303 | |
9304 | case SVE::BI__builtin_sve_svmovlb_u16: |
9305 | case SVE::BI__builtin_sve_svmovlb_u32: |
9306 | case SVE::BI__builtin_sve_svmovlb_u64: |
9307 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb); |
9308 | |
9309 | case SVE::BI__builtin_sve_svmovlb_s16: |
9310 | case SVE::BI__builtin_sve_svmovlb_s32: |
9311 | case SVE::BI__builtin_sve_svmovlb_s64: |
9312 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb); |
9313 | |
9314 | case SVE::BI__builtin_sve_svmovlt_u16: |
9315 | case SVE::BI__builtin_sve_svmovlt_u32: |
9316 | case SVE::BI__builtin_sve_svmovlt_u64: |
9317 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt); |
9318 | |
9319 | case SVE::BI__builtin_sve_svmovlt_s16: |
9320 | case SVE::BI__builtin_sve_svmovlt_s32: |
9321 | case SVE::BI__builtin_sve_svmovlt_s64: |
9322 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt); |
9323 | |
9324 | case SVE::BI__builtin_sve_svpmullt_u16: |
9325 | case SVE::BI__builtin_sve_svpmullt_u64: |
9326 | case SVE::BI__builtin_sve_svpmullt_n_u16: |
9327 | case SVE::BI__builtin_sve_svpmullt_n_u64: |
9328 | return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair); |
9329 | |
9330 | case SVE::BI__builtin_sve_svpmullb_u16: |
9331 | case SVE::BI__builtin_sve_svpmullb_u64: |
9332 | case SVE::BI__builtin_sve_svpmullb_n_u16: |
9333 | case SVE::BI__builtin_sve_svpmullb_n_u64: |
9334 | return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair); |
9335 | |
9336 | case SVE::BI__builtin_sve_svdup_n_b8: |
9337 | case SVE::BI__builtin_sve_svdup_n_b16: |
9338 | case SVE::BI__builtin_sve_svdup_n_b32: |
9339 | case SVE::BI__builtin_sve_svdup_n_b64: { |
9340 | Value *CmpNE = |
9341 | Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType())); |
9342 | llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags); |
9343 | Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy); |
9344 | return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty)); |
9345 | } |
9346 | |
9347 | case SVE::BI__builtin_sve_svdupq_n_b8: |
9348 | case SVE::BI__builtin_sve_svdupq_n_b16: |
9349 | case SVE::BI__builtin_sve_svdupq_n_b32: |
9350 | case SVE::BI__builtin_sve_svdupq_n_b64: |
9351 | case SVE::BI__builtin_sve_svdupq_n_u8: |
9352 | case SVE::BI__builtin_sve_svdupq_n_s8: |
9353 | case SVE::BI__builtin_sve_svdupq_n_u64: |
9354 | case SVE::BI__builtin_sve_svdupq_n_f64: |
9355 | case SVE::BI__builtin_sve_svdupq_n_s64: |
9356 | case SVE::BI__builtin_sve_svdupq_n_u16: |
9357 | case SVE::BI__builtin_sve_svdupq_n_f16: |
9358 | case SVE::BI__builtin_sve_svdupq_n_bf16: |
9359 | case SVE::BI__builtin_sve_svdupq_n_s16: |
9360 | case SVE::BI__builtin_sve_svdupq_n_u32: |
9361 | case SVE::BI__builtin_sve_svdupq_n_f32: |
9362 | case SVE::BI__builtin_sve_svdupq_n_s32: { |
9363 | // These builtins are implemented by packing the elements into a fixed-length |
9364 | // vector and replicating it with dupq_lane to materialize a scalable vector. |
9365 | unsigned NumOpnds = Ops.size(); |
9366 | |
9367 | bool IsBoolTy = |
9368 | cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1); |
9369 | |
9370 | // For svdupq_n_b* the element type is an integer of width 128/numelts, |
9371 | // so that the compare can use the width that is natural for the expected |
9372 | // number of predicate lanes. |
9373 | llvm::Type *EltTy = Ops[0]->getType(); |
9374 | if (IsBoolTy) |
9375 | EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds); |
9376 | |
9377 | SmallVector<llvm::Value *, 16> VecOps; |
9378 | for (unsigned I = 0; I < NumOpnds; ++I) |
9379 | VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy)); |
9380 | Value *Vec = BuildVector(VecOps); |
9381 | |
9382 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9383 | Value *Pred = EmitSVEAllTruePred(TypeFlags); |
9384 | |
9385 | llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy); |
9386 | Value *InsertSubVec = Builder.CreateInsertVector( |
9387 | OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0)); |
9388 | |
9389 | Function *F = |
9390 | CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy); |
9391 | Value *DupQLane = |
9392 | Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)}); |
9393 | |
9394 | if (!IsBoolTy) |
9395 | return DupQLane; |
9396 | |
9397 | // For svdupq_n_b* we need to add an additional 'cmpne' with '0'. |
9398 | F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne |
9399 | : Intrinsic::aarch64_sve_cmpne_wide, |
9400 | OverloadedTy); |
9401 | Value *Call = Builder.CreateCall( |
9402 | F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))}); |
9403 | return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty)); |
9404 | } |
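     | |
     | // Summary (editorial note): svdupq_n_s32(a, b, c, d) packs the four |
     | // scalars into a <4 x i32>, inserts that at element 0 of an nxv4i32 vector, |
     | // and dupq_lane then replicates 128-bit lane 0 across the whole register. |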
9405 | |
9406 | case SVE::BI__builtin_sve_svpfalse_b: |
9407 | return ConstantInt::getFalse(Ty); |
9408 | |
9409 | case SVE::BI__builtin_sve_svlen_bf16: |
9410 | case SVE::BI__builtin_sve_svlen_f16: |
9411 | case SVE::BI__builtin_sve_svlen_f32: |
9412 | case SVE::BI__builtin_sve_svlen_f64: |
9413 | case SVE::BI__builtin_sve_svlen_s8: |
9414 | case SVE::BI__builtin_sve_svlen_s16: |
9415 | case SVE::BI__builtin_sve_svlen_s32: |
9416 | case SVE::BI__builtin_sve_svlen_s64: |
9417 | case SVE::BI__builtin_sve_svlen_u8: |
9418 | case SVE::BI__builtin_sve_svlen_u16: |
9419 | case SVE::BI__builtin_sve_svlen_u32: |
9420 | case SVE::BI__builtin_sve_svlen_u64: { |
9421 | SVETypeFlags TF(Builtin->TypeModifier); |
9422 | auto VTy = cast<llvm::VectorType>(getSVEType(TF)); |
9423 | auto *NumEls = |
9424 | llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue()); |
9425 | |
9426 | Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty); |
9427 | return Builder.CreateMul(NumEls, Builder.CreateCall(F)); |
9428 | } |
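     | |
     | // (Illustrative, not in the original source) svlen_s32(v) therefore |
     | // lowers to: |
     | //   %vs = call i64 @llvm.vscale.i64() |
     | //   %len = mul i64 4, %vs |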
9429 | |
9430 | case SVE::BI__builtin_sve_svtbl2_u8: |
9431 | case SVE::BI__builtin_sve_svtbl2_s8: |
9432 | case SVE::BI__builtin_sve_svtbl2_u16: |
9433 | case SVE::BI__builtin_sve_svtbl2_s16: |
9434 | case SVE::BI__builtin_sve_svtbl2_u32: |
9435 | case SVE::BI__builtin_sve_svtbl2_s32: |
9436 | case SVE::BI__builtin_sve_svtbl2_u64: |
9437 | case SVE::BI__builtin_sve_svtbl2_s64: |
9438 | case SVE::BI__builtin_sve_svtbl2_f16: |
9439 | case SVE::BI__builtin_sve_svtbl2_bf16: |
9440 | case SVE::BI__builtin_sve_svtbl2_f32: |
9441 | case SVE::BI__builtin_sve_svtbl2_f64: { |
9442 | SVETypeFlags TF(Builtin->TypeModifier); |
9443 | auto VTy = cast<llvm::VectorType>(getSVEType(TF)); |
9444 | auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy); |
9445 | Function *FExtr = |
9446 | CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy}); |
9447 | Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)}); |
9448 | Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)}); |
9449 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy); |
9450 | return Builder.CreateCall(F, {V0, V1, Ops[1]}); |
9451 | } |
9452 | |
9453 | case SVE::BI__builtin_sve_svset_neonq_s8: |
9454 | case SVE::BI__builtin_sve_svset_neonq_s16: |
9455 | case SVE::BI__builtin_sve_svset_neonq_s32: |
9456 | case SVE::BI__builtin_sve_svset_neonq_s64: |
9457 | case SVE::BI__builtin_sve_svset_neonq_u8: |
9458 | case SVE::BI__builtin_sve_svset_neonq_u16: |
9459 | case SVE::BI__builtin_sve_svset_neonq_u32: |
9460 | case SVE::BI__builtin_sve_svset_neonq_u64: |
9461 | case SVE::BI__builtin_sve_svset_neonq_f16: |
9462 | case SVE::BI__builtin_sve_svset_neonq_f32: |
9463 | case SVE::BI__builtin_sve_svset_neonq_f64: |
9464 | case SVE::BI__builtin_sve_svset_neonq_bf16: { |
9465 | return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0)); |
9466 | } |
9467 | |
9468 | case SVE::BI__builtin_sve_svget_neonq_s8: |
9469 | case SVE::BI__builtin_sve_svget_neonq_s16: |
9470 | case SVE::BI__builtin_sve_svget_neonq_s32: |
9471 | case SVE::BI__builtin_sve_svget_neonq_s64: |
9472 | case SVE::BI__builtin_sve_svget_neonq_u8: |
9473 | case SVE::BI__builtin_sve_svget_neonq_u16: |
9474 | case SVE::BI__builtin_sve_svget_neonq_u32: |
9475 | case SVE::BI__builtin_sve_svget_neonq_u64: |
9476 | case SVE::BI__builtin_sve_svget_neonq_f16: |
9477 | case SVE::BI__builtin_sve_svget_neonq_f32: |
9478 | case SVE::BI__builtin_sve_svget_neonq_f64: |
9479 | case SVE::BI__builtin_sve_svget_neonq_bf16: { |
9480 | return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0)); |
9481 | } |
9482 | |
9483 | case SVE::BI__builtin_sve_svdup_neonq_s8: |
9484 | case SVE::BI__builtin_sve_svdup_neonq_s16: |
9485 | case SVE::BI__builtin_sve_svdup_neonq_s32: |
9486 | case SVE::BI__builtin_sve_svdup_neonq_s64: |
9487 | case SVE::BI__builtin_sve_svdup_neonq_u8: |
9488 | case SVE::BI__builtin_sve_svdup_neonq_u16: |
9489 | case SVE::BI__builtin_sve_svdup_neonq_u32: |
9490 | case SVE::BI__builtin_sve_svdup_neonq_u64: |
9491 | case SVE::BI__builtin_sve_svdup_neonq_f16: |
9492 | case SVE::BI__builtin_sve_svdup_neonq_f32: |
9493 | case SVE::BI__builtin_sve_svdup_neonq_f64: |
9494 | case SVE::BI__builtin_sve_svdup_neonq_bf16: { |
9495 | Value *Insert = Builder.CreateInsertVector(Ty, UndefValue::get(Ty), Ops[0], |
9496 | Builder.getInt64(0)); |
9497 | return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty}, |
9498 | {Insert, Builder.getInt64(0)}); |
9499 | } |
9500 | } |
9501 | |
9502 | // Should not happen: every SVE builtin is handled above. |
9503 | return nullptr; |
9504 | } |
9505 | |
9506 | Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, |
9507 | const CallExpr *E, |
9508 | llvm::Triple::ArchType Arch) { |
9509 | if (BuiltinID >= AArch64::FirstSVEBuiltin && |
9510 | BuiltinID <= AArch64::LastSVEBuiltin) |
9511 | return EmitAArch64SVEBuiltinExpr(BuiltinID, E); |
9512 | |
9513 | unsigned HintID = static_cast<unsigned>(-1); |
9514 | switch (BuiltinID) { |
9515 | default: break; |
9516 | case AArch64::BI__builtin_arm_nop: |
9517 | HintID = 0; |
9518 | break; |
9519 | case AArch64::BI__builtin_arm_yield: |
9520 | case AArch64::BI__yield: |
9521 | HintID = 1; |
9522 | break; |
9523 | case AArch64::BI__builtin_arm_wfe: |
9524 | case AArch64::BI__wfe: |
9525 | HintID = 2; |
9526 | break; |
9527 | case AArch64::BI__builtin_arm_wfi: |
9528 | case AArch64::BI__wfi: |
9529 | HintID = 3; |
9530 | break; |
9531 | case AArch64::BI__builtin_arm_sev: |
9532 | case AArch64::BI__sev: |
9533 | HintID = 4; |
9534 | break; |
9535 | case AArch64::BI__builtin_arm_sevl: |
9536 | case AArch64::BI__sevl: |
9537 | HintID = 5; |
9538 | break; |
9539 | } |
9540 | |
9541 | if (HintID != static_cast<unsigned>(-1)) { |
9542 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint); |
9543 | return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID)); |
9544 | } |
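     | |
     | // (Illustrative) e.g. __wfi() becomes: |
     | //   call void @llvm.aarch64.hint(i32 3) |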
9545 | |
9546 | if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { |
9547 | Value *Address = EmitScalarExpr(E->getArg(0)); |
9548 | Value *RW = EmitScalarExpr(E->getArg(1)); |
9549 | Value *CacheLevel = EmitScalarExpr(E->getArg(2)); |
9550 | Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); |
9551 | Value *IsData = EmitScalarExpr(E->getArg(4)); |
9552 | |
9553 | Value *Locality = nullptr; |
9554 | if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) { |
9555 | // Temporal fetch; convert the cache level to a locality value. |
9556 | Locality = llvm::ConstantInt::get(Int32Ty, |
9557 | -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3); |
9558 | } else { |
9559 | // Streaming fetch. |
9560 | Locality = llvm::ConstantInt::get(Int32Ty, 0); |
9561 | } |
9562 | |
9563 | // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify |
9564 | // PLDL3STRM or PLDL2STRM. |
9565 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
9566 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
9567 | } |
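     | |
     | // (Illustrative, editorial note) __builtin_arm_prefetch(p, 0, 1, 0, 1) is a |
     | // temporal data prefetch at cache level 1, so Locality = 3 - 1 = 2 and we |
     | // emit: call void @llvm.prefetch.p0i8(i8* %p, i32 0, i32 2, i32 1) |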
9568 | |
9569 | if (BuiltinID == AArch64::BI__builtin_arm_rbit) { |
9570 | assert((getContext().getTypeSize(E->getType()) == 32) && |
9571 | "rbit of unusual size!"); |
9572 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9573 | return Builder.CreateCall( |
9574 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
9575 | } |
9576 | if (BuiltinID == AArch64::BI__builtin_arm_rbit64) { |
9577 | assert((getContext().getTypeSize(E->getType()) == 64) && |
9578 | "rbit of unusual size!"); |
9579 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9580 | return Builder.CreateCall( |
9581 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
9582 | } |
9583 | |
9584 | if (BuiltinID == AArch64::BI__builtin_arm_cls) { |
9585 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9586 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg, |
9587 | "cls"); |
9588 | } |
9589 | if (BuiltinID == AArch64::BI__builtin_arm_cls64) { |
9590 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9591 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg, |
9592 | "cls"); |
9593 | } |
9594 | |
9595 | if (BuiltinID == AArch64::BI__builtin_arm_frint32zf || |
9596 | BuiltinID == AArch64::BI__builtin_arm_frint32z) { |
9597 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9598 | llvm::Type *Ty = Arg->getType(); |
9599 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty), |
9600 | Arg, "frint32z"); |
9601 | } |
9602 | |
9603 | if (BuiltinID == AArch64::BI__builtin_arm_frint64zf || |
9604 | BuiltinID == AArch64::BI__builtin_arm_frint64z) { |
9605 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9606 | llvm::Type *Ty = Arg->getType(); |
9607 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty), |
9608 | Arg, "frint64z"); |
9609 | } |
9610 | |
9611 | if (BuiltinID == AArch64::BI__builtin_arm_frint32xf || |
9612 | BuiltinID == AArch64::BI__builtin_arm_frint32x) { |
9613 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9614 | llvm::Type *Ty = Arg->getType(); |
9615 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty), |
9616 | Arg, "frint32x"); |
9617 | } |
9618 | |
9619 | if (BuiltinID == AArch64::BI__builtin_arm_frint64xf || |
9620 | BuiltinID == AArch64::BI__builtin_arm_frint64x) { |
9621 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9622 | llvm::Type *Ty = Arg->getType(); |
9623 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty), |
9624 | Arg, "frint64x"); |
9625 | } |
9626 | |
9627 | if (BuiltinID == AArch64::BI__builtin_arm_jcvt) { |
9628 | assert((getContext().getTypeSize(E->getType()) == 32) && |
9629 | "__jcvt of unusual size!"); |
9630 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9631 | return Builder.CreateCall( |
9632 | CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg); |
9633 | } |
9634 | |
9635 | if (BuiltinID == AArch64::BI__builtin_arm_ld64b || |
9636 | BuiltinID == AArch64::BI__builtin_arm_st64b || |
9637 | BuiltinID == AArch64::BI__builtin_arm_st64bv || |
9638 | BuiltinID == AArch64::BI__builtin_arm_st64bv0) { |
9639 | llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0)); |
9640 | llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1)); |
9641 | |
9642 | if (BuiltinID == AArch64::BI__builtin_arm_ld64b) { |
9643 | // Load from the address via an LLVM intrinsic, receiving a |
9644 | // tuple of 8 i64 words, and store each one to ValPtr. |
9645 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b); |
9646 | llvm::Value *Val = Builder.CreateCall(F, MemAddr); |
9647 | llvm::Value *ToRet; |
9648 | for (size_t i = 0; i < 8; i++) { |
9649 | llvm::Value *ValOffsetPtr = |
9650 | Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i)); |
9651 | Address Addr = |
9652 | Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8)); |
9653 | ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr); |
9654 | } |
9655 | return ToRet; |
9656 | } else { |
9657 | // Load 8 i64 words from ValPtr, and store them to the address |
9658 | // via an LLVM intrinsic. |
9659 | SmallVector<llvm::Value *, 9> Args; |
9660 | Args.push_back(MemAddr); |
9661 | for (size_t i = 0; i < 8; i++) { |
9662 | llvm::Value *ValOffsetPtr = |
9663 | Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i)); |
9664 | Address Addr = |
9665 | Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8)); |
9666 | Args.push_back(Builder.CreateLoad(Addr)); |
9667 | } |
9668 | |
9669 | auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b |
9670 | ? Intrinsic::aarch64_st64b |
9671 | : BuiltinID == AArch64::BI__builtin_arm_st64bv |
9672 | ? Intrinsic::aarch64_st64bv |
9673 | : Intrinsic::aarch64_st64bv0); |
9674 | Function *F = CGM.getIntrinsic(Intr); |
9675 | return Builder.CreateCall(F, Args); |
9676 | } |
9677 | } |
9678 | |
9679 | if (BuiltinID == AArch64::BI__builtin_arm_rndr || |
9680 | BuiltinID == AArch64::BI__builtin_arm_rndrrs) { |
9681 | |
9682 | auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr |
9683 | ? Intrinsic::aarch64_rndr |
9684 | : Intrinsic::aarch64_rndrrs); |
9685 | Function *F = CGM.getIntrinsic(Intr); |
9686 | llvm::Value *Val = Builder.CreateCall(F); |
9687 | Value *RandomValue = Builder.CreateExtractValue(Val, 0); |
9688 | Value *Status = Builder.CreateExtractValue(Val, 1); |
9689 | |
9690 | Address MemAddress = EmitPointerWithAlignment(E->getArg(0)); |
9691 | Builder.CreateStore(RandomValue, MemAddress); |
9692 | Status = Builder.CreateZExt(Status, Int32Ty); |
9693 | return Status; |
9694 | } |
9695 | |
9696 | if (BuiltinID == AArch64::BI__clear_cache) { |
9697 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); |
9698 | const FunctionDecl *FD = E->getDirectCallee(); |
9699 | Value *Ops[2]; |
9700 | for (unsigned i = 0; i < 2; i++) |
9701 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
9702 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
9703 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
9704 | StringRef Name = FD->getName(); |
9705 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
9706 | } |
9707 | |
9708 | if ((BuiltinID == AArch64::BI__builtin_arm_ldrex || |
9709 | BuiltinID == AArch64::BI__builtin_arm_ldaex) && |
9710 | getContext().getTypeSize(E->getType()) == 128) { |
9711 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
9712 | ? Intrinsic::aarch64_ldaxp |
9713 | : Intrinsic::aarch64_ldxp); |
9714 | |
9715 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
9716 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
9717 | "ldxp"); |
9718 | |
9719 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
9720 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
9721 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
9722 | Val0 = Builder.CreateZExt(Val0, Int128Ty); |
9723 | Val1 = Builder.CreateZExt(Val1, Int128Ty); |
9724 | |
9725 | Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64); |
9726 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); |
9727 | Val = Builder.CreateOr(Val, Val1); |
9728 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
9729 | } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex || |
9730 | BuiltinID == AArch64::BI__builtin_arm_ldaex) { |
9731 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
9732 | |
9733 | QualType Ty = E->getType(); |
9734 | llvm::Type *RealResTy = ConvertType(Ty); |
9735 | llvm::Type *IntTy = |
9736 | llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty)); |
9737 | llvm::Type *PtrTy = IntTy->getPointerTo(); |
9738 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
9739 | |
9740 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
9741 | ? Intrinsic::aarch64_ldaxr |
9742 | : Intrinsic::aarch64_ldxr, |
9743 | PtrTy); |
9744 | CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr"); |
9745 | Val->addParamAttr( |
9746 | 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy)); |
9747 | |
9748 | if (RealResTy->isPointerTy()) |
9749 | return Builder.CreateIntToPtr(Val, RealResTy); |
9750 | |
9751 | llvm::Type *IntResTy = llvm::IntegerType::get( |
9752 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
9753 | return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy), |
9754 | RealResTy); |
9755 | } |
9756 | |
9757 | if ((BuiltinID == AArch64::BI__builtin_arm_strex || |
9758 | BuiltinID == AArch64::BI__builtin_arm_stlex) && |
9759 | getContext().getTypeSize(E->getArg(0)->getType()) == 128) { |
9760 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
9761 | ? Intrinsic::aarch64_stlxp |
9762 | : Intrinsic::aarch64_stxp); |
9763 | llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty); |
9764 | |
9765 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
9766 | EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true); |
9767 | |
9768 | Tmp = Builder.CreateElementBitCast(Tmp, STy); |
9769 | llvm::Value *Val = Builder.CreateLoad(Tmp); |
9770 | |
9771 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
9772 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
9773 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), |
9774 | Int8PtrTy); |
9775 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); |
9776 | } |
9777 | |
9778 | if (BuiltinID == AArch64::BI__builtin_arm_strex || |
9779 | BuiltinID == AArch64::BI__builtin_arm_stlex) { |
9780 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
9781 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
9782 | |
9783 | QualType Ty = E->getArg(0)->getType(); |
9784 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
9785 | getContext().getTypeSize(Ty)); |
9786 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
9787 | |
9788 | if (StoreVal->getType()->isPointerTy()) |
9789 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty); |
9790 | else { |
9791 | llvm::Type *IntTy = llvm::IntegerType::get( |
9792 | getLLVMContext(), |
9793 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
9794 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
9795 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty); |
9796 | } |
9797 | |
9798 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
9799 | ? Intrinsic::aarch64_stlxr |
9800 | : Intrinsic::aarch64_stxr, |
9801 | StoreAddr->getType()); |
9802 | CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr"); |
9803 | CI->addParamAttr( |
9804 | 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy)); |
9805 | return CI; |
9806 | } |
9807 | |
9808 | if (BuiltinID == AArch64::BI__getReg) { |
9809 | Expr::EvalResult Result; |
9810 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
9811 | llvm_unreachable("Sema will ensure that the parameter is constant"); |
9812 | |
9813 | llvm::APSInt Value = Result.Val.getInt(); |
9814 | LLVMContext &Context = CGM.getLLVMContext(); |
9815 | std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10); |
9816 | |
9817 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)}; |
9818 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
9819 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
9820 | |
9821 | llvm::Function *F = |
9822 | CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty}); |
9823 | return Builder.CreateCall(F, Metadata); |
9824 | } |
9825 | |
9826 | if (BuiltinID == AArch64::BI__builtin_arm_clrex) { |
9827 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); |
9828 | return Builder.CreateCall(F); |
9829 | } |
9830 | |
9831 | if (BuiltinID == AArch64::BI_ReadWriteBarrier) |
9832 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
9833 | llvm::SyncScope::SingleThread); |
9834 | |
9835 | // CRC32 |
9836 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
9837 | switch (BuiltinID) { |
9838 | case AArch64::BI__builtin_arm_crc32b: |
9839 | CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; |
9840 | case AArch64::BI__builtin_arm_crc32cb: |
9841 | CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; |
9842 | case AArch64::BI__builtin_arm_crc32h: |
9843 | CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; |
9844 | case AArch64::BI__builtin_arm_crc32ch: |
9845 | CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; |
9846 | case AArch64::BI__builtin_arm_crc32w: |
9847 | CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; |
9848 | case AArch64::BI__builtin_arm_crc32cw: |
9849 | CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; |
9850 | case AArch64::BI__builtin_arm_crc32d: |
9851 | CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; |
9852 | case AArch64::BI__builtin_arm_crc32cd: |
9853 | CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; |
9854 | } |
9855 | |
9856 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
9857 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
9858 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
9859 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
9860 | |
9861 | llvm::Type *DataTy = F->getFunctionType()->getParamType(1); |
9862 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy); |
9863 | |
9864 | return Builder.CreateCall(F, {Arg0, Arg1}); |
9865 | } |
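     | |
     | // (Illustrative) __builtin_arm_crc32b(acc, b) zero-extends the byte |
     | // operand and emits: call i32 @llvm.aarch64.crc32b(i32 %acc, i32 %b) |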
9866 | |
9867 | // Memory Operations (MOPS) |
9868 | if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) { |
9869 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
9870 | Value *Val = EmitScalarExpr(E->getArg(1)); |
9871 | Value *Size = EmitScalarExpr(E->getArg(2)); |
9872 | Dst = Builder.CreatePointerCast(Dst, Int8PtrTy); |
9873 | Val = Builder.CreateTrunc(Val, Int8Ty); |
9874 | Size = Builder.CreateIntCast(Size, Int64Ty, false); |
9875 | return Builder.CreateCall( |
9876 | CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size}); |
9877 | } |
9878 | |
9879 | // Memory Tagging Extensions (MTE) Intrinsics |
9880 | Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic; |
9881 | switch (BuiltinID) { |
9882 | case AArch64::BI__builtin_arm_irg: |
9883 | MTEIntrinsicID = Intrinsic::aarch64_irg; break; |
9884 | case AArch64::BI__builtin_arm_addg: |
9885 | MTEIntrinsicID = Intrinsic::aarch64_addg; break; |
9886 | case AArch64::BI__builtin_arm_gmi: |
9887 | MTEIntrinsicID = Intrinsic::aarch64_gmi; break; |
9888 | case AArch64::BI__builtin_arm_ldg: |
9889 | MTEIntrinsicID = Intrinsic::aarch64_ldg; break; |
9890 | case AArch64::BI__builtin_arm_stg: |
9891 | MTEIntrinsicID = Intrinsic::aarch64_stg; break; |
9892 | case AArch64::BI__builtin_arm_subp: |
9893 | MTEIntrinsicID = Intrinsic::aarch64_subp; break; |
9894 | } |
9895 | |
9896 | if (MTEIntrinsicID != Intrinsic::not_intrinsic) { |
9897 | llvm::Type *T = ConvertType(E->getType()); |
9898 | |
9899 | if (MTEIntrinsicID == Intrinsic::aarch64_irg) { |
9900 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9901 | Value *Mask = EmitScalarExpr(E->getArg(1)); |
9902 | |
9903 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9904 | Mask = Builder.CreateZExt(Mask, Int64Ty); |
9905 | Value *RV = Builder.CreateCall( |
9906 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask}); |
9907 | return Builder.CreatePointerCast(RV, T); |
9908 | } |
9909 | if (MTEIntrinsicID == Intrinsic::aarch64_addg) { |
9910 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9911 | Value *TagOffset = EmitScalarExpr(E->getArg(1)); |
9912 | |
9913 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9914 | TagOffset = Builder.CreateZExt(TagOffset, Int64Ty); |
9915 | Value *RV = Builder.CreateCall( |
9916 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset}); |
9917 | return Builder.CreatePointerCast(RV, T); |
9918 | } |
9919 | if (MTEIntrinsicID == Intrinsic::aarch64_gmi) { |
9920 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9921 | Value *ExcludedMask = EmitScalarExpr(E->getArg(1)); |
9922 | |
9923 | ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty); |
9924 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9925 | return Builder.CreateCall( |
9926 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask}); |
9927 | } |
9928 | // Although it is possible to supply a different return |
9929 | // address (first arg) to this intrinsic, for now we set |
9930 | // the return address to be the same as the input address. |
9931 | if (MTEIntrinsicID == Intrinsic::aarch64_ldg) { |
9932 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
9933 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
9934 | Value *RV = Builder.CreateCall( |
9935 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
9936 | return Builder.CreatePointerCast(RV, T); |
9937 | } |
9938 | // Although it is possible to supply a different tag (to set) |
9939 | // to this intrinsic (as first arg), for now we supply |
9940 | // the tag that is in the input address argument (the common use case). |
9941 | if (MTEIntrinsicID == Intrinsic::aarch64_stg) { |
9942 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
9943 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
9944 | return Builder.CreateCall( |
9945 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
9946 | } |
9947 | if (MTEIntrinsicID == Intrinsic::aarch64_subp) { |
9948 | Value *PointerA = EmitScalarExpr(E->getArg(0)); |
9949 | Value *PointerB = EmitScalarExpr(E->getArg(1)); |
9950 | PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy); |
9951 | PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy); |
9952 | return Builder.CreateCall( |
9953 | CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB}); |
9954 | } |
9955 | } |
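     | |
     | // (Illustrative, editorial note) e.g. __builtin_arm_irg(p, mask) casts p |
     | // to i8*, widens mask to i64, and emits: |
     | //   call i8* @llvm.aarch64.irg(i8* %p, i64 %mask) |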
9956 | |
9957 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
9958 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
9959 | BuiltinID == AArch64::BI__builtin_arm_rsrp || |
9960 | BuiltinID == AArch64::BI__builtin_arm_wsr || |
9961 | BuiltinID == AArch64::BI__builtin_arm_wsr64 || |
9962 | BuiltinID == AArch64::BI__builtin_arm_wsrp) { |
9963 | |
9964 | SpecialRegisterAccessKind AccessKind = Write; |
9965 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
9966 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
9967 | BuiltinID == AArch64::BI__builtin_arm_rsrp) |
9968 | AccessKind = VolatileRead; |
9969 | |
9970 | bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp || |
9971 | BuiltinID == AArch64::BI__builtin_arm_wsrp; |
9972 | |
9973 | bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr && |
9974 | BuiltinID != AArch64::BI__builtin_arm_wsr; |
9975 | |
9976 | llvm::Type *ValueType; |
9977 | llvm::Type *RegisterType = Int64Ty; |
9978 | if (IsPointerBuiltin) { |
9979 | ValueType = VoidPtrTy; |
9980 | } else if (Is64Bit) { |
9981 | ValueType = Int64Ty; |
9982 | } else { |
9983 | ValueType = Int32Ty; |
9984 | } |
9985 | |
9986 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, |
9987 | AccessKind); |
9988 | } |
9989 | |
9990 | if (BuiltinID == AArch64::BI_ReadStatusReg || |
9991 | BuiltinID == AArch64::BI_WriteStatusReg) { |
9992 | LLVMContext &Context = CGM.getLLVMContext(); |
9993 | |
9994 | unsigned SysReg = |
9995 | E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
9996 | |
9997 | std::string SysRegStr; |
9998 | llvm::raw_string_ostream(SysRegStr) << |
9999 | ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << |
10000 | ((SysReg >> 11) & 7) << ":" << |
10001 | ((SysReg >> 7) & 15) << ":" << |
10002 | ((SysReg >> 3) & 15) << ":" << |
10003 | ( SysReg & 7); |
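     | // SysReg packs the MRS/MSR operands as o0[14]:op1[13:11]:CRn[10:7]: |
     | // CRm[6:3]:op2[2:0]; op0 is always 2 or 3, hence the (1 << 1) above. |
     | // E.g. (editorial note) TPIDR_EL0 (op0=3, op1=3, CRn=13, CRm=0, op2=2) |
     | // produces the register string "3:3:13:0:2". |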
10004 | |
10005 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) }; |
10006 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
10007 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
10008 | |
10009 | llvm::Type *RegisterType = Int64Ty; |
10010 | llvm::Type *Types[] = { RegisterType }; |
10011 | |
10012 | if (BuiltinID == AArch64::BI_ReadStatusReg) { |
10013 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
10014 | |
10015 | return Builder.CreateCall(F, Metadata); |
10016 | } |
10017 | |
10018 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
10019 | llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
10020 | |
10021 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
10022 | } |
10023 | |
10024 | if (BuiltinID == AArch64::BI_AddressOfReturnAddress) { |
10025 | llvm::Function *F = |
10026 | CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy); |
10027 | return Builder.CreateCall(F); |
10028 | } |
10029 | |
10030 | if (BuiltinID == AArch64::BI__builtin_sponentry) { |
10031 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy); |
10032 | return Builder.CreateCall(F); |
10033 | } |
10034 | |
10035 | if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) { |
10036 | llvm::Type *ResType = ConvertType(E->getType()); |
10037 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
10038 | |
10039 | bool IsSigned = BuiltinID == AArch64::BI__mulh; |
10040 | Value *LHS = |
10041 | Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned); |
10042 | Value *RHS = |
10043 | Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned); |
10044 | |
10045 | Value *MulResult, *HigherBits; |
10046 | if (IsSigned) { |
10047 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
10048 | HigherBits = Builder.CreateAShr(MulResult, 64); |
10049 | } else { |
10050 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
10051 | HigherBits = Builder.CreateLShr(MulResult, 64); |
10052 | } |
10053 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
10054 | |
10055 | return HigherBits; |
10056 | } |
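     | |
     | // (Illustrative) __mulh(a, b) thus sign-extends both operands to i128, |
     | // multiplies, and keeps bits [127:64]; __umulh does the same with |
     | // zero-extension and a logical shift. |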
10057 | |
10058 | // Handle MSVC intrinsics before argument evaluation to prevent double |
10059 | // evaluation. |
10060 | if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID)) |
10061 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
10062 | |
10063 | // Find out if any arguments are required to be integer constant |
10064 | // expressions. |
10065 | unsigned ICEArguments = 0; |
10066 | ASTContext::GetBuiltinTypeError Error; |
10067 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
10068 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
10069 | |
10070 | llvm::SmallVector<Value*, 4> Ops; |
10071 | Address PtrOp0 = Address::invalid(); |
10072 | for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { |
10073 | if (i == 0) { |
10074 | switch (BuiltinID) { |
10075 | case NEON::BI__builtin_neon_vld1_v: |
10076 | case NEON::BI__builtin_neon_vld1q_v: |
10077 | case NEON::BI__builtin_neon_vld1_dup_v: |
10078 | case NEON::BI__builtin_neon_vld1q_dup_v: |
10079 | case NEON::BI__builtin_neon_vld1_lane_v: |
10080 | case NEON::BI__builtin_neon_vld1q_lane_v: |
10081 | case NEON::BI__builtin_neon_vst1_v: |
10082 | case NEON::BI__builtin_neon_vst1q_v: |
10083 | case NEON::BI__builtin_neon_vst1_lane_v: |
10084 | case NEON::BI__builtin_neon_vst1q_lane_v: |
10085 | // Get the alignment for the argument in addition to the value; |
10086 | // we'll use it later. |
10087 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
10088 | Ops.push_back(PtrOp0.getPointer()); |
10089 | continue; |
10090 | } |
10091 | } |
10092 | if ((ICEArguments & (1 << i)) == 0) { |
10093 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
10094 | } else { |
10095 | // If this is required to be a constant, constant fold it so that we know |
10096 | // that the generated intrinsic gets a ConstantInt. |
10097 | Ops.push_back(llvm::ConstantInt::get( |
10098 | getLLVMContext(), |
10099 | *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
10100 | } |
10101 | } |
10102 | |
10103 | auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap); |
10104 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
10105 | SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); |
10106 | |
10107 | if (Builtin) { |
10108 | Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); |
10109 | Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E); |
10110 | assert(Result && "SISD intrinsic should have been handled"); |
10111 | return Result; |
10112 | } |
10113 | |
10114 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
10115 | NeonTypeFlags Type(0); |
10116 | if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext())) |
10117 | // Determine the type of this overloaded NEON intrinsic. |
10118 | Type = NeonTypeFlags(Result->getZExtValue()); |
10119 | |
10120 | bool usgn = Type.isUnsigned(); |
10121 | bool quad = Type.isQuad(); |
10122 | |
10123 | // Handle non-overloaded intrinsics first. |
10124 | switch (BuiltinID) { |
10125 | default: break; |
10126 | case NEON::BI__builtin_neon_vabsh_f16: |
10127 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10128 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs"); |
10129 | case NEON::BI__builtin_neon_vaddq_p128: { |
10130 | llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128); |
10131 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10132 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10133 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10134 | Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); |
10135 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
10136 | return Builder.CreateBitCast(Ops[0], Int128Ty); |
10137 | } |
10138 | case NEON::BI__builtin_neon_vldrq_p128: { |
10139 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
10140 | llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0); |
10141 | Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy); |
10142 | return Builder.CreateAlignedLoad(Int128Ty, Ptr, |
10143 | CharUnits::fromQuantity(16)); |
10144 | } |
10145 | case NEON::BI__builtin_neon_vstrq_p128: { |
10146 | llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128); |
10147 | Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy); |
10148 | return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); |
10149 | } |
10150 | case NEON::BI__builtin_neon_vcvts_f32_u32: |
10151 | case NEON::BI__builtin_neon_vcvtd_f64_u64: |
10152 | usgn = true; |
10153 | LLVM_FALLTHROUGH; |
10154 | case NEON::BI__builtin_neon_vcvts_f32_s32: |
10155 | case NEON::BI__builtin_neon_vcvtd_f64_s64: { |
10156 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10157 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; |
10158 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; |
10159 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; |
10160 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
10161 | if (usgn) |
10162 | return Builder.CreateUIToFP(Ops[0], FTy); |
10163 | return Builder.CreateSIToFP(Ops[0], FTy); |
10164 | } |
10165 | case NEON::BI__builtin_neon_vcvth_f16_u16: |
10166 | case NEON::BI__builtin_neon_vcvth_f16_u32: |
10167 | case NEON::BI__builtin_neon_vcvth_f16_u64: |
10168 | usgn = true; |
10169 | LLVM_FALLTHROUGH; |
10170 | case NEON::BI__builtin_neon_vcvth_f16_s16: |
10171 | case NEON::BI__builtin_neon_vcvth_f16_s32: |
10172 | case NEON::BI__builtin_neon_vcvth_f16_s64: { |
10173 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10174 | llvm::Type *FTy = HalfTy; |
10175 | llvm::Type *InTy; |
10176 | if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) |
10177 | InTy = Int64Ty; |
10178 | else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) |
10179 | InTy = Int32Ty; |
10180 | else |
10181 | InTy = Int16Ty; |
10182 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
10183 | if (usgn) |
10184 | return Builder.CreateUIToFP(Ops[0], FTy); |
10185 | return Builder.CreateSIToFP(Ops[0], FTy); |
10186 | } |
10187 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
10188 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
10189 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
10190 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
10191 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
10192 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
10193 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
10194 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
10195 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
10196 | case NEON::BI__builtin_neon_vcvth_s16_f16: { |
10197 | unsigned Int; |
10198 | llvm::Type* InTy = Int32Ty; |
10199 | llvm::Type* FTy = HalfTy; |
10200 | llvm::Type *Tys[2] = {InTy, FTy}; |
10201 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10202 | switch (BuiltinID) { |
10203 | default: llvm_unreachable("missing builtin ID in switch!"); |
10204 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
10205 | Int = Intrinsic::aarch64_neon_fcvtau; break; |
10206 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
10207 | Int = Intrinsic::aarch64_neon_fcvtmu; break; |
10208 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
10209 | Int = Intrinsic::aarch64_neon_fcvtnu; break; |
10210 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
10211 | Int = Intrinsic::aarch64_neon_fcvtpu; break; |
10212 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
10213 | Int = Intrinsic::aarch64_neon_fcvtzu; break; |
10214 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
10215 | Int = Intrinsic::aarch64_neon_fcvtas; break; |
10216 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
10217 | Int = Intrinsic::aarch64_neon_fcvtms; break; |
10218 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
10219 | Int = Intrinsic::aarch64_neon_fcvtns; break; |
10220 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
10221 | Int = Intrinsic::aarch64_neon_fcvtps; break; |
10222 | case NEON::BI__builtin_neon_vcvth_s16_f16: |
10223 | Int = Intrinsic::aarch64_neon_fcvtzs; break; |
10224 | } |
10225 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt"); |
10226 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
10227 | } |
10228 | case NEON::BI__builtin_neon_vcaleh_f16: |
10229 | case NEON::BI__builtin_neon_vcalth_f16: |
10230 | case NEON::BI__builtin_neon_vcageh_f16: |
10231 | case NEON::BI__builtin_neon_vcagth_f16: { |
10232 | unsigned Int; |
10233 | llvm::Type* InTy = Int32Ty; |
10234 | llvm::Type* FTy = HalfTy; |
10235 | llvm::Type *Tys[2] = {InTy, FTy}; |
10236 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10237 | switch (BuiltinID) { |
10238 | default: llvm_unreachable("missing builtin ID in switch!"); |
10239 | case NEON::BI__builtin_neon_vcageh_f16: |
10240 | Int = Intrinsic::aarch64_neon_facge; break; |
10241 | case NEON::BI__builtin_neon_vcagth_f16: |
10242 | Int = Intrinsic::aarch64_neon_facgt; break; |
10243 | case NEON::BI__builtin_neon_vcaleh_f16: |
10244 | Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break; |
10245 | case NEON::BI__builtin_neon_vcalth_f16: |
10246 | Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break; |
10247 | } |
10248 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg"); |
10249 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
10250 | } |
10251 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
10252 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: { |
10253 | unsigned Int; |
10254 | llvm::Type* InTy = Int32Ty; |
10255 | llvm::Type* FTy = HalfTy; |
10256 | llvm::Type *Tys[2] = {InTy, FTy}; |
10257 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10258 | switch (BuiltinID) { |
10259 | default: llvm_unreachable("missing builtin ID in switch!");
10260 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
10261 | Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; |
10262 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: |
10263 | Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; |
10264 | } |
10265 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
10266 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
10267 | } |
10268 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
10269 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: { |
10270 | unsigned Int; |
10271 | llvm::Type* FTy = HalfTy; |
10272 | llvm::Type* InTy = Int32Ty; |
10273 | llvm::Type *Tys[2] = {FTy, InTy}; |
10274 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10275 | switch (BuiltinID) { |
10276 | default: llvm_unreachable("missing builtin ID in switch!");
10277 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
10278 | Int = Intrinsic::aarch64_neon_vcvtfxs2fp; |
10279 | Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext"); |
10280 | break; |
10281 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: |
10282 | Int = Intrinsic::aarch64_neon_vcvtfxu2fp; |
10283 | Ops[0] = Builder.CreateZExt(Ops[0], InTy); |
10284 | break; |
10285 | } |
10286 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
10287 | } |
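      | // For the fixed-point int -> f16 conversions the i16 input is widened
      | // to i32 first (sext for s16, zext for u16) because the intrinsic is
      | // only defined at i32 width; the second operand is the fractional-bit
      | // count n. Illustrative IR for vcvth_n_f16_s16(x, 3) (mangling is a
      | // sketch, not verified):
      | //   %sext = sext i16 %x to i32
      | //   %cvt  = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %sext, i32 3)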
10288 | case NEON::BI__builtin_neon_vpaddd_s64: { |
10289 | auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2); |
10290 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
10291 | // The vector is v2i64, so make sure it's bitcast to that.
10292 | Vec = Builder.CreateBitCast(Vec, Ty, "v2i64"); |
10293 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
10294 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
10295 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
10296 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
10297 | // Pairwise addition of a v2i64 into a scalar i64.
10298 | return Builder.CreateAdd(Op0, Op1, "vpaddd"); |
10299 | } |
10300 | case NEON::BI__builtin_neon_vpaddd_f64: { |
10301 | auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2); |
10302 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
10303 | // The vector is v2f64, so make sure it's bitcast to that. |
10304 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f64"); |
10305 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
10306 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
10307 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
10308 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
10309 | // Pairwise addition of a v2f64 into a scalar f64. |
10310 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
10311 | } |
10312 | case NEON::BI__builtin_neon_vpadds_f32: { |
10313 | auto *Ty = llvm::FixedVectorType::get(FloatTy, 2); |
10314 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
10315 | // The vector is v2f32, so make sure it's bitcast to that. |
10316 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f32"); |
10317 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
10318 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
10319 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
10320 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
10321 | // Pairwise addition of a v2f32 into a scalar f32. |
10322 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
10323 | } |
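      | // All three pairwise-add cases above share one shape: bitcast to the
      | // two-element vector type, extract both lanes, and add them, so the
      | // scalar result is simply lane0 + lane1. In C-like pseudocode (for
      | // illustration only):
      | //   float vpadds_f32(float32x2_t v) { return v[0] + v[1]; }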
10324 | case NEON::BI__builtin_neon_vceqzd_s64: |
10325 | case NEON::BI__builtin_neon_vceqzd_f64: |
10326 | case NEON::BI__builtin_neon_vceqzs_f32: |
10327 | case NEON::BI__builtin_neon_vceqzh_f16: |
10328 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10329 | return EmitAArch64CompareBuiltinExpr( |
10330 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10331 | ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz"); |
10332 | case NEON::BI__builtin_neon_vcgezd_s64: |
10333 | case NEON::BI__builtin_neon_vcgezd_f64: |
10334 | case NEON::BI__builtin_neon_vcgezs_f32: |
10335 | case NEON::BI__builtin_neon_vcgezh_f16: |
10336 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10337 | return EmitAArch64CompareBuiltinExpr( |
10338 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10339 | ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez"); |
10340 | case NEON::BI__builtin_neon_vclezd_s64: |
10341 | case NEON::BI__builtin_neon_vclezd_f64: |
10342 | case NEON::BI__builtin_neon_vclezs_f32: |
10343 | case NEON::BI__builtin_neon_vclezh_f16: |
10344 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10345 | return EmitAArch64CompareBuiltinExpr( |
10346 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10347 | ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez"); |
10348 | case NEON::BI__builtin_neon_vcgtzd_s64: |
10349 | case NEON::BI__builtin_neon_vcgtzd_f64: |
10350 | case NEON::BI__builtin_neon_vcgtzs_f32: |
10351 | case NEON::BI__builtin_neon_vcgtzh_f16: |
10352 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10353 | return EmitAArch64CompareBuiltinExpr( |
10354 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10355 | ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz"); |
10356 | case NEON::BI__builtin_neon_vcltzd_s64: |
10357 | case NEON::BI__builtin_neon_vcltzd_f64: |
10358 | case NEON::BI__builtin_neon_vcltzs_f32: |
10359 | case NEON::BI__builtin_neon_vcltzh_f16: |
10360 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10361 | return EmitAArch64CompareBuiltinExpr( |
10362 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10363 | ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz"); |
10364 | |
10365 | case NEON::BI__builtin_neon_vceqzd_u64: { |
10366 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10367 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10368 | Ops[0] = |
10369 | Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty)); |
10370 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd"); |
10371 | } |
10372 | case NEON::BI__builtin_neon_vceqd_f64: |
10373 | case NEON::BI__builtin_neon_vcled_f64: |
10374 | case NEON::BI__builtin_neon_vcltd_f64: |
10375 | case NEON::BI__builtin_neon_vcged_f64: |
10376 | case NEON::BI__builtin_neon_vcgtd_f64: { |
10377 | llvm::CmpInst::Predicate P; |
10378 | switch (BuiltinID) { |
10379 | default: llvm_unreachable("missing builtin ID in switch!");
10380 | case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; |
10381 | case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; |
10382 | case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; |
10383 | case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; |
10384 | case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; |
10385 | } |
10386 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10387 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10388 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
10389 | if (P == llvm::FCmpInst::FCMP_OEQ) |
10390 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10391 | else |
10392 | Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]); |
10393 | return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd"); |
10394 | } |
10395 | case NEON::BI__builtin_neon_vceqs_f32: |
10396 | case NEON::BI__builtin_neon_vcles_f32: |
10397 | case NEON::BI__builtin_neon_vclts_f32: |
10398 | case NEON::BI__builtin_neon_vcges_f32: |
10399 | case NEON::BI__builtin_neon_vcgts_f32: { |
10400 | llvm::CmpInst::Predicate P; |
10401 | switch (BuiltinID) { |
10402 | default: llvm_unreachable("missing builtin ID in switch!");
10403 | case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; |
10404 | case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; |
10405 | case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; |
10406 | case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; |
10407 | case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; |
10408 | } |
10409 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10410 | Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); |
10411 | Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy); |
10412 | if (P == llvm::FCmpInst::FCMP_OEQ) |
10413 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10414 | else |
10415 | Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]); |
10416 | return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd"); |
10417 | } |
10418 | case NEON::BI__builtin_neon_vceqh_f16: |
10419 | case NEON::BI__builtin_neon_vcleh_f16: |
10420 | case NEON::BI__builtin_neon_vclth_f16: |
10421 | case NEON::BI__builtin_neon_vcgeh_f16: |
10422 | case NEON::BI__builtin_neon_vcgth_f16: { |
10423 | llvm::CmpInst::Predicate P; |
10424 | switch (BuiltinID) { |
10425 | default: llvm_unreachable("missing builtin ID in switch!");
10426 | case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; |
10427 | case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; |
10428 | case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; |
10429 | case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; |
10430 | case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; |
10431 | } |
10432 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10433 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
10434 | Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy); |
10435 | if (P == llvm::FCmpInst::FCMP_OEQ) |
10436 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10437 | else |
10438 | Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]); |
10439 | return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd"); |
10440 | } |
10441 | case NEON::BI__builtin_neon_vceqd_s64: |
10442 | case NEON::BI__builtin_neon_vceqd_u64: |
10443 | case NEON::BI__builtin_neon_vcgtd_s64: |
10444 | case NEON::BI__builtin_neon_vcgtd_u64: |
10445 | case NEON::BI__builtin_neon_vcltd_s64: |
10446 | case NEON::BI__builtin_neon_vcltd_u64: |
10447 | case NEON::BI__builtin_neon_vcged_u64: |
10448 | case NEON::BI__builtin_neon_vcged_s64: |
10449 | case NEON::BI__builtin_neon_vcled_u64: |
10450 | case NEON::BI__builtin_neon_vcled_s64: { |
10451 | llvm::CmpInst::Predicate P; |
10452 | switch (BuiltinID) { |
10453 | default: llvm_unreachable("missing builtin ID in switch!");
10454 | case NEON::BI__builtin_neon_vceqd_s64: |
10455 | case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break; |
10456 | case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break; |
10457 | case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break; |
10458 | case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break; |
10459 | case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break; |
10460 | case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break; |
10461 | case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break; |
10462 | case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break; |
10463 | case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break; |
10464 | } |
10465 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10466 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10467 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10468 | Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]); |
10469 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd"); |
10470 | } |
10471 | case NEON::BI__builtin_neon_vtstd_s64: |
10472 | case NEON::BI__builtin_neon_vtstd_u64: { |
10473 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10474 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10475 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10476 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
10477 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
10478 | llvm::Constant::getNullValue(Int64Ty)); |
10479 | return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd"); |
10480 | } |
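      | // vtstd tests whether the two values share any set bit: (a & b) != 0,
      | // widened back to the 64-bit all-ones/all-zeros mask by the sign
      | // extension. Sketch of the emitted IR:
      | //   %and = and i64 %a, %b
      | //   %cmp = icmp ne i64 %and, 0
      | //   %res = sext i1 %cmp to i64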
10481 | case NEON::BI__builtin_neon_vset_lane_i8: |
10482 | case NEON::BI__builtin_neon_vset_lane_i16: |
10483 | case NEON::BI__builtin_neon_vset_lane_i32: |
10484 | case NEON::BI__builtin_neon_vset_lane_i64: |
10485 | case NEON::BI__builtin_neon_vset_lane_bf16: |
10486 | case NEON::BI__builtin_neon_vset_lane_f32: |
10487 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
10488 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
10489 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
10490 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
10491 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
10492 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
10493 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10494 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10495 | case NEON::BI__builtin_neon_vset_lane_f64: |
10496 | // The vector type needs a cast for the v1f64 variant. |
10497 | Ops[1] = |
10498 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1)); |
10499 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10500 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10501 | case NEON::BI__builtin_neon_vsetq_lane_f64: |
10502 | // The vector type needs a cast for the v2f64 variant. |
10503 | Ops[1] = |
10504 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2)); |
10505 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10506 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10507 | |
10508 | case NEON::BI__builtin_neon_vget_lane_i8: |
10509 | case NEON::BI__builtin_neon_vdupb_lane_i8: |
10510 | Ops[0] = |
10511 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8)); |
10512 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10513 | "vget_lane"); |
10514 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
10515 | case NEON::BI__builtin_neon_vdupb_laneq_i8: |
10516 | Ops[0] = |
10517 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16)); |
10518 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10519 | "vgetq_lane"); |
10520 | case NEON::BI__builtin_neon_vget_lane_i16: |
10521 | case NEON::BI__builtin_neon_vduph_lane_i16: |
10522 | Ops[0] = |
10523 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4)); |
10524 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10525 | "vget_lane"); |
10526 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
10527 | case NEON::BI__builtin_neon_vduph_laneq_i16: |
10528 | Ops[0] = |
10529 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8)); |
10530 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10531 | "vgetq_lane"); |
10532 | case NEON::BI__builtin_neon_vget_lane_i32: |
10533 | case NEON::BI__builtin_neon_vdups_lane_i32: |
10534 | Ops[0] = |
10535 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2)); |
10536 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10537 | "vget_lane"); |
10538 | case NEON::BI__builtin_neon_vdups_lane_f32: |
10539 | Ops[0] = |
10540 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2)); |
10541 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10542 | "vdups_lane"); |
10543 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
10544 | case NEON::BI__builtin_neon_vdups_laneq_i32: |
10545 | Ops[0] = |
10546 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
10547 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10548 | "vgetq_lane"); |
10549 | case NEON::BI__builtin_neon_vget_lane_i64: |
10550 | case NEON::BI__builtin_neon_vdupd_lane_i64: |
10551 | Ops[0] = |
10552 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1)); |
10553 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10554 | "vget_lane"); |
10555 | case NEON::BI__builtin_neon_vdupd_lane_f64: |
10556 | Ops[0] = |
10557 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1)); |
10558 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10559 | "vdupd_lane"); |
10560 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
10561 | case NEON::BI__builtin_neon_vdupd_laneq_i64: |
10562 | Ops[0] = |
10563 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
10564 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10565 | "vgetq_lane"); |
10566 | case NEON::BI__builtin_neon_vget_lane_f32: |
10567 | Ops[0] = |
10568 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2)); |
10569 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10570 | "vget_lane"); |
10571 | case NEON::BI__builtin_neon_vget_lane_f64: |
10572 | Ops[0] = |
10573 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1)); |
10574 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10575 | "vget_lane"); |
10576 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
10577 | case NEON::BI__builtin_neon_vdups_laneq_f32: |
10578 | Ops[0] = |
10579 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4)); |
10580 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10581 | "vgetq_lane"); |
10582 | case NEON::BI__builtin_neon_vgetq_lane_f64: |
10583 | case NEON::BI__builtin_neon_vdupd_laneq_f64: |
10584 | Ops[0] = |
10585 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2)); |
10586 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10587 | "vgetq_lane"); |
10588 | case NEON::BI__builtin_neon_vaddh_f16: |
10589 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10590 | return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh"); |
10591 | case NEON::BI__builtin_neon_vsubh_f16: |
10592 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10593 | return Builder.CreateFSub(Ops[0], Ops[1], "vsubh"); |
10594 | case NEON::BI__builtin_neon_vmulh_f16: |
10595 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10596 | return Builder.CreateFMul(Ops[0], Ops[1], "vmulh"); |
10597 | case NEON::BI__builtin_neon_vdivh_f16: |
10598 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10599 | return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh"); |
10600 | case NEON::BI__builtin_neon_vfmah_f16: |
10601 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
10602 | return emitCallMaybeConstrainedFPBuiltin( |
10603 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy, |
10604 | {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]}); |
10605 | case NEON::BI__builtin_neon_vfmsh_f16: { |
10606 | // FIXME: This should be an fneg instruction: |
10607 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy); |
10608 | Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh"); |
10609 | |
10610 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
10611 | return emitCallMaybeConstrainedFPBuiltin( |
10612 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy, |
10613 | {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]}); |
10614 | } |
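      | // vfmsh_f16(a, b, c) computes a - b*c as fma(-b, c, a): the first
      | // multiplicand is negated (via the 0 - b subtraction flagged by the
      | // FIXME above) and the accumulator a is passed last to match the
      | // llvm.fma operand order.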
10615 | case NEON::BI__builtin_neon_vaddd_s64: |
10616 | case NEON::BI__builtin_neon_vaddd_u64: |
10617 | return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); |
10618 | case NEON::BI__builtin_neon_vsubd_s64: |
10619 | case NEON::BI__builtin_neon_vsubd_u64: |
10620 | return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); |
10621 | case NEON::BI__builtin_neon_vqdmlalh_s16: |
10622 | case NEON::BI__builtin_neon_vqdmlslh_s16: { |
10623 | SmallVector<Value *, 2> ProductOps; |
10624 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
10625 | ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); |
10626 | auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); |
10627 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
10628 | ProductOps, "vqdmlXl"); |
10629 | Constant *CI = ConstantInt::get(SizeTy, 0); |
10630 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
10631 | |
10632 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 |
10633 | ? Intrinsic::aarch64_neon_sqadd |
10634 | : Intrinsic::aarch64_neon_sqsub; |
10635 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); |
10636 | } |
10637 | case NEON::BI__builtin_neon_vqshlud_n_s64: { |
10638 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10639 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
10640 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), |
10641 | Ops, "vqshlu_n"); |
10642 | } |
10643 | case NEON::BI__builtin_neon_vqshld_n_u64: |
10644 | case NEON::BI__builtin_neon_vqshld_n_s64: { |
10645 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 |
10646 | ? Intrinsic::aarch64_neon_uqshl |
10647 | : Intrinsic::aarch64_neon_sqshl; |
10648 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10649 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
10650 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); |
10651 | } |
10652 | case NEON::BI__builtin_neon_vrshrd_n_u64: |
10653 | case NEON::BI__builtin_neon_vrshrd_n_s64: { |
10654 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 |
10655 | ? Intrinsic::aarch64_neon_urshl |
10656 | : Intrinsic::aarch64_neon_srshl; |
10657 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10658 | int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); |
10659 | Ops[1] = ConstantInt::get(Int64Ty, -SV); |
10660 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n"); |
10661 | } |
10662 | case NEON::BI__builtin_neon_vrsrad_n_u64: |
10663 | case NEON::BI__builtin_neon_vrsrad_n_s64: { |
10664 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 |
10665 | ? Intrinsic::aarch64_neon_urshl |
10666 | : Intrinsic::aarch64_neon_srshl; |
10667 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10668 | Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); |
10669 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), |
10670 | {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)}); |
10671 | return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty)); |
10672 | } |
10673 | case NEON::BI__builtin_neon_vshld_n_s64: |
10674 | case NEON::BI__builtin_neon_vshld_n_u64: { |
10675 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10676 | return Builder.CreateShl( |
10677 | Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); |
10678 | } |
10679 | case NEON::BI__builtin_neon_vshrd_n_s64: { |
10680 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10681 | return Builder.CreateAShr( |
10682 | Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
10683 | Amt->getZExtValue())), |
10684 | "shrd_n"); |
10685 | } |
10686 | case NEON::BI__builtin_neon_vshrd_n_u64: { |
10687 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10688 | uint64_t ShiftAmt = Amt->getZExtValue(); |
10689 | // Right-shifting an unsigned value by its size yields 0. |
10690 | if (ShiftAmt == 64) |
10691 | return ConstantInt::get(Int64Ty, 0); |
10692 | return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt), |
10693 | "shrd_n"); |
10694 | } |
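      | // The immediate range for this shift is 1..64 on AArch64, but an LLVM
      | // lshr by the full bit width produces poison, so the shift-by-64 case
      | // is folded to the architecturally defined result of 0 rather than
      | // being emitted as an IR shift.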
10695 | case NEON::BI__builtin_neon_vsrad_n_s64: { |
10696 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
10697 | Ops[1] = Builder.CreateAShr( |
10698 | Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
10699 | Amt->getZExtValue())), |
10700 | "shrd_n"); |
10701 | return Builder.CreateAdd(Ops[0], Ops[1]); |
10702 | } |
10703 | case NEON::BI__builtin_neon_vsrad_n_u64: { |
10704 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
10705 | uint64_t ShiftAmt = Amt->getZExtValue(); |
10706 | // Right-shifting an unsigned value by its size yields 0. |
10707 | // As Op + 0 = Op, return Ops[0] directly. |
10708 | if (ShiftAmt == 64) |
10709 | return Ops[0]; |
10710 | Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt), |
10711 | "shrd_n"); |
10712 | return Builder.CreateAdd(Ops[0], Ops[1]); |
10713 | } |
10714 | case NEON::BI__builtin_neon_vqdmlalh_lane_s16: |
10715 | case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: |
10716 | case NEON::BI__builtin_neon_vqdmlslh_lane_s16: |
10717 | case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { |
10718 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
10719 | "lane"); |
10720 | SmallVector<Value *, 2> ProductOps; |
10721 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
10722 | ProductOps.push_back(vectorWrapScalar16(Ops[2])); |
10723 | auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); |
10724 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
10725 | ProductOps, "vqdmlXl"); |
10726 | Constant *CI = ConstantInt::get(SizeTy, 0); |
10727 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
10728 | Ops.pop_back(); |
10729 | |
10730 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || |
10731 | BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) |
10732 | ? Intrinsic::aarch64_neon_sqadd |
10733 | : Intrinsic::aarch64_neon_sqsub; |
10734 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl"); |
10735 | } |
10736 | case NEON::BI__builtin_neon_vqdmlals_s32: |
10737 | case NEON::BI__builtin_neon_vqdmlsls_s32: { |
10738 | SmallVector<Value *, 2> ProductOps; |
10739 | ProductOps.push_back(Ops[1]); |
10740 | ProductOps.push_back(EmitScalarExpr(E->getArg(2))); |
10741 | Ops[1] = |
10742 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
10743 | ProductOps, "vqdmlXl"); |
10744 | |
10745 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 |
10746 | ? Intrinsic::aarch64_neon_sqadd |
10747 | : Intrinsic::aarch64_neon_sqsub; |
10748 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); |
10749 | } |
10750 | case NEON::BI__builtin_neon_vqdmlals_lane_s32: |
10751 | case NEON::BI__builtin_neon_vqdmlals_laneq_s32: |
10752 | case NEON::BI__builtin_neon_vqdmlsls_lane_s32: |
10753 | case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { |
10754 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
10755 | "lane"); |
10756 | SmallVector<Value *, 2> ProductOps; |
10757 | ProductOps.push_back(Ops[1]); |
10758 | ProductOps.push_back(Ops[2]); |
10759 | Ops[1] = |
10760 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
10761 | ProductOps, "vqdmlXl"); |
10762 | Ops.pop_back(); |
10763 | |
10764 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || |
10765 | BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) |
10766 | ? Intrinsic::aarch64_neon_sqadd |
10767 | : Intrinsic::aarch64_neon_sqsub; |
10768 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl"); |
10769 | } |
10770 | case NEON::BI__builtin_neon_vget_lane_bf16: |
10771 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
10772 | case NEON::BI__builtin_neon_vduph_lane_f16: { |
10773 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10774 | "vget_lane"); |
10775 | } |
10776 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
10777 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
10778 | case NEON::BI__builtin_neon_vduph_laneq_f16: { |
10779 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10780 | "vgetq_lane"); |
10781 | } |
10782 | |
10783 | case AArch64::BI_InterlockedAdd: { |
10784 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
10785 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
10786 | AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
10787 | AtomicRMWInst::Add, Arg0, Arg1, |
10788 | llvm::AtomicOrdering::SequentiallyConsistent); |
10789 | return Builder.CreateAdd(RMWI, Arg1); |
10790 | } |
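      | // CreateAtomicRMW returns the value the memory held *before* the add,
      | // while MSVC's _InterlockedAdd returns the value *after* it, hence the
      | // extra CreateAdd of Arg1 on the returned result.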
10791 | } |
10792 | |
10793 | llvm::FixedVectorType *VTy = GetNeonType(this, Type); |
10794 | llvm::Type *Ty = VTy; |
10795 | if (!Ty) |
10796 | return nullptr; |
10797 | |
10798 | // Not all intrinsics handled by the common case work for AArch64 yet, so only |
10799 | // defer to common code if it's been added to our special map. |
10800 | Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, |
10801 | AArch64SIMDIntrinsicsProvenSorted); |
10802 | |
10803 | if (Builtin) |
10804 | return EmitCommonNeonBuiltinExpr( |
10805 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
10806 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, |
10807 | /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); |
10808 | |
10809 | if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) |
10810 | return V; |
10811 | |
10812 | unsigned Int; |
10813 | switch (BuiltinID) { |
10814 | default: return nullptr; |
10815 | case NEON::BI__builtin_neon_vbsl_v: |
10816 | case NEON::BI__builtin_neon_vbslq_v: { |
10817 | llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); |
10818 | Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl"); |
10819 | Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl"); |
10820 | Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl"); |
10821 | |
10822 | Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl"); |
10823 | Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl"); |
10824 | Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl"); |
10825 | return Builder.CreateBitCast(Ops[0], Ty); |
10826 | } |
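      | // vbsl is a pure bitwise select, (sel & a) | (~sel & b), performed in
      | // the integer domain so the same lowering also covers FP vectors. A
      | // hypothetical scalar equivalent, for illustration:
      | //   uint64_t bsl(uint64_t sel, uint64_t a, uint64_t b) {
      | //     return (sel & a) | (~sel & b);
      | //   }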
10827 | case NEON::BI__builtin_neon_vfma_lane_v: |
10828 | case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types |
10829 | // The ARM builtins (and instructions) have the addend as the first |
10830 | // operand, but the 'fma' intrinsics have it last. Swap it around here. |
10831 | Value *Addend = Ops[0]; |
10832 | Value *Multiplicand = Ops[1]; |
10833 | Value *LaneSource = Ops[2]; |
10834 | Ops[0] = Multiplicand; |
10835 | Ops[1] = LaneSource; |
10836 | Ops[2] = Addend; |
10837 | |
10838 | // Now adjust things to handle the lane access. |
10839 | auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v |
10840 | ? llvm::FixedVectorType::get(VTy->getElementType(), |
10841 | VTy->getNumElements() / 2) |
10842 | : VTy; |
10843 | llvm::Constant *cst = cast<Constant>(Ops[3]); |
10844 | Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst); |
10845 | Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy); |
10846 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane"); |
10847 | |
10848 | Ops.pop_back(); |
10849 | Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma |
10850 | : Intrinsic::fma; |
10851 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla"); |
10852 | } |
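      | // After the swap the operands are (multiplicand, lane source, addend),
      | // matching llvm.fma's order. For the 128-bit "q" form the lane source
      | // is the half-width 64-bit vector, hence SourceTy with half the lanes;
      | // the splat shuffle then broadcasts lane Ops[3] across the full result
      | // width before the fma call.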
10853 | case NEON::BI__builtin_neon_vfma_laneq_v: { |
10854 | auto *VTy = cast<llvm::FixedVectorType>(Ty); |
10855 | // v1f64 fma should be mapped to Neon scalar f64 fma |
10856 | if (VTy && VTy->getElementType() == DoubleTy) { |
10857 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10858 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
10859 | llvm::FixedVectorType *VTy = |
10860 | GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true)); |
10861 | Ops[2] = Builder.CreateBitCast(Ops[2], VTy); |
10862 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
10863 | Value *Result; |
10864 | Result = emitCallMaybeConstrainedFPBuiltin( |
10865 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, |
10866 | DoubleTy, {Ops[1], Ops[2], Ops[0]}); |
10867 | return Builder.CreateBitCast(Result, Ty); |
10868 | } |
10869 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10870 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10871 | |
10872 | auto *STy = llvm::FixedVectorType::get(VTy->getElementType(), |
10873 | VTy->getNumElements() * 2); |
10874 | Ops[2] = Builder.CreateBitCast(Ops[2], STy); |
10875 | Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), |
10876 | cast<ConstantInt>(Ops[3])); |
10877 | Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane"); |
10878 | |
10879 | return emitCallMaybeConstrainedFPBuiltin( |
10880 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10881 | {Ops[2], Ops[1], Ops[0]}); |
10882 | } |
10883 | case NEON::BI__builtin_neon_vfmaq_laneq_v: { |
10884 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10885 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10886 | |
10887 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
10888 | Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3])); |
10889 | return emitCallMaybeConstrainedFPBuiltin( |
10890 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10891 | {Ops[2], Ops[1], Ops[0]}); |
10892 | } |
10893 | case NEON::BI__builtin_neon_vfmah_lane_f16: |
10894 | case NEON::BI__builtin_neon_vfmas_lane_f32: |
10895 | case NEON::BI__builtin_neon_vfmah_laneq_f16: |
10896 | case NEON::BI__builtin_neon_vfmas_laneq_f32: |
10897 | case NEON::BI__builtin_neon_vfmad_lane_f64: |
10898 | case NEON::BI__builtin_neon_vfmad_laneq_f64: { |
10899 | Ops.push_back(EmitScalarExpr(E->getArg(3))); |
10900 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
10901 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
10902 | return emitCallMaybeConstrainedFPBuiltin( |
10903 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10904 | {Ops[1], Ops[2], Ops[0]}); |
10905 | } |
10906 | case NEON::BI__builtin_neon_vmull_v: |
10907 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10908 | Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; |
10909 | if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; |
10910 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
10911 | case NEON::BI__builtin_neon_vmax_v: |
10912 | case NEON::BI__builtin_neon_vmaxq_v: |
10913 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10914 | Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; |
10915 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; |
10916 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); |
10917 | case NEON::BI__builtin_neon_vmaxh_f16: { |
10918 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10919 | Int = Intrinsic::aarch64_neon_fmax; |
10920 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax"); |
10921 | } |
10922 | case NEON::BI__builtin_neon_vmin_v: |
10923 | case NEON::BI__builtin_neon_vminq_v: |
10924 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10925 | Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; |
10926 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; |
10927 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); |
10928 | case NEON::BI__builtin_neon_vminh_f16: { |
10929 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10930 | Int = Intrinsic::aarch64_neon_fmin; |
10931 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin"); |
10932 | } |
10933 | case NEON::BI__builtin_neon_vabd_v: |
10934 | case NEON::BI__builtin_neon_vabdq_v: |
10935 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10936 | Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; |
10937 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; |
10938 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); |
10939 | case NEON::BI__builtin_neon_vpadal_v: |
10940 | case NEON::BI__builtin_neon_vpadalq_v: { |
10941 | unsigned ArgElts = VTy->getNumElements(); |
10942 | llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); |
10943 | unsigned BitWidth = EltTy->getBitWidth(); |
10944 | auto *ArgTy = llvm::FixedVectorType::get( |
10945 | llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts); |
10946 | llvm::Type* Tys[2] = { VTy, ArgTy }; |
10947 | Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; |
10948 | SmallVector<llvm::Value*, 1> TmpOps; |
10949 | TmpOps.push_back(Ops[1]); |
10950 | Function *F = CGM.getIntrinsic(Int, Tys); |
10951 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal"); |
10952 | llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); |
10953 | return Builder.CreateAdd(tmp, addend); |
10954 | } |
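      | // vpadal (pairwise add-long and accumulate) is emitted as the
      | // non-accumulating [us]addlp on the narrow argument followed by a
      | // plain vector add of the accumulator, rather than as a single
      | // accumulating intrinsic. Rough IR for vpadal_s8 (mangling
      | // illustrative):
      | //   %lp  = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %b)
      | //   %res = add <4 x i16> %lp, %a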
10955 | case NEON::BI__builtin_neon_vpmin_v: |
10956 | case NEON::BI__builtin_neon_vpminq_v: |
10957 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10958 | Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; |
10959 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; |
10960 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); |
10961 | case NEON::BI__builtin_neon_vpmax_v: |
10962 | case NEON::BI__builtin_neon_vpmaxq_v: |
10963 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10964 | Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; |
10965 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; |
10966 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); |
10967 | case NEON::BI__builtin_neon_vminnm_v: |
10968 | case NEON::BI__builtin_neon_vminnmq_v: |
10969 | Int = Intrinsic::aarch64_neon_fminnm; |
10970 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); |
10971 | case NEON::BI__builtin_neon_vminnmh_f16: |
10972 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10973 | Int = Intrinsic::aarch64_neon_fminnm; |
10974 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm"); |
10975 | case NEON::BI__builtin_neon_vmaxnm_v: |
10976 | case NEON::BI__builtin_neon_vmaxnmq_v: |
10977 | Int = Intrinsic::aarch64_neon_fmaxnm; |
10978 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); |
10979 | case NEON::BI__builtin_neon_vmaxnmh_f16: |
10980 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10981 | Int = Intrinsic::aarch64_neon_fmaxnm; |
10982 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm"); |
10983 | case NEON::BI__builtin_neon_vrecpss_f32: { |
10984 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10985 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), |
10986 | Ops, "vrecps"); |
10987 | } |
10988 | case NEON::BI__builtin_neon_vrecpsd_f64: |
10989 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10990 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), |
10991 | Ops, "vrecps"); |
10992 | case NEON::BI__builtin_neon_vrecpsh_f16: |
10993 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10994 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), |
10995 | Ops, "vrecps"); |
10996 | case NEON::BI__builtin_neon_vqshrun_n_v: |
10997 | Int = Intrinsic::aarch64_neon_sqshrun; |
10998 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); |
10999 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
11000 | Int = Intrinsic::aarch64_neon_sqrshrun; |
11001 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); |
11002 | case NEON::BI__builtin_neon_vqshrn_n_v: |
11003 | Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; |
11004 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); |
11005 | case NEON::BI__builtin_neon_vrshrn_n_v: |
11006 | Int = Intrinsic::aarch64_neon_rshrn; |
11007 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); |
11008 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
11009 | Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; |
11010 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); |
11011 | case NEON::BI__builtin_neon_vrndah_f16: { |
11012 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11013 | Int = Builder.getIsFPConstrained() |
11014 | ? Intrinsic::experimental_constrained_round |
11015 | : Intrinsic::round; |
11016 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda"); |
11017 | } |
11018 | case NEON::BI__builtin_neon_vrnda_v: |
11019 | case NEON::BI__builtin_neon_vrndaq_v: { |
11020 | Int = Builder.getIsFPConstrained() |
11021 | ? Intrinsic::experimental_constrained_round |
11022 | : Intrinsic::round; |
11023 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda"); |
11024 | } |
11025 | case NEON::BI__builtin_neon_vrndih_f16: { |
11026 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11027 | Int = Builder.getIsFPConstrained() |
11028 | ? Intrinsic::experimental_constrained_nearbyint |
11029 | : Intrinsic::nearbyint; |
11030 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi"); |
11031 | } |
11032 | case NEON::BI__builtin_neon_vrndmh_f16: { |
11033 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11034 | Int = Builder.getIsFPConstrained() |
11035 | ? Intrinsic::experimental_constrained_floor |
11036 | : Intrinsic::floor; |
11037 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm"); |
11038 | } |
11039 | case NEON::BI__builtin_neon_vrndm_v: |
11040 | case NEON::BI__builtin_neon_vrndmq_v: { |
11041 | Int = Builder.getIsFPConstrained() |
11042 | ? Intrinsic::experimental_constrained_floor |
11043 | : Intrinsic::floor; |
11044 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm"); |
11045 | } |
11046 | case NEON::BI__builtin_neon_vrndnh_f16: { |
11047 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11048 | Int = Builder.getIsFPConstrained() |
11049 | ? Intrinsic::experimental_constrained_roundeven |
11050 | : Intrinsic::roundeven; |
11051 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn"); |
11052 | } |
11053 | case NEON::BI__builtin_neon_vrndn_v: |
11054 | case NEON::BI__builtin_neon_vrndnq_v: { |
11055 | Int = Builder.getIsFPConstrained() |
11056 | ? Intrinsic::experimental_constrained_roundeven |
11057 | : Intrinsic::roundeven; |
11058 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn"); |
11059 | } |
11060 | case NEON::BI__builtin_neon_vrndns_f32: { |
11061 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11062 | Int = Builder.getIsFPConstrained() |
11063 | ? Intrinsic::experimental_constrained_roundeven |
11064 | : Intrinsic::roundeven; |
11065 | return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn"); |
11066 | } |
11067 | case NEON::BI__builtin_neon_vrndph_f16: { |
11068 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11069 | Int = Builder.getIsFPConstrained() |
11070 | ? Intrinsic::experimental_constrained_ceil |
11071 | : Intrinsic::ceil; |
11072 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp"); |
11073 | } |
11074 | case NEON::BI__builtin_neon_vrndp_v: |
11075 | case NEON::BI__builtin_neon_vrndpq_v: { |
11076 | Int = Builder.getIsFPConstrained() |
11077 | ? Intrinsic::experimental_constrained_ceil |
11078 | : Intrinsic::ceil; |
11079 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp"); |
11080 | } |
11081 | case NEON::BI__builtin_neon_vrndxh_f16: { |
11082 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11083 | Int = Builder.getIsFPConstrained() |
11084 | ? Intrinsic::experimental_constrained_rint |
11085 | : Intrinsic::rint; |
11086 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx"); |
11087 | } |
11088 | case NEON::BI__builtin_neon_vrndx_v: |
11089 | case NEON::BI__builtin_neon_vrndxq_v: { |
11090 | Int = Builder.getIsFPConstrained() |
11091 | ? Intrinsic::experimental_constrained_rint |
11092 | : Intrinsic::rint; |
11093 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx"); |
11094 | } |
11095 | case NEON::BI__builtin_neon_vrndh_f16: { |
11096 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11097 | Int = Builder.getIsFPConstrained() |
11098 | ? Intrinsic::experimental_constrained_trunc |
11099 | : Intrinsic::trunc; |
11100 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz"); |
11101 | } |
11102 | case NEON::BI__builtin_neon_vrnd32x_v: |
11103 | case NEON::BI__builtin_neon_vrnd32xq_v: { |
11104 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11105 | Int = Intrinsic::aarch64_neon_frint32x; |
11106 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x"); |
11107 | } |
11108 | case NEON::BI__builtin_neon_vrnd32z_v: |
11109 | case NEON::BI__builtin_neon_vrnd32zq_v: { |
11110 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11111 | Int = Intrinsic::aarch64_neon_frint32z; |
11112 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z"); |
11113 | } |
11114 | case NEON::BI__builtin_neon_vrnd64x_v: |
11115 | case NEON::BI__builtin_neon_vrnd64xq_v: { |
11116 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11117 | Int = Intrinsic::aarch64_neon_frint64x; |
11118 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x"); |
11119 | } |
11120 | case NEON::BI__builtin_neon_vrnd64z_v: |
11121 | case NEON::BI__builtin_neon_vrnd64zq_v: { |
11122 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11123 | Int = Intrinsic::aarch64_neon_frint64z; |
11124 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z"); |
11125 | } |
11126 | case NEON::BI__builtin_neon_vrnd_v: |
11127 | case NEON::BI__builtin_neon_vrndq_v: { |
11128 | Int = Builder.getIsFPConstrained() |
11129 | ? Intrinsic::experimental_constrained_trunc |
11130 | : Intrinsic::trunc; |
11131 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz"); |
11132 | } |
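      | // Taken together, the vrnd* cases map NEON rounding modes onto generic
      | // LLVM intrinsics (or their constrained variants under strict FP):
      | //   vrnda -> round      (ties away from zero)
      | //   vrndi -> nearbyint  (current mode, no inexact exception)
      | //   vrndm -> floor      (toward -inf)
      | //   vrndn -> roundeven  (ties to even)
      | //   vrndp -> ceil       (toward +inf)
      | //   vrndx -> rint       (current mode, may raise inexact)
      | //   vrnd  -> trunc      (toward zero)
      | // while vrnd32x/vrnd32z/vrnd64x/vrnd64z use the dedicated
      | // aarch64.neon.frint{32,64}{x,z} intrinsics.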
11133 | case NEON::BI__builtin_neon_vcvt_f64_v: |
11134 | case NEON::BI__builtin_neon_vcvtq_f64_v: |
11135 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11136 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad)); |
11137 | return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
11138 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
11139 | case NEON::BI__builtin_neon_vcvt_f64_f32: { |
11140 | assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
11141 |        "unexpected vcvt_f64_f32 builtin");
11142 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false); |
11143 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
11144 | |
11145 | return Builder.CreateFPExt(Ops[0], Ty, "vcvt"); |
11146 | } |
11147 | case NEON::BI__builtin_neon_vcvt_f32_f64: { |
11148 | assert(Type.getEltType() == NeonTypeFlags::Float32 &&
11149 |        "unexpected vcvt_f32_f64 builtin");
11150 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true); |
11151 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
11152 | |
11153 | return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt"); |
11154 | } |
11155 | case NEON::BI__builtin_neon_vcvt_s32_v: |
11156 | case NEON::BI__builtin_neon_vcvt_u32_v: |
11157 | case NEON::BI__builtin_neon_vcvt_s64_v: |
11158 | case NEON::BI__builtin_neon_vcvt_u64_v: |
11159 | case NEON::BI__builtin_neon_vcvt_s16_v: |
11160 | case NEON::BI__builtin_neon_vcvt_u16_v: |
11161 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
11162 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
11163 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
11164 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
11165 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
11166 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
11167 | Int = |
11168 | usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs; |
11169 | llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; |
11170 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz"); |
11171 | } |
11172 | case NEON::BI__builtin_neon_vcvta_s16_v: |
11173 | case NEON::BI__builtin_neon_vcvta_u16_v: |
11174 | case NEON::BI__builtin_neon_vcvta_s32_v: |
11175 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
11176 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
11177 | case NEON::BI__builtin_neon_vcvta_u32_v: |
11178 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
11179 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
11180 | case NEON::BI__builtin_neon_vcvta_s64_v: |
11181 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
11182 | case NEON::BI__builtin_neon_vcvta_u64_v: |
11183 | case NEON::BI__builtin_neon_vcvtaq_u64_v: { |
11184 | Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; |
11185 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
11186 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta"); |
11187 | } |
11188 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
11189 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
11190 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
11191 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
11192 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
11193 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
11194 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
11195 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
11196 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
11197 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
11198 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
11199 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
11200 | Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; |
11201 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
11202 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm"); |
11203 | } |
11204 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
11205 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
11206 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
11207 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
11208 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
11209 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
11210 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
11211 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
11212 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
11213 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
11214 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
11215 | case NEON::BI__builtin_neon_vcvtnq_u64_v: { |
11216 | Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; |
11217 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
11218 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn"); |
11219 | } |
11220 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
11221 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
11222 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
11223 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
11224 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
11225 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
11226 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
11227 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
11228 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
11229 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
11230 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
11231 | case NEON::BI__builtin_neon_vcvtpq_u64_v: { |
11232 | Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; |
11233 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
11234 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp"); |
11235 | } |
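      | // Similarly, the FP-to-integer conversion families select an AArch64
      | // intrinsic by rounding mode and signedness:
      | //   vcvta -> fcvta[su]  (ties away from zero)
      | //   vcvtm -> fcvtm[su]  (toward -inf)
      | //   vcvtn -> fcvtn[su]  (ties to even)
      | //   vcvtp -> fcvtp[su]  (toward +inf)
      | //   vcvt  -> fcvtz[su]  (toward zero)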
11236 | case NEON::BI__builtin_neon_vmulx_v: |
11237 | case NEON::BI__builtin_neon_vmulxq_v: { |
11238 | Int = Intrinsic::aarch64_neon_fmulx; |
11239 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); |
11240 | } |
11241 | case NEON::BI__builtin_neon_vmulxh_lane_f16: |
11242 | case NEON::BI__builtin_neon_vmulxh_laneq_f16: { |
11243 | // vmulx_lane should be mapped to Neon scalar mulx after |
11244 | // extracting the scalar element.
11245 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
11246 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
11247 | Ops.pop_back(); |
11248 | Int = Intrinsic::aarch64_neon_fmulx; |
11249 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx"); |
11250 | } |
11251 | case NEON::BI__builtin_neon_vmul_lane_v: |
11252 | case NEON::BI__builtin_neon_vmul_laneq_v: { |
11253 | // v1f64 vmul_lane should be mapped to Neon scalar mul lane |
11254 | bool Quad = false; |
11255 | if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v) |
11256 | Quad = true; |
11257 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
11258 | llvm::FixedVectorType *VTy = |
11259 | GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad)); |
11260 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
11261 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
11262 | Value *Result = Builder.CreateFMul(Ops[0], Ops[1]); |
11263 | return Builder.CreateBitCast(Result, Ty); |
11264 | } |
11265 | case NEON::BI__builtin_neon_vnegd_s64: |
11266 | return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); |
11267 | case NEON::BI__builtin_neon_vnegh_f16: |
11268 | return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh"); |
11269 | case NEON::BI__builtin_neon_vpmaxnm_v: |
11270 | case NEON::BI__builtin_neon_vpmaxnmq_v: { |
11271 | Int = Intrinsic::aarch64_neon_fmaxnmp; |
11272 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); |
11273 | } |
11274 | case NEON::BI__builtin_neon_vpminnm_v: |
11275 | case NEON::BI__builtin_neon_vpminnmq_v: { |
11276 | Int = Intrinsic::aarch64_neon_fminnmp; |
11277 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); |
11278 | } |
11279 | case NEON::BI__builtin_neon_vsqrth_f16: { |
11280 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11281 | Int = Builder.getIsFPConstrained() |
11282 | ? Intrinsic::experimental_constrained_sqrt |
11283 | : Intrinsic::sqrt; |
11284 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt"); |
11285 | } |
11286 | case NEON::BI__builtin_neon_vsqrt_v: |
11287 | case NEON::BI__builtin_neon_vsqrtq_v: { |
11288 | Int = Builder.getIsFPConstrained() |
11289 | ? Intrinsic::experimental_constrained_sqrt |
11290 | : Intrinsic::sqrt; |
11291 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11292 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt"); |
11293 | } |
11294 | case NEON::BI__builtin_neon_vrbit_v: |
11295 | case NEON::BI__builtin_neon_vrbitq_v: { |
11296 | Int = Intrinsic::bitreverse; |
11297 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit"); |
11298 | } |
11299 | case NEON::BI__builtin_neon_vaddv_u8: |
11300 | // FIXME: These are handled by the AArch64 scalar code. |
11301 | usgn = true; |
11302 | LLVM_FALLTHROUGH;
11303 | case NEON::BI__builtin_neon_vaddv_s8: { |
11304 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11305 | Ty = Int32Ty; |
11306 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11307 | llvm::Type *Tys[2] = { Ty, VTy }; |
11308 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11309 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11310 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11311 | } |
11312 | case NEON::BI__builtin_neon_vaddv_u16: |
11313 | usgn = true; |
11314 | LLVM_FALLTHROUGH; |
11315 | case NEON::BI__builtin_neon_vaddv_s16: { |
11316 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11317 | Ty = Int32Ty; |
11318 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11319 | llvm::Type *Tys[2] = { Ty, VTy }; |
11320 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11321 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11322 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11323 | } |
11324 | case NEON::BI__builtin_neon_vaddvq_u8: |
11325 | usgn = true; |
11326 | LLVM_FALLTHROUGH; |
11327 | case NEON::BI__builtin_neon_vaddvq_s8: { |
11328 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11329 | Ty = Int32Ty; |
11330 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11331 | llvm::Type *Tys[2] = { Ty, VTy }; |
11332 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11333 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11334 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11335 | } |
11336 | case NEON::BI__builtin_neon_vaddvq_u16: |
11337 | usgn = true; |
11338 | LLVM_FALLTHROUGH; |
11339 | case NEON::BI__builtin_neon_vaddvq_s16: { |
11340 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11341 | Ty = Int32Ty; |
11342 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11343 | llvm::Type *Tys[2] = { Ty, VTy }; |
11344 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11345 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11346 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11347 | } |
11348 | case NEON::BI__builtin_neon_vmaxv_u8: { |
11349 | Int = Intrinsic::aarch64_neon_umaxv; |
11350 | Ty = Int32Ty; |
11351 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11352 | llvm::Type *Tys[2] = { Ty, VTy }; |
11353 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11354 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11355 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11356 | } |
11357 | case NEON::BI__builtin_neon_vmaxv_u16: { |
11358 | Int = Intrinsic::aarch64_neon_umaxv; |
11359 | Ty = Int32Ty; |
11360 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11361 | llvm::Type *Tys[2] = { Ty, VTy }; |
11362 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11363 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11364 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11365 | } |
11366 | case NEON::BI__builtin_neon_vmaxvq_u8: { |
11367 | Int = Intrinsic::aarch64_neon_umaxv; |
11368 | Ty = Int32Ty; |
11369 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11370 | llvm::Type *Tys[2] = { Ty, VTy }; |
11371 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11372 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11373 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11374 | } |
11375 | case NEON::BI__builtin_neon_vmaxvq_u16: { |
11376 | Int = Intrinsic::aarch64_neon_umaxv; |
11377 | Ty = Int32Ty; |
11378 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11379 | llvm::Type *Tys[2] = { Ty, VTy }; |
11380 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11381 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11382 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11383 | } |
11384 | case NEON::BI__builtin_neon_vmaxv_s8: { |
11385 | Int = Intrinsic::aarch64_neon_smaxv; |
11386 | Ty = Int32Ty; |
11387 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11388 | llvm::Type *Tys[2] = { Ty, VTy }; |
11389 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11390 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11391 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11392 | } |
11393 | case NEON::BI__builtin_neon_vmaxv_s16: { |
11394 | Int = Intrinsic::aarch64_neon_smaxv; |
11395 | Ty = Int32Ty; |
11396 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11397 | llvm::Type *Tys[2] = { Ty, VTy }; |
11398 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11399 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11400 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11401 | } |
11402 | case NEON::BI__builtin_neon_vmaxvq_s8: { |
11403 | Int = Intrinsic::aarch64_neon_smaxv; |
11404 | Ty = Int32Ty; |
11405 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11406 | llvm::Type *Tys[2] = { Ty, VTy }; |
11407 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11408 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11409 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11410 | } |
11411 | case NEON::BI__builtin_neon_vmaxvq_s16: { |
11412 | Int = Intrinsic::aarch64_neon_smaxv; |
11413 | Ty = Int32Ty; |
11414 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11415 | llvm::Type *Tys[2] = { Ty, VTy }; |
11416 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11417 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11418 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11419 | } |
11420 | case NEON::BI__builtin_neon_vmaxv_f16: { |
11421 | Int = Intrinsic::aarch64_neon_fmaxv; |
11422 | Ty = HalfTy; |
11423 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11424 | llvm::Type *Tys[2] = { Ty, VTy }; |
11425 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11426 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11427 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11428 | } |
11429 | case NEON::BI__builtin_neon_vmaxvq_f16: { |
11430 | Int = Intrinsic::aarch64_neon_fmaxv; |
11431 | Ty = HalfTy; |
11432 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11433 | llvm::Type *Tys[2] = { Ty, VTy }; |
11434 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11435 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11436 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11437 | } |
11438 | case NEON::BI__builtin_neon_vminv_u8: { |
11439 | Int = Intrinsic::aarch64_neon_uminv; |
11440 | Ty = Int32Ty; |
11441 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11442 | llvm::Type *Tys[2] = { Ty, VTy }; |
11443 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11444 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11445 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11446 | } |
11447 | case NEON::BI__builtin_neon_vminv_u16: { |
11448 | Int = Intrinsic::aarch64_neon_uminv; |
11449 | Ty = Int32Ty; |
11450 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11451 | llvm::Type *Tys[2] = { Ty, VTy }; |
11452 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11453 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11454 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11455 | } |
11456 | case NEON::BI__builtin_neon_vminvq_u8: { |
11457 | Int = Intrinsic::aarch64_neon_uminv; |
11458 | Ty = Int32Ty; |
11459 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11460 | llvm::Type *Tys[2] = { Ty, VTy }; |
11461 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11462 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11463 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11464 | } |
11465 | case NEON::BI__builtin_neon_vminvq_u16: { |
11466 | Int = Intrinsic::aarch64_neon_uminv; |
11467 | Ty = Int32Ty; |
11468 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11469 | llvm::Type *Tys[2] = { Ty, VTy }; |
11470 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11471 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11472 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11473 | } |
11474 | case NEON::BI__builtin_neon_vminv_s8: { |
11475 | Int = Intrinsic::aarch64_neon_sminv; |
11476 | Ty = Int32Ty; |
11477 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11478 | llvm::Type *Tys[2] = { Ty, VTy }; |
11479 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11480 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11481 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11482 | } |
11483 | case NEON::BI__builtin_neon_vminv_s16: { |
11484 | Int = Intrinsic::aarch64_neon_sminv; |
11485 | Ty = Int32Ty; |
11486 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11487 | llvm::Type *Tys[2] = { Ty, VTy }; |
11488 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11489 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11490 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11491 | } |
11492 | case NEON::BI__builtin_neon_vminvq_s8: { |
11493 | Int = Intrinsic::aarch64_neon_sminv; |
11494 | Ty = Int32Ty; |
11495 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11496 | llvm::Type *Tys[2] = { Ty, VTy }; |
11497 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11498 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11499 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11500 | } |
11501 | case NEON::BI__builtin_neon_vminvq_s16: { |
11502 | Int = Intrinsic::aarch64_neon_sminv; |
11503 | Ty = Int32Ty; |
11504 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11505 | llvm::Type *Tys[2] = { Ty, VTy }; |
11506 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11507 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11508 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11509 | } |
11510 | case NEON::BI__builtin_neon_vminv_f16: { |
11511 | Int = Intrinsic::aarch64_neon_fminv; |
11512 | Ty = HalfTy; |
11513 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11514 | llvm::Type *Tys[2] = { Ty, VTy }; |
11515 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11516 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11517 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11518 | } |
11519 | case NEON::BI__builtin_neon_vminvq_f16: { |
11520 | Int = Intrinsic::aarch64_neon_fminv; |
11521 | Ty = HalfTy; |
11522 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11523 | llvm::Type *Tys[2] = { Ty, VTy }; |
11524 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11525 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11526 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11527 | } |
11528 | case NEON::BI__builtin_neon_vmaxnmv_f16: { |
11529 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
11530 | Ty = HalfTy; |
11531 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11532 | llvm::Type *Tys[2] = { Ty, VTy }; |
11533 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11534 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
11535 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11536 | } |
11537 | case NEON::BI__builtin_neon_vmaxnmvq_f16: { |
11538 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
11539 | Ty = HalfTy; |
11540 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11541 | llvm::Type *Tys[2] = { Ty, VTy }; |
11542 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11543 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
11544 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11545 | } |
11546 | case NEON::BI__builtin_neon_vminnmv_f16: { |
11547 | Int = Intrinsic::aarch64_neon_fminnmv; |
11548 | Ty = HalfTy; |
11549 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11550 | llvm::Type *Tys[2] = { Ty, VTy }; |
11551 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11552 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
11553 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11554 | } |
11555 | case NEON::BI__builtin_neon_vminnmvq_f16: { |
11556 | Int = Intrinsic::aarch64_neon_fminnmv; |
11557 | Ty = HalfTy; |
11558 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11559 | llvm::Type *Tys[2] = { Ty, VTy }; |
11560 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11561 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
11562 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11563 | } |
11564 | case NEON::BI__builtin_neon_vmul_n_f64: { |
11565 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
11566 | Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); |
11567 | return Builder.CreateFMul(Ops[0], RHS); |
11568 | } |
11569 | case NEON::BI__builtin_neon_vaddlv_u8: { |
11570 | Int = Intrinsic::aarch64_neon_uaddlv; |
11571 | Ty = Int32Ty; |
11572 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11573 | llvm::Type *Tys[2] = { Ty, VTy }; |
11574 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11575 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11576 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11577 | } |
11578 | case NEON::BI__builtin_neon_vaddlv_u16: { |
11579 | Int = Intrinsic::aarch64_neon_uaddlv; |
11580 | Ty = Int32Ty; |
11581 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11582 | llvm::Type *Tys[2] = { Ty, VTy }; |
11583 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11584 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11585 | } |
11586 | case NEON::BI__builtin_neon_vaddlvq_u8: { |
11587 | Int = Intrinsic::aarch64_neon_uaddlv; |
11588 | Ty = Int32Ty; |
11589 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11590 | llvm::Type *Tys[2] = { Ty, VTy }; |
11591 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11592 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11593 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11594 | } |
11595 | case NEON::BI__builtin_neon_vaddlvq_u16: { |
11596 | Int = Intrinsic::aarch64_neon_uaddlv; |
11597 | Ty = Int32Ty; |
11598 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11599 | llvm::Type *Tys[2] = { Ty, VTy }; |
11600 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11601 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11602 | } |
11603 | case NEON::BI__builtin_neon_vaddlv_s8: { |
11604 | Int = Intrinsic::aarch64_neon_saddlv; |
11605 | Ty = Int32Ty; |
11606 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11607 | llvm::Type *Tys[2] = { Ty, VTy }; |
11608 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11609 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11610 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11611 | } |
11612 | case NEON::BI__builtin_neon_vaddlv_s16: { |
11613 | Int = Intrinsic::aarch64_neon_saddlv; |
11614 | Ty = Int32Ty; |
11615 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11616 | llvm::Type *Tys[2] = { Ty, VTy }; |
11617 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11618 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11619 | } |
11620 | case NEON::BI__builtin_neon_vaddlvq_s8: { |
11621 | Int = Intrinsic::aarch64_neon_saddlv; |
11622 | Ty = Int32Ty; |
11623 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11624 | llvm::Type *Tys[2] = { Ty, VTy }; |
11625 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11626 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11627 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11628 | } |
11629 | case NEON::BI__builtin_neon_vaddlvq_s16: { |
11630 | Int = Intrinsic::aarch64_neon_saddlv; |
11631 | Ty = Int32Ty; |
11632 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11633 | llvm::Type *Tys[2] = { Ty, VTy }; |
11634 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11635 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11636 | } |
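      | // The shift-with-insert builtins map directly onto the corresponding |
      | // aarch64.neon.vsri/vsli intrinsics; the immediate shift amount is |
      | // passed through unchanged as the last operand. |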
11637 | case NEON::BI__builtin_neon_vsri_n_v: |
11638 | case NEON::BI__builtin_neon_vsriq_n_v: { |
11639 | Int = Intrinsic::aarch64_neon_vsri; |
11640 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
11641 | return EmitNeonCall(Intrin, Ops, "vsri_n"); |
11642 | } |
11643 | case NEON::BI__builtin_neon_vsli_n_v: |
11644 | case NEON::BI__builtin_neon_vsliq_n_v: { |
11645 | Int = Intrinsic::aarch64_neon_vsli; |
11646 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
11647 | return EmitNeonCall(Intrin, Ops, "vsli_n"); |
11648 | } |
11649 | case NEON::BI__builtin_neon_vsra_n_v: |
11650 | case NEON::BI__builtin_neon_vsraq_n_v: |
11651 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11652 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
11653 | return Builder.CreateAdd(Ops[0], Ops[1]); |
11654 | case NEON::BI__builtin_neon_vrsra_n_v: |
11655 | case NEON::BI__builtin_neon_vrsraq_n_v: { |
11656 | Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; |
11657 | SmallVector<llvm::Value*,2> TmpOps; |
11658 | TmpOps.push_back(Ops[1]); |
11659 | TmpOps.push_back(Ops[2]); |
11660 | Function* F = CGM.getIntrinsic(Int, Ty); |
11661 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true); |
11662 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
11663 | return Builder.CreateAdd(Ops[0], tmp); |
11664 | } |
11665 | case NEON::BI__builtin_neon_vld1_v: |
11666 | case NEON::BI__builtin_neon_vld1q_v: { |
11667 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
11668 | return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment()); |
11669 | } |
11670 | case NEON::BI__builtin_neon_vst1_v: |
11671 | case NEON::BI__builtin_neon_vst1q_v: |
11672 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
11673 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
11674 | return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment()); |
11675 | case NEON::BI__builtin_neon_vld1_lane_v: |
11676 | case NEON::BI__builtin_neon_vld1q_lane_v: { |
11677 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11678 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
11679 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11680 | Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], |
11681 | PtrOp0.getAlignment()); |
11682 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane"); |
11683 | } |
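      | // vld1_dup: load a single element, insert it at lane 0 of an undef |
      | // vector, and splat it to every lane. |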
11684 | case NEON::BI__builtin_neon_vld1_dup_v: |
11685 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
11686 | Value *V = UndefValue::get(Ty); |
11687 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
11688 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11689 | Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], |
11690 | PtrOp0.getAlignment()); |
11691 | llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); |
11692 | Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); |
11693 | return EmitNeonSplat(Ops[0], CI); |
11694 | } |
11695 | case NEON::BI__builtin_neon_vst1_lane_v: |
11696 | case NEON::BI__builtin_neon_vst1q_lane_v: |
11697 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11698 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
11699 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11700 | return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty), |
11701 | PtrOp0.getAlignment()); |
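      | // The vldN builtins return N vectors in an aggregate: the intrinsic |
      | // result is stored through Ops[0], the caller-provided result pointer. |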
11702 | case NEON::BI__builtin_neon_vld2_v: |
11703 | case NEON::BI__builtin_neon_vld2q_v: { |
11704 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11705 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11706 | llvm::Type *Tys[2] = { VTy, PTy }; |
11707 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); |
11708 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
11709 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11710 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11711 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11712 | } |
11713 | case NEON::BI__builtin_neon_vld3_v: |
11714 | case NEON::BI__builtin_neon_vld3q_v: { |
11715 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11716 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11717 | llvm::Type *Tys[2] = { VTy, PTy }; |
11718 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); |
11719 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
11720 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11721 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11722 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11723 | } |
11724 | case NEON::BI__builtin_neon_vld4_v: |
11725 | case NEON::BI__builtin_neon_vld4q_v: { |
11726 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11727 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11728 | llvm::Type *Tys[2] = { VTy, PTy }; |
11729 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); |
11730 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
11731 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11732 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11733 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11734 | } |
11735 | case NEON::BI__builtin_neon_vld2_dup_v: |
11736 | case NEON::BI__builtin_neon_vld2q_dup_v: { |
11737 | llvm::Type *PTy = |
11738 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11739 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11740 | llvm::Type *Tys[2] = { VTy, PTy }; |
11741 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); |
11742 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
11743 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11744 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11745 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11746 | } |
11747 | case NEON::BI__builtin_neon_vld3_dup_v: |
11748 | case NEON::BI__builtin_neon_vld3q_dup_v: { |
11749 | llvm::Type *PTy = |
11750 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11751 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11752 | llvm::Type *Tys[2] = { VTy, PTy }; |
11753 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); |
11754 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
11755 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11756 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11757 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11758 | } |
11759 | case NEON::BI__builtin_neon_vld4_dup_v: |
11760 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
11761 | llvm::Type *PTy = |
11762 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11763 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11764 | llvm::Type *Tys[2] = { VTy, PTy }; |
11765 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); |
11766 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
11767 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11768 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11769 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11770 | } |
11771 | case NEON::BI__builtin_neon_vld2_lane_v: |
11772 | case NEON::BI__builtin_neon_vld2q_lane_v: { |
11773 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11774 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); |
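      | // std::rotate moves the source pointer from Ops[1] to the back so the |
      | // call operands match the ldN.lane signature (vectors, lane, pointer). |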
11775 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11776 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11777 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11778 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
11779 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane"); |
11780 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11781 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11782 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11783 | } |
11784 | case NEON::BI__builtin_neon_vld3_lane_v: |
11785 | case NEON::BI__builtin_neon_vld3q_lane_v: { |
11786 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11787 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); |
11788 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11789 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11790 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11791 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
11792 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
11793 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane"); |
11794 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11795 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11796 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11797 | } |
11798 | case NEON::BI__builtin_neon_vld4_lane_v: |
11799 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
11800 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11801 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); |
11802 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11803 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11804 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11805 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
11806 | Ops[4] = Builder.CreateBitCast(Ops[4], Ty); |
11807 | Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty); |
11808 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane"); |
11809 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11810 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11811 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11812 | } |
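      | // For the stN builtins the pointer argument comes first in the builtin |
      | // but last in the intrinsic, so rotate it from the front to the back. |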
11813 | case NEON::BI__builtin_neon_vst2_v: |
11814 | case NEON::BI__builtin_neon_vst2q_v: { |
11815 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11816 | llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; |
11817 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), |
11818 | Ops, ""); |
11819 | } |
11820 | case NEON::BI__builtin_neon_vst2_lane_v: |
11821 | case NEON::BI__builtin_neon_vst2q_lane_v: { |
11822 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11823 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
11824 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
11825 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), |
11826 | Ops, ""); |
11827 | } |
11828 | case NEON::BI__builtin_neon_vst3_v: |
11829 | case NEON::BI__builtin_neon_vst3q_v: { |
11830 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11831 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
11832 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), |
11833 | Ops, ""); |
11834 | } |
11835 | case NEON::BI__builtin_neon_vst3_lane_v: |
11836 | case NEON::BI__builtin_neon_vst3q_lane_v: { |
11837 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11838 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
11839 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
11840 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), |
11841 | Ops, ""); |
11842 | } |
11843 | case NEON::BI__builtin_neon_vst4_v: |
11844 | case NEON::BI__builtin_neon_vst4q_v: { |
11845 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11846 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
11847 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), |
11848 | Ops, ""); |
11849 | } |
11850 | case NEON::BI__builtin_neon_vst4_lane_v: |
11851 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
11852 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11853 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
11854 | llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; |
11855 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), |
11856 | Ops, ""); |
11857 | } |
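      | // vtrn/vuzp/vzip each produce a pair of vectors: the loop below builds |
      | // one half per iteration with a shufflevector and stores it to the |
      | // corresponding slot of the two-element result pointed to by Ops[0]. |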
11858 | case NEON::BI__builtin_neon_vtrn_v: |
11859 | case NEON::BI__builtin_neon_vtrnq_v: { |
11860 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11861 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11862 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11863 | Value *SV = nullptr; |
11864 | |
11865 | for (unsigned vi = 0; vi != 2; ++vi) { |
11866 | SmallVector<int, 16> Indices; |
11867 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
11868 | Indices.push_back(i+vi); |
11869 | Indices.push_back(i+e+vi); |
11870 | } |
11871 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11872 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
11873 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11874 | } |
11875 | return SV; |
11876 | } |
11877 | case NEON::BI__builtin_neon_vuzp_v: |
11878 | case NEON::BI__builtin_neon_vuzpq_v: { |
11879 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11880 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11881 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11882 | Value *SV = nullptr; |
11883 | |
11884 | for (unsigned vi = 0; vi != 2; ++vi) { |
11885 | SmallVector<int, 16> Indices; |
11886 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
11887 | Indices.push_back(2*i+vi); |
11888 | |
11889 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11890 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
11891 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11892 | } |
11893 | return SV; |
11894 | } |
11895 | case NEON::BI__builtin_neon_vzip_v: |
11896 | case NEON::BI__builtin_neon_vzipq_v: { |
11897 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11898 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11899 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11900 | Value *SV = nullptr; |
11901 | |
11902 | for (unsigned vi = 0; vi != 2; ++vi) { |
11903 | SmallVector<int, 16> Indices; |
11904 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
11905 | Indices.push_back((i + vi*e) >> 1); |
11906 | Indices.push_back(((i + vi*e) >> 1)+e); |
11907 | } |
11908 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11909 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
11910 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11911 | } |
11912 | return SV; |
11913 | } |
11914 | case NEON::BI__builtin_neon_vqtbl1q_v: { |
11915 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), |
11916 | Ops, "vtbl1"); |
11917 | } |
11918 | case NEON::BI__builtin_neon_vqtbl2q_v: { |
11919 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), |
11920 | Ops, "vtbl2"); |
11921 | } |
11922 | case NEON::BI__builtin_neon_vqtbl3q_v: { |
11923 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), |
11924 | Ops, "vtbl3"); |
11925 | } |
11926 | case NEON::BI__builtin_neon_vqtbl4q_v: { |
11927 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), |
11928 | Ops, "vtbl4"); |
11929 | } |
11930 | case NEON::BI__builtin_neon_vqtbx1q_v: { |
11931 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), |
11932 | Ops, "vtbx1"); |
11933 | } |
11934 | case NEON::BI__builtin_neon_vqtbx2q_v: { |
11935 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), |
11936 | Ops, "vtbx2"); |
11937 | } |
11938 | case NEON::BI__builtin_neon_vqtbx3q_v: { |
11939 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), |
11940 | Ops, "vtbx3"); |
11941 | } |
11942 | case NEON::BI__builtin_neon_vqtbx4q_v: { |
11943 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), |
11944 | Ops, "vtbx4"); |
11945 | } |
11946 | case NEON::BI__builtin_neon_vsqadd_v: |
11947 | case NEON::BI__builtin_neon_vsqaddq_v: { |
11948 | Int = Intrinsic::aarch64_neon_usqadd; |
11949 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd"); |
11950 | } |
11951 | case NEON::BI__builtin_neon_vuqadd_v: |
11952 | case NEON::BI__builtin_neon_vuqaddq_v: { |
11953 | Int = Intrinsic::aarch64_neon_suqadd; |
11954 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd"); |
11955 | } |
11956 | } |
11957 | } |
11958 | |
11959 | Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID, |
11960 | const CallExpr *E) { |
11961 | assert((BuiltinID == BPF::BI__builtin_preserve_field_info || |
11962 |         BuiltinID == BPF::BI__builtin_btf_type_id || |
11963 |         BuiltinID == BPF::BI__builtin_preserve_type_info || |
11964 |         BuiltinID == BPF::BI__builtin_preserve_enum_value) && |
11965 |        "unexpected BPF builtin"); |
11966 | |
11967 | // A sequence number, injected into IR builtin functions, to |
11968 | // prevent CSE, since the only difference between the functions |
11969 | // may just be the debuginfo metadata. |
11970 | static uint32_t BuiltinSeqNum; |
11971 | |
11972 | switch (BuiltinID) { |
11973 | default: |
11974 | llvm_unreachable("Unexpected BPF builtin")::llvm::llvm_unreachable_internal("Unexpected BPF builtin", "clang/lib/CodeGen/CGBuiltin.cpp" , 11974); |
11975 | case BPF::BI__builtin_preserve_field_info: { |
11976 | const Expr *Arg = E->getArg(0); |
11977 | bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField; |
11978 | |
11979 | if (!getDebugInfo()) { |
11980 | CGM.Error(E->getExprLoc(), |
11981 | "using __builtin_preserve_field_info() without -g"); |
11982 | return IsBitField ? EmitLValue(Arg).getBitFieldPointer() |
11983 | : EmitLValue(Arg).getPointer(*this); |
11984 | } |
11985 | |
11986 | // Enable underlying preserve_*_access_index() generation. |
11987 | bool OldIsInPreservedAIRegion = IsInPreservedAIRegion; |
11988 | IsInPreservedAIRegion = true; |
11989 | Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer() |
11990 | : EmitLValue(Arg).getPointer(*this); |
11991 | IsInPreservedAIRegion = OldIsInPreservedAIRegion; |
11992 | |
11993 | ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11994 | Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue()); |
11995 | |
11996 | // Build the IR for the preserve_field_info intrinsic. |
11997 | llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration( |
11998 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info, |
11999 | {FieldAddr->getType()}); |
12000 | return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind}); |
12001 | } |
12002 | case BPF::BI__builtin_btf_type_id: |
12003 | case BPF::BI__builtin_preserve_type_info: { |
12004 | if (!getDebugInfo()) { |
12005 | CGM.Error(E->getExprLoc(), "using builtin function without -g"); |
12006 | return nullptr; |
12007 | } |
12008 | |
12009 | const Expr *Arg0 = E->getArg(0); |
12010 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
12011 | Arg0->getType(), Arg0->getExprLoc()); |
12012 | |
12013 | ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
12014 | Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); |
12015 | Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++); |
12016 | |
12017 | llvm::Function *FnDecl; |
12018 | if (BuiltinID == BPF::BI__builtin_btf_type_id) |
12019 | FnDecl = llvm::Intrinsic::getDeclaration( |
12020 | &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {}); |
12021 | else |
12022 | FnDecl = llvm::Intrinsic::getDeclaration( |
12023 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {}); |
12024 | CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue}); |
12025 | Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); |
12026 | return Fn; |
12027 | } |
12028 | case BPF::BI__builtin_preserve_enum_value: { |
12029 | if (!getDebugInfo()) { |
12030 | CGM.Error(E->getExprLoc(), "using builtin function without -g"); |
12031 | return nullptr; |
12032 | } |
12033 | |
12034 | const Expr *Arg0 = E->getArg(0); |
12035 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
12036 | Arg0->getType(), Arg0->getExprLoc()); |
12037 | |
12038 | // Find enumerator |
12039 | const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens()); |
12040 | const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr()); |
12041 | const auto *DR = cast<DeclRefExpr>(CE->getSubExpr()); |
12042 | const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl()); |
12043 | |
12044 | auto &InitVal = Enumerator->getInitVal(); |
12045 | std::string InitValStr; |
12046 | if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX)) |
12047 | InitValStr = std::to_string(InitVal.getSExtValue()); |
12048 | else |
12049 | InitValStr = std::to_string(InitVal.getZExtValue()); |
12050 | std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr; |
12051 | Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr); |
12052 | |
12053 | ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
12054 | Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); |
12055 | Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++); |
12056 | |
12057 | llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration( |
12058 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {}); |
12059 | CallInst *Fn = |
12060 | Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue}); |
12061 | Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); |
12062 | return Fn; |
12063 | } |
12064 | } |
12065 | } |
12066 | |
12067 | llvm::Value *CodeGenFunction:: |
12068 | BuildVector(ArrayRef<llvm::Value*> Ops) { |
12069 | assert((Ops.size() & (Ops.size() - 1)) == 0 && |
12070 |        "Not a power-of-two sized vector!"); |
12071 | bool AllConstants = true; |
12072 | for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) |
12073 | AllConstants &= isa<Constant>(Ops[i]); |
12074 | |
12075 | // If this is a constant vector, create a ConstantVector. |
12076 | if (AllConstants) { |
12077 | SmallVector<llvm::Constant*, 16> CstOps; |
12078 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
12079 | CstOps.push_back(cast<Constant>(Ops[i])); |
12080 | return llvm::ConstantVector::get(CstOps); |
12081 | } |
12082 | |
12083 | // Otherwise, insertelement the values to build the vector. |
12084 | Value *Result = llvm::UndefValue::get( |
12085 | llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size())); |
12086 | |
12087 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
12088 | Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i)); |
12089 | |
12090 | return Result; |
12091 | } |
12092 | |
12093 | // Convert the mask from an integer type to a vector of i1. |
12094 | static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask, |
12095 | unsigned NumElts) { |
12096 | |
12097 | auto *MaskTy = llvm::FixedVectorType::get( |
12098 | CGF.Builder.getInt1Ty(), |
12099 | cast<IntegerType>(Mask->getType())->getBitWidth()); |
12100 | Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy); |
12101 | |
12102 | // If we have less than 8 elements, then the starting mask was an i8 and |
12103 | // we need to extract down to the right number of elements. |
12104 | if (NumElts < 8) { |
12105 | int Indices[4]; |
12106 | for (unsigned i = 0; i != NumElts; ++i) |
12107 | Indices[i] = i; |
12108 | MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec, |
12109 | makeArrayRef(Indices, NumElts), |
12110 | "extract"); |
12111 | } |
12112 | return MaskVec; |
12113 | } |
12114 | |
12115 | static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
12116 | Align Alignment) { |
12117 | // Cast the pointer to the right type. |
12118 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
12119 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
12120 | |
12121 | Value *MaskVec = getMaskVecValue( |
12122 | CGF, Ops[2], |
12123 | cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements()); |
12124 | |
12125 | return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec); |
12126 | } |
12127 | |
12128 | static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
12129 | Align Alignment) { |
12130 | // Cast the pointer to the right type. |
12131 | llvm::Type *Ty = Ops[1]->getType(); |
12132 | Value *Ptr = |
12133 | CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
12134 | |
12135 | Value *MaskVec = getMaskVecValue( |
12136 | CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements()); |
12137 | |
12138 | return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]); |
12139 | } |
12140 | |
12141 | static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, |
12142 | ArrayRef<Value *> Ops) { |
12143 | auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType()); |
12144 | llvm::Type *PtrTy = ResultTy->getElementType(); |
12145 | |
12146 | // Cast the pointer to the element type. |
12147 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
12148 | llvm::PointerType::getUnqual(PtrTy)); |
12149 | |
12150 | Value *MaskVec = getMaskVecValue( |
12151 | CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements()); |
12152 | |
12153 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, |
12154 | ResultTy); |
12155 | return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] }); |
12156 | } |
12157 | |
12158 | static Value *EmitX86CompressExpand(CodeGenFunction &CGF, |
12159 | ArrayRef<Value *> Ops, |
12160 | bool IsCompress) { |
12161 | auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); |
12162 | |
12163 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); |
12164 | |
12165 | Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress |
12166 | : Intrinsic::x86_avx512_mask_expand; |
12167 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy); |
12168 | return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec }); |
12169 | } |
12170 | |
12171 | static Value *EmitX86CompressStore(CodeGenFunction &CGF, |
12172 | ArrayRef<Value *> Ops) { |
12173 | auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); |
12174 | llvm::Type *PtrTy = ResultTy->getElementType(); |
12175 | |
12176 | // Cast the pointer to the element type. |
12177 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
12178 | llvm::PointerType::getUnqual(PtrTy)); |
12179 | |
12180 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); |
12181 | |
12182 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, |
12183 | ResultTy); |
12184 | return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec }); |
12185 | } |
12186 | |
12187 | static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, |
12188 | ArrayRef<Value *> Ops, |
12189 | bool InvertLHS = false) { |
12190 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
12191 | Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts); |
12192 | Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts); |
12193 | |
12194 | if (InvertLHS) |
12195 | LHS = CGF.Builder.CreateNot(LHS); |
12196 | |
12197 | return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS), |
12198 | Ops[0]->getType()); |
12199 | } |
12200 | |
12201 | static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1, |
12202 | Value *Amt, bool IsRight) { |
12203 | llvm::Type *Ty = Op0->getType(); |
12204 | |
12205 | // The amount may be a scalar immediate, in which case create a splat vector. |
12206 | // Funnel shift amounts are treated as modulo, and the types are all |
12207 | // power-of-2, so we only care about the lowest log2 bits anyway. |
12208 | if (Amt->getType() != Ty) { |
12209 | unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements(); |
12210 | Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false); |
12211 | Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt); |
12212 | } |
12213 | |
12214 | unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl; |
12215 | Function *F = CGF.CGM.getIntrinsic(IID, Ty); |
12216 | return CGF.Builder.CreateCall(F, {Op0, Op1, Amt}); |
12217 | } |
12218 | |
12219 | static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
12220 | bool IsSigned) { |
12221 | Value *Op0 = Ops[0]; |
12222 | Value *Op1 = Ops[1]; |
12223 | llvm::Type *Ty = Op0->getType(); |
12224 | uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
12225 | |
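      | // The low three bits of the immediate encode the predicate; values 6 |
      | // and 7 are the trivial always-false / always-true comparisons. |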
12226 | CmpInst::Predicate Pred; |
12227 | switch (Imm) { |
12228 | case 0x0: |
12229 | Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; |
12230 | break; |
12231 | case 0x1: |
12232 | Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
12233 | break; |
12234 | case 0x2: |
12235 | Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; |
12236 | break; |
12237 | case 0x3: |
12238 | Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; |
12239 | break; |
12240 | case 0x4: |
12241 | Pred = ICmpInst::ICMP_EQ; |
12242 | break; |
12243 | case 0x5: |
12244 | Pred = ICmpInst::ICMP_NE; |
12245 | break; |
12246 | case 0x6: |
12247 | return llvm::Constant::getNullValue(Ty); // FALSE |
12248 | case 0x7: |
12249 | return llvm::Constant::getAllOnesValue(Ty); // TRUE |
12250 | default: |
12251 | llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate")::llvm::llvm_unreachable_internal("Unexpected XOP vpcom/vpcomu predicate" , "clang/lib/CodeGen/CGBuiltin.cpp", 12251); |
12252 | } |
12253 | |
12254 | Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1); |
12255 | Value *Res = CGF.Builder.CreateSExt(Cmp, Ty); |
12256 | return Res; |
12257 | } |
12258 | |
12259 | static Value *EmitX86Select(CodeGenFunction &CGF, |
12260 | Value *Mask, Value *Op0, Value *Op1) { |
12261 | |
12262 | // If the mask is all ones, just return the first argument. |
12263 | if (const auto *C = dyn_cast<Constant>(Mask)) |
12264 | if (C->isAllOnesValue()) |
12265 | return Op0; |
12266 | |
12267 | Mask = getMaskVecValue( |
12268 | CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements()); |
12269 | |
12270 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
12271 | } |
12272 | |
12273 | static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, |
12274 | Value *Mask, Value *Op0, Value *Op1) { |
12275 | // If the mask is all ones, just return the first argument. |
12276 | if (const auto *C = dyn_cast<Constant>(Mask)) |
12277 | if (C->isAllOnesValue()) |
12278 | return Op0; |
12279 | |
12280 | auto *MaskTy = llvm::FixedVectorType::get( |
12281 | CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth()); |
12282 | Mask = CGF.Builder.CreateBitCast(Mask, MaskTy); |
12283 | Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0); |
12284 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
12285 | } |
12286 | |
12287 | static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp, |
12288 | unsigned NumElts, Value *MaskIn) { |
12289 | if (MaskIn) { |
12290 | const auto *C = dyn_cast<Constant>(MaskIn); |
12291 | if (!C || !C->isAllOnesValue()) |
12292 | Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts)); |
12293 | } |
12294 | |
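      | // Mask results are at least 8 bits wide: pad a narrow compare up to 8 |
      | // elements, filling the extra lanes from the all-zero second operand, |
      | // before bitcasting the result to an integer mask. |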
12295 | if (NumElts < 8) { |
12296 | int Indices[8]; |
12297 | for (unsigned i = 0; i != NumElts; ++i) |
12298 | Indices[i] = i; |
12299 | for (unsigned i = NumElts; i != 8; ++i) |
12300 | Indices[i] = i % NumElts + NumElts; |
12301 | Cmp = CGF.Builder.CreateShuffleVector( |
12302 | Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); |
12303 | } |
12304 | |
12305 | return CGF.Builder.CreateBitCast(Cmp, |
12306 | IntegerType::get(CGF.getLLVMContext(), |
12307 | std::max(NumElts, 8U))); |
12308 | } |
12309 | |
12310 | static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, |
12311 | bool Signed, ArrayRef<Value *> Ops) { |
12312 | assert((Ops.size() == 2 || Ops.size() == 4) && |
12313 |        "Unexpected number of arguments"); |
12314 | unsigned NumElts = |
12315 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12316 | Value *Cmp; |
12317 | |
12318 | if (CC == 3) { |
12319 | Cmp = Constant::getNullValue( |
12320 | llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
12321 | } else if (CC == 7) { |
12322 | Cmp = Constant::getAllOnesValue( |
12323 | llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
12324 | } else { |
12325 | ICmpInst::Predicate Pred; |
12326 | switch (CC) { |
12327 | default: llvm_unreachable("Unknown condition code")::llvm::llvm_unreachable_internal("Unknown condition code", "clang/lib/CodeGen/CGBuiltin.cpp" , 12327); |
12328 | case 0: Pred = ICmpInst::ICMP_EQ; break; |
12329 | case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; |
12330 | case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; |
12331 | case 4: Pred = ICmpInst::ICMP_NE; break; |
12332 | case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; |
12333 | case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; |
12334 | } |
12335 | Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); |
12336 | } |
12337 | |
12338 | Value *MaskIn = nullptr; |
12339 | if (Ops.size() == 4) |
12340 | MaskIn = Ops[3]; |
12341 | |
12342 | return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn); |
12343 | } |
12344 | |
12345 | static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { |
12346 | Value *Zero = Constant::getNullValue(In->getType()); |
12347 | return EmitX86MaskedCompare(CGF, 1, true, { In, Zero }); |
12348 | } |
12349 | |
12350 | static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E, |
12351 | ArrayRef<Value *> Ops, bool IsSigned) { |
12352 | unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue(); |
12353 | llvm::Type *Ty = Ops[1]->getType(); |
12354 | |
12355 | Value *Res; |
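      | // 4 is _MM_FROUND_CUR_DIRECTION, i.e. no explicit rounding; anything |
      | // else has to go through the rounding-aware AVX-512 intrinsic. |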
12356 | if (Rnd != 4) { |
12357 | Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round |
12358 | : Intrinsic::x86_avx512_uitofp_round; |
12359 | Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() }); |
12360 | Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] }); |
12361 | } else { |
12362 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12363 | Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty) |
12364 | : CGF.Builder.CreateUIToFP(Ops[0], Ty); |
12365 | } |
12366 | |
12367 | return EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
12368 | } |
12369 | |
12370 | // Lowers X86 FMA intrinsics to IR. |
12371 | static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
12372 | ArrayRef<Value *> Ops, unsigned BuiltinID, |
12373 | bool IsAddSub) { |
12374 | |
12375 | bool Subtract = false; |
12376 | Intrinsic::ID IID = Intrinsic::not_intrinsic; |
12377 | switch (BuiltinID) { |
12378 | default: break; |
12379 | case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: |
12380 | Subtract = true; |
12381 | LLVM_FALLTHROUGH; |
12382 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask: |
12383 | case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: |
12384 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: |
12385 | IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512; |
12386 | break; |
12387 | case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
12388 | Subtract = true; |
12389 | LLVM_FALLTHROUGH; |
12390 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: |
12391 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
12392 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
12393 | IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512; |
12394 | break; |
12395 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
12396 | Subtract = true; |
12397 | LLVM_FALLTHROUGH; |
12398 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
12399 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
12400 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
12401 | IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; |
12402 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12403 | Subtract = true; |
12404 | LLVM_FALLTHROUGH; |
12405 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
12406 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12407 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12408 | IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; |
12409 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12410 | Subtract = true; |
12411 | LLVM_FALLTHROUGH; |
12412 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12413 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12414 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12415 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512; |
12416 | break; |
12417 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12418 | Subtract = true; |
12419 | LLVM_FALLTHROUGH; |
12420 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12421 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12422 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12423 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512; |
12424 | break; |
12425 | } |
12426 | |
12427 | Value *A = Ops[0]; |
12428 | Value *B = Ops[1]; |
12429 | Value *C = Ops[2]; |
12430 | |
12431 | if (Subtract) |
12432 | C = CGF.Builder.CreateFNeg(C); |
12433 | |
12434 | Value *Res; |
12435 | |
12436 | // Use the target intrinsic unless Rnd is 4 (_MM_FROUND_CUR_DIRECTION, no rounding) and this is not an add/sub variant. |
12437 | if (IID != Intrinsic::not_intrinsic && |
12438 | (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 || |
12439 | IsAddSub)) { |
12440 | Function *Intr = CGF.CGM.getIntrinsic(IID); |
12441 | Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() }); |
12442 | } else { |
12443 | llvm::Type *Ty = A->getType(); |
12444 | Function *FMA; |
12445 | if (CGF.Builder.getIsFPConstrained()) { |
12446 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12447 | FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty); |
12448 | Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C}); |
12449 | } else { |
12450 | FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty); |
12451 | Res = CGF.Builder.CreateCall(FMA, {A, B, C}); |
12452 | } |
12453 | } |
12454 | |
12455 | // Handle any required masking. |
12456 | Value *MaskFalseVal = nullptr; |
12457 | switch (BuiltinID) { |
12458 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask: |
12459 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
12460 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
12461 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: |
12462 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12463 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12464 | MaskFalseVal = Ops[0]; |
12465 | break; |
12466 | case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: |
12467 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
12468 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12469 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
12470 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12471 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12472 | MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); |
12473 | break; |
12474 | case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: |
12475 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: |
12476 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
12477 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
12478 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12479 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12480 | case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
12481 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
12482 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12483 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12484 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12485 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12486 | MaskFalseVal = Ops[2]; |
12487 | break; |
12488 | } |
12489 | |
12490 | if (MaskFalseVal) |
12491 | return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal); |
12492 | |
12493 | return Res; |
12494 | } |
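// A minimal scalar model of the per-lane semantics the helper above emits;
// maskedFmaLane is a hypothetical illustration (assuming <cmath> for
// std::fma), not part of the original lowering.
static double maskedFmaLane(bool MaskBit, double A, double B, double C,
                            double Fallback, bool Subtract) {
  // The *_mask3 fmsub variants negate the accumulator before the fused op.
  double Acc = Subtract ? -C : C;
  double Fused = std::fma(A, B, Acc);
  // EmitX86Select keeps the fused result where the mask bit is set and the
  // pass-through value (Ops[0], zero, or Ops[2]) elsewhere.
  return MaskBit ? Fused : Fallback;
}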
12495 | |
12496 | static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
12497 | MutableArrayRef<Value *> Ops, Value *Upper, |
12498 | bool ZeroMask = false, unsigned PTIdx = 0, |
12499 | bool NegAcc = false) { |
12500 | unsigned Rnd = 4; |
12501 | if (Ops.size() > 4) |
12502 | Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
12503 | |
12504 | if (NegAcc) |
12505 | Ops[2] = CGF.Builder.CreateFNeg(Ops[2]); |
12506 | |
12507 | Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
12508 | Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
12509 | Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
12510 | Value *Res; |
12511 | if (Rnd != 4) { |
12512 | Intrinsic::ID IID; |
12513 | |
12514 | switch (Ops[0]->getType()->getPrimitiveSizeInBits()) { |
12515 | case 16: |
12516 | IID = Intrinsic::x86_avx512fp16_vfmadd_f16; |
12517 | break; |
12518 | case 32: |
12519 | IID = Intrinsic::x86_avx512_vfmadd_f32; |
12520 | break; |
12521 | case 64: |
12522 | IID = Intrinsic::x86_avx512_vfmadd_f64; |
12523 | break; |
12524 | default: |
12525 | llvm_unreachable("Unexpected size"); |
12526 | } |
12527 | Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
12528 | {Ops[0], Ops[1], Ops[2], Ops[4]}); |
12529 | } else if (CGF.Builder.getIsFPConstrained()) { |
12530 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12531 | Function *FMA = CGF.CGM.getIntrinsic( |
12532 | Intrinsic::experimental_constrained_fma, Ops[0]->getType()); |
12533 | Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3)); |
12534 | } else { |
12535 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType()); |
12536 | Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3)); |
12537 | } |
12538 | // If we have more than 3 arguments, we need to do masking. |
12539 | if (Ops.size() > 3) { |
12540 | Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) |
12541 | : Ops[PTIdx]; |
12542 | |
12543 | // If we negated the accumulator and it is the PassThru value, we need to |
12544 | // bypass the negate. Conveniently, Upper should be the same thing in this |
12545 | // case. |
12546 | if (NegAcc && PTIdx == 2) |
12547 | PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0); |
12548 | |
12549 | Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru); |
12550 | } |
12551 | return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0); |
12552 | } |
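// A sketch of the lane-0 behavior of EmitScalarFMAExpr: only element 0 is
// fused and the remaining lanes come from Upper. scalarFmaLane0 is a
// hypothetical four-lane model for illustration (assuming <array> and
// <cmath>), not the real vector codegen.
static std::array<float, 4> scalarFmaLane0(std::array<float, 4> A,
                                           std::array<float, 4> B,
                                           std::array<float, 4> C,
                                           std::array<float, 4> Upper) {
  std::array<float, 4> Res = Upper;    // CreateInsertElement into Upper
  Res[0] = std::fma(A[0], B[0], C[0]); // fma on the extracted element 0
  return Res;
}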
12553 | |
12554 | static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned, |
12555 | ArrayRef<Value *> Ops) { |
12556 | llvm::Type *Ty = Ops[0]->getType(); |
12557 | // Arguments have a vXi32 type so cast to vXi64. |
12558 | Ty = llvm::FixedVectorType::get(CGF.Int64Ty, |
12559 | Ty->getPrimitiveSizeInBits() / 64); |
12560 | Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty); |
12561 | Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty); |
12562 | |
12563 | if (IsSigned) { |
12564 | // Shift left then arithmetic shift right. |
12565 | Constant *ShiftAmt = ConstantInt::get(Ty, 32); |
12566 | LHS = CGF.Builder.CreateShl(LHS, ShiftAmt); |
12567 | LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt); |
12568 | RHS = CGF.Builder.CreateShl(RHS, ShiftAmt); |
12569 | RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt); |
12570 | } else { |
12571 | // Clear the upper bits. |
12572 | Constant *Mask = ConstantInt::get(Ty, 0xffffffff); |
12573 | LHS = CGF.Builder.CreateAnd(LHS, Mask); |
12574 | RHS = CGF.Builder.CreateAnd(RHS, Mask); |
12575 | } |
12576 | |
12577 | return CGF.Builder.CreateMul(LHS, RHS); |
12578 | } |
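// A scalar model of one vXi64 lane of the pmuldq/pmuludq lowering above;
// muldqLane is a hypothetical helper for illustration (assuming <cstdint>).
static int64_t muldqLane(uint64_t LHS, uint64_t RHS, bool IsSigned) {
  if (IsSigned) {
    // Shl followed by AShr sign-extends the low 32 bits of each lane.
    int64_t A = (int64_t)(LHS << 32) >> 32;
    int64_t B = (int64_t)(RHS << 32) >> 32;
    return A * B;
  }
  // The And with 0xffffffff zero-extends the low 32 bits of each lane.
  return (int64_t)((LHS & 0xffffffff) * (RHS & 0xffffffff));
}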
12579 | |
12580 | // Emit a masked pternlog intrinsic. This only exists because the header has to |
12581 | // use a macro and we aren't able to pass the input argument to a pternlog |
12582 | // builtin and a select builtin without evaluating it twice. |
12583 | static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask, |
12584 | ArrayRef<Value *> Ops) { |
12585 | llvm::Type *Ty = Ops[0]->getType(); |
12586 | |
12587 | unsigned VecWidth = Ty->getPrimitiveSizeInBits(); |
12588 | unsigned EltWidth = Ty->getScalarSizeInBits(); |
12589 | Intrinsic::ID IID; |
12590 | if (VecWidth == 128 && EltWidth == 32) |
12591 | IID = Intrinsic::x86_avx512_pternlog_d_128; |
12592 | else if (VecWidth == 256 && EltWidth == 32) |
12593 | IID = Intrinsic::x86_avx512_pternlog_d_256; |
12594 | else if (VecWidth == 512 && EltWidth == 32) |
12595 | IID = Intrinsic::x86_avx512_pternlog_d_512; |
12596 | else if (VecWidth == 128 && EltWidth == 64) |
12597 | IID = Intrinsic::x86_avx512_pternlog_q_128; |
12598 | else if (VecWidth == 256 && EltWidth == 64) |
12599 | IID = Intrinsic::x86_avx512_pternlog_q_256; |
12600 | else if (VecWidth == 512 && EltWidth == 64) |
12601 | IID = Intrinsic::x86_avx512_pternlog_q_512; |
12602 | else |
12603 | llvm_unreachable("Unexpected intrinsic"); |
12604 | |
12605 | Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
12606 | Ops.drop_back()); |
12607 | Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; |
12608 | return EmitX86Select(CGF, Ops[4], Ternlog, PassThru); |
12609 | } |
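// A bitwise model of the pternlog operation: bit i of the result is bit
// ((a<<2)|(b<<1)|c) of the 8-bit immediate, where a, b, c are bit i of the
// three sources. ternlogWord is a hypothetical illustration (assuming
// <cstdint>), not part of the lowering.
static uint64_t ternlogWord(uint64_t A, uint64_t B, uint64_t C, uint8_t Imm) {
  uint64_t Res = 0;
  for (unsigned i = 0; i != 64; ++i) {
    unsigned Idx = (unsigned)((((A >> i) & 1) << 2) | (((B >> i) & 1) << 1) |
                              ((C >> i) & 1));
    Res |= ((uint64_t)((Imm >> Idx) & 1)) << i;
  }
  return Res;
}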
12610 | |
12611 | static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, |
12612 | llvm::Type *DstTy) { |
12613 | unsigned NumberOfElements = |
12614 | cast<llvm::FixedVectorType>(DstTy)->getNumElements(); |
12615 | Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); |
12616 | return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); |
12617 | } |
12618 | |
12619 | Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { |
12620 | const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); |
12621 | StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); |
12622 | return EmitX86CpuIs(CPUStr); |
12623 | } |
12624 | |
12625 | // Convert F16 halves to floats. |
12626 | static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF, |
12627 | ArrayRef<Value *> Ops, |
12628 | llvm::Type *DstTy) { |
12629 | assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) && |
12630 | "Unknown cvtph2ps intrinsic"); |
12631 | |
12632 | // If the SAE intrinsic doesn't use default rounding then we can't upgrade. |
12633 | if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) { |
12634 | Function *F = |
12635 | CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512); |
12636 | return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]}); |
12637 | } |
12638 | |
12639 | unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements(); |
12640 | Value *Src = Ops[0]; |
12641 | |
12642 | // Extract the subvector. |
12643 | if (NumDstElts != |
12644 | cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) { |
12645 | assert(NumDstElts == 4 && "Unexpected vector size"); |
12646 | Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3}); |
12647 | } |
12648 | |
12649 | // Bitcast from vXi16 to vXf16. |
12650 | auto *HalfTy = llvm::FixedVectorType::get( |
12651 | llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts); |
12652 | Src = CGF.Builder.CreateBitCast(Src, HalfTy); |
12653 | |
12654 | // Perform the fp-extension. |
12655 | Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps"); |
12656 | |
12657 | if (Ops.size() >= 3) |
12658 | Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
12659 | return Res; |
12660 | } |
12661 | |
12662 | // Convert a BF16 to a float. |
12663 | static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF, |
12664 | const CallExpr *E, |
12665 | ArrayRef<Value *> Ops) { |
12666 | llvm::Type *Int32Ty = CGF.Builder.getInt32Ty(); |
12667 | Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty); |
12668 | Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16); |
12669 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
12670 | Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType); |
12671 | return BitCast; |
12672 | } |
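// A scalar model of the BF16-to-float conversion above: a bfloat16 holds the
// top 16 bits of an IEEE binary32, so zero-extending and shifting left by 16
// reproduces the float's bit pattern. bf16ToFloat is a hypothetical helper
// for illustration (assuming <cstdint> and <cstring>).
static float bf16ToFloat(uint16_t BF16Bits) {
  uint32_t Bits = (uint32_t)BF16Bits << 16; // the ZExt and Shl 16
  float F;
  std::memcpy(&F, &Bits, sizeof(F));        // the BitCast to float
  return F;
}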
12673 | |
12674 | Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { |
12675 | |
12676 | llvm::Type *Int32Ty = Builder.getInt32Ty(); |
12677 | |
12678 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
12679 | // filled in: |
12680 | // unsigned int __cpu_vendor; |
12681 | // unsigned int __cpu_type; |
12682 | // unsigned int __cpu_subtype; |
12683 | // unsigned int __cpu_features[1]; |
12684 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
12685 | llvm::ArrayType::get(Int32Ty, 1)); |
12686 | |
12687 | // Grab the global __cpu_model. |
12688 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
12689 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
12690 | |
12691 | // Calculate the index needed to access the correct field based on the |
12692 | // range. Also adjust the expected value. |
12693 | unsigned Index; |
12694 | unsigned Value; |
12695 | std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) |
12696 | #define X86_VENDOR(ENUM, STRING) \ |
12697 | .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12698 | #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \ |
12699 | .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12700 | #define X86_CPU_TYPE(ENUM, STR) \ |
12701 | .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12702 | #define X86_CPU_SUBTYPE(ENUM, STR) \ |
12703 | .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12704 | #include "llvm/Support/X86TargetParser.def" |
12705 | .Default({0, 0}); |
12706 | assert(Value != 0 && "Invalid CPUStr passed to CpuIs"); |
12707 | |
12708 | // Grab the appropriate field from __cpu_model. |
12709 | llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), |
12710 | ConstantInt::get(Int32Ty, Index)}; |
12711 | llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs); |
12712 | CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue, |
12713 | CharUnits::fromQuantity(4)); |
12714 | |
12715 | // Check the value of the field against the requested value. |
12716 | return Builder.CreateICmpEQ(CpuValue, |
12717 | llvm::ConstantInt::get(Int32Ty, Value)); |
12718 | } |
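// A C-level model of the IR emitted above; CpuModelForIllustration mirrors
// the compiler-rt/libgcc layout and cpuIsField is a hypothetical helper,
// shown only to make the Index/Value scheme concrete.
struct CpuModelForIllustration {
  unsigned Vendor;  // Index 0
  unsigned Type;    // Index 1
  unsigned Subtype; // Index 2
  unsigned Features[1];
};
static bool cpuIsField(const CpuModelForIllustration &M, unsigned Index,
                       unsigned Value) {
  unsigned Field = Index == 0 ? M.Vendor : Index == 1 ? M.Type : M.Subtype;
  return Field == Value; // the final ICmpEQ
}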
12719 | |
12720 | Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { |
12721 | const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); |
12722 | StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); |
12723 | return EmitX86CpuSupports(FeatureStr); |
12724 | } |
12725 | |
12726 | Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { |
12727 | return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs)); |
12728 | } |
12729 | |
12730 | llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) { |
12731 | uint32_t Features1 = Lo_32(FeaturesMask); |
12732 | uint32_t Features2 = Hi_32(FeaturesMask); |
12733 | |
12734 | Value *Result = Builder.getTrue(); |
12735 | |
12736 | if (Features1 != 0) { |
12737 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
12738 | // filled in: |
12739 | // unsigned int __cpu_vendor; |
12740 | // unsigned int __cpu_type; |
12741 | // unsigned int __cpu_subtype; |
12742 | // unsigned int __cpu_features[1]; |
12743 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
12744 | llvm::ArrayType::get(Int32Ty, 1)); |
12745 | |
12746 | // Grab the global __cpu_model. |
12747 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
12748 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
12749 | |
12750 | // Grab the first (0th) element from the field __cpu_features off of the |
12751 | // global in the struct STy. |
12752 | Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3), |
12753 | Builder.getInt32(0)}; |
12754 | Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs); |
12755 | Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures, |
12756 | CharUnits::fromQuantity(4)); |
12757 | |
12758 | // Check the value of the bit corresponding to the feature requested. |
12759 | Value *Mask = Builder.getInt32(Features1); |
12760 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
12761 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
12762 | Result = Builder.CreateAnd(Result, Cmp); |
12763 | } |
12764 | |
12765 | if (Features2 != 0) { |
12766 | llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty, |
12767 | "__cpu_features2"); |
12768 | cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true); |
12769 | |
12770 | Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2, |
12771 | CharUnits::fromQuantity(4)); |
12772 | |
12773 | // Check the value of the bit corresponding to the feature requested. |
12774 | Value *Mask = Builder.getInt32(Features2); |
12775 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
12776 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
12777 | Result = Builder.CreateAnd(Result, Cmp); |
12778 | } |
12779 | |
12780 | return Result; |
12781 | } |
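// A scalar model of the feature test above: every requested bit must be set,
// with the low 32 bits checked against __cpu_model.__cpu_features[0] and the
// high 32 bits against __cpu_features2. cpuSupportsWords is a hypothetical
// helper for illustration (assuming <cstdint>).
static bool cpuSupportsWords(uint32_t FeaturesWord0, uint32_t FeaturesWord2,
                             uint64_t FeaturesMask) {
  uint32_t Mask1 = (uint32_t)FeaturesMask;         // Lo_32
  uint32_t Mask2 = (uint32_t)(FeaturesMask >> 32); // Hi_32
  bool Result = true;                              // Builder.getTrue()
  if (Mask1 != 0)
    Result = Result && (FeaturesWord0 & Mask1) == Mask1;
  if (Mask2 != 0)
    Result = Result && (FeaturesWord2 & Mask2) == Mask2;
  return Result;
}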
12782 | |
12783 | Value *CodeGenFunction::EmitX86CpuInit() { |
12784 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, |
12785 | /*Variadic*/ false); |
12786 | llvm::FunctionCallee Func = |
12787 | CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init"); |
12788 | cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); |
12789 | cast<llvm::GlobalValue>(Func.getCallee()) |
12790 | ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
12791 | return Builder.CreateCall(Func); |
12792 | } |
12793 | |
12794 | Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, |
12795 | const CallExpr *E) { |
12796 | if (BuiltinID == X86::BI__builtin_cpu_is) |
12797 | return EmitX86CpuIs(E); |
12798 | if (BuiltinID == X86::BI__builtin_cpu_supports) |
12799 | return EmitX86CpuSupports(E); |
12800 | if (BuiltinID == X86::BI__builtin_cpu_init) |
12801 | return EmitX86CpuInit(); |
12802 | |
12803 | // Handle MSVC intrinsics before argument evaluation to prevent double |
12804 | // evaluation. |
12805 | if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID)) |
12806 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
12807 | |
12808 | SmallVector<Value*, 4> Ops; |
12809 | bool IsMaskFCmp = false; |
12810 | bool IsConjFMA = false; |
12811 | |
12812 | // Find out if any arguments are required to be integer constant expressions. |
12813 | unsigned ICEArguments = 0; |
12814 | ASTContext::GetBuiltinTypeError Error; |
12815 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
12816 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
12817 | |
12818 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
12819 | // If this is a normal argument, just emit it as a scalar. |
12820 | if ((ICEArguments & (1 << i)) == 0) { |
12821 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
12822 | continue; |
12823 | } |
12824 | |
12825 | // If this is required to be a constant, constant fold it so that we know |
12826 | // that the generated intrinsic gets a ConstantInt. |
12827 | Ops.push_back(llvm::ConstantInt::get( |
12828 | getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
12829 | } |
12830 | |
12831 | // These exist so that the builtin that takes an immediate can be bounds |
12832 | // checked by clang to avoid passing bad immediates to the backend. Since |
12833 | // AVX has a larger immediate than SSE, we would need separate builtins to |
12834 | // do the different bounds checking. Rather than create a clang-specific |
12835 | // SSE-only builtin, this implements eight separate builtins to match the |
12836 | // gcc implementation. |
12837 | auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { |
12838 | Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm)); |
12839 | llvm::Function *F = CGM.getIntrinsic(ID); |
12840 | return Builder.CreateCall(F, Ops); |
12841 | }; |
12842 | |
12843 | // For the vector forms of FP comparisons, translate the builtins directly to |
12844 | // IR. |
12845 | // TODO: The builtins could be removed if the SSE header files used vector |
12846 | // extension comparisons directly (vector ordered/unordered may need |
12847 | // additional support via __builtin_isnan()). |
12848 | auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred, |
12849 | bool IsSignaling) { |
12850 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
12851 | Value *Cmp; |
12852 | if (IsSignaling) |
12853 | Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); |
12854 | else |
12855 | Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
12856 | llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); |
12857 | llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy); |
12858 | Value *Sext = Builder.CreateSExt(Cmp, IntVecTy); |
12859 | return Builder.CreateBitCast(Sext, FPVecTy); |
12860 | }; |
12861 | |
12862 | switch (BuiltinID) { |
12863 | default: return nullptr; |
12864 | case X86::BI_mm_prefetch: { |
12865 | Value *Address = Ops[0]; |
12866 | ConstantInt *C = cast<ConstantInt>(Ops[1]); |
12867 | Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1); |
12868 | Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3); |
12869 | Value *Data = ConstantInt::get(Int32Ty, 1); |
12870 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
12871 | return Builder.CreateCall(F, {Address, RW, Locality, Data}); |
12872 | } |
12873 | case X86::BI_mm_clflush: { |
12874 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush), |
12875 | Ops[0]); |
12876 | } |
12877 | case X86::BI_mm_lfence: { |
12878 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence)); |
12879 | } |
12880 | case X86::BI_mm_mfence: { |
12881 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence)); |
12882 | } |
12883 | case X86::BI_mm_sfence: { |
12884 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence)); |
12885 | } |
12886 | case X86::BI_mm_pause: { |
12887 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause)); |
12888 | } |
12889 | case X86::BI__rdtsc: { |
12890 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc)); |
12891 | } |
12892 | case X86::BI__builtin_ia32_rdtscp: { |
12893 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp)); |
12894 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
12895 | Ops[0]); |
12896 | return Builder.CreateExtractValue(Call, 0); |
12897 | } |
12898 | case X86::BI__builtin_ia32_lzcnt_u16: |
12899 | case X86::BI__builtin_ia32_lzcnt_u32: |
12900 | case X86::BI__builtin_ia32_lzcnt_u64: { |
12901 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
12902 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
12903 | } |
12904 | case X86::BI__builtin_ia32_tzcnt_u16: |
12905 | case X86::BI__builtin_ia32_tzcnt_u32: |
12906 | case X86::BI__builtin_ia32_tzcnt_u64: { |
12907 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType()); |
12908 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
12909 | } |
12910 | case X86::BI__builtin_ia32_undef128: |
12911 | case X86::BI__builtin_ia32_undef256: |
12912 | case X86::BI__builtin_ia32_undef512: |
12913 | // The x86 definition of "undef" is not the same as the LLVM definition |
12914 | // (PR32176). We leave optimizing away an unnecessary zero constant to the |
12915 | // IR optimizer and backend. |
12916 | // TODO: If we had a "freeze" IR instruction to generate a fixed undef |
12917 | // value, we should use that here instead of a zero. |
12918 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
12919 | case X86::BI__builtin_ia32_vec_init_v8qi: |
12920 | case X86::BI__builtin_ia32_vec_init_v4hi: |
12921 | case X86::BI__builtin_ia32_vec_init_v2si: |
12922 | return Builder.CreateBitCast(BuildVector(Ops), |
12923 | llvm::Type::getX86_MMXTy(getLLVMContext())); |
12924 | case X86::BI__builtin_ia32_vec_ext_v2si: |
12925 | case X86::BI__builtin_ia32_vec_ext_v16qi: |
12926 | case X86::BI__builtin_ia32_vec_ext_v8hi: |
12927 | case X86::BI__builtin_ia32_vec_ext_v4si: |
12928 | case X86::BI__builtin_ia32_vec_ext_v4sf: |
12929 | case X86::BI__builtin_ia32_vec_ext_v2di: |
12930 | case X86::BI__builtin_ia32_vec_ext_v32qi: |
12931 | case X86::BI__builtin_ia32_vec_ext_v16hi: |
12932 | case X86::BI__builtin_ia32_vec_ext_v8si: |
12933 | case X86::BI__builtin_ia32_vec_ext_v4di: { |
12934 | unsigned NumElts = |
12935 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12936 | uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
12937 | Index &= NumElts - 1; |
12938 | // These builtins exist so we can ensure the index is an ICE and in range. |
12939 | // Otherwise we could just do this in the header file. |
12940 | return Builder.CreateExtractElement(Ops[0], Index); |
12941 | } |
12942 | case X86::BI__builtin_ia32_vec_set_v16qi: |
12943 | case X86::BI__builtin_ia32_vec_set_v8hi: |
12944 | case X86::BI__builtin_ia32_vec_set_v4si: |
12945 | case X86::BI__builtin_ia32_vec_set_v2di: |
12946 | case X86::BI__builtin_ia32_vec_set_v32qi: |
12947 | case X86::BI__builtin_ia32_vec_set_v16hi: |
12948 | case X86::BI__builtin_ia32_vec_set_v8si: |
12949 | case X86::BI__builtin_ia32_vec_set_v4di: { |
12950 | unsigned NumElts = |
12951 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12952 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
12953 | Index &= NumElts - 1; |
12954 | // These builtins exist so we can ensure the index is an ICE and in range. |
12955 | // Otherwise we could just do this in the header file. |
12956 | return Builder.CreateInsertElement(Ops[0], Ops[1], Index); |
12957 | } |
12958 | case X86::BI_mm_setcsr: |
12959 | case X86::BI__builtin_ia32_ldmxcsr: { |
12960 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
12961 | Builder.CreateStore(Ops[0], Tmp); |
12962 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), |
12963 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
12964 | } |
12965 | case X86::BI_mm_getcsr: |
12966 | case X86::BI__builtin_ia32_stmxcsr: { |
12967 | Address Tmp = CreateMemTemp(E->getType()); |
12968 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), |
12969 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
12970 | return Builder.CreateLoad(Tmp, "stmxcsr"); |
12971 | } |
12972 | case X86::BI__builtin_ia32_xsave: |
12973 | case X86::BI__builtin_ia32_xsave64: |
12974 | case X86::BI__builtin_ia32_xrstor: |
12975 | case X86::BI__builtin_ia32_xrstor64: |
12976 | case X86::BI__builtin_ia32_xsaveopt: |
12977 | case X86::BI__builtin_ia32_xsaveopt64: |
12978 | case X86::BI__builtin_ia32_xrstors: |
12979 | case X86::BI__builtin_ia32_xrstors64: |
12980 | case X86::BI__builtin_ia32_xsavec: |
12981 | case X86::BI__builtin_ia32_xsavec64: |
12982 | case X86::BI__builtin_ia32_xsaves: |
12983 | case X86::BI__builtin_ia32_xsaves64: |
12984 | case X86::BI__builtin_ia32_xsetbv: |
12985 | case X86::BI_xsetbv: { |
12986 | Intrinsic::ID ID; |
12987 | #define INTRINSIC_X86_XSAVE_ID(NAME) \ |
12988 | case X86::BI__builtin_ia32_##NAME: \ |
12989 | ID = Intrinsic::x86_##NAME; \ |
12990 | break |
12991 | switch (BuiltinID) { |
12992 | default: llvm_unreachable("Unsupported intrinsic!"); |
12993 | INTRINSIC_X86_XSAVE_ID(xsave); |
12994 | INTRINSIC_X86_XSAVE_ID(xsave64); |
12995 | INTRINSIC_X86_XSAVE_ID(xrstor); |
12996 | INTRINSIC_X86_XSAVE_ID(xrstor64); |
12997 | INTRINSIC_X86_XSAVE_ID(xsaveopt); |
12998 | INTRINSIC_X86_XSAVE_ID(xsaveopt64); |
12999 | INTRINSIC_X86_XSAVE_ID(xrstors); |
13000 | INTRINSIC_X86_XSAVE_ID(xrstors64); |
13001 | INTRINSIC_X86_XSAVE_ID(xsavec); |
13002 | INTRINSIC_X86_XSAVE_ID(xsavec64); |
13003 | INTRINSIC_X86_XSAVE_ID(xsaves); |
13004 | INTRINSIC_X86_XSAVE_ID(xsaves64); |
13005 | INTRINSIC_X86_XSAVE_ID(xsetbv); |
13006 | case X86::BI_xsetbv: |
13007 | ID = Intrinsic::x86_xsetbv; |
13008 | break; |
13009 | } |
13010 | #undef INTRINSIC_X86_XSAVE_ID |
13011 | Value *Mhi = Builder.CreateTrunc( |
13012 | Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); |
13013 | Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty); |
13014 | Ops[1] = Mhi; |
13015 | Ops.push_back(Mlo); |
13016 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
13017 | } |
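// A sketch of the mask split done for the XSAVE-family builtins: the 64-bit
// feature mask becomes the EDX:EAX pair the underlying instructions expect.
// splitXsaveMask is a hypothetical helper for illustration (assuming
// <cstdint> and <utility>).
static std::pair<uint32_t, uint32_t> splitXsaveMask(uint64_t Mask) {
  uint32_t Mhi = (uint32_t)(Mask >> 32); // LShr 32 + Trunc, replaces Ops[1]
  uint32_t Mlo = (uint32_t)Mask;         // Trunc, appended as the last operand
  return {Mhi, Mlo};
}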
13018 | case X86::BI__builtin_ia32_xgetbv: |
13019 | case X86::BI_xgetbv: |
13020 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops); |
13021 | case X86::BI__builtin_ia32_storedqudi128_mask: |
13022 | case X86::BI__builtin_ia32_storedqusi128_mask: |
13023 | case X86::BI__builtin_ia32_storedquhi128_mask: |
13024 | case X86::BI__builtin_ia32_storedquqi128_mask: |
13025 | case X86::BI__builtin_ia32_storeupd128_mask: |
13026 | case X86::BI__builtin_ia32_storeups128_mask: |
13027 | case X86::BI__builtin_ia32_storedqudi256_mask: |
13028 | case X86::BI__builtin_ia32_storedqusi256_mask: |
13029 | case X86::BI__builtin_ia32_storedquhi256_mask: |
13030 | case X86::BI__builtin_ia32_storedquqi256_mask: |
13031 | case X86::BI__builtin_ia32_storeupd256_mask: |
13032 | case X86::BI__builtin_ia32_storeups256_mask: |
13033 | case X86::BI__builtin_ia32_storedqudi512_mask: |
13034 | case X86::BI__builtin_ia32_storedqusi512_mask: |
13035 | case X86::BI__builtin_ia32_storedquhi512_mask: |
13036 | case X86::BI__builtin_ia32_storedquqi512_mask: |
13037 | case X86::BI__builtin_ia32_storeupd512_mask: |
13038 | case X86::BI__builtin_ia32_storeups512_mask: |
13039 | return EmitX86MaskedStore(*this, Ops, Align(1)); |
13040 | |
13041 | case X86::BI__builtin_ia32_storesh128_mask: |
13042 | case X86::BI__builtin_ia32_storess128_mask: |
13043 | case X86::BI__builtin_ia32_storesd128_mask: |
13044 | return EmitX86MaskedStore(*this, Ops, Align(1)); |
13045 | |
13046 | case X86::BI__builtin_ia32_vpopcntb_128: |
13047 | case X86::BI__builtin_ia32_vpopcntd_128: |
13048 | case X86::BI__builtin_ia32_vpopcntq_128: |
13049 | case X86::BI__builtin_ia32_vpopcntw_128: |
13050 | case X86::BI__builtin_ia32_vpopcntb_256: |
13051 | case X86::BI__builtin_ia32_vpopcntd_256: |
13052 | case X86::BI__builtin_ia32_vpopcntq_256: |
13053 | case X86::BI__builtin_ia32_vpopcntw_256: |
13054 | case X86::BI__builtin_ia32_vpopcntb_512: |
13055 | case X86::BI__builtin_ia32_vpopcntd_512: |
13056 | case X86::BI__builtin_ia32_vpopcntq_512: |
13057 | case X86::BI__builtin_ia32_vpopcntw_512: { |
13058 | llvm::Type *ResultType = ConvertType(E->getType()); |
13059 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
13060 | return Builder.CreateCall(F, Ops); |
13061 | } |
13062 | case X86::BI__builtin_ia32_cvtmask2b128: |
13063 | case X86::BI__builtin_ia32_cvtmask2b256: |
13064 | case X86::BI__builtin_ia32_cvtmask2b512: |
13065 | case X86::BI__builtin_ia32_cvtmask2w128: |
13066 | case X86::BI__builtin_ia32_cvtmask2w256: |
13067 | case X86::BI__builtin_ia32_cvtmask2w512: |
13068 | case X86::BI__builtin_ia32_cvtmask2d128: |
13069 | case X86::BI__builtin_ia32_cvtmask2d256: |
13070 | case X86::BI__builtin_ia32_cvtmask2d512: |
13071 | case X86::BI__builtin_ia32_cvtmask2q128: |
13072 | case X86::BI__builtin_ia32_cvtmask2q256: |
13073 | case X86::BI__builtin_ia32_cvtmask2q512: |
13074 | return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); |
13075 | |
13076 | case X86::BI__builtin_ia32_cvtb2mask128: |
13077 | case X86::BI__builtin_ia32_cvtb2mask256: |
13078 | case X86::BI__builtin_ia32_cvtb2mask512: |
13079 | case X86::BI__builtin_ia32_cvtw2mask128: |
13080 | case X86::BI__builtin_ia32_cvtw2mask256: |
13081 | case X86::BI__builtin_ia32_cvtw2mask512: |
13082 | case X86::BI__builtin_ia32_cvtd2mask128: |
13083 | case X86::BI__builtin_ia32_cvtd2mask256: |
13084 | case X86::BI__builtin_ia32_cvtd2mask512: |
13085 | case X86::BI__builtin_ia32_cvtq2mask128: |
13086 | case X86::BI__builtin_ia32_cvtq2mask256: |
13087 | case X86::BI__builtin_ia32_cvtq2mask512: |
13088 | return EmitX86ConvertToMask(*this, Ops[0]); |
13089 | |
13090 | case X86::BI__builtin_ia32_cvtdq2ps512_mask: |
13091 | case X86::BI__builtin_ia32_cvtqq2ps512_mask: |
13092 | case X86::BI__builtin_ia32_cvtqq2pd512_mask: |
13093 | case X86::BI__builtin_ia32_vcvtw2ph512_mask: |
13094 | case X86::BI__builtin_ia32_vcvtdq2ph512_mask: |
13095 | case X86::BI__builtin_ia32_vcvtqq2ph512_mask: |
13096 | return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true); |
13097 | case X86::BI__builtin_ia32_cvtudq2ps512_mask: |
13098 | case X86::BI__builtin_ia32_cvtuqq2ps512_mask: |
13099 | case X86::BI__builtin_ia32_cvtuqq2pd512_mask: |
13100 | case X86::BI__builtin_ia32_vcvtuw2ph512_mask: |
13101 | case X86::BI__builtin_ia32_vcvtudq2ph512_mask: |
13102 | case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: |
13103 | return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false); |
13104 | |
13105 | case X86::BI__builtin_ia32_vfmaddss3: |
13106 | case X86::BI__builtin_ia32_vfmaddsd3: |
13107 | case X86::BI__builtin_ia32_vfmaddsh3_mask: |
13108 | case X86::BI__builtin_ia32_vfmaddss3_mask: |
13109 | case X86::BI__builtin_ia32_vfmaddsd3_mask: |
13110 | return EmitScalarFMAExpr(*this, E, Ops, Ops[0]); |
13111 | case X86::BI__builtin_ia32_vfmaddss: |
13112 | case X86::BI__builtin_ia32_vfmaddsd: |
13113 | return EmitScalarFMAExpr(*this, E, Ops, |
13114 | Constant::getNullValue(Ops[0]->getType())); |
13115 | case X86::BI__builtin_ia32_vfmaddsh3_maskz: |
13116 | case X86::BI__builtin_ia32_vfmaddss3_maskz: |
13117 | case X86::BI__builtin_ia32_vfmaddsd3_maskz: |
13118 | return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true); |
13119 | case X86::BI__builtin_ia32_vfmaddsh3_mask3: |
13120 | case X86::BI__builtin_ia32_vfmaddss3_mask3: |
13121 | case X86::BI__builtin_ia32_vfmaddsd3_mask3: |
13122 | return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2); |
13123 | case X86::BI__builtin_ia32_vfmsubsh3_mask3: |
13124 | case X86::BI__builtin_ia32_vfmsubss3_mask3: |
13125 | case X86::BI__builtin_ia32_vfmsubsd3_mask3: |
13126 | return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2, |
13127 | /*NegAcc*/ true); |
13128 | case X86::BI__builtin_ia32_vfmaddph: |
13129 | case X86::BI__builtin_ia32_vfmaddps: |
13130 | case X86::BI__builtin_ia32_vfmaddpd: |
13131 | case X86::BI__builtin_ia32_vfmaddph256: |
13132 | case X86::BI__builtin_ia32_vfmaddps256: |
13133 | case X86::BI__builtin_ia32_vfmaddpd256: |
13134 | case X86::BI__builtin_ia32_vfmaddph512_mask: |
13135 | case X86::BI__builtin_ia32_vfmaddph512_maskz: |
13136 | case X86::BI__builtin_ia32_vfmaddph512_mask3: |
13137 | case X86::BI__builtin_ia32_vfmaddps512_mask: |
13138 | case X86::BI__builtin_ia32_vfmaddps512_maskz: |
13139 | case X86::BI__builtin_ia32_vfmaddps512_mask3: |
13140 | case X86::BI__builtin_ia32_vfmsubps512_mask3: |
13141 | case X86::BI__builtin_ia32_vfmaddpd512_mask: |
13142 | case X86::BI__builtin_ia32_vfmaddpd512_maskz: |
13143 | case X86::BI__builtin_ia32_vfmaddpd512_mask3: |
13144 | case X86::BI__builtin_ia32_vfmsubpd512_mask3: |
13145 | case X86::BI__builtin_ia32_vfmsubph512_mask3: |
13146 | return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false); |
13147 | case X86::BI__builtin_ia32_vfmaddsubph512_mask: |
13148 | case X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
13149 | case X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
13150 | case X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
13151 | case X86::BI__builtin_ia32_vfmaddsubps512_mask: |
13152 | case X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
13153 | case X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
13154 | case X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
13155 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
13156 | case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
13157 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
13158 | case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
13159 | return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true); |
13160 | |
13161 | case X86::BI__builtin_ia32_movdqa32store128_mask: |
13162 | case X86::BI__builtin_ia32_movdqa64store128_mask: |
13163 | case X86::BI__builtin_ia32_storeaps128_mask: |
13164 | case X86::BI__builtin_ia32_storeapd128_mask: |
13165 | case X86::BI__builtin_ia32_movdqa32store256_mask: |
13166 | case X86::BI__builtin_ia32_movdqa64store256_mask: |
13167 | case X86::BI__builtin_ia32_storeaps256_mask: |
13168 | case X86::BI__builtin_ia32_storeapd256_mask: |
13169 | case X86::BI__builtin_ia32_movdqa32store512_mask: |
13170 | case X86::BI__builtin_ia32_movdqa64store512_mask: |
13171 | case X86::BI__builtin_ia32_storeaps512_mask: |
13172 | case X86::BI__builtin_ia32_storeapd512_mask: |
13173 | return EmitX86MaskedStore( |
13174 | *this, Ops, |
13175 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); |
13176 | |
13177 | case X86::BI__builtin_ia32_loadups128_mask: |
13178 | case X86::BI__builtin_ia32_loadups256_mask: |
13179 | case X86::BI__builtin_ia32_loadups512_mask: |
13180 | case X86::BI__builtin_ia32_loadupd128_mask: |
13181 | case X86::BI__builtin_ia32_loadupd256_mask: |
13182 | case X86::BI__builtin_ia32_loadupd512_mask: |
13183 | case X86::BI__builtin_ia32_loaddquqi128_mask: |
13184 | case X86::BI__builtin_ia32_loaddquqi256_mask: |
13185 | case X86::BI__builtin_ia32_loaddquqi512_mask: |
13186 | case X86::BI__builtin_ia32_loaddquhi128_mask: |
13187 | case X86::BI__builtin_ia32_loaddquhi256_mask: |
13188 | case X86::BI__builtin_ia32_loaddquhi512_mask: |
13189 | case X86::BI__builtin_ia32_loaddqusi128_mask: |
13190 | case X86::BI__builtin_ia32_loaddqusi256_mask: |
13191 | case X86::BI__builtin_ia32_loaddqusi512_mask: |
13192 | case X86::BI__builtin_ia32_loaddqudi128_mask: |
13193 | case X86::BI__builtin_ia32_loaddqudi256_mask: |
13194 | case X86::BI__builtin_ia32_loaddqudi512_mask: |
13195 | return EmitX86MaskedLoad(*this, Ops, Align(1)); |
13196 | |
13197 | case X86::BI__builtin_ia32_loadsh128_mask: |
13198 | case X86::BI__builtin_ia32_loadss128_mask: |
13199 | case X86::BI__builtin_ia32_loadsd128_mask: |
13200 | return EmitX86MaskedLoad(*this, Ops, Align(1)); |
13201 | |
13202 | case X86::BI__builtin_ia32_loadaps128_mask: |
13203 | case X86::BI__builtin_ia32_loadaps256_mask: |
13204 | case X86::BI__builtin_ia32_loadaps512_mask: |
13205 | case X86::BI__builtin_ia32_loadapd128_mask: |
13206 | case X86::BI__builtin_ia32_loadapd256_mask: |
13207 | case X86::BI__builtin_ia32_loadapd512_mask: |
13208 | case X86::BI__builtin_ia32_movdqa32load128_mask: |
13209 | case X86::BI__builtin_ia32_movdqa32load256_mask: |
13210 | case X86::BI__builtin_ia32_movdqa32load512_mask: |
13211 | case X86::BI__builtin_ia32_movdqa64load128_mask: |
13212 | case X86::BI__builtin_ia32_movdqa64load256_mask: |
13213 | case X86::BI__builtin_ia32_movdqa64load512_mask: |
13214 | return EmitX86MaskedLoad( |
13215 | *this, Ops, |
13216 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); |
13217 | |
13218 | case X86::BI__builtin_ia32_expandloaddf128_mask: |
13219 | case X86::BI__builtin_ia32_expandloaddf256_mask: |
13220 | case X86::BI__builtin_ia32_expandloaddf512_mask: |
13221 | case X86::BI__builtin_ia32_expandloadsf128_mask: |
13222 | case X86::BI__builtin_ia32_expandloadsf256_mask: |
13223 | case X86::BI__builtin_ia32_expandloadsf512_mask: |
13224 | case X86::BI__builtin_ia32_expandloaddi128_mask: |
13225 | case X86::BI__builtin_ia32_expandloaddi256_mask: |
13226 | case X86::BI__builtin_ia32_expandloaddi512_mask: |
13227 | case X86::BI__builtin_ia32_expandloadsi128_mask: |
13228 | case X86::BI__builtin_ia32_expandloadsi256_mask: |
13229 | case X86::BI__builtin_ia32_expandloadsi512_mask: |
13230 | case X86::BI__builtin_ia32_expandloadhi128_mask: |
13231 | case X86::BI__builtin_ia32_expandloadhi256_mask: |
13232 | case X86::BI__builtin_ia32_expandloadhi512_mask: |
13233 | case X86::BI__builtin_ia32_expandloadqi128_mask: |
13234 | case X86::BI__builtin_ia32_expandloadqi256_mask: |
13235 | case X86::BI__builtin_ia32_expandloadqi512_mask: |
13236 | return EmitX86ExpandLoad(*this, Ops); |
13237 | |
13238 | case X86::BI__builtin_ia32_compressstoredf128_mask: |
13239 | case X86::BI__builtin_ia32_compressstoredf256_mask: |
13240 | case X86::BI__builtin_ia32_compressstoredf512_mask: |
13241 | case X86::BI__builtin_ia32_compressstoresf128_mask: |
13242 | case X86::BI__builtin_ia32_compressstoresf256_mask: |
13243 | case X86::BI__builtin_ia32_compressstoresf512_mask: |
13244 | case X86::BI__builtin_ia32_compressstoredi128_mask: |
13245 | case X86::BI__builtin_ia32_compressstoredi256_mask: |
13246 | case X86::BI__builtin_ia32_compressstoredi512_mask: |
13247 | case X86::BI__builtin_ia32_compressstoresi128_mask: |
13248 | case X86::BI__builtin_ia32_compressstoresi256_mask: |
13249 | case X86::BI__builtin_ia32_compressstoresi512_mask: |
13250 | case X86::BI__builtin_ia32_compressstorehi128_mask: |
13251 | case X86::BI__builtin_ia32_compressstorehi256_mask: |
13252 | case X86::BI__builtin_ia32_compressstorehi512_mask: |
13253 | case X86::BI__builtin_ia32_compressstoreqi128_mask: |
13254 | case X86::BI__builtin_ia32_compressstoreqi256_mask: |
13255 | case X86::BI__builtin_ia32_compressstoreqi512_mask: |
13256 | return EmitX86CompressStore(*this, Ops); |
13257 | |
13258 | case X86::BI__builtin_ia32_expanddf128_mask: |
13259 | case X86::BI__builtin_ia32_expanddf256_mask: |
13260 | case X86::BI__builtin_ia32_expanddf512_mask: |
13261 | case X86::BI__builtin_ia32_expandsf128_mask: |
13262 | case X86::BI__builtin_ia32_expandsf256_mask: |
13263 | case X86::BI__builtin_ia32_expandsf512_mask: |
13264 | case X86::BI__builtin_ia32_expanddi128_mask: |
13265 | case X86::BI__builtin_ia32_expanddi256_mask: |
13266 | case X86::BI__builtin_ia32_expanddi512_mask: |
13267 | case X86::BI__builtin_ia32_expandsi128_mask: |
13268 | case X86::BI__builtin_ia32_expandsi256_mask: |
13269 | case X86::BI__builtin_ia32_expandsi512_mask: |
13270 | case X86::BI__builtin_ia32_expandhi128_mask: |
13271 | case X86::BI__builtin_ia32_expandhi256_mask: |
13272 | case X86::BI__builtin_ia32_expandhi512_mask: |
13273 | case X86::BI__builtin_ia32_expandqi128_mask: |
13274 | case X86::BI__builtin_ia32_expandqi256_mask: |
13275 | case X86::BI__builtin_ia32_expandqi512_mask: |
13276 | return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false); |
13277 | |
13278 | case X86::BI__builtin_ia32_compressdf128_mask: |
13279 | case X86::BI__builtin_ia32_compressdf256_mask: |
13280 | case X86::BI__builtin_ia32_compressdf512_mask: |
13281 | case X86::BI__builtin_ia32_compresssf128_mask: |
13282 | case X86::BI__builtin_ia32_compresssf256_mask: |
13283 | case X86::BI__builtin_ia32_compresssf512_mask: |
13284 | case X86::BI__builtin_ia32_compressdi128_mask: |
13285 | case X86::BI__builtin_ia32_compressdi256_mask: |
13286 | case X86::BI__builtin_ia32_compressdi512_mask: |
13287 | case X86::BI__builtin_ia32_compresssi128_mask: |
13288 | case X86::BI__builtin_ia32_compresssi256_mask: |
13289 | case X86::BI__builtin_ia32_compresssi512_mask: |
13290 | case X86::BI__builtin_ia32_compresshi128_mask: |
13291 | case X86::BI__builtin_ia32_compresshi256_mask: |
13292 | case X86::BI__builtin_ia32_compresshi512_mask: |
13293 | case X86::BI__builtin_ia32_compressqi128_mask: |
13294 | case X86::BI__builtin_ia32_compressqi256_mask: |
13295 | case X86::BI__builtin_ia32_compressqi512_mask: |
13296 | return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true); |
13297 | |
13298 | case X86::BI__builtin_ia32_gather3div2df: |
13299 | case X86::BI__builtin_ia32_gather3div2di: |
13300 | case X86::BI__builtin_ia32_gather3div4df: |
13301 | case X86::BI__builtin_ia32_gather3div4di: |
13302 | case X86::BI__builtin_ia32_gather3div4sf: |
13303 | case X86::BI__builtin_ia32_gather3div4si: |
13304 | case X86::BI__builtin_ia32_gather3div8sf: |
13305 | case X86::BI__builtin_ia32_gather3div8si: |
13306 | case X86::BI__builtin_ia32_gather3siv2df: |
13307 | case X86::BI__builtin_ia32_gather3siv2di: |
13308 | case X86::BI__builtin_ia32_gather3siv4df: |
13309 | case X86::BI__builtin_ia32_gather3siv4di: |
13310 | case X86::BI__builtin_ia32_gather3siv4sf: |
13311 | case X86::BI__builtin_ia32_gather3siv4si: |
13312 | case X86::BI__builtin_ia32_gather3siv8sf: |
13313 | case X86::BI__builtin_ia32_gather3siv8si: |
13314 | case X86::BI__builtin_ia32_gathersiv8df: |
13315 | case X86::BI__builtin_ia32_gathersiv16sf: |
13316 | case X86::BI__builtin_ia32_gatherdiv8df: |
13317 | case X86::BI__builtin_ia32_gatherdiv16sf: |
13318 | case X86::BI__builtin_ia32_gathersiv8di: |
13319 | case X86::BI__builtin_ia32_gathersiv16si: |
13320 | case X86::BI__builtin_ia32_gatherdiv8di: |
13321 | case X86::BI__builtin_ia32_gatherdiv16si: { |
13322 | Intrinsic::ID IID; |
13323 | switch (BuiltinID) { |
13324 | default: llvm_unreachable("Unexpected builtin"); |
13325 | case X86::BI__builtin_ia32_gather3div2df: |
13326 | IID = Intrinsic::x86_avx512_mask_gather3div2_df; |
13327 | break; |
13328 | case X86::BI__builtin_ia32_gather3div2di: |
13329 | IID = Intrinsic::x86_avx512_mask_gather3div2_di; |
13330 | break; |
13331 | case X86::BI__builtin_ia32_gather3div4df: |
13332 | IID = Intrinsic::x86_avx512_mask_gather3div4_df; |
13333 | break; |
13334 | case X86::BI__builtin_ia32_gather3div4di: |
13335 | IID = Intrinsic::x86_avx512_mask_gather3div4_di; |
13336 | break; |
13337 | case X86::BI__builtin_ia32_gather3div4sf: |
13338 | IID = Intrinsic::x86_avx512_mask_gather3div4_sf; |
13339 | break; |
13340 | case X86::BI__builtin_ia32_gather3div4si: |
13341 | IID = Intrinsic::x86_avx512_mask_gather3div4_si; |
13342 | break; |
13343 | case X86::BI__builtin_ia32_gather3div8sf: |
13344 | IID = Intrinsic::x86_avx512_mask_gather3div8_sf; |
13345 | break; |
13346 | case X86::BI__builtin_ia32_gather3div8si: |
13347 | IID = Intrinsic::x86_avx512_mask_gather3div8_si; |
13348 | break; |
13349 | case X86::BI__builtin_ia32_gather3siv2df: |
13350 | IID = Intrinsic::x86_avx512_mask_gather3siv2_df; |
13351 | break; |
13352 | case X86::BI__builtin_ia32_gather3siv2di: |
13353 | IID = Intrinsic::x86_avx512_mask_gather3siv2_di; |
13354 | break; |
13355 | case X86::BI__builtin_ia32_gather3siv4df: |
13356 | IID = Intrinsic::x86_avx512_mask_gather3siv4_df; |
13357 | break; |
13358 | case X86::BI__builtin_ia32_gather3siv4di: |
13359 | IID = Intrinsic::x86_avx512_mask_gather3siv4_di; |
13360 | break; |
13361 | case X86::BI__builtin_ia32_gather3siv4sf: |
13362 | IID = Intrinsic::x86_avx512_mask_gather3siv4_sf; |
13363 | break; |
13364 | case X86::BI__builtin_ia32_gather3siv4si: |
13365 | IID = Intrinsic::x86_avx512_mask_gather3siv4_si; |
13366 | break; |
13367 | case X86::BI__builtin_ia32_gather3siv8sf: |
13368 | IID = Intrinsic::x86_avx512_mask_gather3siv8_sf; |
13369 | break; |
13370 | case X86::BI__builtin_ia32_gather3siv8si: |
13371 | IID = Intrinsic::x86_avx512_mask_gather3siv8_si; |
13372 | break; |
13373 | case X86::BI__builtin_ia32_gathersiv8df: |
13374 | IID = Intrinsic::x86_avx512_mask_gather_dpd_512; |
13375 | break; |
13376 | case X86::BI__builtin_ia32_gathersiv16sf: |
13377 | IID = Intrinsic::x86_avx512_mask_gather_dps_512; |
13378 | break; |
13379 | case X86::BI__builtin_ia32_gatherdiv8df: |
13380 | IID = Intrinsic::x86_avx512_mask_gather_qpd_512; |
13381 | break; |
13382 | case X86::BI__builtin_ia32_gatherdiv16sf: |
13383 | IID = Intrinsic::x86_avx512_mask_gather_qps_512; |
13384 | break; |
13385 | case X86::BI__builtin_ia32_gathersiv8di: |
13386 | IID = Intrinsic::x86_avx512_mask_gather_dpq_512; |
13387 | break; |
13388 | case X86::BI__builtin_ia32_gathersiv16si: |
13389 | IID = Intrinsic::x86_avx512_mask_gather_dpi_512; |
13390 | break; |
13391 | case X86::BI__builtin_ia32_gatherdiv8di: |
13392 | IID = Intrinsic::x86_avx512_mask_gather_qpq_512; |
13393 | break; |
13394 | case X86::BI__builtin_ia32_gatherdiv16si: |
13395 | IID = Intrinsic::x86_avx512_mask_gather_qpi_512; |
13396 | break; |
13397 | } |
13398 | |
13399 | unsigned MinElts = std::min( |
13400 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(), |
13401 | cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements()); |
13402 | Ops[3] = getMaskVecValue(*this, Ops[3], MinElts); |
13403 | Function *Intr = CGM.getIntrinsic(IID); |
13404 | return Builder.CreateCall(Intr, Ops); |
13405 | } |
13406 | |
13407 | case X86::BI__builtin_ia32_scattersiv8df: |
13408 | case X86::BI__builtin_ia32_scattersiv16sf: |
13409 | case X86::BI__builtin_ia32_scatterdiv8df: |
13410 | case X86::BI__builtin_ia32_scatterdiv16sf: |
13411 | case X86::BI__builtin_ia32_scattersiv8di: |
13412 | case X86::BI__builtin_ia32_scattersiv16si: |
13413 | case X86::BI__builtin_ia32_scatterdiv8di: |
13414 | case X86::BI__builtin_ia32_scatterdiv16si: |
13415 | case X86::BI__builtin_ia32_scatterdiv2df: |
13416 | case X86::BI__builtin_ia32_scatterdiv2di: |
13417 | case X86::BI__builtin_ia32_scatterdiv4df: |
13418 | case X86::BI__builtin_ia32_scatterdiv4di: |
13419 | case X86::BI__builtin_ia32_scatterdiv4sf: |
13420 | case X86::BI__builtin_ia32_scatterdiv4si: |
13421 | case X86::BI__builtin_ia32_scatterdiv8sf: |
13422 | case X86::BI__builtin_ia32_scatterdiv8si: |
13423 | case X86::BI__builtin_ia32_scattersiv2df: |
13424 | case X86::BI__builtin_ia32_scattersiv2di: |
13425 | case X86::BI__builtin_ia32_scattersiv4df: |
13426 | case X86::BI__builtin_ia32_scattersiv4di: |
13427 | case X86::BI__builtin_ia32_scattersiv4sf: |
13428 | case X86::BI__builtin_ia32_scattersiv4si: |
13429 | case X86::BI__builtin_ia32_scattersiv8sf: |
13430 | case X86::BI__builtin_ia32_scattersiv8si: { |
13431 | Intrinsic::ID IID; |
13432 | switch (BuiltinID) { |
13433 | default: llvm_unreachable("Unexpected builtin"); |
13434 | case X86::BI__builtin_ia32_scattersiv8df: |
13435 | IID = Intrinsic::x86_avx512_mask_scatter_dpd_512; |
13436 | break; |
13437 | case X86::BI__builtin_ia32_scattersiv16sf: |
13438 | IID = Intrinsic::x86_avx512_mask_scatter_dps_512; |
13439 | break; |
13440 | case X86::BI__builtin_ia32_scatterdiv8df: |
13441 | IID = Intrinsic::x86_avx512_mask_scatter_qpd_512; |
13442 | break; |
13443 | case X86::BI__builtin_ia32_scatterdiv16sf: |
13444 | IID = Intrinsic::x86_avx512_mask_scatter_qps_512; |
13445 | break; |
13446 | case X86::BI__builtin_ia32_scattersiv8di: |
13447 | IID = Intrinsic::x86_avx512_mask_scatter_dpq_512; |
13448 | break; |
13449 | case X86::BI__builtin_ia32_scattersiv16si: |
13450 | IID = Intrinsic::x86_avx512_mask_scatter_dpi_512; |
13451 | break; |
13452 | case X86::BI__builtin_ia32_scatterdiv8di: |
13453 | IID = Intrinsic::x86_avx512_mask_scatter_qpq_512; |
13454 | break; |
13455 | case X86::BI__builtin_ia32_scatterdiv16si: |
13456 | IID = Intrinsic::x86_avx512_mask_scatter_qpi_512; |
13457 | break; |
13458 | case X86::BI__builtin_ia32_scatterdiv2df: |
13459 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_df; |
13460 | break; |
13461 | case X86::BI__builtin_ia32_scatterdiv2di: |
13462 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_di; |
13463 | break; |
13464 | case X86::BI__builtin_ia32_scatterdiv4df: |
13465 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_df; |
13466 | break; |
13467 | case X86::BI__builtin_ia32_scatterdiv4di: |
13468 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_di; |
13469 | break; |
13470 | case X86::BI__builtin_ia32_scatterdiv4sf: |
13471 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf; |
13472 | break; |
13473 | case X86::BI__builtin_ia32_scatterdiv4si: |
13474 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_si; |
13475 | break; |
13476 | case X86::BI__builtin_ia32_scatterdiv8sf: |
13477 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf; |
13478 | break; |
13479 | case X86::BI__builtin_ia32_scatterdiv8si: |
13480 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_si; |
13481 | break; |
13482 | case X86::BI__builtin_ia32_scattersiv2df: |
13483 | IID = Intrinsic::x86_avx512_mask_scattersiv2_df; |
13484 | break; |
13485 | case X86::BI__builtin_ia32_scattersiv2di: |
13486 | IID = Intrinsic::x86_avx512_mask_scattersiv2_di; |
13487 | break; |
13488 | case X86::BI__builtin_ia32_scattersiv4df: |
13489 | IID = Intrinsic::x86_avx512_mask_scattersiv4_df; |
13490 | break; |
13491 | case X86::BI__builtin_ia32_scattersiv4di: |
13492 | IID = Intrinsic::x86_avx512_mask_scattersiv4_di; |
13493 | break; |
13494 | case X86::BI__builtin_ia32_scattersiv4sf: |
13495 | IID = Intrinsic::x86_avx512_mask_scattersiv4_sf; |
13496 | break; |
13497 | case X86::BI__builtin_ia32_scattersiv4si: |
13498 | IID = Intrinsic::x86_avx512_mask_scattersiv4_si; |
13499 | break; |
13500 | case X86::BI__builtin_ia32_scattersiv8sf: |
13501 | IID = Intrinsic::x86_avx512_mask_scattersiv8_sf; |
13502 | break; |
13503 | case X86::BI__builtin_ia32_scattersiv8si: |
13504 | IID = Intrinsic::x86_avx512_mask_scattersiv8_si; |
13505 | break; |
13506 | } |
13507 | |
13508 | unsigned MinElts = std::min( |
13509 | cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(), |
13510 | cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements()); |
13511 | Ops[1] = getMaskVecValue(*this, Ops[1], MinElts); |
13512 | Function *Intr = CGM.getIntrinsic(IID); |
13513 | return Builder.CreateCall(Intr, Ops); |
13514 | } |
13515 | |
13516 | case X86::BI__builtin_ia32_vextractf128_pd256: |
13517 | case X86::BI__builtin_ia32_vextractf128_ps256: |
13518 | case X86::BI__builtin_ia32_vextractf128_si256: |
13519 | case X86::BI__builtin_ia32_extract128i256: |
13520 | case X86::BI__builtin_ia32_extractf64x4_mask: |
13521 | case X86::BI__builtin_ia32_extractf32x4_mask: |
13522 | case X86::BI__builtin_ia32_extracti64x4_mask: |
13523 | case X86::BI__builtin_ia32_extracti32x4_mask: |
13524 | case X86::BI__builtin_ia32_extractf32x8_mask: |
13525 | case X86::BI__builtin_ia32_extracti32x8_mask: |
13526 | case X86::BI__builtin_ia32_extractf32x4_256_mask: |
13527 | case X86::BI__builtin_ia32_extracti32x4_256_mask: |
13528 | case X86::BI__builtin_ia32_extractf64x2_256_mask: |
13529 | case X86::BI__builtin_ia32_extracti64x2_256_mask: |
13530 | case X86::BI__builtin_ia32_extractf64x2_512_mask: |
13531 | case X86::BI__builtin_ia32_extracti64x2_512_mask: { |
13532 | auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType())); |
13533 | unsigned NumElts = DstTy->getNumElements(); |
13534 | unsigned SrcNumElts = |
13535 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13536 | unsigned SubVectors = SrcNumElts / NumElts; |
13537 | unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
13538 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); |
13539 | Index &= SubVectors - 1; // Remove any extra bits. |
13540 | Index *= NumElts; |
13541 | |
13542 | int Indices[16]; |
13543 | for (unsigned i = 0; i != NumElts; ++i) |
13544 | Indices[i] = i + Index; |
13545 | |
13546 | Value *Res = Builder.CreateShuffleVector(Ops[0], |
13547 | makeArrayRef(Indices, NumElts), |
13548 | "extract"); |
13549 | |
13550 | if (Ops.size() == 4) |
13551 | Res = EmitX86Select(*this, Ops[3], Res, Ops[2]); |
13552 | |
13553 | return Res; |
13554 | } |
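// A sketch of the shuffle mask built for the subvector extracts above;
// extractIndices is a hypothetical helper for illustration. For
// _mm512_extractf64x4_pd (NumElts = 4, SubVectors = 2) with Imm = 1 it
// yields {4, 5, 6, 7}.
static void extractIndices(int *Indices, unsigned NumElts, unsigned SubVectors,
                           unsigned Imm) {
  unsigned Index = (Imm & (SubVectors - 1)) * NumElts; // wrap extra bits
  for (unsigned i = 0; i != NumElts; ++i)
    Indices[i] = (int)(i + Index);
}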
13555 | case X86::BI__builtin_ia32_vinsertf128_pd256: |
13556 | case X86::BI__builtin_ia32_vinsertf128_ps256: |
13557 | case X86::BI__builtin_ia32_vinsertf128_si256: |
13558 | case X86::BI__builtin_ia32_insert128i256: |
13559 | case X86::BI__builtin_ia32_insertf64x4: |
13560 | case X86::BI__builtin_ia32_insertf32x4: |
13561 | case X86::BI__builtin_ia32_inserti64x4: |
13562 | case X86::BI__builtin_ia32_inserti32x4: |
13563 | case X86::BI__builtin_ia32_insertf32x8: |
13564 | case X86::BI__builtin_ia32_inserti32x8: |
13565 | case X86::BI__builtin_ia32_insertf32x4_256: |
13566 | case X86::BI__builtin_ia32_inserti32x4_256: |
13567 | case X86::BI__builtin_ia32_insertf64x2_256: |
13568 | case X86::BI__builtin_ia32_inserti64x2_256: |
13569 | case X86::BI__builtin_ia32_insertf64x2_512: |
13570 | case X86::BI__builtin_ia32_inserti64x2_512: { |
13571 | unsigned DstNumElts = |
13572 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13573 | unsigned SrcNumElts = |
13574 | cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements(); |
13575 | unsigned SubVectors = DstNumElts / SrcNumElts; |
13576 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
13577 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); |
13578 | Index &= SubVectors - 1; // Remove any extra bits. |
13579 | Index *= SrcNumElts; |
13580 | |
13581 | int Indices[16]; |
13582 | for (unsigned i = 0; i != DstNumElts; ++i) |
13583 | Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; |
13584 | |
13585 | Value *Op1 = Builder.CreateShuffleVector(Ops[1], |
13586 | makeArrayRef(Indices, DstNumElts), |
13587 | "widen"); |
13588 | |
13589 | for (unsigned i = 0; i != DstNumElts; ++i) { |
13590 | if (i >= Index && i < (Index + SrcNumElts)) |
13591 | Indices[i] = (i - Index) + DstNumElts; |
13592 | else |
13593 | Indices[i] = i; |
13594 | } |
13595 | |
13596 | return Builder.CreateShuffleVector(Ops[0], Op1, |
13597 | makeArrayRef(Indices, DstNumElts), |
13598 | "insert"); |
13599 | } |
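      | // Illustrative walkthrough: inserting a <4 x float> into a <8 x float>
      | // destination at immediate 1 first widens the source to
      | // {s0,s1,s2,s3,poison,...}, then the second shuffle with Indices =
      | // {0,1,2,3,8,9,10,11} yields {d0,d1,d2,d3,s0,s1,s2,s3}.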
13600 | case X86::BI__builtin_ia32_pmovqd512_mask: |
13601 | case X86::BI__builtin_ia32_pmovwb512_mask: { |
13602 | Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
13603 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
13604 | } |
13605 | case X86::BI__builtin_ia32_pmovdb512_mask: |
13606 | case X86::BI__builtin_ia32_pmovdw512_mask: |
13607 | case X86::BI__builtin_ia32_pmovqw512_mask: { |
13608 | if (const auto *C = dyn_cast<Constant>(Ops[2])) |
13609 | if (C->isAllOnesValue()) |
13610 | return Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
13611 | |
13612 | Intrinsic::ID IID; |
13613 | switch (BuiltinID) { |
13614 | default: llvm_unreachable("Unsupported intrinsic!");
13615 | case X86::BI__builtin_ia32_pmovdb512_mask: |
13616 | IID = Intrinsic::x86_avx512_mask_pmov_db_512; |
13617 | break; |
13618 | case X86::BI__builtin_ia32_pmovdw512_mask: |
13619 | IID = Intrinsic::x86_avx512_mask_pmov_dw_512; |
13620 | break; |
13621 | case X86::BI__builtin_ia32_pmovqw512_mask: |
13622 | IID = Intrinsic::x86_avx512_mask_pmov_qw_512; |
13623 | break; |
13624 | } |
13625 | |
13626 | Function *Intr = CGM.getIntrinsic(IID); |
13627 | return Builder.CreateCall(Intr, Ops); |
13628 | } |
13629 | case X86::BI__builtin_ia32_pblendw128: |
13630 | case X86::BI__builtin_ia32_blendpd: |
13631 | case X86::BI__builtin_ia32_blendps: |
13632 | case X86::BI__builtin_ia32_blendpd256: |
13633 | case X86::BI__builtin_ia32_blendps256: |
13634 | case X86::BI__builtin_ia32_pblendw256: |
13635 | case X86::BI__builtin_ia32_pblendd128: |
13636 | case X86::BI__builtin_ia32_pblendd256: { |
13637 | unsigned NumElts = |
13638 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13639 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13640 | |
13641 | int Indices[16]; |
13642 | // If there are more than 8 elements, the 8-bit immediate is reused for
13643 | // the upper elements, so make sure we handle that wrap.
13644 | for (unsigned i = 0; i != NumElts; ++i) |
13645 | Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i; |
13646 | |
13647 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13648 | makeArrayRef(Indices, NumElts), |
13649 | "blend"); |
13650 | } |
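      | // Illustrative walkthrough: pblendw128 with Imm = 0x05 takes elements 0
      | // and 2 from Ops[1] and the rest from Ops[0], i.e. Indices =
      | // {8,1,10,3,4,5,6,7}; for 16-element types the (i % 8) wrap reapplies
      | // the same 8-bit immediate to the upper half.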
13651 | case X86::BI__builtin_ia32_pshuflw: |
13652 | case X86::BI__builtin_ia32_pshuflw256: |
13653 | case X86::BI__builtin_ia32_pshuflw512: { |
13654 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13655 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13656 | unsigned NumElts = Ty->getNumElements(); |
13657 | |
13658 | // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
13659 | Imm = (Imm & 0xff) * 0x01010101; |
13660 | |
13661 | int Indices[32]; |
13662 | for (unsigned l = 0; l != NumElts; l += 8) { |
13663 | for (unsigned i = 0; i != 4; ++i) { |
13664 | Indices[l + i] = l + (Imm & 3); |
13665 | Imm >>= 2; |
13666 | } |
13667 | for (unsigned i = 4; i != 8; ++i) |
13668 | Indices[l + i] = l + i; |
13669 | } |
13670 | |
13671 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13672 | "pshuflw"); |
13673 | } |
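      | // Illustrative walkthrough: pshuflw with Imm = 0x1B decodes the 2-bit
      | // fields as {3,2,1,0}, reversing the low four words of each lane while
      | // words 4..7 pass through: Indices = {3,2,1,0,4,5,6,7} for the 128-bit
      | // form. The pshufhw case below mirrors this for the high four words.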
13674 | case X86::BI__builtin_ia32_pshufhw: |
13675 | case X86::BI__builtin_ia32_pshufhw256: |
13676 | case X86::BI__builtin_ia32_pshufhw512: { |
13677 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13678 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13679 | unsigned NumElts = Ty->getNumElements(); |
13680 | |
13681 | // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
13682 | Imm = (Imm & 0xff) * 0x01010101; |
13683 | |
13684 | int Indices[32]; |
13685 | for (unsigned l = 0; l != NumElts; l += 8) { |
13686 | for (unsigned i = 0; i != 4; ++i) |
13687 | Indices[l + i] = l + i; |
13688 | for (unsigned i = 4; i != 8; ++i) { |
13689 | Indices[l + i] = l + 4 + (Imm & 3); |
13690 | Imm >>= 2; |
13691 | } |
13692 | } |
13693 | |
13694 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13695 | "pshufhw"); |
13696 | } |
13697 | case X86::BI__builtin_ia32_pshufd: |
13698 | case X86::BI__builtin_ia32_pshufd256: |
13699 | case X86::BI__builtin_ia32_pshufd512: |
13700 | case X86::BI__builtin_ia32_vpermilpd: |
13701 | case X86::BI__builtin_ia32_vpermilps: |
13702 | case X86::BI__builtin_ia32_vpermilpd256: |
13703 | case X86::BI__builtin_ia32_vpermilps256: |
13704 | case X86::BI__builtin_ia32_vpermilpd512: |
13705 | case X86::BI__builtin_ia32_vpermilps512: { |
13706 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13707 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13708 | unsigned NumElts = Ty->getNumElements(); |
13709 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
13710 | unsigned NumLaneElts = NumElts / NumLanes; |
13711 | |
13712 | // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
13713 | Imm = (Imm & 0xff) * 0x01010101; |
13714 | |
13715 | int Indices[16]; |
13716 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13717 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13718 | Indices[i + l] = (Imm % NumLaneElts) + l; |
13719 | Imm /= NumLaneElts; |
13720 | } |
13721 | } |
13722 | |
13723 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13724 | "permil"); |
13725 | } |
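      | // Illustrative walkthrough: pshufd with Imm = 0xB1 (fields 1,0,3,2)
      | // swaps adjacent element pairs in each 128-bit lane, so the 128-bit
      | // form uses Indices = {1,0,3,2}; the splatted immediate repeats the
      | // pattern for every lane of the wider forms.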
13726 | case X86::BI__builtin_ia32_shufpd: |
13727 | case X86::BI__builtin_ia32_shufpd256: |
13728 | case X86::BI__builtin_ia32_shufpd512: |
13729 | case X86::BI__builtin_ia32_shufps: |
13730 | case X86::BI__builtin_ia32_shufps256: |
13731 | case X86::BI__builtin_ia32_shufps512: { |
13732 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13733 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13734 | unsigned NumElts = Ty->getNumElements(); |
13735 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
13736 | unsigned NumLaneElts = NumElts / NumLanes; |
13737 | |
13738 | // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
13739 | Imm = (Imm & 0xff) * 0x01010101; |
13740 | |
13741 | int Indices[16]; |
13742 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13743 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13744 | unsigned Index = Imm % NumLaneElts; |
13745 | Imm /= NumLaneElts; |
13746 | if (i >= (NumLaneElts / 2)) |
13747 | Index += NumElts; |
13748 | Indices[l + i] = l + Index; |
13749 | } |
13750 | } |
13751 | |
13752 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13753 | makeArrayRef(Indices, NumElts), |
13754 | "shufp"); |
13755 | } |
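      | // Illustrative walkthrough: shufps with Imm = 0x44 decodes to fields
      | // 0,1,0,1, and the second half of each lane switches to the second
      | // source (Index += NumElts), so the 128-bit form gets Indices =
      | // {0,1,4,5}, i.e. {a0,a1,b0,b1}.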
13756 | case X86::BI__builtin_ia32_permdi256: |
13757 | case X86::BI__builtin_ia32_permdf256: |
13758 | case X86::BI__builtin_ia32_permdi512: |
13759 | case X86::BI__builtin_ia32_permdf512: { |
13760 | unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13761 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13762 | unsigned NumElts = Ty->getNumElements(); |
13763 | |
13764 | // These intrinsics operate on 256-bit lanes of four 64-bit elements. |
13765 | int Indices[8]; |
13766 | for (unsigned l = 0; l != NumElts; l += 4) |
13767 | for (unsigned i = 0; i != 4; ++i) |
13768 | Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3); |
13769 | |
13770 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13771 | "perm"); |
13772 | } |
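      | // Illustrative walkthrough: permq with Imm = 0x4E (fields 2,3,0,1)
      | // swaps the two 128-bit halves of a 256-bit vector: Indices = {2,3,0,1}.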
13773 | case X86::BI__builtin_ia32_palignr128: |
13774 | case X86::BI__builtin_ia32_palignr256: |
13775 | case X86::BI__builtin_ia32_palignr512: { |
13776 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
13777 | |
13778 | unsigned NumElts = |
13779 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13780 | assert(NumElts % 16 == 0);
13781 | |
13782 | // If palignr is shifting the pair of vectors more than the size of two |
13783 | // lanes, emit zero. |
13784 | if (ShiftVal >= 32) |
13785 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
13786 | |
13787 | // If palignr is shifting the pair of input vectors more than one lane, |
13788 | // but less than two lanes, convert to shifting in zeroes. |
13789 | if (ShiftVal > 16) { |
13790 | ShiftVal -= 16; |
13791 | Ops[1] = Ops[0]; |
13792 | Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); |
13793 | } |
13794 | |
13795 | int Indices[64]; |
13796 | // 256/512-bit palignr operates on 128-bit lanes, so we need to handle that.
13797 | for (unsigned l = 0; l != NumElts; l += 16) { |
13798 | for (unsigned i = 0; i != 16; ++i) { |
13799 | unsigned Idx = ShiftVal + i; |
13800 | if (Idx >= 16) |
13801 | Idx += NumElts - 16; // End of lane, switch operand. |
13802 | Indices[l + i] = Idx + l; |
13803 | } |
13804 | } |
13805 | |
13806 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
13807 | makeArrayRef(Indices, NumElts), |
13808 | "palignr"); |
13809 | } |
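      | // Illustrative walkthrough: palignr128 with ShiftVal = 4 produces
      | // Indices = {4..15,16..19}; since the shuffle operands are (Ops[1],
      | // Ops[0]), the result is bytes 4..15 of the second source followed by
      | // bytes 0..3 of the first, i.e. the concatenated pair shifted right by
      | // four bytes.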
13810 | case X86::BI__builtin_ia32_alignd128: |
13811 | case X86::BI__builtin_ia32_alignd256: |
13812 | case X86::BI__builtin_ia32_alignd512: |
13813 | case X86::BI__builtin_ia32_alignq128: |
13814 | case X86::BI__builtin_ia32_alignq256: |
13815 | case X86::BI__builtin_ia32_alignq512: { |
13816 | unsigned NumElts = |
13817 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13818 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
13819 | |
13820 | // Mask the shift amount to width of a vector. |
13821 | ShiftVal &= NumElts - 1; |
13822 | |
13823 | int Indices[16]; |
13824 | for (unsigned i = 0; i != NumElts; ++i) |
13825 | Indices[i] = i + ShiftVal; |
13826 | |
13827 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
13828 | makeArrayRef(Indices, NumElts), |
13829 | "valign"); |
13830 | } |
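      | // Illustrative walkthrough: alignd128 masks ShiftVal to 0..3; with
      | // ShiftVal = 1 the shuffle over (Ops[1], Ops[0]) uses Indices =
      | // {1,2,3,4}, giving {b1,b2,b3,a0}, the concatenation a:b shifted right
      | // by one element.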
13831 | case X86::BI__builtin_ia32_shuf_f32x4_256: |
13832 | case X86::BI__builtin_ia32_shuf_f64x2_256: |
13833 | case X86::BI__builtin_ia32_shuf_i32x4_256: |
13834 | case X86::BI__builtin_ia32_shuf_i64x2_256: |
13835 | case X86::BI__builtin_ia32_shuf_f32x4: |
13836 | case X86::BI__builtin_ia32_shuf_f64x2: |
13837 | case X86::BI__builtin_ia32_shuf_i32x4: |
13838 | case X86::BI__builtin_ia32_shuf_i64x2: { |
13839 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13840 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13841 | unsigned NumElts = Ty->getNumElements(); |
13842 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; |
13843 | unsigned NumLaneElts = NumElts / NumLanes; |
13844 | |
13845 | int Indices[16]; |
13846 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13847 | unsigned Index = (Imm % NumLanes) * NumLaneElts; |
13848 | Imm /= NumLanes; // Discard the bits we just used. |
13849 | if (l >= (NumElts / 2)) |
13850 | Index += NumElts; // Switch to other source. |
13851 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13852 | Indices[l + i] = Index + i; |
13853 | } |
13854 | } |
13855 | |
13856 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13857 | makeArrayRef(Indices, NumElts), |
13858 | "shuf"); |
13859 | } |
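      | // Illustrative walkthrough: the 512-bit shuf_f32x4 with Imm = 0xE4
      | // (fields 0,1,2,3) keeps lanes 0-1 from Ops[0]; the upper half switches
      | // sources, so lanes 2-3 come from Ops[1]: Indices = {0..7,24..31}.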
13860 | |
13861 | case X86::BI__builtin_ia32_vperm2f128_pd256: |
13862 | case X86::BI__builtin_ia32_vperm2f128_ps256: |
13863 | case X86::BI__builtin_ia32_vperm2f128_si256: |
13864 | case X86::BI__builtin_ia32_permti256: { |
13865 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13866 | unsigned NumElts = |
13867 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13868 | |
13869 | // This takes a very simple approach since there are two lanes and a |
13870 | // shuffle can have 2 inputs. So we reserve the first input for the first |
13871 | // lane and the second input for the second lane. This may result in |
13872 | // duplicate sources, but this can be dealt with in the backend. |
13873 | |
13874 | Value *OutOps[2]; |
13875 | int Indices[8]; |
13876 | for (unsigned l = 0; l != 2; ++l) { |
13877 | // Determine the source for this lane. |
13878 | if (Imm & (1 << ((l * 4) + 3))) |
13879 | OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType()); |
13880 | else if (Imm & (1 << ((l * 4) + 1))) |
13881 | OutOps[l] = Ops[1]; |
13882 | else |
13883 | OutOps[l] = Ops[0]; |
13884 | |
13885 | for (unsigned i = 0; i != NumElts/2; ++i) { |
13886 | // Start with ith element of the source for this lane. |
13887 | unsigned Idx = (l * NumElts) + i; |
13888 | // If bit 0 of the immediate half is set, switch to the high half of |
13889 | // the source. |
13890 | if (Imm & (1 << (l * 4))) |
13891 | Idx += NumElts/2; |
13892 | Indices[(l * (NumElts/2)) + i] = Idx; |
13893 | } |
13894 | } |
13895 | |
13896 | return Builder.CreateShuffleVector(OutOps[0], OutOps[1], |
13897 | makeArrayRef(Indices, NumElts), |
13898 | "vperm"); |
13899 | } |
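      | // Illustrative walkthrough: vperm2f128 with Imm = 0x20 selects Ops[0]
      | // for the low lane (control nibble 0x0) and Ops[1] for the high lane
      | // (control nibble 0x2), neither zeroed nor high-half, so for
      | // <8 x float> the final shuffle is over (Ops[0], Ops[1]) with
      | // Indices = {0,1,2,3,8,9,10,11}.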
13900 | |
13901 | case X86::BI__builtin_ia32_pslldqi128_byteshift: |
13902 | case X86::BI__builtin_ia32_pslldqi256_byteshift: |
13903 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { |
13904 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13905 | auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13906 | // Builtin type is vXi64 so multiply by 8 to get bytes. |
13907 | unsigned NumElts = ResultType->getNumElements() * 8; |
13908 | |
13909 | // If pslldq is shifting the vector more than 15 bytes, emit zero. |
13910 | if (ShiftVal >= 16) |
13911 | return llvm::Constant::getNullValue(ResultType); |
13912 | |
13913 | int Indices[64]; |
13914 | // 256/512-bit pslldq operates on 128-bit lanes, so we need to handle that.
13915 | for (unsigned l = 0; l != NumElts; l += 16) { |
13916 | for (unsigned i = 0; i != 16; ++i) { |
13917 | unsigned Idx = NumElts + i - ShiftVal; |
13918 | if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. |
13919 | Indices[l + i] = Idx + l; |
13920 | } |
13921 | } |
13922 | |
13923 | auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); |
13924 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
13925 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
13926 | Value *SV = Builder.CreateShuffleVector(Zero, Cast, |
13927 | makeArrayRef(Indices, NumElts), |
13928 | "pslldq"); |
13929 | return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); |
13930 | } |
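      | // Illustrative walkthrough: pslldqi128 with ShiftVal = 4 computes
      | // Idx = 16 + i - 4; indices 12..15 select zero bytes from the first
      | // shuffle operand and 16..27 select bytes 0..11 of the source, i.e.
      | // the vector shifted left by four bytes with zero fill.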
13931 | case X86::BI__builtin_ia32_psrldqi128_byteshift: |
13932 | case X86::BI__builtin_ia32_psrldqi256_byteshift: |
13933 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { |
13934 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13935 | auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13936 | // Builtin type is vXi64 so multiply by 8 to get bytes. |
13937 | unsigned NumElts = ResultType->getNumElements() * 8; |
13938 | |
13939 | // If psrldq is shifting the vector more than 15 bytes, emit zero. |
13940 | if (ShiftVal >= 16) |
13941 | return llvm::Constant::getNullValue(ResultType); |
13942 | |
13943 | int Indices[64]; |
13944 | // 256/512-bit psrldq operates on 128-bit lanes, so we need to handle that.
13945 | for (unsigned l = 0; l != NumElts; l += 16) { |
13946 | for (unsigned i = 0; i != 16; ++i) { |
13947 | unsigned Idx = i + ShiftVal; |
13948 | if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. |
13949 | Indices[l + i] = Idx + l; |
13950 | } |
13951 | } |
13952 | |
13953 | auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); |
13954 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
13955 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
13956 | Value *SV = Builder.CreateShuffleVector(Cast, Zero, |
13957 | makeArrayRef(Indices, NumElts), |
13958 | "psrldq"); |
13959 | return Builder.CreateBitCast(SV, ResultType, "cast"); |
13960 | } |
13961 | case X86::BI__builtin_ia32_kshiftliqi: |
13962 | case X86::BI__builtin_ia32_kshiftlihi: |
13963 | case X86::BI__builtin_ia32_kshiftlisi: |
13964 | case X86::BI__builtin_ia32_kshiftlidi: { |
13965 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13966 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13967 | |
13968 | if (ShiftVal >= NumElts) |
13969 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
13970 | |
13971 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
13972 | |
13973 | int Indices[64]; |
13974 | for (unsigned i = 0; i != NumElts; ++i) |
13975 | Indices[i] = NumElts + i - ShiftVal; |
13976 | |
13977 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
13978 | Value *SV = Builder.CreateShuffleVector(Zero, In, |
13979 | makeArrayRef(Indices, NumElts), |
13980 | "kshiftl"); |
13981 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
13982 | } |
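      | // Illustrative walkthrough: kshiftliqi with ShiftVal = 2 builds
      | // Indices = {6..13} over (Zero, In); entries 6-7 read zero bits and
      | // 8..13 read In[0..5], which is the 8-bit mask shifted left by two.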
13983 | case X86::BI__builtin_ia32_kshiftriqi: |
13984 | case X86::BI__builtin_ia32_kshiftrihi: |
13985 | case X86::BI__builtin_ia32_kshiftrisi: |
13986 | case X86::BI__builtin_ia32_kshiftridi: { |
13987 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13988 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13989 | |
13990 | if (ShiftVal >= NumElts) |
13991 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
13992 | |
13993 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
13994 | |
13995 | int Indices[64]; |
13996 | for (unsigned i = 0; i != NumElts; ++i) |
13997 | Indices[i] = i + ShiftVal; |
13998 | |
13999 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
14000 | Value *SV = Builder.CreateShuffleVector(In, Zero, |
14001 | makeArrayRef(Indices, NumElts), |
14002 | "kshiftr"); |
14003 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
14004 | } |
14005 | case X86::BI__builtin_ia32_movnti: |
14006 | case X86::BI__builtin_ia32_movnti64: |
14007 | case X86::BI__builtin_ia32_movntsd: |
14008 | case X86::BI__builtin_ia32_movntss: { |
14009 | llvm::MDNode *Node = llvm::MDNode::get( |
14010 | getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
14011 | |
14012 | Value *Ptr = Ops[0]; |
14013 | Value *Src = Ops[1]; |
14014 | |
14015 | // Extract the 0th element of the source vector.
14016 | if (BuiltinID == X86::BI__builtin_ia32_movntsd || |
14017 | BuiltinID == X86::BI__builtin_ia32_movntss) |
14018 | Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract"); |
14019 | |
14020 | // Convert the type of the pointer to a pointer to the stored type. |
14021 | Value *BC = Builder.CreateBitCast( |
14022 | Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast"); |
14023 | |
14024 | // Unaligned nontemporal store of the scalar value. |
14025 | StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC); |
14026 | SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); |
14027 | SI->setAlignment(llvm::Align(1)); |
14028 | return SI; |
14029 | } |
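      | // Note: the store is emitted unaligned (Align(1)) and tagged with
      | // !nontemporal metadata (the i32 1 node above), which lets the backend
      | // select a streaming, cache-bypassing store for it.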
14030 | // Rotate is a special case of funnel shift: the first two args are the same.
14031 | case X86::BI__builtin_ia32_vprotb: |
14032 | case X86::BI__builtin_ia32_vprotw: |
14033 | case X86::BI__builtin_ia32_vprotd: |
14034 | case X86::BI__builtin_ia32_vprotq: |
14035 | case X86::BI__builtin_ia32_vprotbi: |
14036 | case X86::BI__builtin_ia32_vprotwi: |
14037 | case X86::BI__builtin_ia32_vprotdi: |
14038 | case X86::BI__builtin_ia32_vprotqi: |
14039 | case X86::BI__builtin_ia32_prold128: |
14040 | case X86::BI__builtin_ia32_prold256: |
14041 | case X86::BI__builtin_ia32_prold512: |
14042 | case X86::BI__builtin_ia32_prolq128: |
14043 | case X86::BI__builtin_ia32_prolq256: |
14044 | case X86::BI__builtin_ia32_prolq512: |
14045 | case X86::BI__builtin_ia32_prolvd128: |
14046 | case X86::BI__builtin_ia32_prolvd256: |
14047 | case X86::BI__builtin_ia32_prolvd512: |
14048 | case X86::BI__builtin_ia32_prolvq128: |
14049 | case X86::BI__builtin_ia32_prolvq256: |
14050 | case X86::BI__builtin_ia32_prolvq512: |
14051 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false); |
14052 | case X86::BI__builtin_ia32_prord128: |
14053 | case X86::BI__builtin_ia32_prord256: |
14054 | case X86::BI__builtin_ia32_prord512: |
14055 | case X86::BI__builtin_ia32_prorq128: |
14056 | case X86::BI__builtin_ia32_prorq256: |
14057 | case X86::BI__builtin_ia32_prorq512: |
14058 | case X86::BI__builtin_ia32_prorvd128: |
14059 | case X86::BI__builtin_ia32_prorvd256: |
14060 | case X86::BI__builtin_ia32_prorvd512: |
14061 | case X86::BI__builtin_ia32_prorvq128: |
14062 | case X86::BI__builtin_ia32_prorvq256: |
14063 | case X86::BI__builtin_ia32_prorvq512: |
14064 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true); |
14065 | case X86::BI__builtin_ia32_selectb_128: |
14066 | case X86::BI__builtin_ia32_selectb_256: |
14067 | case X86::BI__builtin_ia32_selectb_512: |
14068 | case X86::BI__builtin_ia32_selectw_128: |
14069 | case X86::BI__builtin_ia32_selectw_256: |
14070 | case X86::BI__builtin_ia32_selectw_512: |
14071 | case X86::BI__builtin_ia32_selectd_128: |
14072 | case X86::BI__builtin_ia32_selectd_256: |
14073 | case X86::BI__builtin_ia32_selectd_512: |
14074 | case X86::BI__builtin_ia32_selectq_128: |
14075 | case X86::BI__builtin_ia32_selectq_256: |
14076 | case X86::BI__builtin_ia32_selectq_512: |
14077 | case X86::BI__builtin_ia32_selectph_128: |
14078 | case X86::BI__builtin_ia32_selectph_256: |
14079 | case X86::BI__builtin_ia32_selectph_512: |
14080 | case X86::BI__builtin_ia32_selectps_128: |
14081 | case X86::BI__builtin_ia32_selectps_256: |
14082 | case X86::BI__builtin_ia32_selectps_512: |
14083 | case X86::BI__builtin_ia32_selectpd_128: |
14084 | case X86::BI__builtin_ia32_selectpd_256: |
14085 | case X86::BI__builtin_ia32_selectpd_512: |
14086 | return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]); |
14087 | case X86::BI__builtin_ia32_selectsh_128: |
14088 | case X86::BI__builtin_ia32_selectss_128: |
14089 | case X86::BI__builtin_ia32_selectsd_128: { |
14090 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
14091 | Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
14092 | A = EmitX86ScalarSelect(*this, Ops[0], A, B); |
14093 | return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0); |
14094 | } |
14095 | case X86::BI__builtin_ia32_cmpb128_mask: |
14096 | case X86::BI__builtin_ia32_cmpb256_mask: |
14097 | case X86::BI__builtin_ia32_cmpb512_mask: |
14098 | case X86::BI__builtin_ia32_cmpw128_mask: |
14099 | case X86::BI__builtin_ia32_cmpw256_mask: |
14100 | case X86::BI__builtin_ia32_cmpw512_mask: |
14101 | case X86::BI__builtin_ia32_cmpd128_mask: |
14102 | case X86::BI__builtin_ia32_cmpd256_mask: |
14103 | case X86::BI__builtin_ia32_cmpd512_mask: |
14104 | case X86::BI__builtin_ia32_cmpq128_mask: |
14105 | case X86::BI__builtin_ia32_cmpq256_mask: |
14106 | case X86::BI__builtin_ia32_cmpq512_mask: { |
14107 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
14108 | return EmitX86MaskedCompare(*this, CC, true, Ops); |
14109 | } |
14110 | case X86::BI__builtin_ia32_ucmpb128_mask: |
14111 | case X86::BI__builtin_ia32_ucmpb256_mask: |
14112 | case X86::BI__builtin_ia32_ucmpb512_mask: |
14113 | case X86::BI__builtin_ia32_ucmpw128_mask: |
14114 | case X86::BI__builtin_ia32_ucmpw256_mask: |
14115 | case X86::BI__builtin_ia32_ucmpw512_mask: |
14116 | case X86::BI__builtin_ia32_ucmpd128_mask: |
14117 | case X86::BI__builtin_ia32_ucmpd256_mask: |
14118 | case X86::BI__builtin_ia32_ucmpd512_mask: |
14119 | case X86::BI__builtin_ia32_ucmpq128_mask: |
14120 | case X86::BI__builtin_ia32_ucmpq256_mask: |
14121 | case X86::BI__builtin_ia32_ucmpq512_mask: { |
14122 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
14123 | return EmitX86MaskedCompare(*this, CC, false, Ops); |
14124 | } |
14125 | case X86::BI__builtin_ia32_vpcomb: |
14126 | case X86::BI__builtin_ia32_vpcomw: |
14127 | case X86::BI__builtin_ia32_vpcomd: |
14128 | case X86::BI__builtin_ia32_vpcomq: |
14129 | return EmitX86vpcom(*this, Ops, true); |
14130 | case X86::BI__builtin_ia32_vpcomub: |
14131 | case X86::BI__builtin_ia32_vpcomuw: |
14132 | case X86::BI__builtin_ia32_vpcomud: |
14133 | case X86::BI__builtin_ia32_vpcomuq: |
14134 | return EmitX86vpcom(*this, Ops, false); |
14135 | |
14136 | case X86::BI__builtin_ia32_kortestcqi: |
14137 | case X86::BI__builtin_ia32_kortestchi: |
14138 | case X86::BI__builtin_ia32_kortestcsi: |
14139 | case X86::BI__builtin_ia32_kortestcdi: { |
14140 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
14141 | Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType()); |
14142 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
14143 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
14144 | } |
14145 | case X86::BI__builtin_ia32_kortestzqi: |
14146 | case X86::BI__builtin_ia32_kortestzhi: |
14147 | case X86::BI__builtin_ia32_kortestzsi: |
14148 | case X86::BI__builtin_ia32_kortestzdi: { |
14149 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
14150 | Value *C = llvm::Constant::getNullValue(Ops[0]->getType()); |
14151 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
14152 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
14153 | } |
14154 | |
14155 | case X86::BI__builtin_ia32_ktestcqi: |
14156 | case X86::BI__builtin_ia32_ktestzqi: |
14157 | case X86::BI__builtin_ia32_ktestchi: |
14158 | case X86::BI__builtin_ia32_ktestzhi: |
14159 | case X86::BI__builtin_ia32_ktestcsi: |
14160 | case X86::BI__builtin_ia32_ktestzsi: |
14161 | case X86::BI__builtin_ia32_ktestcdi: |
14162 | case X86::BI__builtin_ia32_ktestzdi: { |
14163 | Intrinsic::ID IID; |
14164 | switch (BuiltinID) { |
14165 | default: llvm_unreachable("Unsupported intrinsic!");
14166 | case X86::BI__builtin_ia32_ktestcqi: |
14167 | IID = Intrinsic::x86_avx512_ktestc_b; |
14168 | break; |
14169 | case X86::BI__builtin_ia32_ktestzqi: |
14170 | IID = Intrinsic::x86_avx512_ktestz_b; |
14171 | break; |
14172 | case X86::BI__builtin_ia32_ktestchi: |
14173 | IID = Intrinsic::x86_avx512_ktestc_w; |
14174 | break; |
14175 | case X86::BI__builtin_ia32_ktestzhi: |
14176 | IID = Intrinsic::x86_avx512_ktestz_w; |
14177 | break; |
14178 | case X86::BI__builtin_ia32_ktestcsi: |
14179 | IID = Intrinsic::x86_avx512_ktestc_d; |
14180 | break; |
14181 | case X86::BI__builtin_ia32_ktestzsi: |
14182 | IID = Intrinsic::x86_avx512_ktestz_d; |
14183 | break; |
14184 | case X86::BI__builtin_ia32_ktestcdi: |
14185 | IID = Intrinsic::x86_avx512_ktestc_q; |
14186 | break; |
14187 | case X86::BI__builtin_ia32_ktestzdi: |
14188 | IID = Intrinsic::x86_avx512_ktestz_q; |
14189 | break; |
14190 | } |
14191 | |
14192 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
14193 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
14194 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
14195 | Function *Intr = CGM.getIntrinsic(IID); |
14196 | return Builder.CreateCall(Intr, {LHS, RHS}); |
14197 | } |
14198 | |
14199 | case X86::BI__builtin_ia32_kaddqi: |
14200 | case X86::BI__builtin_ia32_kaddhi: |
14201 | case X86::BI__builtin_ia32_kaddsi: |
14202 | case X86::BI__builtin_ia32_kadddi: { |
14203 | Intrinsic::ID IID; |
14204 | switch (BuiltinID) { |
14205 | default: llvm_unreachable("Unsupported intrinsic!");
14206 | case X86::BI__builtin_ia32_kaddqi: |
14207 | IID = Intrinsic::x86_avx512_kadd_b; |
14208 | break; |
14209 | case X86::BI__builtin_ia32_kaddhi: |
14210 | IID = Intrinsic::x86_avx512_kadd_w; |
14211 | break; |
14212 | case X86::BI__builtin_ia32_kaddsi: |
14213 | IID = Intrinsic::x86_avx512_kadd_d; |
14214 | break; |
14215 | case X86::BI__builtin_ia32_kadddi: |
14216 | IID = Intrinsic::x86_avx512_kadd_q; |
14217 | break; |
14218 | } |
14219 | |
14220 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
14221 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
14222 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
14223 | Function *Intr = CGM.getIntrinsic(IID); |
14224 | Value *Res = Builder.CreateCall(Intr, {LHS, RHS}); |
14225 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
14226 | } |
14227 | case X86::BI__builtin_ia32_kandqi: |
14228 | case X86::BI__builtin_ia32_kandhi: |
14229 | case X86::BI__builtin_ia32_kandsi: |
14230 | case X86::BI__builtin_ia32_kanddi: |
14231 | return EmitX86MaskLogic(*this, Instruction::And, Ops); |
14232 | case X86::BI__builtin_ia32_kandnqi: |
14233 | case X86::BI__builtin_ia32_kandnhi: |
14234 | case X86::BI__builtin_ia32_kandnsi: |
14235 | case X86::BI__builtin_ia32_kandndi: |
14236 | return EmitX86MaskLogic(*this, Instruction::And, Ops, true); |
14237 | case X86::BI__builtin_ia32_korqi: |
14238 | case X86::BI__builtin_ia32_korhi: |
14239 | case X86::BI__builtin_ia32_korsi: |
14240 | case X86::BI__builtin_ia32_kordi: |
14241 | return EmitX86MaskLogic(*this, Instruction::Or, Ops); |
14242 | case X86::BI__builtin_ia32_kxnorqi: |
14243 | case X86::BI__builtin_ia32_kxnorhi: |
14244 | case X86::BI__builtin_ia32_kxnorsi: |
14245 | case X86::BI__builtin_ia32_kxnordi: |
14246 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true); |
14247 | case X86::BI__builtin_ia32_kxorqi: |
14248 | case X86::BI__builtin_ia32_kxorhi: |
14249 | case X86::BI__builtin_ia32_kxorsi: |
14250 | case X86::BI__builtin_ia32_kxordi: |
14251 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops); |
14252 | case X86::BI__builtin_ia32_knotqi: |
14253 | case X86::BI__builtin_ia32_knothi: |
14254 | case X86::BI__builtin_ia32_knotsi: |
14255 | case X86::BI__builtin_ia32_knotdi: { |
14256 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
14257 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
14258 | return Builder.CreateBitCast(Builder.CreateNot(Res), |
14259 | Ops[0]->getType()); |
14260 | } |
14261 | case X86::BI__builtin_ia32_kmovb: |
14262 | case X86::BI__builtin_ia32_kmovw: |
14263 | case X86::BI__builtin_ia32_kmovd: |
14264 | case X86::BI__builtin_ia32_kmovq: { |
14265 | // Bitcast to vXi1 type and then back to integer. This gets the mask |
14266 | // register type into the IR, but might be optimized out depending on |
14267 | // what's around it. |
14268 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
14269 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
14270 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
14271 | } |
14272 | |
14273 | case X86::BI__builtin_ia32_kunpckdi: |
14274 | case X86::BI__builtin_ia32_kunpcksi: |
14275 | case X86::BI__builtin_ia32_kunpckhi: { |
14276 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
14277 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
14278 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
14279 | int Indices[64]; |
14280 | for (unsigned i = 0; i != NumElts; ++i) |
14281 | Indices[i] = i; |
14282 | |
14283 | // First extract the low half of each vector. This gives better codegen
14284 | // than doing it in a single shuffle.
14285 | LHS = Builder.CreateShuffleVector(LHS, LHS, |
14286 | makeArrayRef(Indices, NumElts / 2)); |
14287 | RHS = Builder.CreateShuffleVector(RHS, RHS, |
14288 | makeArrayRef(Indices, NumElts / 2)); |
14289 | // Concat the vectors. |
14290 | // NOTE: Operands are swapped to match the intrinsic definition. |
14291 | Value *Res = Builder.CreateShuffleVector(RHS, LHS, |
14292 | makeArrayRef(Indices, NumElts)); |
14293 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
14294 | } |
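      | // Illustrative walkthrough: for kunpckhi (16-bit masks) each operand is
      | // narrowed to its low 8 bits with indices {0..7}; the final shuffle
      | // over (RHS, LHS) with indices {0..15} then places Ops[1]'s low half in
      | // the low bits of the result and Ops[0]'s low half in the high bits.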
14295 | |
14296 | case X86::BI__builtin_ia32_vplzcntd_128: |
14297 | case X86::BI__builtin_ia32_vplzcntd_256: |
14298 | case X86::BI__builtin_ia32_vplzcntd_512: |
14299 | case X86::BI__builtin_ia32_vplzcntq_128: |
14300 | case X86::BI__builtin_ia32_vplzcntq_256: |
14301 | case X86::BI__builtin_ia32_vplzcntq_512: { |
14302 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
14303 | return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}); |
14304 | } |
14305 | case X86::BI__builtin_ia32_sqrtss: |
14306 | case X86::BI__builtin_ia32_sqrtsd: { |
14307 | Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
14308 | Function *F; |
14309 | if (Builder.getIsFPConstrained()) { |
14310 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14311 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14312 | A->getType()); |
14313 | A = Builder.CreateConstrainedFPCall(F, {A}); |
14314 | } else { |
14315 | F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
14316 | A = Builder.CreateCall(F, {A}); |
14317 | } |
14318 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
14319 | } |
14320 | case X86::BI__builtin_ia32_sqrtsh_round_mask: |
14321 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
14322 | case X86::BI__builtin_ia32_sqrtss_round_mask: { |
14323 | unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
14324 | // Lower to a plain sqrt only if the rounding mode is 4 (AKA CUR_DIRECTION);
14325 | // otherwise keep the target-specific intrinsic.
14326 | if (CC != 4) { |
14327 | Intrinsic::ID IID; |
14328 | |
14329 | switch (BuiltinID) { |
14330 | default: |
14331 | llvm_unreachable("Unsupported intrinsic!");
14332 | case X86::BI__builtin_ia32_sqrtsh_round_mask: |
14333 | IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh; |
14334 | break; |
14335 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
14336 | IID = Intrinsic::x86_avx512_mask_sqrt_sd; |
14337 | break; |
14338 | case X86::BI__builtin_ia32_sqrtss_round_mask: |
14339 | IID = Intrinsic::x86_avx512_mask_sqrt_ss; |
14340 | break; |
14341 | } |
14342 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14343 | } |
14344 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
14345 | Function *F; |
14346 | if (Builder.getIsFPConstrained()) { |
14347 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14348 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14349 | A->getType()); |
14350 | A = Builder.CreateConstrainedFPCall(F, A); |
14351 | } else { |
14352 | F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
14353 | A = Builder.CreateCall(F, A); |
14354 | } |
14355 | Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
14356 | A = EmitX86ScalarSelect(*this, Ops[3], A, Src); |
14357 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
14358 | } |
14359 | case X86::BI__builtin_ia32_sqrtpd256: |
14360 | case X86::BI__builtin_ia32_sqrtpd: |
14361 | case X86::BI__builtin_ia32_sqrtps256: |
14362 | case X86::BI__builtin_ia32_sqrtps: |
14363 | case X86::BI__builtin_ia32_sqrtph256: |
14364 | case X86::BI__builtin_ia32_sqrtph: |
14365 | case X86::BI__builtin_ia32_sqrtph512: |
14366 | case X86::BI__builtin_ia32_sqrtps512: |
14367 | case X86::BI__builtin_ia32_sqrtpd512: { |
14368 | if (Ops.size() == 2) { |
14369 | unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
14370 | // Lower to a plain sqrt only if the rounding mode is 4 (AKA CUR_DIRECTION);
14371 | // otherwise keep the target-specific intrinsic.
14372 | if (CC != 4) { |
14373 | Intrinsic::ID IID; |
14374 | |
14375 | switch (BuiltinID) { |
14376 | default: |
14377 | llvm_unreachable("Unsupported intrinsic!");
14378 | case X86::BI__builtin_ia32_sqrtph512: |
14379 | IID = Intrinsic::x86_avx512fp16_sqrt_ph_512; |
14380 | break; |
14381 | case X86::BI__builtin_ia32_sqrtps512: |
14382 | IID = Intrinsic::x86_avx512_sqrt_ps_512; |
14383 | break; |
14384 | case X86::BI__builtin_ia32_sqrtpd512: |
14385 | IID = Intrinsic::x86_avx512_sqrt_pd_512; |
14386 | break; |
14387 | } |
14388 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14389 | } |
14390 | } |
14391 | if (Builder.getIsFPConstrained()) { |
14392 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14393 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14394 | Ops[0]->getType()); |
14395 | return Builder.CreateConstrainedFPCall(F, Ops[0]); |
14396 | } else { |
14397 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); |
14398 | return Builder.CreateCall(F, Ops[0]); |
14399 | } |
14400 | } |
14401 | |
14402 | case X86::BI__builtin_ia32_pmuludq128: |
14403 | case X86::BI__builtin_ia32_pmuludq256: |
14404 | case X86::BI__builtin_ia32_pmuludq512: |
14405 | return EmitX86Muldq(*this, /*IsSigned*/false, Ops); |
14406 | |
14407 | case X86::BI__builtin_ia32_pmuldq128: |
14408 | case X86::BI__builtin_ia32_pmuldq256: |
14409 | case X86::BI__builtin_ia32_pmuldq512: |
14410 | return EmitX86Muldq(*this, /*IsSigned*/true, Ops); |
14411 | |
14412 | case X86::BI__builtin_ia32_pternlogd512_mask: |
14413 | case X86::BI__builtin_ia32_pternlogq512_mask: |
14414 | case X86::BI__builtin_ia32_pternlogd128_mask: |
14415 | case X86::BI__builtin_ia32_pternlogd256_mask: |
14416 | case X86::BI__builtin_ia32_pternlogq128_mask: |
14417 | case X86::BI__builtin_ia32_pternlogq256_mask: |
14418 | return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops); |
14419 | |
14420 | case X86::BI__builtin_ia32_pternlogd512_maskz: |
14421 | case X86::BI__builtin_ia32_pternlogq512_maskz: |
14422 | case X86::BI__builtin_ia32_pternlogd128_maskz: |
14423 | case X86::BI__builtin_ia32_pternlogd256_maskz: |
14424 | case X86::BI__builtin_ia32_pternlogq128_maskz: |
14425 | case X86::BI__builtin_ia32_pternlogq256_maskz: |
14426 | return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops); |
14427 | |
14428 | case X86::BI__builtin_ia32_vpshldd128: |
14429 | case X86::BI__builtin_ia32_vpshldd256: |
14430 | case X86::BI__builtin_ia32_vpshldd512: |
14431 | case X86::BI__builtin_ia32_vpshldq128: |
14432 | case X86::BI__builtin_ia32_vpshldq256: |
14433 | case X86::BI__builtin_ia32_vpshldq512: |
14434 | case X86::BI__builtin_ia32_vpshldw128: |
14435 | case X86::BI__builtin_ia32_vpshldw256: |
14436 | case X86::BI__builtin_ia32_vpshldw512: |
14437 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
14438 | |
14439 | case X86::BI__builtin_ia32_vpshrdd128: |
14440 | case X86::BI__builtin_ia32_vpshrdd256: |
14441 | case X86::BI__builtin_ia32_vpshrdd512: |
14442 | case X86::BI__builtin_ia32_vpshrdq128: |
14443 | case X86::BI__builtin_ia32_vpshrdq256: |
14444 | case X86::BI__builtin_ia32_vpshrdq512: |
14445 | case X86::BI__builtin_ia32_vpshrdw128: |
14446 | case X86::BI__builtin_ia32_vpshrdw256: |
14447 | case X86::BI__builtin_ia32_vpshrdw512: |
14448 | // Ops 0 and 1 are swapped. |
14449 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
14450 | |
14451 | case X86::BI__builtin_ia32_vpshldvd128: |
14452 | case X86::BI__builtin_ia32_vpshldvd256: |
14453 | case X86::BI__builtin_ia32_vpshldvd512: |
14454 | case X86::BI__builtin_ia32_vpshldvq128: |
14455 | case X86::BI__builtin_ia32_vpshldvq256: |
14456 | case X86::BI__builtin_ia32_vpshldvq512: |
14457 | case X86::BI__builtin_ia32_vpshldvw128: |
14458 | case X86::BI__builtin_ia32_vpshldvw256: |
14459 | case X86::BI__builtin_ia32_vpshldvw512: |
14460 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
14461 | |
14462 | case X86::BI__builtin_ia32_vpshrdvd128: |
14463 | case X86::BI__builtin_ia32_vpshrdvd256: |
14464 | case X86::BI__builtin_ia32_vpshrdvd512: |
14465 | case X86::BI__builtin_ia32_vpshrdvq128: |
14466 | case X86::BI__builtin_ia32_vpshrdvq256: |
14467 | case X86::BI__builtin_ia32_vpshrdvq512: |
14468 | case X86::BI__builtin_ia32_vpshrdvw128: |
14469 | case X86::BI__builtin_ia32_vpshrdvw256: |
14470 | case X86::BI__builtin_ia32_vpshrdvw512: |
14471 | // Ops 0 and 1 are swapped. |
14472 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
14473 | |
14474 | // Reductions |
14475 | case X86::BI__builtin_ia32_reduce_add_d512: |
14476 | case X86::BI__builtin_ia32_reduce_add_q512: { |
14477 | Function *F = |
14478 | CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType()); |
14479 | return Builder.CreateCall(F, {Ops[0]}); |
14480 | } |
14481 | case X86::BI__builtin_ia32_reduce_fadd_pd512: |
14482 | case X86::BI__builtin_ia32_reduce_fadd_ps512: |
14483 | case X86::BI__builtin_ia32_reduce_fadd_ph512: |
14484 | case X86::BI__builtin_ia32_reduce_fadd_ph256: |
14485 | case X86::BI__builtin_ia32_reduce_fadd_ph128: { |
14486 | Function *F = |
14487 | CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType()); |
14488 | Builder.getFastMathFlags().setAllowReassoc(); |
14489 | return Builder.CreateCall(F, {Ops[0], Ops[1]}); |
14490 | } |
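      | // Note: vector_reduce_fadd takes a scalar start value (Ops[0]) plus the
      | // vector (Ops[1]); the reassoc flag set here is what permits the
      | // unordered (tree-wise) reduction these builtins imply.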
14491 | case X86::BI__builtin_ia32_reduce_fmul_pd512: |
14492 | case X86::BI__builtin_ia32_reduce_fmul_ps512: |
14493 | case X86::BI__builtin_ia32_reduce_fmul_ph512: |
14494 | case X86::BI__builtin_ia32_reduce_fmul_ph256: |
14495 | case X86::BI__builtin_ia32_reduce_fmul_ph128: { |
14496 | Function *F = |
14497 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType()); |
14498 | Builder.getFastMathFlags().setAllowReassoc(); |
14499 | return Builder.CreateCall(F, {Ops[0], Ops[1]}); |
14500 | } |
14501 | case X86::BI__builtin_ia32_reduce_fmax_pd512: |
14502 | case X86::BI__builtin_ia32_reduce_fmax_ps512: |
14503 | case X86::BI__builtin_ia32_reduce_fmax_ph512: |
14504 | case X86::BI__builtin_ia32_reduce_fmax_ph256: |
14505 | case X86::BI__builtin_ia32_reduce_fmax_ph128: { |
14506 | Function *F = |
14507 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType()); |
14508 | Builder.getFastMathFlags().setNoNaNs(); |
14509 | return Builder.CreateCall(F, {Ops[0]}); |
14510 | } |
14511 | case X86::BI__builtin_ia32_reduce_fmin_pd512: |
14512 | case X86::BI__builtin_ia32_reduce_fmin_ps512: |
14513 | case X86::BI__builtin_ia32_reduce_fmin_ph512: |
14514 | case X86::BI__builtin_ia32_reduce_fmin_ph256: |
14515 | case X86::BI__builtin_ia32_reduce_fmin_ph128: { |
14516 | Function *F = |
14517 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType()); |
14518 | Builder.getFastMathFlags().setNoNaNs(); |
14519 | return Builder.CreateCall(F, {Ops[0]}); |
14520 | } |
14521 | case X86::BI__builtin_ia32_reduce_mul_d512: |
14522 | case X86::BI__builtin_ia32_reduce_mul_q512: { |
14523 | Function *F = |
14524 | CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType()); |
14525 | return Builder.CreateCall(F, {Ops[0]}); |
14526 | } |
14527 | |
14528 | // 3DNow! |
14529 | case X86::BI__builtin_ia32_pswapdsf: |
14530 | case X86::BI__builtin_ia32_pswapdsi: { |
14531 | llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext()); |
14532 | Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast"); |
14533 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); |
14534 | return Builder.CreateCall(F, Ops, "pswapd"); |
14535 | } |
14536 | case X86::BI__builtin_ia32_rdrand16_step: |
14537 | case X86::BI__builtin_ia32_rdrand32_step: |
14538 | case X86::BI__builtin_ia32_rdrand64_step: |
14539 | case X86::BI__builtin_ia32_rdseed16_step: |
14540 | case X86::BI__builtin_ia32_rdseed32_step: |
14541 | case X86::BI__builtin_ia32_rdseed64_step: { |
14542 | Intrinsic::ID ID; |
14543 | switch (BuiltinID) { |
14544 | default: llvm_unreachable("Unsupported intrinsic!");
14545 | case X86::BI__builtin_ia32_rdrand16_step: |
14546 | ID = Intrinsic::x86_rdrand_16; |
14547 | break; |
14548 | case X86::BI__builtin_ia32_rdrand32_step: |
14549 | ID = Intrinsic::x86_rdrand_32; |
14550 | break; |
14551 | case X86::BI__builtin_ia32_rdrand64_step: |
14552 | ID = Intrinsic::x86_rdrand_64; |
14553 | break; |
14554 | case X86::BI__builtin_ia32_rdseed16_step: |
14555 | ID = Intrinsic::x86_rdseed_16; |
14556 | break; |
14557 | case X86::BI__builtin_ia32_rdseed32_step: |
14558 | ID = Intrinsic::x86_rdseed_32; |
14559 | break; |
14560 | case X86::BI__builtin_ia32_rdseed64_step: |
14561 | ID = Intrinsic::x86_rdseed_64; |
14562 | break; |
14563 | } |
14564 | |
14565 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID)); |
14566 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0), |
14567 | Ops[0]); |
14568 | return Builder.CreateExtractValue(Call, 1); |
14569 | } |
14570 | case X86::BI__builtin_ia32_addcarryx_u32: |
14571 | case X86::BI__builtin_ia32_addcarryx_u64: |
14572 | case X86::BI__builtin_ia32_subborrow_u32: |
14573 | case X86::BI__builtin_ia32_subborrow_u64: { |
14574 | Intrinsic::ID IID; |
14575 | switch (BuiltinID) { |
14576 | default: llvm_unreachable("Unsupported intrinsic!");
14577 | case X86::BI__builtin_ia32_addcarryx_u32: |
14578 | IID = Intrinsic::x86_addcarry_32; |
14579 | break; |
14580 | case X86::BI__builtin_ia32_addcarryx_u64: |
14581 | IID = Intrinsic::x86_addcarry_64; |
14582 | break; |
14583 | case X86::BI__builtin_ia32_subborrow_u32: |
14584 | IID = Intrinsic::x86_subborrow_32; |
14585 | break; |
14586 | case X86::BI__builtin_ia32_subborrow_u64: |
14587 | IID = Intrinsic::x86_subborrow_64; |
14588 | break; |
14589 | } |
14590 | |
14591 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), |
14592 | { Ops[0], Ops[1], Ops[2] }); |
14593 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
14594 | Ops[3]); |
14595 | return Builder.CreateExtractValue(Call, 0); |
14596 | } |
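      | // Note: the addcarry/subborrow intrinsics return a {carry-out, result}
      | // pair; the arithmetic result (element 1) is stored through the
      | // out-pointer in Ops[3] and the carry/borrow flag (element 0) is the
      | // builtin's return value.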
14597 | |
14598 | case X86::BI__builtin_ia32_fpclassps128_mask: |
14599 | case X86::BI__builtin_ia32_fpclassps256_mask: |
14600 | case X86::BI__builtin_ia32_fpclassps512_mask: |
14601 | case X86::BI__builtin_ia32_fpclassph128_mask: |
14602 | case X86::BI__builtin_ia32_fpclassph256_mask: |
14603 | case X86::BI__builtin_ia32_fpclassph512_mask: |
14604 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
14605 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
14606 | case X86::BI__builtin_ia32_fpclasspd512_mask: { |
14607 | unsigned NumElts = |
14608 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14609 | Value *MaskIn = Ops[2]; |
14610 | Ops.erase(&Ops[2]); |
14611 | |
14612 | Intrinsic::ID ID; |
14613 | switch (BuiltinID) { |
14614 | default: llvm_unreachable("Unsupported intrinsic!");
14615 | case X86::BI__builtin_ia32_fpclassph128_mask: |
14616 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_128; |
14617 | break; |
14618 | case X86::BI__builtin_ia32_fpclassph256_mask: |
14619 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_256; |
14620 | break; |
14621 | case X86::BI__builtin_ia32_fpclassph512_mask: |
14622 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_512; |
14623 | break; |
14624 | case X86::BI__builtin_ia32_fpclassps128_mask: |
14625 | ID = Intrinsic::x86_avx512_fpclass_ps_128; |
14626 | break; |
14627 | case X86::BI__builtin_ia32_fpclassps256_mask: |
14628 | ID = Intrinsic::x86_avx512_fpclass_ps_256; |
14629 | break; |
14630 | case X86::BI__builtin_ia32_fpclassps512_mask: |
14631 | ID = Intrinsic::x86_avx512_fpclass_ps_512; |
14632 | break; |
14633 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
14634 | ID = Intrinsic::x86_avx512_fpclass_pd_128; |
14635 | break; |
14636 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
14637 | ID = Intrinsic::x86_avx512_fpclass_pd_256; |
14638 | break; |
14639 | case X86::BI__builtin_ia32_fpclasspd512_mask: |
14640 | ID = Intrinsic::x86_avx512_fpclass_pd_512; |
14641 | break; |
14642 | } |
14643 | |
14644 | Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14645 | return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn); |
14646 | } |
14647 | |
14648 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
14649 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
14650 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
14651 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
14652 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
14653 | case X86::BI__builtin_ia32_vp2intersect_d_128: { |
14654 | unsigned NumElts = |
14655 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14656 | Intrinsic::ID ID; |
14657 | |
14658 | switch (BuiltinID) { |
14659 | default: llvm_unreachable("Unsupported intrinsic!");
14660 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
14661 | ID = Intrinsic::x86_avx512_vp2intersect_q_512; |
14662 | break; |
14663 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
14664 | ID = Intrinsic::x86_avx512_vp2intersect_q_256; |
14665 | break; |
14666 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
14667 | ID = Intrinsic::x86_avx512_vp2intersect_q_128; |
14668 | break; |
14669 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
14670 | ID = Intrinsic::x86_avx512_vp2intersect_d_512; |
14671 | break; |
14672 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
14673 | ID = Intrinsic::x86_avx512_vp2intersect_d_256; |
14674 | break; |
14675 | case X86::BI__builtin_ia32_vp2intersect_d_128: |
14676 | ID = Intrinsic::x86_avx512_vp2intersect_d_128; |
14677 | break; |
14678 | } |
14679 | |
14680 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]}); |
14681 | Value *Result = Builder.CreateExtractValue(Call, 0); |
14682 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
14683 | Builder.CreateDefaultAlignedStore(Result, Ops[2]); |
14684 | |
14685 | Result = Builder.CreateExtractValue(Call, 1); |
14686 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
14687 | return Builder.CreateDefaultAlignedStore(Result, Ops[3]); |
14688 | } |
14689 | |
14690 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
14691 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
14692 | case X86::BI__builtin_ia32_vpmultishiftqb512: { |
14693 | Intrinsic::ID ID; |
14694 | switch (BuiltinID) { |
14695 | default: llvm_unreachable("Unsupported intrinsic!");
14696 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
14697 | ID = Intrinsic::x86_avx512_pmultishift_qb_128; |
14698 | break; |
14699 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
14700 | ID = Intrinsic::x86_avx512_pmultishift_qb_256; |
14701 | break; |
14702 | case X86::BI__builtin_ia32_vpmultishiftqb512: |
14703 | ID = Intrinsic::x86_avx512_pmultishift_qb_512; |
14704 | break; |
14705 | } |
14706 | |
14707 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14708 | } |
14709 | |
14710 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
14711 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
14712 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { |
14713 | unsigned NumElts = |
14714 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14715 | Value *MaskIn = Ops[2]; |
14716 | Ops.erase(&Ops[2]); |
14717 | |
14718 | Intrinsic::ID ID; |
14719 | switch (BuiltinID) { |
14720 | default: llvm_unreachable("Unsupported intrinsic!");
14721 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
14722 | ID = Intrinsic::x86_avx512_vpshufbitqmb_128; |
14723 | break; |
14724 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
14725 | ID = Intrinsic::x86_avx512_vpshufbitqmb_256; |
14726 | break; |
14727 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: |
14728 | ID = Intrinsic::x86_avx512_vpshufbitqmb_512; |
14729 | break; |
14730 | } |
14731 | |
14732 | Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14733 | return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn); |
14734 | } |
14735 | |
14736 | // packed comparison intrinsics |
14737 | case X86::BI__builtin_ia32_cmpeqps: |
14738 | case X86::BI__builtin_ia32_cmpeqpd: |
14739 | return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false); |
14740 | case X86::BI__builtin_ia32_cmpltps: |
14741 | case X86::BI__builtin_ia32_cmpltpd: |
14742 | return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true); |
14743 | case X86::BI__builtin_ia32_cmpleps: |
14744 | case X86::BI__builtin_ia32_cmplepd: |
14745 | return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true); |
14746 | case X86::BI__builtin_ia32_cmpunordps: |
14747 | case X86::BI__builtin_ia32_cmpunordpd: |
14748 | return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false); |
14749 | case X86::BI__builtin_ia32_cmpneqps: |
14750 | case X86::BI__builtin_ia32_cmpneqpd: |
14751 | return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false); |
14752 | case X86::BI__builtin_ia32_cmpnltps: |
14753 | case X86::BI__builtin_ia32_cmpnltpd: |
14754 | return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true); |
14755 | case X86::BI__builtin_ia32_cmpnleps: |
14756 | case X86::BI__builtin_ia32_cmpnlepd: |
14757 | return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true); |
14758 | case X86::BI__builtin_ia32_cmpordps: |
14759 | case X86::BI__builtin_ia32_cmpordpd: |
14760 | return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false); |
14761 | case X86::BI__builtin_ia32_cmpph128_mask: |
14762 | case X86::BI__builtin_ia32_cmpph256_mask: |
14763 | case X86::BI__builtin_ia32_cmpph512_mask: |
14764 | case X86::BI__builtin_ia32_cmpps128_mask: |
14765 | case X86::BI__builtin_ia32_cmpps256_mask: |
14766 | case X86::BI__builtin_ia32_cmpps512_mask: |
14767 | case X86::BI__builtin_ia32_cmppd128_mask: |
14768 | case X86::BI__builtin_ia32_cmppd256_mask: |
14769 | case X86::BI__builtin_ia32_cmppd512_mask: |
14770 | IsMaskFCmp = true; |
14771 | LLVM_FALLTHROUGH;
14772 | case X86::BI__builtin_ia32_cmpps: |
14773 | case X86::BI__builtin_ia32_cmpps256: |
14774 | case X86::BI__builtin_ia32_cmppd: |
14775 | case X86::BI__builtin_ia32_cmppd256: { |
14776 | // Lower vector comparisons to fcmp instructions, ignoring both the
14777 | // requested signaling behaviour and the requested rounding mode.
14778 | // This is only possible if the fp-model is not strict and FENV_ACCESS
14779 | // is off.
14780 | |
14781 | // The third argument is the comparison condition, an integer in the
14782 | // range [0, 31].
14783 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f; |
14784 | |
14785 | // Lower to an IR fcmp instruction, ignoring the requested signaling
14786 | // behaviour; e.g. both _CMP_GT_OS and _CMP_GT_OQ are translated to
14787 | // FCMP_OGT.
14788 | FCmpInst::Predicate Pred; |
14789 | bool IsSignaling; |
14790 | // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling |
14791 | // behavior is inverted. We'll handle that after the switch. |
14792 | switch (CC & 0xf) { |
14793 | case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break; |
14794 | case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break; |
14795 | case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break; |
14796 | case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break; |
14797 | case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break; |
14798 | case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break; |
14799 | case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break; |
14800 | case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break; |
14801 | case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break; |
14802 | case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break; |
14803 | case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break; |
14804 | case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break; |
14805 | case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break; |
14806 | case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break; |
14807 | case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break; |
14808 | case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break; |
14809 | default: llvm_unreachable("Unhandled CC");
14810 | } |
14811 | |
14812 | // Invert the signalling behavior for 16-31. |
14813 | if (CC & 0x10) |
14814 | IsSignaling = !IsSignaling; |
14815 | |
14816 | // If the predicate is true or false and we're using constrained intrinsics, |
14817 | // we don't have a compare intrinsic we can use. Just use the legacy X86 |
14818 | // specific intrinsic. |
14819 | // If the intrinsic is mask enabled and we're using constrained intrinsics, |
14820 | // use the legacy X86 specific intrinsic. |
14821 | if (Builder.getIsFPConstrained() && |
14822 | (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE || |
14823 | IsMaskFCmp)) { |
14824 | |
14825 | Intrinsic::ID IID; |
14826 | switch (BuiltinID) { |
14827 | default: llvm_unreachable("Unexpected builtin");
14828 | case X86::BI__builtin_ia32_cmpps: |
14829 | IID = Intrinsic::x86_sse_cmp_ps; |
14830 | break; |
14831 | case X86::BI__builtin_ia32_cmpps256: |
14832 | IID = Intrinsic::x86_avx_cmp_ps_256; |
14833 | break; |
14834 | case X86::BI__builtin_ia32_cmppd: |
14835 | IID = Intrinsic::x86_sse2_cmp_pd; |
14836 | break; |
14837 | case X86::BI__builtin_ia32_cmppd256: |
14838 | IID = Intrinsic::x86_avx_cmp_pd_256; |
14839 | break; |
14840 | case X86::BI__builtin_ia32_cmpps512_mask: |
14841 | IID = Intrinsic::x86_avx512_mask_cmp_ps_512; |
14842 | break; |
14843 | case X86::BI__builtin_ia32_cmppd512_mask: |
14844 | IID = Intrinsic::x86_avx512_mask_cmp_pd_512; |
14845 | break; |
14846 | case X86::BI__builtin_ia32_cmpps128_mask: |
14847 | IID = Intrinsic::x86_avx512_mask_cmp_ps_128; |
14848 | break; |
14849 | case X86::BI__builtin_ia32_cmpps256_mask: |
14850 | IID = Intrinsic::x86_avx512_mask_cmp_ps_256; |
14851 | break; |
14852 | case X86::BI__builtin_ia32_cmppd128_mask: |
14853 | IID = Intrinsic::x86_avx512_mask_cmp_pd_128; |
14854 | break; |
14855 | case X86::BI__builtin_ia32_cmppd256_mask: |
14856 | IID = Intrinsic::x86_avx512_mask_cmp_pd_256; |
14857 | break; |
14858 | } |
14859 | |
14860 | Function *Intr = CGM.getIntrinsic(IID); |
14861 | if (IsMaskFCmp) { |
14862 | unsigned NumElts = |
14863 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14864 | Ops[3] = getMaskVecValue(*this, Ops[3], NumElts); |
14865 | Value *Cmp = Builder.CreateCall(Intr, Ops); |
14866 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr); |
14867 | } |
14868 | |
14869 | return Builder.CreateCall(Intr, Ops); |
14870 | } |
14871 | |
14872 | // Builtins without the _mask suffix return a vector of integers of the
14873 | // same width as the input vectors; the _mask variants return a bitmask.
14874 | if (IsMaskFCmp) { |
14875 | // We ignore SAE if strict FP is disabled. We only keep precise |
14876 | // exception behavior under strict FP. |
14877 | // NOTE: If strict FP does ever go through here a CGFPOptionsRAII |
14878 | // object will be required. |
14879 | unsigned NumElts = |
14880 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14881 | Value *Cmp; |
14882 | if (IsSignaling) |
14883 | Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); |
14884 | else |
14885 | Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
14886 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]); |
14887 | } |
14888 | |
14889 | return getVectorFCmpIR(Pred, IsSignaling); |
14890 | } |
14891 | |
14892 | // SSE scalar comparison intrinsics |
14893 | case X86::BI__builtin_ia32_cmpeqss: |
14894 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); |
14895 | case X86::BI__builtin_ia32_cmpltss: |
14896 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); |
14897 | case X86::BI__builtin_ia32_cmpless: |
14898 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); |
14899 | case X86::BI__builtin_ia32_cmpunordss: |
14900 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); |
14901 | case X86::BI__builtin_ia32_cmpneqss: |
14902 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); |
14903 | case X86::BI__builtin_ia32_cmpnltss: |
14904 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); |
14905 | case X86::BI__builtin_ia32_cmpnless: |
14906 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); |
14907 | case X86::BI__builtin_ia32_cmpordss: |
14908 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); |
14909 | case X86::BI__builtin_ia32_cmpeqsd: |
14910 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); |
14911 | case X86::BI__builtin_ia32_cmpltsd: |
14912 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); |
14913 | case X86::BI__builtin_ia32_cmplesd: |
14914 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); |
14915 | case X86::BI__builtin_ia32_cmpunordsd: |
14916 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); |
14917 | case X86::BI__builtin_ia32_cmpneqsd: |
14918 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); |
14919 | case X86::BI__builtin_ia32_cmpnltsd: |
14920 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); |
14921 | case X86::BI__builtin_ia32_cmpnlesd: |
14922 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); |
14923 | case X86::BI__builtin_ia32_cmpordsd: |
14924 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); |
14925 | |
14926 | // f16c half2float intrinsics |
14927 | case X86::BI__builtin_ia32_vcvtph2ps: |
14928 | case X86::BI__builtin_ia32_vcvtph2ps256: |
14929 | case X86::BI__builtin_ia32_vcvtph2ps_mask: |
14930 | case X86::BI__builtin_ia32_vcvtph2ps256_mask: |
14931 | case X86::BI__builtin_ia32_vcvtph2ps512_mask: { |
14932 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14933 | return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType())); |
14934 | } |
14935 | |
14936 | // AVX512 bf16 intrinsics |
14937 | case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { |
14938 | Ops[2] = getMaskVecValue( |
14939 | *this, Ops[2], |
14940 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements()); |
14941 | Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; |
14942 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14943 | } |
14944 | case X86::BI__builtin_ia32_cvtsbf162ss_32: |
14945 | return EmitX86CvtBF16ToFloatExpr(*this, E, Ops); |
14946 | |
14947 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
14948 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { |
14949 | Intrinsic::ID IID; |
14950 | switch (BuiltinID) { |
14951 | default: llvm_unreachable("Unsupported intrinsic!");
14952 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
14953 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256; |
14954 | break; |
14955 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: |
14956 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512; |
14957 | break; |
14958 | } |
14959 | Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]); |
14960 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
14961 | } |
14962 | |
14963 | case X86::BI__cpuid: |
14964 | case X86::BI__cpuidex: { |
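      | // Per the MSVC definition, __cpuid(int info[4], int leaf) and
      | // __cpuidex(int info[4], int leaf, int subleaf) execute CPUID and store
      | // EAX, EBX, ECX and EDX into info[0..3].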
14965 | Value *FuncId = EmitScalarExpr(E->getArg(1)); |
14966 | Value *SubFuncId = BuiltinID == X86::BI__cpuidex |
14967 | ? EmitScalarExpr(E->getArg(2)) |
14968 | : llvm::ConstantInt::get(Int32Ty, 0); |
14969 | |
14970 | llvm::StructType *CpuidRetTy = |
14971 | llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, Int32Ty); |
14972 | llvm::FunctionType *FTy = |
14973 | llvm::FunctionType::get(CpuidRetTy, {Int32Ty, Int32Ty}, false); |
14974 | |
14975 | StringRef Asm, Constraints; |
14976 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) { |
14977 | Asm = "cpuid"; |
14978 | Constraints = "={ax},={bx},={cx},={dx},{ax},{cx}"; |
14979 | } else { |
14980 | // x86-64 uses %rbx as the base register, so preserve it. |
14981 | Asm = "xchgq %rbx, ${1:q}\n" |
14982 | "cpuid\n" |
14983 | "xchgq %rbx, ${1:q}"; |
14984 | Constraints = "={ax},=r,={cx},={dx},0,2"; |
14985 | } |
14986 | |
14987 | llvm::InlineAsm *IA = llvm::InlineAsm::get(FTy, Asm, Constraints, |
14988 | /*hasSideEffects=*/false); |
14989 | Value *IACall = Builder.CreateCall(IA, {FuncId, SubFuncId}); |
14990 | Value *BasePtr = EmitScalarExpr(E->getArg(0)); |
14991 | Value *Store = nullptr; |
14992 | for (unsigned i = 0; i < 4; i++) { |
14993 | Value *Extracted = Builder.CreateExtractValue(IACall, i); |
14994 | Value *StorePtr = Builder.CreateConstInBoundsGEP1_32(Int32Ty, BasePtr, i); |
14995 | Store = Builder.CreateAlignedStore(Extracted, StorePtr, getIntAlign()); |
14996 | } |
14997 | |
14998 | // Return the last store instruction to signal that we have emitted
14999 | // the intrinsic.
15000 | return Store; |
15001 | } |
15002 | |
15003 | case X86::BI__emul: |
15004 | case X86::BI__emulu: { |
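      | // __emul/__emulu multiply two 32-bit values into a full 64-bit product.
      | // Widening the operands first makes the multiply provably
      | // non-overflowing, which is why the nuw/nsw flags below are safe.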
15005 | llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64); |
15006 | bool isSigned = (BuiltinID == X86::BI__emul); |
15007 | Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned); |
15008 | Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned); |
15009 | return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned); |
15010 | } |
15011 | case X86::BI__mulh: |
15012 | case X86::BI__umulh: |
15013 | case X86::BI_mul128: |
15014 | case X86::BI_umul128: { |
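      | // Per the MSVC definitions, __mulh/__umulh return the high 64 bits of a
      | // 128-bit product, while _mul128/_umul128 return the low 64 bits and
      | // store the high 64 bits through the pointer in the third argument.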
15015 | llvm::Type *ResType = ConvertType(E->getType()); |
15016 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
15017 | |
15018 | bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128); |
15019 | Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned); |
15020 | Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned); |
15021 | |
15022 | Value *MulResult, *HigherBits; |
15023 | if (IsSigned) { |
15024 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
15025 | HigherBits = Builder.CreateAShr(MulResult, 64); |
15026 | } else { |
15027 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
15028 | HigherBits = Builder.CreateLShr(MulResult, 64); |
15029 | } |
15030 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
15031 | |
15032 | if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh) |
15033 | return HigherBits; |
15034 | |
15035 | Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2)); |
15036 | Builder.CreateStore(HigherBits, HighBitsAddress); |
15037 | return Builder.CreateIntCast(MulResult, ResType, IsSigned); |
15038 | } |
15039 | |
15040 | case X86::BI__faststorefence: { |
15041 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
15042 | llvm::SyncScope::System); |
15043 | } |
15044 | case X86::BI__shiftleft128: |
15045 | case X86::BI__shiftright128: { |
15046 | llvm::Function *F = CGM.getIntrinsic( |
15047 | BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr, |
15048 | Int64Ty); |
15049 | // Flip low/high ops and zero-extend amount to matching type. |
15050 | // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt) |
15051 | // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt) |
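      | // For instance, with Amt = 1 this yields (High << 1) | (Low >> 63),
      | // i.e. the upper half of the 128-bit value shifted left by one.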
15052 | std::swap(Ops[0], Ops[1]); |
15053 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
15054 | return Builder.CreateCall(F, Ops); |
15055 | } |
15056 | case X86::BI_ReadWriteBarrier: |
15057 | case X86::BI_ReadBarrier: |
15058 | case X86::BI_WriteBarrier: { |
15059 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
15060 | llvm::SyncScope::SingleThread); |
15061 | } |
15062 | |
15063 | case X86::BI_AddressOfReturnAddress: { |
15064 | Function *F = |
15065 | CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy); |
15066 | return Builder.CreateCall(F); |
15067 | } |
15068 | case X86::BI__stosb: { |
15069 | // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
15070 | // instruction, but it will create a memset that won't be optimized away.
15071 | return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true); |
15072 | } |
15073 | case X86::BI__ud2: |
15074 | // llvm.trap makes a ud2a instruction on x86. |
15075 | return EmitTrapCall(Intrinsic::trap); |
15076 | case X86::BI__int2c: { |
15077 | // This syscall signals a driver assertion failure in x86 NT kernels. |
15078 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); |
15079 | llvm::InlineAsm *IA = |
15080 | llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true); |
15081 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
15082 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
15083 | llvm::Attribute::NoReturn); |
15084 | llvm::CallInst *CI = Builder.CreateCall(IA); |
15085 | CI->setAttributes(NoReturnAttr); |
15086 | return CI; |
15087 | } |
15088 | case X86::BI__readfsbyte: |
15089 | case X86::BI__readfsword: |
15090 | case X86::BI__readfsdword: |
15091 | case X86::BI__readfsqword: { |
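      | // LLVM's X86 backend reserves address space 257 for %fs-relative and
      | // address space 256 for %gs-relative addressing; the inttoptr cast
      | // below routes the load through the FS segment.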
15092 | llvm::Type *IntTy = ConvertType(E->getType()); |
15093 | Value *Ptr = |
15094 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257)); |
15095 | LoadInst *Load = Builder.CreateAlignedLoad( |
15096 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
15097 | Load->setVolatile(true); |
15098 | return Load; |
15099 | } |
15100 | case X86::BI__readgsbyte: |
15101 | case X86::BI__readgsword: |
15102 | case X86::BI__readgsdword: |
15103 | case X86::BI__readgsqword: { |
15104 | llvm::Type *IntTy = ConvertType(E->getType()); |
15105 | Value *Ptr = |
15106 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256)); |
15107 | LoadInst *Load = Builder.CreateAlignedLoad( |
15108 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
15109 | Load->setVolatile(true); |
15110 | return Load; |
15111 | } |
15112 | case X86::BI__builtin_ia32_encodekey128_u32: { |
15113 | Intrinsic::ID IID = Intrinsic::x86_encodekey128; |
15114 | |
15115 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]}); |
15116 | |
15117 | for (int i = 0; i < 3; ++i) { |
15118 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
15119 | Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16); |
15120 | Ptr = Builder.CreateBitCast( |
15121 | Ptr, llvm::PointerType::getUnqual(Extract->getType())); |
15122 | Builder.CreateAlignedStore(Extract, Ptr, Align(1)); |
15123 | } |
15124 | |
15125 | return Builder.CreateExtractValue(Call, 0); |
15126 | } |
15127 | case X86::BI__builtin_ia32_encodekey256_u32: { |
15128 | Intrinsic::ID IID = Intrinsic::x86_encodekey256; |
15129 | |
15130 | Value *Call = |
15131 | Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]}); |
15132 | |
15133 | for (int i = 0; i < 4; ++i) { |
15134 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
15135 | Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16); |
15136 | Ptr = Builder.CreateBitCast( |
15137 | Ptr, llvm::PointerType::getUnqual(Extract->getType())); |
15138 | Builder.CreateAlignedStore(Extract, Ptr, Align(1)); |
15139 | } |
15140 | |
15141 | return Builder.CreateExtractValue(Call, 0); |
15142 | } |
15143 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
15144 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
15145 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
15146 | case X86::BI__builtin_ia32_aesdec256kl_u8: { |
15147 | Intrinsic::ID IID; |
15148 | StringRef BlockName; |
15149 | switch (BuiltinID) { |
15150 | default: |
15151 | llvm_unreachable("Unexpected builtin");
15152 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
15153 | IID = Intrinsic::x86_aesenc128kl; |
15154 | BlockName = "aesenc128kl"; |
15155 | break; |
15156 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
15157 | IID = Intrinsic::x86_aesdec128kl; |
15158 | BlockName = "aesdec128kl"; |
15159 | break; |
15160 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
15161 | IID = Intrinsic::x86_aesenc256kl; |
15162 | BlockName = "aesenc256kl"; |
15163 | break; |
15164 | case X86::BI__builtin_ia32_aesdec256kl_u8: |
15165 | IID = Intrinsic::x86_aesdec256kl; |
15166 | BlockName = "aesdec256kl"; |
15167 | break; |
15168 | } |
15169 | |
15170 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]}); |
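      | // The keylocker intrinsics return { flag, result }. The code below
      | // branches on the low bit of the flag, stores the result on success and
      | // a zero vector on failure, and returns the flag itself.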
15171 | |
15172 | BasicBlock *NoError = |
15173 | createBasicBlock(BlockName + "_no_error", this->CurFn); |
15174 | BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); |
15175 | BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); |
15176 | |
15177 | Value *Ret = Builder.CreateExtractValue(Call, 0); |
15178 | Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty()); |
15179 | Value *Out = Builder.CreateExtractValue(Call, 1); |
15180 | Builder.CreateCondBr(Succ, NoError, Error); |
15181 | |
15182 | Builder.SetInsertPoint(NoError); |
15183 | Builder.CreateDefaultAlignedStore(Out, Ops[0]); |
15184 | Builder.CreateBr(End); |
15185 | |
15186 | Builder.SetInsertPoint(Error); |
15187 | Constant *Zero = llvm::Constant::getNullValue(Out->getType()); |
15188 | Builder.CreateDefaultAlignedStore(Zero, Ops[0]); |
15189 | Builder.CreateBr(End); |
15190 | |
15191 | Builder.SetInsertPoint(End); |
15192 | return Builder.CreateExtractValue(Call, 0); |
15193 | } |
15194 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
15195 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
15196 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
15197 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: { |
15198 | Intrinsic::ID IID; |
15199 | StringRef BlockName; |
15200 | switch (BuiltinID) { |
15201 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
15202 | IID = Intrinsic::x86_aesencwide128kl; |
15203 | BlockName = "aesencwide128kl"; |
15204 | break; |
15205 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
15206 | IID = Intrinsic::x86_aesdecwide128kl; |
15207 | BlockName = "aesdecwide128kl"; |
15208 | break; |
15209 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
15210 | IID = Intrinsic::x86_aesencwide256kl; |
15211 | BlockName = "aesencwide256kl"; |
15212 | break; |
15213 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: |
15214 | IID = Intrinsic::x86_aesdecwide256kl; |
15215 | BlockName = "aesdecwide256kl"; |
15216 | break; |
15217 | } |
15218 | |
15219 | llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2); |
15220 | Value *InOps[9]; |
15221 | InOps[0] = Ops[2]; |
15222 | for (int i = 0; i != 8; ++i) { |
15223 | Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i); |
15224 | InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16)); |
15225 | } |
15226 | |
15227 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps); |
15228 | |
15229 | BasicBlock *NoError = |
15230 | createBasicBlock(BlockName + "_no_error", this->CurFn); |
15231 | BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); |
15232 | BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); |
15233 | |
15234 | Value *Ret = Builder.CreateExtractValue(Call, 0); |
15235 | Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty()); |
15236 | Builder.CreateCondBr(Succ, NoError, Error); |
15237 | |
15238 | Builder.SetInsertPoint(NoError); |
15239 | for (int i = 0; i != 8; ++i) { |
15240 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
15241 | Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i); |
15242 | Builder.CreateAlignedStore(Extract, Ptr, Align(16)); |
15243 | } |
15244 | Builder.CreateBr(End); |
15245 | |
15246 | Builder.SetInsertPoint(Error); |
15247 | for (int i = 0; i != 8; ++i) { |
15248 | Value *Out = Builder.CreateExtractValue(Call, i + 1); |
15249 | Constant *Zero = llvm::Constant::getNullValue(Out->getType()); |
15250 | Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i); |
15251 | Builder.CreateAlignedStore(Zero, Ptr, Align(16)); |
15252 | } |
15253 | Builder.CreateBr(End); |
15254 | |
15255 | Builder.SetInsertPoint(End); |
15256 | return Builder.CreateExtractValue(Call, 0); |
15257 | } |
15258 | case X86::BI__builtin_ia32_vfcmaddcph512_mask: |
15259 | IsConjFMA = true; |
15260 | LLVM_FALLTHROUGH;
15261 | case X86::BI__builtin_ia32_vfmaddcph512_mask: { |
15262 | Intrinsic::ID IID = IsConjFMA |
15263 | ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512 |
15264 | : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512; |
15265 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
15266 | return EmitX86Select(*this, Ops[3], Call, Ops[0]); |
15267 | } |
15268 | case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: |
15269 | IsConjFMA = true; |
15270 | LLVM_FALLTHROUGH;
15271 | case X86::BI__builtin_ia32_vfmaddcsh_round_mask: { |
15272 | Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh |
15273 | : Intrinsic::x86_avx512fp16_mask_vfmadd_csh; |
15274 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
15275 | Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1)); |
15276 | return EmitX86Select(*this, And, Call, Ops[0]); |
15277 | } |
15278 | case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: |
15279 | IsConjFMA = true; |
15280 | LLVM_FALLTHROUGH;
15281 | case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: { |
15282 | Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh |
15283 | : Intrinsic::x86_avx512fp16_mask_vfmadd_csh; |
15284 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
15285 | static constexpr int Mask[] = {0, 5, 6, 7}; |
15286 | return Builder.CreateShuffleVector(Call, Ops[2], Mask); |
15287 | } |
15288 | } |
15289 | } |
15290 | |
15291 | Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, |
15292 | const CallExpr *E) { |
15293 | // Do not emit the builtin arguments inside the argument list of a function
15294 | // call, because the evaluation order of function arguments is not
15295 | // specified in C++. This is important when testing, to ensure the
15296 | // arguments are emitted in the same order every time. E.g.:
15297 | // Instead of: |
15298 | // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)), |
15299 | // EmitScalarExpr(E->getArg(1)), "swdiv"); |
15300 | // Use: |
15301 | // Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15302 | // Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15303 | // return Builder.CreateFDiv(Op0, Op1, "swdiv");
15304 | |
15305 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
15306 | |
15307 | switch (BuiltinID) { |
15308 | default: return nullptr; |
15309 | |
15310 | // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we |
15311 | // call __builtin_readcyclecounter. |
15312 | case PPC::BI__builtin_ppc_get_timebase: |
15313 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter)); |
15314 | |
15315 | // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr |
15316 | case PPC::BI__builtin_altivec_lvx: |
15317 | case PPC::BI__builtin_altivec_lvxl: |
15318 | case PPC::BI__builtin_altivec_lvebx: |
15319 | case PPC::BI__builtin_altivec_lvehx: |
15320 | case PPC::BI__builtin_altivec_lvewx: |
15321 | case PPC::BI__builtin_altivec_lvsl: |
15322 | case PPC::BI__builtin_altivec_lvsr: |
15323 | case PPC::BI__builtin_vsx_lxvd2x: |
15324 | case PPC::BI__builtin_vsx_lxvw4x: |
15325 | case PPC::BI__builtin_vsx_lxvd2x_be: |
15326 | case PPC::BI__builtin_vsx_lxvw4x_be: |
15327 | case PPC::BI__builtin_vsx_lxvl: |
15328 | case PPC::BI__builtin_vsx_lxvll: |
15329 | { |
15330 | SmallVector<Value *, 2> Ops; |
15331 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
15332 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
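      | // lxvl/lxvll take (pointer, length) operands directly; the remaining
      | // builtins take (offset, base), which is folded below into a single
      | // byte GEP before the extra operand is dropped.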
15333 | if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
15334 | BuiltinID == PPC::BI__builtin_vsx_lxvll) {
15335 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
15336 | } else {
15337 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
15338 | Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); |
15339 | Ops.pop_back(); |
15340 | } |
15341 | |
15342 | switch (BuiltinID) { |
15343 | default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
15344 | case PPC::BI__builtin_altivec_lvx: |
15345 | ID = Intrinsic::ppc_altivec_lvx; |
15346 | break; |
15347 | case PPC::BI__builtin_altivec_lvxl: |
15348 | ID = Intrinsic::ppc_altivec_lvxl; |
15349 | break; |
15350 | case PPC::BI__builtin_altivec_lvebx: |
15351 | ID = Intrinsic::ppc_altivec_lvebx; |
15352 | break; |
15353 | case PPC::BI__builtin_altivec_lvehx: |
15354 | ID = Intrinsic::ppc_altivec_lvehx; |
15355 | break; |
15356 | case PPC::BI__builtin_altivec_lvewx: |
15357 | ID = Intrinsic::ppc_altivec_lvewx; |
15358 | break; |
15359 | case PPC::BI__builtin_altivec_lvsl: |
15360 | ID = Intrinsic::ppc_altivec_lvsl; |
15361 | break; |
15362 | case PPC::BI__builtin_altivec_lvsr: |
15363 | ID = Intrinsic::ppc_altivec_lvsr; |
15364 | break; |
15365 | case PPC::BI__builtin_vsx_lxvd2x: |
15366 | ID = Intrinsic::ppc_vsx_lxvd2x; |
15367 | break; |
15368 | case PPC::BI__builtin_vsx_lxvw4x: |
15369 | ID = Intrinsic::ppc_vsx_lxvw4x; |
15370 | break; |
15371 | case PPC::BI__builtin_vsx_lxvd2x_be: |
15372 | ID = Intrinsic::ppc_vsx_lxvd2x_be; |
15373 | break; |
15374 | case PPC::BI__builtin_vsx_lxvw4x_be: |
15375 | ID = Intrinsic::ppc_vsx_lxvw4x_be; |
15376 | break; |
15377 | case PPC::BI__builtin_vsx_lxvl: |
15378 | ID = Intrinsic::ppc_vsx_lxvl; |
15379 | break; |
15380 | case PPC::BI__builtin_vsx_lxvll: |
15381 | ID = Intrinsic::ppc_vsx_lxvll; |
15382 | break; |
15383 | } |
15384 | llvm::Function *F = CGM.getIntrinsic(ID); |
15385 | return Builder.CreateCall(F, Ops, ""); |
15386 | } |
15387 | |
15388 | // vec_st, vec_xst_be |
15389 | case PPC::BI__builtin_altivec_stvx: |
15390 | case PPC::BI__builtin_altivec_stvxl: |
15391 | case PPC::BI__builtin_altivec_stvebx: |
15392 | case PPC::BI__builtin_altivec_stvehx: |
15393 | case PPC::BI__builtin_altivec_stvewx: |
15394 | case PPC::BI__builtin_vsx_stxvd2x: |
15395 | case PPC::BI__builtin_vsx_stxvw4x: |
15396 | case PPC::BI__builtin_vsx_stxvd2x_be: |
15397 | case PPC::BI__builtin_vsx_stxvw4x_be: |
15398 | case PPC::BI__builtin_vsx_stxvl: |
15399 | case PPC::BI__builtin_vsx_stxvll: |
15400 | { |
15401 | SmallVector<Value *, 3> Ops; |
15402 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
15403 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
15404 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
15405 | if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
15406 | BuiltinID == PPC::BI__builtin_vsx_stxvll) {
15407 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
15408 | } else {
15409 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
15410 | Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); |
15411 | Ops.pop_back(); |
15412 | } |
15413 | |
15414 | switch (BuiltinID) { |
15415 | default: llvm_unreachable("Unsupported st intrinsic!");
15416 | case PPC::BI__builtin_altivec_stvx: |
15417 | ID = Intrinsic::ppc_altivec_stvx; |
15418 | break; |
15419 | case PPC::BI__builtin_altivec_stvxl: |
15420 | ID = Intrinsic::ppc_altivec_stvxl; |
15421 | break; |
15422 | case PPC::BI__builtin_altivec_stvebx: |
15423 | ID = Intrinsic::ppc_altivec_stvebx; |
15424 | break; |
15425 | case PPC::BI__builtin_altivec_stvehx: |
15426 | ID = Intrinsic::ppc_altivec_stvehx; |
15427 | break; |
15428 | case PPC::BI__builtin_altivec_stvewx: |
15429 | ID = Intrinsic::ppc_altivec_stvewx; |
15430 | break; |
15431 | case PPC::BI__builtin_vsx_stxvd2x: |
15432 | ID = Intrinsic::ppc_vsx_stxvd2x; |
15433 | break; |
15434 | case PPC::BI__builtin_vsx_stxvw4x: |
15435 | ID = Intrinsic::ppc_vsx_stxvw4x; |
15436 | break; |
15437 | case PPC::BI__builtin_vsx_stxvd2x_be: |
15438 | ID = Intrinsic::ppc_vsx_stxvd2x_be; |
15439 | break; |
15440 | case PPC::BI__builtin_vsx_stxvw4x_be: |
15441 | ID = Intrinsic::ppc_vsx_stxvw4x_be; |
15442 | break; |
15443 | case PPC::BI__builtin_vsx_stxvl: |
15444 | ID = Intrinsic::ppc_vsx_stxvl; |
15445 | break; |
15446 | case PPC::BI__builtin_vsx_stxvll: |
15447 | ID = Intrinsic::ppc_vsx_stxvll; |
15448 | break; |
15449 | } |
15450 | llvm::Function *F = CGM.getIntrinsic(ID); |
15451 | return Builder.CreateCall(F, Ops, ""); |
15452 | } |
15453 | case PPC::BI__builtin_vsx_ldrmb: { |
15454 | // This essentially boils down to performing an unaligned VMX load
15455 | // sequence that avoids crossing a page boundary, and then shuffling the
15456 | // elements into the right side of the vector register.
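      | // lvx ignores the low four bits of the effective address, so loading at
      | // Op0 and at Op0 + NumBytes - 1 fetches the two aligned quadwords that
      | // cover the region without reading past it; lvsl/lvsr supply the permute
      | // mask that shifts the requested bytes into place.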
15457 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15458 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15459 | int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue(); |
15460 | llvm::Type *ResTy = ConvertType(E->getType()); |
15461 | bool IsLE = getTarget().isLittleEndian(); |
15462 | |
15463 | // If the user wants the entire vector, just load the entire vector. |
15464 | if (NumBytes == 16) { |
15465 | Value *BC = Builder.CreateBitCast(Op0, ResTy->getPointerTo()); |
15466 | Value *LD = |
15467 | Builder.CreateLoad(Address(BC, ResTy, CharUnits::fromQuantity(1))); |
15468 | if (!IsLE) |
15469 | return LD; |
15470 | |
15471 | // Reverse the bytes on LE. |
15472 | SmallVector<int, 16> RevMask; |
15473 | for (int Idx = 0; Idx < 16; Idx++) |
15474 | RevMask.push_back(15 - Idx); |
15475 | return Builder.CreateShuffleVector(LD, LD, RevMask); |
15476 | } |
15477 | |
15478 | llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx); |
15479 | llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr |
15480 | : Intrinsic::ppc_altivec_lvsl); |
15481 | llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm); |
15482 | Value *HiMem = Builder.CreateGEP( |
15483 | Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1)); |
15484 | Value *LoLd = Builder.CreateCall(Lvx, Op0, "ld.lo"); |
15485 | Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi"); |
15486 | Value *Mask1 = Builder.CreateCall(Lvs, Op0, "mask1"); |
15487 | |
15488 | Op0 = IsLE ? HiLd : LoLd; |
15489 | Op1 = IsLE ? LoLd : HiLd; |
15490 | Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1"); |
15491 | Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType()); |
15492 | |
15493 | if (IsLE) { |
15494 | SmallVector<int, 16> Consts; |
15495 | for (int Idx = 0; Idx < 16; Idx++) { |
15496 | int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1) |
15497 | : 16 - (NumBytes - Idx); |
15498 | Consts.push_back(Val); |
15499 | } |
15500 | return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy), |
15501 | Zero, Consts); |
15502 | } |
15503 | SmallVector<Constant *, 16> Consts; |
15504 | for (int Idx = 0; Idx < 16; Idx++) |
15505 | Consts.push_back(Builder.getInt8(NumBytes + Idx)); |
15506 | Value *Mask2 = ConstantVector::get(Consts); |
15507 | return Builder.CreateBitCast( |
15508 | Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy); |
15509 | } |
15510 | case PPC::BI__builtin_vsx_strmb: { |
15511 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15512 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15513 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
15514 | int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue(); |
15515 | bool IsLE = getTarget().isLittleEndian(); |
15516 | auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) { |
15517 | // When storing the whole vector: on BE simply store it; on LE reverse
15518 | // the bytes first and then store.
15519 | if (Width == 16) { |
15520 | Value *BC = Builder.CreateBitCast(Op0, Op2->getType()->getPointerTo()); |
15521 | Value *StVec = Op2; |
15522 | if (IsLE) { |
15523 | SmallVector<int, 16> RevMask; |
15524 | for (int Idx = 0; Idx < 16; Idx++) |
15525 | RevMask.push_back(15 - Idx); |
15526 | StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask); |
15527 | } |
15528 | return Builder.CreateStore( |
15529 | StVec, Address(BC, Op2->getType(), CharUnits::fromQuantity(1))); |
15530 | } |
15531 | auto *ConvTy = Int64Ty; |
15532 | unsigned NumElts = 0; |
15533 | switch (Width) { |
15534 | default: |
15535 | llvm_unreachable("width for stores must be a power of 2");
15536 | case 8: |
15537 | ConvTy = Int64Ty; |
15538 | NumElts = 2; |
15539 | break; |
15540 | case 4: |
15541 | ConvTy = Int32Ty; |
15542 | NumElts = 4; |
15543 | break; |
15544 | case 2: |
15545 | ConvTy = Int16Ty; |
15546 | NumElts = 8; |
15547 | break; |
15548 | case 1: |
15549 | ConvTy = Int8Ty; |
15550 | NumElts = 16; |
15551 | break; |
15552 | } |
15553 | Value *Vec = Builder.CreateBitCast( |
15554 | Op2, llvm::FixedVectorType::get(ConvTy, NumElts)); |
15555 | Value *Ptr = |
15556 | Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset)); |
15557 | Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo()); |
15558 | Value *Elt = Builder.CreateExtractElement(Vec, EltNo); |
15559 | if (IsLE && Width > 1) { |
15560 | Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy); |
15561 | Elt = Builder.CreateCall(F, Elt); |
15562 | } |
15563 | return Builder.CreateStore( |
15564 | Elt, Address(PtrBC, ConvTy, CharUnits::fromQuantity(1))); |
15565 | }; |
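      | // Decompose NumBytes into power-of-two sub-stores; e.g. NumBytes == 7 is
      | // emitted as a 4-byte store at offset 3, a 2-byte store at offset 1 and
      | // a 1-byte store at offset 0.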
15566 | unsigned Stored = 0; |
15567 | unsigned RemainingBytes = NumBytes; |
15568 | Value *Result; |
15569 | if (NumBytes == 16) |
15570 | return StoreSubVec(16, 0, 0); |
15571 | if (NumBytes >= 8) { |
15572 | Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1); |
15573 | RemainingBytes -= 8; |
15574 | Stored += 8; |
15575 | } |
15576 | if (RemainingBytes >= 4) { |
15577 | Result = StoreSubVec(4, NumBytes - Stored - 4, |
15578 | IsLE ? (Stored >> 2) : 3 - (Stored >> 2)); |
15579 | RemainingBytes -= 4; |
15580 | Stored += 4; |
15581 | } |
15582 | if (RemainingBytes >= 2) { |
15583 | Result = StoreSubVec(2, NumBytes - Stored - 2, |
15584 | IsLE ? (Stored >> 1) : 7 - (Stored >> 1)); |
15585 | RemainingBytes -= 2; |
15586 | Stored += 2; |
15587 | } |
15588 | if (RemainingBytes) |
15589 | Result = |
15590 | StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored); |
15591 | return Result; |
15592 | } |
15593 | // Square root |
15594 | case PPC::BI__builtin_vsx_xvsqrtsp: |
15595 | case PPC::BI__builtin_vsx_xvsqrtdp: { |
15596 | llvm::Type *ResultType = ConvertType(E->getType()); |
15597 | Value *X = EmitScalarExpr(E->getArg(0)); |
15598 | if (Builder.getIsFPConstrained()) { |
15599 | llvm::Function *F = CGM.getIntrinsic( |
15600 | Intrinsic::experimental_constrained_sqrt, ResultType); |
15601 | return Builder.CreateConstrainedFPCall(F, X); |
15602 | } else { |
15603 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
15604 | return Builder.CreateCall(F, X); |
15605 | } |
15606 | } |
15607 | // Count leading zeros |
15608 | case PPC::BI__builtin_altivec_vclzb: |
15609 | case PPC::BI__builtin_altivec_vclzh: |
15610 | case PPC::BI__builtin_altivec_vclzw: |
15611 | case PPC::BI__builtin_altivec_vclzd: { |
15612 | llvm::Type *ResultType = ConvertType(E->getType()); |
15613 | Value *X = EmitScalarExpr(E->getArg(0)); |
15614 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
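      | // Passing false as the second (is-zero-poison) operand keeps ctlz (and
      | // cttz below) well defined for zero inputs, where it returns the element
      | // bit width, matching the PPC instruction semantics.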
15615 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
15616 | return Builder.CreateCall(F, {X, Undef}); |
15617 | } |
15618 | case PPC::BI__builtin_altivec_vctzb: |
15619 | case PPC::BI__builtin_altivec_vctzh: |
15620 | case PPC::BI__builtin_altivec_vctzw: |
15621 | case PPC::BI__builtin_altivec_vctzd: { |
15622 | llvm::Type *ResultType = ConvertType(E->getType()); |
15623 | Value *X = EmitScalarExpr(E->getArg(0)); |
15624 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
15625 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
15626 | return Builder.CreateCall(F, {X, Undef}); |
15627 | } |
15628 | case PPC::BI__builtin_altivec_vec_replace_elt: |
15629 | case PPC::BI__builtin_altivec_vec_replace_unaligned: { |
15630 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15631 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15632 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
15633 | // The third argument of vec_replace_elt and vec_replace_unaligned must
15634 | // be a compile time constant and is lowered to either the vinsw or the
15635 | // vinsd instruction.
15636 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2); |
15637 | assert(ArgCI &&
15638 | "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
15639 | llvm::Type *ResultType = ConvertType(E->getType()); |
15640 | llvm::Function *F = nullptr; |
15641 | Value *Call = nullptr; |
15642 | int64_t ConstArg = ArgCI->getSExtValue(); |
15643 | unsigned ArgWidth = Op1->getType()->getPrimitiveSizeInBits(); |
15644 | bool Is32Bit = false; |
15645 | assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
15646 | // The input to vec_replace_elt is an element index, not a byte index. |
15647 | if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt) |
15648 | ConstArg *= ArgWidth / 8; |
15649 | if (ArgWidth == 32) { |
15650 | Is32Bit = true; |
15651 | // When the second argument is 32 bits, it can either be an integer or |
15652 | // a float. The vinsw intrinsic is used in this case. |
15653 | F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw); |
15654 | // Fix the constant according to endianness.
15655 | if (getTarget().isLittleEndian()) |
15656 | ConstArg = 12 - ConstArg; |
15657 | } else { |
15658 | // When the second argument is 64 bits, it can either be a long long or |
15659 | // a double. The vinsd intrinsic is used in this case. |
15660 | F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd); |
15661 | // Fix the constant for little endian. |
15662 | if (getTarget().isLittleEndian()) |
15663 | ConstArg = 8 - ConstArg; |
15664 | } |
15665 | Op2 = ConstantInt::getSigned(Int32Ty, ConstArg); |
15666 | // Depending on ArgWidth, the input vector could be a float or a double. |
15667 | // If the input vector is a float type, bitcast the inputs to integers. Or, |
15668 | // if the input vector is a double, bitcast the inputs to 64-bit integers. |
15669 | if (!Op1->getType()->isIntegerTy(ArgWidth)) { |
15670 | Op0 = Builder.CreateBitCast( |
15671 | Op0, Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4) |
15672 | : llvm::FixedVectorType::get(Int64Ty, 2)); |
15673 | Op1 = Builder.CreateBitCast(Op1, Is32Bit ? Int32Ty : Int64Ty); |
15674 | } |
15675 | // Emit the call to vinsw or vinsd. |
15676 | Call = Builder.CreateCall(F, {Op0, Op1, Op2}); |
15677 | // Depending on the builtin, bitcast to the appropriate result type.
15678 | if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt && |
15679 | !Op1->getType()->isIntegerTy()) |
15680 | return Builder.CreateBitCast(Call, ResultType); |
15681 | else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt && |
15682 | Op1->getType()->isIntegerTy()) |
15683 | return Call; |
15684 | else |
15685 | return Builder.CreateBitCast(Call, |
15686 | llvm::FixedVectorType::get(Int8Ty, 16)); |
15687 | } |
15688 | case PPC::BI__builtin_altivec_vpopcntb: |
15689 | case PPC::BI__builtin_altivec_vpopcnth: |
15690 | case PPC::BI__builtin_altivec_vpopcntw: |
15691 | case PPC::BI__builtin_altivec_vpopcntd: { |
15692 | llvm::Type *ResultType = ConvertType(E->getType()); |
15693 | Value *X = EmitScalarExpr(E->getArg(0)); |
15694 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
15695 | return Builder.CreateCall(F, X); |
15696 | } |
15697 | case PPC::BI__builtin_altivec_vadduqm: |
15698 | case PPC::BI__builtin_altivec_vsubuqm: { |
15699 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15700 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15701 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
15702 | Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1)); |
15703 | Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1)); |
15704 | if (BuiltinID == PPC::BI__builtin_altivec_vadduqm) |
15705 | return Builder.CreateAdd(Op0, Op1, "vadduqm"); |
15706 | else |
15707 | return Builder.CreateSub(Op0, Op1, "vsubuqm"); |
15708 | } |
15709 | // Rotate and insert under mask operation. |
15710 | // __rldimi(rs, is, shift, mask) |
15711 | // (rotl64(rs, shift) & mask) | (is & ~mask) |
15712 | // __rlwimi(rs, is, shift, mask) |
15713 | // (rotl(rs, shift) & mask) | (is & ~mask) |
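      | // A rotate-left by shift is emitted as fshl(rs, rs, shift):
      | // funnel-shifting a value with itself is exactly a rotate.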
15714 | case PPC::BI__builtin_ppc_rldimi: |
15715 | case PPC::BI__builtin_ppc_rlwimi: { |
15716 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15717 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15718 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
15719 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
15720 | llvm::Type *Ty = Op0->getType(); |
15721 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15722 | if (BuiltinID == PPC::BI__builtin_ppc_rldimi) |
15723 | Op2 = Builder.CreateZExt(Op2, Int64Ty); |
15724 | Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2}); |
15725 | Value *X = Builder.CreateAnd(Shift, Op3); |
15726 | Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3)); |
15727 | return Builder.CreateOr(X, Y); |
15728 | } |
15729 | // Rotate and insert under mask operation. |
15730 | // __rlwnm(rs, shift, mask) |
15731 | // rotl(rs, shift) & mask |
15732 | case PPC::BI__builtin_ppc_rlwnm: { |
15733 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15734 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15735 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
15736 | llvm::Type *Ty = Op0->getType(); |
15737 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15738 | Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1}); |
15739 | return Builder.CreateAnd(Shift, Op2); |
15740 | } |
15741 | case PPC::BI__builtin_ppc_poppar4: |
15742 | case PPC::BI__builtin_ppc_poppar8: { |
15743 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15744 | llvm::Type *ArgType = Op0->getType(); |
15745 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
15746 | Value *Tmp = Builder.CreateCall(F, Op0); |
15747 | |
15748 | llvm::Type *ResultType = ConvertType(E->getType()); |
15749 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
15750 | if (Result->getType() != ResultType) |
15751 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
15752 | "cast"); |
15753 | return Result; |
15754 | } |
15755 | case PPC::BI__builtin_ppc_cmpb: { |
15756 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15757 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15758 | if (getTarget().getTriple().isPPC64()) { |
15759 | Function *F = |
15760 | CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty}); |
15761 | return Builder.CreateCall(F, {Op0, Op1}, "cmpb"); |
15762 | } |
15763 | // For 32-bit targets, emit the code below:
15764 | // %conv = trunc i64 %a to i32 |
15765 | // %conv1 = trunc i64 %b to i32 |
15766 | // %shr = lshr i64 %a, 32 |
15767 | // %conv2 = trunc i64 %shr to i32 |
15768 | // %shr3 = lshr i64 %b, 32 |
15769 | // %conv4 = trunc i64 %shr3 to i32 |
15770 | // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1) |
15771 | // %conv5 = zext i32 %0 to i64 |
15772 | // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4) |
15773 | // %conv614 = zext i32 %1 to i64 |
15774 | // %shl = shl nuw i64 %conv614, 32 |
15775 | // %or = or i64 %shl, %conv5 |
15776 | // ret i64 %or |
15777 | Function *F = |
15778 | CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty}); |
15779 | Value *ArgOneLo = Builder.CreateTrunc(Op0, Int32Ty); |
15780 | Value *ArgTwoLo = Builder.CreateTrunc(Op1, Int32Ty); |
15781 | Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32); |
15782 | Value *ArgOneHi = |
15783 | Builder.CreateTrunc(Builder.CreateLShr(Op0, ShiftAmt), Int32Ty); |
15784 | Value *ArgTwoHi = |
15785 | Builder.CreateTrunc(Builder.CreateLShr(Op1, ShiftAmt), Int32Ty); |
15786 | Value *ResLo = Builder.CreateZExt( |
15787 | Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty); |
15788 | Value *ResHiShift = Builder.CreateZExt( |
15789 | Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty); |
15790 | Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt); |
15791 | return Builder.CreateOr(ResLo, ResHi); |
15792 | } |
15793 | // Copy sign |
15794 | case PPC::BI__builtin_vsx_xvcpsgnsp: |
15795 | case PPC::BI__builtin_vsx_xvcpsgndp: { |
15796 | llvm::Type *ResultType = ConvertType(E->getType()); |
15797 | Value *X = EmitScalarExpr(E->getArg(0)); |
15798 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15799 | ID = Intrinsic::copysign; |
15800 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
15801 | return Builder.CreateCall(F, {X, Y}); |
15802 | } |
15803 | // Rounding/truncation |
15804 | case PPC::BI__builtin_vsx_xvrspip: |
15805 | case PPC::BI__builtin_vsx_xvrdpip: |
15806 | case PPC::BI__builtin_vsx_xvrdpim: |
15807 | case PPC::BI__builtin_vsx_xvrspim: |
15808 | case PPC::BI__builtin_vsx_xvrdpi: |
15809 | case PPC::BI__builtin_vsx_xvrspi: |
15810 | case PPC::BI__builtin_vsx_xvrdpic: |
15811 | case PPC::BI__builtin_vsx_xvrspic: |
15812 | case PPC::BI__builtin_vsx_xvrdpiz: |
15813 | case PPC::BI__builtin_vsx_xvrspiz: { |
15814 | llvm::Type *ResultType = ConvertType(E->getType()); |
15815 | Value *X = EmitScalarExpr(E->getArg(0)); |
15816 | if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim || |
15817 | BuiltinID == PPC::BI__builtin_vsx_xvrspim) |
15818 | ID = Builder.getIsFPConstrained() |
15819 | ? Intrinsic::experimental_constrained_floor |
15820 | : Intrinsic::floor; |
15821 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi || |
15822 | BuiltinID == PPC::BI__builtin_vsx_xvrspi) |
15823 | ID = Builder.getIsFPConstrained() |
15824 | ? Intrinsic::experimental_constrained_round |
15825 | : Intrinsic::round; |
15826 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic || |
15827 | BuiltinID == PPC::BI__builtin_vsx_xvrspic) |
15828 | ID = Builder.getIsFPConstrained() |
15829 | ? Intrinsic::experimental_constrained_rint |
15830 | : Intrinsic::rint; |
15831 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip || |
15832 | BuiltinID == PPC::BI__builtin_vsx_xvrspip) |
15833 | ID = Builder.getIsFPConstrained() |
15834 | ? Intrinsic::experimental_constrained_ceil |
15835 | : Intrinsic::ceil; |
15836 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz || |
15837 | BuiltinID == PPC::BI__builtin_vsx_xvrspiz) |
15838 | ID = Builder.getIsFPConstrained() |
15839 | ? Intrinsic::experimental_constrained_trunc |
15840 | : Intrinsic::trunc; |
15841 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
15842 | return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X) |
15843 | : Builder.CreateCall(F, X); |
15844 | } |
15845 | |
15846 | // Absolute value |
15847 | case PPC::BI__builtin_vsx_xvabsdp: |
15848 | case PPC::BI__builtin_vsx_xvabssp: { |
15849 | llvm::Type *ResultType = ConvertType(E->getType()); |
15850 | Value *X = EmitScalarExpr(E->getArg(0)); |
15851 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
15852 | return Builder.CreateCall(F, X); |
15853 | } |
15854 | |
15855 | // Fastmath by default |
15856 | case PPC::BI__builtin_ppc_recipdivf: |
15857 | case PPC::BI__builtin_ppc_recipdivd: |
15858 | case PPC::BI__builtin_ppc_rsqrtf: |
15859 | case PPC::BI__builtin_ppc_rsqrtd: { |
15860 | FastMathFlags FMF = Builder.getFastMathFlags(); |
15861 | Builder.getFastMathFlags().setFast(); |
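      | // The saved flags are restored before each return below by intersecting
      | // the builder's flags with the saved copy.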
15862 | llvm::Type *ResultType = ConvertType(E->getType()); |
15863 | Value *X = EmitScalarExpr(E->getArg(0)); |
15864 | |
15865 | if (BuiltinID == PPC::BI__builtin_ppc_recipdivf || |
15866 | BuiltinID == PPC::BI__builtin_ppc_recipdivd) { |
15867 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15868 | Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv"); |
15869 | Builder.getFastMathFlags() &= (FMF); |
15870 | return FDiv; |
15871 | } |
15872 | auto *One = ConstantFP::get(ResultType, 1.0); |
15873 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
15874 | Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt"); |
15875 | Builder.getFastMathFlags() &= (FMF); |
15876 | return FDiv; |
15877 | } |
15878 | case PPC::BI__builtin_ppc_alignx: { |
15879 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15880 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15881 | ConstantInt *AlignmentCI = cast<ConstantInt>(Op0); |
15882 | if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
15883 | AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
15884 | llvm::Value::MaximumAlignment); |
15885 | |
15886 | emitAlignmentAssumption(Op1, E->getArg(1), |
15887 | /*The expr loc is sufficient.*/ SourceLocation(), |
15888 | AlignmentCI, nullptr); |
15889 | return Op1; |
15890 | } |
15891 | case PPC::BI__builtin_ppc_rdlam: { |
15892 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15893 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15894 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
15895 | llvm::Type *Ty = Op0->getType(); |
15896 | Value *ShiftAmt = Builder.CreateIntCast(Op1, Ty, false); |
15897 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15898 | Value *Rotate = Builder.CreateCall(F, {Op0, Op0, ShiftAmt}); |
15899 | return Builder.CreateAnd(Rotate, Op2); |
15900 | } |
15901 | case PPC::BI__builtin_ppc_load2r: { |
15902 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r); |
15903 | Value *Op0 = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
15904 | Value *LoadIntrinsic = Builder.CreateCall(F, {Op0}); |
15905 | return Builder.CreateTrunc(LoadIntrinsic, Int16Ty); |
15906 | } |
15907 | // FMA variations |
15908 | case PPC::BI__builtin_ppc_fnmsub: |
15909 | case PPC::BI__builtin_ppc_fnmsubs: |
15910 | case PPC::BI__builtin_vsx_xvmaddadp: |
15911 | case PPC::BI__builtin_vsx_xvmaddasp: |
15912 | case PPC::BI__builtin_vsx_xvnmaddadp: |
15913 | case PPC::BI__builtin_vsx_xvnmaddasp: |
15914 | case PPC::BI__builtin_vsx_xvmsubadp: |
15915 | case PPC::BI__builtin_vsx_xvmsubasp: |
15916 | case PPC::BI__builtin_vsx_xvnmsubadp: |
15917 | case PPC::BI__builtin_vsx_xvnmsubasp: { |
15918 | llvm::Type *ResultType = ConvertType(E->getType()); |
15919 | Value *X = EmitScalarExpr(E->getArg(0)); |
15920 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15921 | Value *Z = EmitScalarExpr(E->getArg(2)); |
15922 | llvm::Function *F; |
15923 | if (Builder.getIsFPConstrained()) |
15924 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
15925 | else |
15926 | F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
15927 | switch (BuiltinID) { |
15928 | case PPC::BI__builtin_vsx_xvmaddadp: |
15929 | case PPC::BI__builtin_vsx_xvmaddasp: |
15930 | if (Builder.getIsFPConstrained()) |
15931 | return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); |
15932 | else |
15933 | return Builder.CreateCall(F, {X, Y, Z}); |
15934 | case PPC::BI__builtin_vsx_xvnmaddadp: |
15935 | case PPC::BI__builtin_vsx_xvnmaddasp: |
15936 | if (Builder.getIsFPConstrained()) |
15937 | return Builder.CreateFNeg( |
15938 | Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); |
15939 | else |
15940 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); |
15941 | case PPC::BI__builtin_vsx_xvmsubadp: |
15942 | case PPC::BI__builtin_vsx_xvmsubasp: |
15943 | if (Builder.getIsFPConstrained()) |
15944 | return Builder.CreateConstrainedFPCall( |
15945 | F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
15946 | else |
15947 | return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
15948 | case PPC::BI__builtin_ppc_fnmsub: |
15949 | case PPC::BI__builtin_ppc_fnmsubs: |
15950 | case PPC::BI__builtin_vsx_xvnmsubadp: |
15951 | case PPC::BI__builtin_vsx_xvnmsubasp: |
15952 | if (Builder.getIsFPConstrained()) |
15953 | return Builder.CreateFNeg( |
15954 | Builder.CreateConstrainedFPCall( |
15955 | F, {X, Y, Builder.CreateFNeg(Z, "neg")}), |
15956 | "neg"); |
15957 | else |
15958 | return Builder.CreateCall( |
15959 | CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z}); |
15960 | } |
15961 | llvm_unreachable("Unknown FMA operation");
15962 | return nullptr; // Suppress no-return warning |
15963 | } |
15964 | |
15965 | case PPC::BI__builtin_vsx_insertword: { |
15966 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
15967 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
15968 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
15969 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw); |
15970 | |
15971 | // The third argument is a compile time constant int. It must be clamped
15972 | // to the range [0, 12].
15973 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2); |
15974 | assert(ArgCI &&
15975 | "Third arg to xxinsertw intrinsic must be constant integer");
15976 | const int64_t MaxIndex = 12; |
15977 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
15978 | |
15979 | // The builtin semantics don't exactly match the xxinsertw instruction's
15980 | // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
15981 | // word from the first argument, and inserts it in the second argument. The |
15982 | // instruction extracts the word from its second input register and inserts |
15983 | // it into its first input register, so swap the first and second arguments. |
15984 | std::swap(Op0, Op1); |
15985 | |
15986 | // Need to cast the second argument from a vector of unsigned int to a |
15987 | // vector of long long. |
15988 | Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2)); |
15989 | |
15990 | if (getTarget().isLittleEndian()) { |
15991 | // Reverse the double words in the vector we will extract from. |
15992 | Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2)); |
15993 | Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0}); |
15994 | |
15995 | // Reverse the index. |
15996 | Index = MaxIndex - Index; |
15997 | } |
15998 | |
15999 | // Intrinsic expects the first arg to be a vector of int. |
16000 | Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4)); |
16001 | Op2 = ConstantInt::getSigned(Int32Ty, Index); |
16002 | return Builder.CreateCall(F, {Op0, Op1, Op2}); |
16003 | } |
16004 | |
16005 | case PPC::BI__builtin_vsx_extractuword: { |
16006 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16007 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16008 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw); |
16009 | |
16010 | // Intrinsic expects the first argument to be a vector of doublewords. |
16011 | Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2)); |
16012 | |
16013 | // The second argument is a compile time constant int that needs to |
16014 | // be clamped to the range [0, 12]. |
16015 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1); |
16016 | assert(ArgCI &&
16017 | "Second Arg to xxextractuw intrinsic must be a constant integer!");
16018 | const int64_t MaxIndex = 12; |
16019 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
16020 | |
16021 | if (getTarget().isLittleEndian()) { |
16022 | // Reverse the index. |
16023 | Index = MaxIndex - Index; |
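      | // e.g. a requested byte offset of 0 becomes 12 on LE; together with the
      | // result shuffle below this yields the same word a BE target would get.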
16024 | Op1 = ConstantInt::getSigned(Int32Ty, Index); |
16025 | |
16026 | // Emit the call, then reverse the double words of the results vector. |
16027 | Value *Call = Builder.CreateCall(F, {Op0, Op1}); |
16028 | |
16029 | Value *ShuffleCall = |
16030 | Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0}); |
16031 | return ShuffleCall; |
16032 | } else { |
16033 | Op1 = ConstantInt::getSigned(Int32Ty, Index); |
16034 | return Builder.CreateCall(F, {Op0, Op1}); |
16035 | } |
16036 | } |
16037 | |
16038 | case PPC::BI__builtin_vsx_xxpermdi: { |
16039 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16040 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16041 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16042 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2); |
16043 | assert(ArgCI && "Third arg must be constant integer!");
16044 | |
16045 | unsigned Index = ArgCI->getZExtValue(); |
16046 | Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2)); |
16047 | Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2)); |
16048 | |
16049 | // Account for endianness by treating this as just a shuffle. So we use the |
16050 | // same indices for both LE and BE in order to produce expected results in |
16051 | // both cases. |
16052 | int ElemIdx0 = (Index & 2) >> 1; |
16053 | int ElemIdx1 = 2 + (Index & 1); |
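      | // For example, Index = 3 yields ElemIdx0 = 1 and ElemIdx1 = 3, so the
      | // shuffle selects element 1 of Op0 followed by element 1 of Op1.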
16054 | |
16055 | int ShuffleElts[2] = {ElemIdx0, ElemIdx1}; |
16056 | Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts); |
16057 | QualType BIRetType = E->getType(); |
16058 | auto RetTy = ConvertType(BIRetType); |
16059 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
16060 | } |
16061 | |
16062 | case PPC::BI__builtin_vsx_xxsldwi: { |
16063 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16064 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16065 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16066 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2); |
16067 | assert(ArgCI && "Third argument must be a compile time constant");
16068 | unsigned Index = ArgCI->getZExtValue() & 0x3; |
16069 | Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4)); |
16070 | Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int32Ty, 4)); |
16071 | |
16072 | // Create a shuffle mask |
16073 | int ElemIdx0; |
16074 | int ElemIdx1; |
16075 | int ElemIdx2; |
16076 | int ElemIdx3; |
16077 | if (getTarget().isLittleEndian()) { |
16078 | // Little endian element N comes from element 8+N-Index of the |
16079 | // concatenated wide vector (of course, using modulo arithmetic on |
16080 | // the total number of elements). |
16081 | ElemIdx0 = (8 - Index) % 8; |
16082 | ElemIdx1 = (9 - Index) % 8; |
16083 | ElemIdx2 = (10 - Index) % 8; |
16084 | ElemIdx3 = (11 - Index) % 8; |
16085 | } else { |
16086 | // Big endian ElemIdx<N> = Index + N |
16087 | ElemIdx0 = Index; |
16088 | ElemIdx1 = Index + 1; |
16089 | ElemIdx2 = Index + 2; |
16090 | ElemIdx3 = Index + 3; |
16091 | } |
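      | // For example, Index = 1 gives the mask {1, 2, 3, 4} on BE and
      | // {7, 0, 1, 2} on LE ((8 - 1) % 8 = 7, (9 - 1) % 8 = 0, and so on).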
16092 | |
16093 | int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3}; |
16094 | Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts); |
16095 | QualType BIRetType = E->getType(); |
16096 | auto RetTy = ConvertType(BIRetType); |
16097 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
16098 | } |
16099 | |
16100 | case PPC::BI__builtin_pack_vector_int128: { |
16101 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16102 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16103 | bool isLittleEndian = getTarget().isLittleEndian(); |
16104 | Value *UndefValue = |
16105 | llvm::UndefValue::get(llvm::FixedVectorType::get(Op0->getType(), 2)); |
16106 | Value *Res = Builder.CreateInsertElement( |
16107 | UndefValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0)); |
16108 | Res = Builder.CreateInsertElement(Res, Op1, |
16109 | (uint64_t)(isLittleEndian ? 0 : 1)); |
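      | // Op0 always supplies the most-significant doubleword of the packed
      | // 128-bit value: vector lane 1 on little-endian targets, lane 0 on
      | // big-endian targets.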
16110 | return Builder.CreateBitCast(Res, ConvertType(E->getType())); |
16111 | } |
16112 | |
16113 | case PPC::BI__builtin_unpack_vector_int128: { |
16114 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16115 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16116 | ConstantInt *Index = cast<ConstantInt>(Op1); |
16117 | Value *Unpacked = Builder.CreateBitCast( |
16118 | Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2)); |
16119 | |
16120 | if (getTarget().isLittleEndian()) |
16121 | Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue()); |
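      | // As with the pack builtin, the lane number is flipped on LE so that a
      | // caller-visible index of 0 always names the most-significant half.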
16122 | |
16123 | return Builder.CreateExtractElement(Unpacked, Index); |
16124 | } |
16125 | |
16126 | case PPC::BI__builtin_ppc_sthcx: { |
16127 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx); |
16128 | Value *Op0 = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
16129 | Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty); |
16130 | return Builder.CreateCall(F, {Op0, Op1}); |
16131 | } |
16132 | |
16133 | // The PPC MMA builtins take a pointer to a __vector_quad as an argument. |
16134 | // Some of the MMA instructions accumulate their result into an existing |
16135 | // accumulator whereas the others generate a new accumulator. So we need to |
16136 | // use custom code generation to expand a builtin call with a pointer to a |
16137 | // load (if the corresponding instruction accumulates its result) followed by |
16138 | // the call to the intrinsic and a store of the result. |
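      | // For example, an accumulating builtin such as
      | // __builtin_mma_xvf64gerpp(&Acc, P, V) is expanded to roughly:
      | //   %acc = load <512 x i1>, ptr %Acc
      | //   %res = call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %acc, ...)
      | //   store <512 x i1> %res, ptr %Acc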
16139 | #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \ |
16140 | case PPC::BI__builtin_##Name: |
16141 | #include "clang/Basic/BuiltinsPPC.def" |
16142 | { |
16143 | SmallVector<Value *, 4> Ops; |
16144 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
16145 | if (E->getArg(i)->getType()->isArrayType()) |
16146 | Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer()); |
16147 | else |
16148 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
16149 | // The first argument of these builtins is a pointer used to store their
16150 | // result. However, the llvm intrinsics return their result in multiple |
16151 | // return values. So, here we emit code extracting these values from the |
16152 | // intrinsic results and storing them using that pointer. |
16153 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc || |
16154 | BuiltinID == PPC::BI__builtin_vsx_disassemble_pair || |
16155 | BuiltinID == PPC::BI__builtin_mma_disassemble_pair) { |
16156 | unsigned NumVecs = 2; |
16157 | auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair; |
16158 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) { |
16159 | NumVecs = 4; |
16160 | Intrinsic = Intrinsic::ppc_mma_disassemble_acc; |
16161 | } |
16162 | llvm::Function *F = CGM.getIntrinsic(Intrinsic); |
16163 | Address Addr = EmitPointerWithAlignment(E->getArg(1)); |
16164 | Value *Vec = Builder.CreateLoad(Addr); |
16165 | Value *Call = Builder.CreateCall(F, {Vec}); |
16166 | llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
16167 | Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo()); |
16168 | for (unsigned i=0; i<NumVecs; i++) { |
16169 | Value *Vec = Builder.CreateExtractValue(Call, i); |
16170 | llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i); |
16171 | Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index); |
16172 | Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16)); |
16173 | } |
16174 | return Call; |
16175 | } |
16176 | if (BuiltinID == PPC::BI__builtin_vsx_build_pair || |
16177 | BuiltinID == PPC::BI__builtin_mma_build_acc) { |
16178 | // Reverse the order of the operands for LE, so the |
16179 | // same builtin call can be used on both LE and BE |
16180 | // without the need for the programmer to swap operands. |
16181 | // The operands are reversed starting from the second argument; the
16182 | // first operand is the pointer to the pair/accumulator
16183 | // that is being built.
16184 | if (getTarget().isLittleEndian()) |
16185 | std::reverse(Ops.begin() + 1, Ops.end()); |
16186 | } |
16187 | bool Accumulate; |
16188 | switch (BuiltinID) { |
16189 | #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ |
16190 | case PPC::BI__builtin_##Name: \ |
16191 | ID = Intrinsic::ppc_##Intr; \ |
16192 | Accumulate = Acc; \ |
16193 | break; |
16194 | #include "clang/Basic/BuiltinsPPC.def" |
16195 | } |
16196 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
16197 | BuiltinID == PPC::BI__builtin_vsx_stxvp || |
16198 | BuiltinID == PPC::BI__builtin_mma_lxvp || |
16199 | BuiltinID == PPC::BI__builtin_mma_stxvp) { |
16200 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
16201 | BuiltinID == PPC::BI__builtin_mma_lxvp) { |
16202 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
16203 | Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); |
16204 | } else { |
16205 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
16206 | Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); |
16207 | } |
16208 | Ops.pop_back(); |
16209 | llvm::Function *F = CGM.getIntrinsic(ID); |
16210 | return Builder.CreateCall(F, Ops, ""); |
16211 | } |
16212 | SmallVector<Value*, 4> CallOps; |
16213 | if (Accumulate) { |
16214 | Address Addr = EmitPointerWithAlignment(E->getArg(0)); |
16215 | Value *Acc = Builder.CreateLoad(Addr); |
16216 | CallOps.push_back(Acc); |
16217 | } |
16218 | for (unsigned i=1; i<Ops.size(); i++) |
16219 | CallOps.push_back(Ops[i]); |
16220 | llvm::Function *F = CGM.getIntrinsic(ID); |
16221 | Value *Call = Builder.CreateCall(F, CallOps); |
16222 | return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64)); |
16223 | } |
16224 | |
16225 | case PPC::BI__builtin_ppc_compare_and_swap: |
16226 | case PPC::BI__builtin_ppc_compare_and_swaplp: { |
16227 | Address Addr = EmitPointerWithAlignment(E->getArg(0)); |
16228 | Address OldValAddr = EmitPointerWithAlignment(E->getArg(1)); |
16229 | Value *OldVal = Builder.CreateLoad(OldValAddr); |
16230 | QualType AtomicTy = E->getArg(0)->getType()->getPointeeType(); |
16231 | LValue LV = MakeAddrLValue(Addr, AtomicTy); |
16232 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16233 | auto Pair = EmitAtomicCompareExchange( |
16234 | LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(), |
16235 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true); |
16236 | // Unlike C11's atomic_compare_exchange, according to
16237 | // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
16238 | // > In either case, the contents of the memory location specified by addr
16239 | // > are copied into the memory location specified by old_val_addr.
16240 | // However, it does not specify whether the store to OldValAddr is atomic
16241 | // or which memory order to use. Following XL's codegen, treat it as a
16242 | // normal store.
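      | // The i1 success flag from the compare-exchange is widened to i32 below
      | // to match the builtin's int return type.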
16243 | Value *LoadedVal = Pair.first.getScalarVal(); |
16244 | Builder.CreateStore(LoadedVal, OldValAddr); |
16245 | return Builder.CreateZExt(Pair.second, Builder.getInt32Ty()); |
16246 | } |
16247 | case PPC::BI__builtin_ppc_fetch_and_add: |
16248 | case PPC::BI__builtin_ppc_fetch_and_addlp: { |
16249 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
16250 | llvm::AtomicOrdering::Monotonic); |
16251 | } |
16252 | case PPC::BI__builtin_ppc_fetch_and_and: |
16253 | case PPC::BI__builtin_ppc_fetch_and_andlp: { |
16254 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
16255 | llvm::AtomicOrdering::Monotonic); |
16256 | } |
16257 | |
16258 | case PPC::BI__builtin_ppc_fetch_and_or: |
16259 | case PPC::BI__builtin_ppc_fetch_and_orlp: { |
16260 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
16261 | llvm::AtomicOrdering::Monotonic); |
16262 | } |
16263 | case PPC::BI__builtin_ppc_fetch_and_swap: |
16264 | case PPC::BI__builtin_ppc_fetch_and_swaplp: { |
16265 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
16266 | llvm::AtomicOrdering::Monotonic); |
16267 | } |
16268 | case PPC::BI__builtin_ppc_ldarx: |
16269 | case PPC::BI__builtin_ppc_lwarx: |
16270 | case PPC::BI__builtin_ppc_lharx: |
16271 | case PPC::BI__builtin_ppc_lbarx: |
16272 | return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E); |
16273 | case PPC::BI__builtin_ppc_mfspr: { |
16274 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16275 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32 |
16276 | ? Int32Ty |
16277 | : Int64Ty; |
16278 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType); |
16279 | return Builder.CreateCall(F, {Op0}); |
16280 | } |
16281 | case PPC::BI__builtin_ppc_mtspr: { |
16282 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16283 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16284 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32 |
16285 | ? Int32Ty |
16286 | : Int64Ty; |
16287 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType); |
16288 | return Builder.CreateCall(F, {Op0, Op1}); |
16289 | } |
16290 | case PPC::BI__builtin_ppc_popcntb: { |
16291 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
16292 | llvm::Type *ArgType = ArgValue->getType(); |
16293 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType}); |
16294 | return Builder.CreateCall(F, {ArgValue}, "popcntb"); |
16295 | } |
16296 | case PPC::BI__builtin_ppc_mtfsf: { |
16297 | // The builtin takes a uint32 that needs to be cast to an |
16298 | // f64 to be passed to the intrinsic. |
16299 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16300 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16301 | Value *Cast = Builder.CreateUIToFP(Op1, DoubleTy); |
16302 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf); |
16303 | return Builder.CreateCall(F, {Op0, Cast}, ""); |
16304 | } |
16305 | |
16306 | case PPC::BI__builtin_ppc_swdiv_nochk: |
16307 | case PPC::BI__builtin_ppc_swdivs_nochk: { |
16308 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16309 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
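      | // Emit the division with fast-math flags temporarily forced on, so the
      | // backend may expand it to the unchecked software-estimate sequence,
      | // then restore the caller's flags.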
16310 | FastMathFlags FMF = Builder.getFastMathFlags(); |
16311 | Builder.getFastMathFlags().setFast(); |
16312 | Value *FDiv = Builder.CreateFDiv(Op0, Op1, "swdiv_nochk"); |
16313 | Builder.getFastMathFlags() &= (FMF); |
16314 | return FDiv; |
16315 | } |
16316 | case PPC::BI__builtin_ppc_fric: |
16317 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16318 | *this, E, Intrinsic::rint, |
16319 | Intrinsic::experimental_constrained_rint)) |
16320 | .getScalarVal(); |
16321 | case PPC::BI__builtin_ppc_frim: |
16322 | case PPC::BI__builtin_ppc_frims: |
16323 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16324 | *this, E, Intrinsic::floor, |
16325 | Intrinsic::experimental_constrained_floor)) |
16326 | .getScalarVal(); |
16327 | case PPC::BI__builtin_ppc_frin: |
16328 | case PPC::BI__builtin_ppc_frins: |
16329 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16330 | *this, E, Intrinsic::round, |
16331 | Intrinsic::experimental_constrained_round)) |
16332 | .getScalarVal(); |
16333 | case PPC::BI__builtin_ppc_frip: |
16334 | case PPC::BI__builtin_ppc_frips: |
16335 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16336 | *this, E, Intrinsic::ceil, |
16337 | Intrinsic::experimental_constrained_ceil)) |
16338 | .getScalarVal(); |
16339 | case PPC::BI__builtin_ppc_friz: |
16340 | case PPC::BI__builtin_ppc_frizs: |
16341 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16342 | *this, E, Intrinsic::trunc, |
16343 | Intrinsic::experimental_constrained_trunc)) |
16344 | .getScalarVal(); |
16345 | case PPC::BI__builtin_ppc_fsqrt: |
16346 | case PPC::BI__builtin_ppc_fsqrts: |
16347 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16348 | *this, E, Intrinsic::sqrt, |
16349 | Intrinsic::experimental_constrained_sqrt)) |
16350 | .getScalarVal(); |
16351 | case PPC::BI__builtin_ppc_test_data_class: { |
16352 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16353 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16354 | llvm::Type *ArgType = Op0->getType(); |
16355 | unsigned IntrinsicID; |
16356 | if (ArgType->isDoubleTy()) |
16357 | IntrinsicID = Intrinsic::ppc_test_data_class_d; |
16358 | else if (ArgType->isFloatTy()) |
16359 | IntrinsicID = Intrinsic::ppc_test_data_class_f; |
16360 | else |
16361 | llvm_unreachable("Invalid Argument Type");
16362 | return Builder.CreateCall(CGM.getIntrinsic(IntrinsicID), {Op0, Op1}, |
16363 | "test_data_class"); |
16364 | } |
16365 | case PPC::BI__builtin_ppc_maxfe: { |
16366 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16367 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16368 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16369 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
16370 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfe), |
16371 | {Op0, Op1, Op2, Op3}); |
16372 | } |
16373 | case PPC::BI__builtin_ppc_maxfl: { |
16374 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16375 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16376 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16377 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
16378 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfl), |
16379 | {Op0, Op1, Op2, Op3}); |
16380 | } |
16381 | case PPC::BI__builtin_ppc_maxfs: { |
16382 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16383 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16384 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16385 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
16386 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfs), |
16387 | {Op0, Op1, Op2, Op3}); |
16388 | } |
16389 | case PPC::BI__builtin_ppc_minfe: { |
16390 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16391 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16392 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16393 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
16394 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfe), |
16395 | {Op0, Op1, Op2, Op3}); |
16396 | } |
16397 | case PPC::BI__builtin_ppc_minfl: { |
16398 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16399 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16400 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16401 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
16402 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfl), |
16403 | {Op0, Op1, Op2, Op3}); |
16404 | } |
16405 | case PPC::BI__builtin_ppc_minfs: { |
16406 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16407 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16408 | Value *Op2 = EmitScalarExpr(E->getArg(2)); |
16409 | Value *Op3 = EmitScalarExpr(E->getArg(3)); |
16410 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfs), |
16411 | {Op0, Op1, Op2, Op3}); |
16412 | } |
16413 | case PPC::BI__builtin_ppc_swdiv: |
16414 | case PPC::BI__builtin_ppc_swdivs: { |
16415 | Value *Op0 = EmitScalarExpr(E->getArg(0)); |
16416 | Value *Op1 = EmitScalarExpr(E->getArg(1)); |
16417 | return Builder.CreateFDiv(Op0, Op1, "swdiv"); |
16418 | } |
16419 | } |
16420 | } |
16421 | |
16422 | namespace { |
16423 | // If \p E is not a null pointer, insert an address space cast to match
16424 | // the return type of \p E if necessary.
16425 | Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF, |
16426 | const CallExpr *E = nullptr) { |
16427 | auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr); |
16428 | auto *Call = CGF.Builder.CreateCall(F); |
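      | // The return attributes below reflect the 64-byte
      | // hsa_kernel_dispatch_packet_t the intrinsic points at.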
16429 | Call->addRetAttr( |
16430 | Attribute::getWithDereferenceableBytes(Call->getContext(), 64)); |
16431 | Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4))); |
16432 | if (!E) |
16433 | return Call; |
16434 | QualType BuiltinRetType = E->getType(); |
16435 | auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType)); |
16436 | if (RetTy == Call->getType()) |
16437 | return Call; |
16438 | return CGF.Builder.CreateAddrSpaceCast(Call, RetTy); |
16439 | } |
16440 | |
16441 | Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) { |
16442 | auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr); |
16443 | auto *Call = CGF.Builder.CreateCall(F); |
16444 | Call->addRetAttr( |
16445 | Attribute::getWithDereferenceableBytes(Call->getContext(), 256)); |
16446 | Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8))); |
16447 | return Call; |
16448 | } |
16449 | |
16450 | // \p Index is 0, 1, and 2 for x, y, and z dimension, respectively. |
16451 | Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) { |
16452 | bool IsCOV_5 = CGF.getTarget().getTargetOpts().CodeObjectVersion == |
16453 | clang::TargetOptions::COV_5; |
16454 | Constant *Offset; |
16455 | Value *DP; |
16456 | if (IsCOV_5) { |
16457 | // Indexing the implicit kernarg segment. |
16458 | Offset = llvm::ConstantInt::get(CGF.Int32Ty, 12 + Index * 2); |
16459 | DP = EmitAMDGPUImplicitArgPtr(CGF); |
16460 | } else { |
16461 | // Indexing the HSA kernel_dispatch_packet struct. |
16462 | Offset = llvm::ConstantInt::get(CGF.Int32Ty, 4 + Index * 2); |
16463 | DP = EmitAMDGPUDispatchPtr(CGF); |
16464 | } |
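      | // The 16-bit workgroup_size_{x,y,z} fields sit at byte offsets 4, 6 and
      | // 8 of the dispatch packet (or 12, 14 and 16 of the COV5 implicit
      | // kernarg block), hence the Index * 2 scaling above.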
16465 | |
16466 | auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset); |
16467 | auto *DstTy = |
16468 | CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace()); |
16469 | auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy); |
16470 | auto *LD = CGF.Builder.CreateLoad( |
16471 | Address(Cast, CGF.Int16Ty, CharUnits::fromQuantity(2))); |
16472 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
16473 | llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1), |
16474 | APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1)); |
16475 | LD->setMetadata(llvm::LLVMContext::MD_range, RNode); |
16476 | LD->setMetadata(llvm::LLVMContext::MD_invariant_load, |
16477 | llvm::MDNode::get(CGF.getLLVMContext(), None)); |
16478 | return LD; |
16479 | } |
16480 | |
16481 | // \p Index is 0, 1, and 2 for x, y, and z dimension, respectively. |
16482 | Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) { |
16483 | const unsigned XOffset = 12; |
16484 | auto *DP = EmitAMDGPUDispatchPtr(CGF); |
16485 | // Indexing the HSA kernel_dispatch_packet struct. |
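      | // grid_size_x sits at byte offset 12; y and z follow as 32-bit fields
      | // at offsets 16 and 20, hence XOffset + Index * 4.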
16486 | auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4); |
16487 | auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset); |
16488 | auto *DstTy = |
16489 | CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace()); |
16490 | auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy); |
16491 | auto *LD = CGF.Builder.CreateLoad( |
16492 | Address(Cast, CGF.Int32Ty, CharUnits::fromQuantity(4))); |
16493 | LD->setMetadata(llvm::LLVMContext::MD_invariant_load, |
16494 | llvm::MDNode::get(CGF.getLLVMContext(), None)); |
16495 | return LD; |
16496 | } |
16497 | } // namespace |
16498 | |
16499 | // For processing memory ordering and memory scope arguments of various |
16500 | // amdgcn builtins. |
16501 | // \p Order takes a C++11 compatible memory-ordering specifier and converts
16502 | // it into LLVM's memory ordering specifier using the atomic C ABI, and writes
16503 | // to \p AO. \p Scope takes a const char * and converts it into AMDGCN |
16504 | // specific SyncScopeID and writes it to \p SSID. |
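      | // For example, __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup")
      | // arrives here with a seq_cst ordering constant and the scope string
      | // "workgroup", producing a sequentially consistent workgroup-scope fence.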
16505 | bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope, |
16506 | llvm::AtomicOrdering &AO, |
16507 | llvm::SyncScope::ID &SSID) { |
16508 | if (isa<llvm::ConstantInt>(Order)) { |
16509 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
16510 | |
16511 | // Map C11/C++11 memory ordering to LLVM memory ordering |
16512 | assert(llvm::isValidAtomicOrderingCABI(ord));
16513 | switch (static_cast<llvm::AtomicOrderingCABI>(ord)) { |
16514 | case llvm::AtomicOrderingCABI::acquire: |
16515 | case llvm::AtomicOrderingCABI::consume: |
16516 | AO = llvm::AtomicOrdering::Acquire; |
16517 | break; |
16518 | case llvm::AtomicOrderingCABI::release: |
16519 | AO = llvm::AtomicOrdering::Release; |
16520 | break; |
16521 | case llvm::AtomicOrderingCABI::acq_rel: |
16522 | AO = llvm::AtomicOrdering::AcquireRelease; |
16523 | break; |
16524 | case llvm::AtomicOrderingCABI::seq_cst: |
16525 | AO = llvm::AtomicOrdering::SequentiallyConsistent; |
16526 | break; |
16527 | case llvm::AtomicOrderingCABI::relaxed: |
16528 | AO = llvm::AtomicOrdering::Monotonic; |
16529 | break; |
16530 | } |
16531 | |
16532 | StringRef scp; |
16533 | llvm::getConstantStringInfo(Scope, scp); |
16534 | SSID = getLLVMContext().getOrInsertSyncScopeID(scp); |
16535 | return true; |
16536 | } |
16537 | return false; |
16538 | } |
16539 | |
16540 | Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, |
16541 | const CallExpr *E) { |
16542 | llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent; |
16543 | llvm::SyncScope::ID SSID; |
16544 | switch (BuiltinID) { |
16545 | case AMDGPU::BI__builtin_amdgcn_div_scale: |
16546 | case AMDGPU::BI__builtin_amdgcn_div_scalef: { |
16547 | // Translate from the intrinsic's struct return to the builtin's out
16548 | // argument. |
16549 | |
16550 | Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
16551 | |
16552 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
16553 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
16554 | llvm::Value *Z = EmitScalarExpr(E->getArg(2)); |
16555 | |
16556 | llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, |
16557 | X->getType()); |
16558 | |
16559 | llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); |
16560 | |
16561 | llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0); |
16562 | llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1); |
16563 | |
16564 | llvm::Type *RealFlagType = FlagOutPtr.getElementType(); |
16565 | |
16566 | llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType); |
16567 | Builder.CreateStore(FlagExt, FlagOutPtr); |
16568 | return Result; |
16569 | } |
16570 | case AMDGPU::BI__builtin_amdgcn_div_fmas: |
16571 | case AMDGPU::BI__builtin_amdgcn_div_fmasf: { |
16572 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16573 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16574 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16575 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
16576 | |
16577 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, |
16578 | Src0->getType()); |
16579 | llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); |
16580 | return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); |
16581 | } |
16582 | |
16583 | case AMDGPU::BI__builtin_amdgcn_ds_swizzle: |
16584 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle); |
16585 | case AMDGPU::BI__builtin_amdgcn_mov_dpp8: |
16586 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8); |
16587 | case AMDGPU::BI__builtin_amdgcn_mov_dpp: |
16588 | case AMDGPU::BI__builtin_amdgcn_update_dpp: { |
16589 | llvm::SmallVector<llvm::Value *, 6> Args; |
16590 | for (unsigned I = 0; I != E->getNumArgs(); ++I) |
16591 | Args.push_back(EmitScalarExpr(E->getArg(I))); |
16592 | assert(Args.size() == 5 || Args.size() == 6);
16593 | if (Args.size() == 5) |
16594 | Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType())); |
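      | // mov_dpp lacks the old-value operand that update_dpp takes, so an
      | // undef placeholder is prepended and both builtins share the
      | // update_dpp intrinsic.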
16595 | Function *F = |
16596 | CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType()); |
16597 | return Builder.CreateCall(F, Args); |
16598 | } |
16599 | case AMDGPU::BI__builtin_amdgcn_div_fixup: |
16600 | case AMDGPU::BI__builtin_amdgcn_div_fixupf: |
16601 | case AMDGPU::BI__builtin_amdgcn_div_fixuph: |
16602 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup); |
16603 | case AMDGPU::BI__builtin_amdgcn_trig_preop: |
16604 | case AMDGPU::BI__builtin_amdgcn_trig_preopf: |
16605 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop); |
16606 | case AMDGPU::BI__builtin_amdgcn_rcp: |
16607 | case AMDGPU::BI__builtin_amdgcn_rcpf: |
16608 | case AMDGPU::BI__builtin_amdgcn_rcph: |
16609 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); |
16610 | case AMDGPU::BI__builtin_amdgcn_sqrt: |
16611 | case AMDGPU::BI__builtin_amdgcn_sqrtf: |
16612 | case AMDGPU::BI__builtin_amdgcn_sqrth: |
16613 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt); |
16614 | case AMDGPU::BI__builtin_amdgcn_rsq: |
16615 | case AMDGPU::BI__builtin_amdgcn_rsqf: |
16616 | case AMDGPU::BI__builtin_amdgcn_rsqh: |
16617 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); |
16618 | case AMDGPU::BI__builtin_amdgcn_rsq_clamp: |
16619 | case AMDGPU::BI__builtin_amdgcn_rsq_clampf: |
16620 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp); |
16621 | case AMDGPU::BI__builtin_amdgcn_sinf: |
16622 | case AMDGPU::BI__builtin_amdgcn_sinh: |
16623 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin); |
16624 | case AMDGPU::BI__builtin_amdgcn_cosf: |
16625 | case AMDGPU::BI__builtin_amdgcn_cosh: |
16626 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos); |
16627 | case AMDGPU::BI__builtin_amdgcn_dispatch_ptr: |
16628 | return EmitAMDGPUDispatchPtr(*this, E); |
16629 | case AMDGPU::BI__builtin_amdgcn_log_clampf: |
16630 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp); |
16631 | case AMDGPU::BI__builtin_amdgcn_ldexp: |
16632 | case AMDGPU::BI__builtin_amdgcn_ldexpf: |
16633 | case AMDGPU::BI__builtin_amdgcn_ldexph: |
16634 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); |
16635 | case AMDGPU::BI__builtin_amdgcn_frexp_mant: |
16636 | case AMDGPU::BI__builtin_amdgcn_frexp_mantf: |
16637 | case AMDGPU::BI__builtin_amdgcn_frexp_manth: |
16638 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant); |
16639 | case AMDGPU::BI__builtin_amdgcn_frexp_exp: |
16640 | case AMDGPU::BI__builtin_amdgcn_frexp_expf: { |
16641 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16642 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
16643 | { Builder.getInt32Ty(), Src0->getType() }); |
16644 | return Builder.CreateCall(F, Src0); |
16645 | } |
16646 | case AMDGPU::BI__builtin_amdgcn_frexp_exph: { |
16647 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16648 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
16649 | { Builder.getInt16Ty(), Src0->getType() }); |
16650 | return Builder.CreateCall(F, Src0); |
16651 | } |
16652 | case AMDGPU::BI__builtin_amdgcn_fract: |
16653 | case AMDGPU::BI__builtin_amdgcn_fractf: |
16654 | case AMDGPU::BI__builtin_amdgcn_fracth: |
16655 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract); |
16656 | case AMDGPU::BI__builtin_amdgcn_lerp: |
16657 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp); |
16658 | case AMDGPU::BI__builtin_amdgcn_ubfe: |
16659 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe); |
16660 | case AMDGPU::BI__builtin_amdgcn_sbfe: |
16661 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe); |
16662 | case AMDGPU::BI__builtin_amdgcn_uicmp: |
16663 | case AMDGPU::BI__builtin_amdgcn_uicmpl: |
16664 | case AMDGPU::BI__builtin_amdgcn_sicmp: |
16665 | case AMDGPU::BI__builtin_amdgcn_sicmpl: { |
16666 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16667 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16668 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16669 | |
16670 | // FIXME-GFX10: How should 32 bit mask be handled? |
16671 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp, |
16672 | { Builder.getInt64Ty(), Src0->getType() }); |
16673 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16674 | } |
16675 | case AMDGPU::BI__builtin_amdgcn_fcmp: |
16676 | case AMDGPU::BI__builtin_amdgcn_fcmpf: { |
16677 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16678 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16679 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16680 | |
16681 | // FIXME-GFX10: How should 32 bit mask be handled? |
16682 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp, |
16683 | { Builder.getInt64Ty(), Src0->getType() }); |
16684 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16685 | } |
16686 | case AMDGPU::BI__builtin_amdgcn_class: |
16687 | case AMDGPU::BI__builtin_amdgcn_classf: |
16688 | case AMDGPU::BI__builtin_amdgcn_classh: |
16689 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class); |
16690 | case AMDGPU::BI__builtin_amdgcn_fmed3f: |
16691 | case AMDGPU::BI__builtin_amdgcn_fmed3h: |
16692 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3); |
16693 | case AMDGPU::BI__builtin_amdgcn_ds_append: |
16694 | case AMDGPU::BI__builtin_amdgcn_ds_consume: { |
16695 | Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ? |
16696 | Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume; |
16697 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16698 | Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() }); |
16699 | return Builder.CreateCall(F, { Src0, Builder.getFalse() }); |
16700 | } |
16701 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
16702 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
16703 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: { |
16704 | Intrinsic::ID Intrin; |
16705 | switch (BuiltinID) { |
16706 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
16707 | Intrin = Intrinsic::amdgcn_ds_fadd; |
16708 | break; |
16709 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
16710 | Intrin = Intrinsic::amdgcn_ds_fmin; |
16711 | break; |
16712 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: |
16713 | Intrin = Intrinsic::amdgcn_ds_fmax; |
16714 | break; |
16715 | } |
16716 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16717 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16718 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16719 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
16720 | llvm::Value *Src4 = EmitScalarExpr(E->getArg(4)); |
16721 | llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() }); |
16722 | llvm::FunctionType *FTy = F->getFunctionType(); |
16723 | llvm::Type *PTy = FTy->getParamType(0); |
16724 | Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy); |
16725 | return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 }); |
16726 | } |
16727 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: |
16728 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: |
16729 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16: |
16730 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64: |
16731 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64: |
16732 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: |
16733 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64: |
16734 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: |
16735 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32: |
16736 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: { |
16737 | Intrinsic::ID IID; |
16738 | llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext()); |
16739 | switch (BuiltinID) { |
16740 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: |
16741 | ArgTy = llvm::Type::getFloatTy(getLLVMContext()); |
16742 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
16743 | break; |
16744 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16: |
16745 | ArgTy = llvm::FixedVectorType::get( |
16746 | llvm::Type::getHalfTy(getLLVMContext()), 2); |
16747 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
16748 | break; |
16749 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: |
16750 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
16751 | break; |
16752 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64: |
16753 | IID = Intrinsic::amdgcn_global_atomic_fmin; |
16754 | break; |
16755 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64: |
16756 | IID = Intrinsic::amdgcn_global_atomic_fmax; |
16757 | break; |
16758 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: |
16759 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
16760 | break; |
16761 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64: |
16762 | IID = Intrinsic::amdgcn_flat_atomic_fmin; |
16763 | break; |
16764 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: |
16765 | IID = Intrinsic::amdgcn_flat_atomic_fmax; |
16766 | break; |
16767 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32: |
16768 | ArgTy = llvm::Type::getFloatTy(getLLVMContext()); |
16769 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
16770 | break; |
16771 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: |
16772 | ArgTy = llvm::FixedVectorType::get( |
16773 | llvm::Type::getHalfTy(getLLVMContext()), 2); |
16774 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
16775 | break; |
16776 | } |
16777 | llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); |
16778 | llvm::Value *Val = EmitScalarExpr(E->getArg(1)); |
16779 | llvm::Function *F = |
16780 | CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()}); |
16781 | return Builder.CreateCall(F, {Addr, Val}); |
16782 | } |
16783 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16: |
16784 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: { |
16785 | Intrinsic::ID IID; |
16786 | switch (BuiltinID) { |
16787 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16: |
16788 | IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16; |
16789 | break; |
16790 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: |
16791 | IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16; |
16792 | break; |
16793 | } |
16794 | llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); |
16795 | llvm::Value *Val = EmitScalarExpr(E->getArg(1)); |
16796 | llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()}); |
16797 | return Builder.CreateCall(F, {Addr, Val}); |
16798 | } |
16799 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64: |
16800 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: { |
16801 | Intrinsic::ID IID; |
16802 | llvm::Type *ArgTy; |
16803 | switch (BuiltinID) { |
16804 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: |
16805 | ArgTy = llvm::Type::getFloatTy(getLLVMContext()); |
16806 | IID = Intrinsic::amdgcn_ds_fadd; |
16807 | break; |
16808 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64: |
16809 | ArgTy = llvm::Type::getDoubleTy(getLLVMContext()); |
16810 | IID = Intrinsic::amdgcn_ds_fadd; |
16811 | break; |
16812 | } |
16813 | llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); |
16814 | llvm::Value *Val = EmitScalarExpr(E->getArg(1)); |
16815 | llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue( |
16816 | llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true)); |
16817 | llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue( |
16818 | llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0)); |
16819 | llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy}); |
16820 | return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1}); |
16821 | } |
16822 | case AMDGPU::BI__builtin_amdgcn_read_exec: { |
16823 | CallInst *CI = cast<CallInst>( |
16824 | EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec")); |
16825 | CI->setConvergent(); |
16826 | return CI; |
16827 | } |
16828 | case AMDGPU::BI__builtin_amdgcn_read_exec_lo: |
16829 | case AMDGPU::BI__builtin_amdgcn_read_exec_hi: { |
16830 | StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ? |
16831 | "exec_lo" : "exec_hi"; |
16832 | CallInst *CI = cast<CallInst>( |
16833 | EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName)); |
16834 | CI->setConvergent(); |
16835 | return CI; |
16836 | } |
16837 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray: |
16838 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h: |
16839 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l: |
16840 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: { |
16841 | llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0)); |
16842 | llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1)); |
16843 | llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2)); |
16844 | llvm::Value *RayDir = EmitScalarExpr(E->getArg(3)); |
16845 | llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4)); |
16846 | llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5)); |
16847 | |
16848 | // The builtins take these arguments as vec4 where the last element is |
16849 | // ignored. The intrinsic takes them as vec3. |
16850 | RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin, |
16851 | ArrayRef<int>{0, 1, 2}); |
16852 | RayDir = |
16853 | Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2}); |
16854 | RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir, |
16855 | ArrayRef<int>{0, 1, 2}); |
16856 | |
16857 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray, |
16858 | {NodePtr->getType(), RayDir->getType()}); |
16859 | return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir, |
16860 | RayInverseDir, TextureDescr}); |
16861 | } |
16862 | |
16863 | // amdgcn workitem |
16864 | case AMDGPU::BI__builtin_amdgcn_workitem_id_x: |
16865 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024); |
16866 | case AMDGPU::BI__builtin_amdgcn_workitem_id_y: |
16867 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024); |
16868 | case AMDGPU::BI__builtin_amdgcn_workitem_id_z: |
16869 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024); |
16870 | |
16871 | // amdgcn workgroup size |
16872 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_x: |
16873 | return EmitAMDGPUWorkGroupSize(*this, 0); |
16874 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_y: |
16875 | return EmitAMDGPUWorkGroupSize(*this, 1); |
16876 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_z: |
16877 | return EmitAMDGPUWorkGroupSize(*this, 2); |
16878 | |
16879 | // amdgcn grid size |
16880 | case AMDGPU::BI__builtin_amdgcn_grid_size_x: |
16881 | return EmitAMDGPUGridSize(*this, 0); |
16882 | case AMDGPU::BI__builtin_amdgcn_grid_size_y: |
16883 | return EmitAMDGPUGridSize(*this, 1); |
16884 | case AMDGPU::BI__builtin_amdgcn_grid_size_z: |
16885 | return EmitAMDGPUGridSize(*this, 2); |
16886 | |
16887 | // r600 intrinsics |
16888 | case AMDGPU::BI__builtin_r600_recipsqrt_ieee: |
16889 | case AMDGPU::BI__builtin_r600_recipsqrt_ieeef: |
16890 | return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee); |
16891 | case AMDGPU::BI__builtin_r600_read_tidig_x: |
16892 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024); |
16893 | case AMDGPU::BI__builtin_r600_read_tidig_y: |
16894 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024); |
16895 | case AMDGPU::BI__builtin_r600_read_tidig_z: |
16896 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024); |
16897 | case AMDGPU::BI__builtin_amdgcn_alignbit: { |
16898 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16899 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16900 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16901 | Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType()); |
16902 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16903 | } |
16904 | |
16905 | case AMDGPU::BI__builtin_amdgcn_fence: { |
16906 | if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)), |
16907 | EmitScalarExpr(E->getArg(1)), AO, SSID)) |
16908 | return Builder.CreateFence(AO, SSID); |
16909 | LLVM_FALLTHROUGH;
16910 | } |
16911 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
16912 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
16913 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
16914 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: { |
16915 | unsigned BuiltinAtomicOp; |
16916 | llvm::Type *ResultType = ConvertType(E->getType()); |
16917 | |
16918 | switch (BuiltinID) { |
16919 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
16920 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
16921 | BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc; |
16922 | break; |
16923 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
16924 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: |
16925 | BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec; |
16926 | break; |
16927 | } |
16928 | |
16929 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
16930 | Value *Val = EmitScalarExpr(E->getArg(1)); |
16931 | |
16932 | llvm::Function *F = |
16933 | CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()}); |
16934 | |
16935 | if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)), |
16936 | EmitScalarExpr(E->getArg(3)), AO, SSID)) { |
16937 | |
16938 | // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
16939 | // scope as unsigned values.
16940 | Value *MemOrder = Builder.getInt32(static_cast<int>(AO)); |
16941 | Value *MemScope = Builder.getInt32(static_cast<int>(SSID)); |
16942 | |
16943 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
16944 | bool Volatile = |
16945 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
16946 | Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile)); |
16947 | |
16948 | return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile}); |
16949 | } |
16950 | LLVM_FALLTHROUGH;
16951 | } |
16952 | default: |
16953 | return nullptr; |
16954 | } |
16955 | } |
16956 | |
16957 | /// Handle a SystemZ function in which the final argument is a pointer |
16958 | /// to an int that receives the post-instruction CC value. At the LLVM level |
16959 | /// this is represented as a function that returns a {result, cc} pair. |
16960 | static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, |
16961 | unsigned IntrinsicID, |
16962 | const CallExpr *E) { |
16963 | unsigned NumArgs = E->getNumArgs() - 1; |
16964 | SmallVector<Value *, 8> Args(NumArgs); |
16965 | for (unsigned I = 0; I < NumArgs; ++I) |
16966 | Args[I] = CGF.EmitScalarExpr(E->getArg(I)); |
16967 | Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); |
16968 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID); |
16969 | Value *Call = CGF.Builder.CreateCall(F, Args); |
16970 | Value *CC = CGF.Builder.CreateExtractValue(Call, 1); |
16971 | CGF.Builder.CreateStore(CC, CCPtr); |
16972 | return CGF.Builder.CreateExtractValue(Call, 0); |
16973 | } |
16974 | |
16975 | Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, |
16976 | const CallExpr *E) { |
16977 | switch (BuiltinID) { |
16978 | case SystemZ::BI__builtin_tbegin: { |
16979 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
16980 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
16981 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); |
16982 | return Builder.CreateCall(F, {TDB, Control}); |
16983 | } |
16984 | case SystemZ::BI__builtin_tbegin_nofloat: { |
16985 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
16986 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
16987 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); |
16988 | return Builder.CreateCall(F, {TDB, Control}); |
16989 | } |
16990 | case SystemZ::BI__builtin_tbeginc: { |
16991 | Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); |
16992 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); |
16993 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); |
16994 | return Builder.CreateCall(F, {TDB, Control}); |
16995 | } |
16996 | case SystemZ::BI__builtin_tabort: { |
16997 | Value *Data = EmitScalarExpr(E->getArg(0)); |
16998 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort); |
16999 | return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort")); |
17000 | } |
17001 | case SystemZ::BI__builtin_non_tx_store: { |
17002 | Value *Address = EmitScalarExpr(E->getArg(0)); |
17003 | Value *Data = EmitScalarExpr(E->getArg(1)); |
17004 | Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); |
17005 | return Builder.CreateCall(F, {Data, Address}); |
17006 | } |
17007 | |
17008 | // Vector builtins. Note that most vector builtins are mapped automatically |
17009 | // to target-specific LLVM intrinsics. The ones handled specially here can |
17010 | // be represented via standard LLVM IR, which is preferable to enable common |
17011 | // LLVM optimizations. |
17012 | |
17013 | case SystemZ::BI__builtin_s390_vpopctb: |
17014 | case SystemZ::BI__builtin_s390_vpopcth: |
17015 | case SystemZ::BI__builtin_s390_vpopctf: |
17016 | case SystemZ::BI__builtin_s390_vpopctg: { |
17017 | llvm::Type *ResultType = ConvertType(E->getType()); |
17018 | Value *X = EmitScalarExpr(E->getArg(0)); |
17019 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
17020 | return Builder.CreateCall(F, X); |
17021 | } |
17022 | |
17023 | case SystemZ::BI__builtin_s390_vclzb: |
17024 | case SystemZ::BI__builtin_s390_vclzh: |
17025 | case SystemZ::BI__builtin_s390_vclzf: |
17026 | case SystemZ::BI__builtin_s390_vclzg: { |
17027 | llvm::Type *ResultType = ConvertType(E->getType()); |
17028 | Value *X = EmitScalarExpr(E->getArg(0)); |
17029 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
17030 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
17031 | return Builder.CreateCall(F, {X, Undef}); |
17032 | } |
17033 | |
17034 | case SystemZ::BI__builtin_s390_vctzb: |
17035 | case SystemZ::BI__builtin_s390_vctzh: |
17036 | case SystemZ::BI__builtin_s390_vctzf: |
17037 | case SystemZ::BI__builtin_s390_vctzg: { |
17038 | llvm::Type *ResultType = ConvertType(E->getType()); |
17039 | Value *X = EmitScalarExpr(E->getArg(0)); |
17040 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
17041 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
17042 | return Builder.CreateCall(F, {X, Undef}); |
17043 | } |
17044 | |
17045 | case SystemZ::BI__builtin_s390_vfsqsb: |
17046 | case SystemZ::BI__builtin_s390_vfsqdb: { |
17047 | llvm::Type *ResultType = ConvertType(E->getType()); |
17048 | Value *X = EmitScalarExpr(E->getArg(0)); |
17049 | if (Builder.getIsFPConstrained()) { |
17050 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType); |
17051 | return Builder.CreateConstrainedFPCall(F, { X }); |
17052 | } else { |
17053 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
17054 | return Builder.CreateCall(F, X); |
17055 | } |
17056 | } |
17057 | case SystemZ::BI__builtin_s390_vfmasb: |
17058 | case SystemZ::BI__builtin_s390_vfmadb: { |
17059 | llvm::Type *ResultType = ConvertType(E->getType()); |
17060 | Value *X = EmitScalarExpr(E->getArg(0)); |
17061 | Value *Y = EmitScalarExpr(E->getArg(1)); |
17062 | Value *Z = EmitScalarExpr(E->getArg(2)); |
17063 | if (Builder.getIsFPConstrained()) { |
17064 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
17065 | return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); |
17066 | } else { |
17067 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
17068 | return Builder.CreateCall(F, {X, Y, Z}); |
17069 | } |
17070 | } |
17071 | case SystemZ::BI__builtin_s390_vfmssb: |
17072 | case SystemZ::BI__builtin_s390_vfmsdb: { |
17073 | llvm::Type *ResultType = ConvertType(E->getType()); |
17074 | Value *X = EmitScalarExpr(E->getArg(0)); |
17075 | Value *Y = EmitScalarExpr(E->getArg(1)); |
17076 | Value *Z = EmitScalarExpr(E->getArg(2)); |
17077 | if (Builder.getIsFPConstrained()) { |
17078 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
17079 | return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
17080 | } else { |
17081 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
17082 | return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
17083 | } |
17084 | } |
17085 | case SystemZ::BI__builtin_s390_vfnmasb: |
17086 | case SystemZ::BI__builtin_s390_vfnmadb: { |
17087 | llvm::Type *ResultType = ConvertType(E->getType()); |
17088 | Value *X = EmitScalarExpr(E->getArg(0)); |
17089 | Value *Y = EmitScalarExpr(E->getArg(1)); |
17090 | Value *Z = EmitScalarExpr(E->getArg(2)); |
17091 | if (Builder.getIsFPConstrained()) { |
17092 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
17093 | return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); |
17094 | } else { |
17095 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
17096 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); |
17097 | } |
17098 | } |
17099 | case SystemZ::BI__builtin_s390_vfnmssb: |
17100 | case SystemZ::BI__builtin_s390_vfnmsdb: { |
17101 | llvm::Type *ResultType = ConvertType(E->getType()); |
17102 | Value *X = EmitScalarExpr(E->getArg(0)); |
17103 | Value *Y = EmitScalarExpr(E->getArg(1)); |
17104 | Value *Z = EmitScalarExpr(E->getArg(2)); |
17105 | if (Builder.getIsFPConstrained()) { |
17106 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
17107 | Value *NegZ = Builder.CreateFNeg(Z, "sub"); |
17108 | return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ})); |
17109 | } else { |
17110 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
17111 | Value *NegZ = Builder.CreateFNeg(Z, "neg"); |
17112 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ})); |
17113 | } |
17114 | } |
17115 | case SystemZ::BI__builtin_s390_vflpsb: |
17116 | case SystemZ::BI__builtin_s390_vflpdb: { |
17117 | llvm::Type *ResultType = ConvertType(E->getType()); |
17118 | Value *X = EmitScalarExpr(E->getArg(0)); |
17119 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
17120 | return Builder.CreateCall(F, X); |
17121 | } |
17122 | case SystemZ::BI__builtin_s390_vflnsb: |
17123 | case SystemZ::BI__builtin_s390_vflndb: { |
17124 | llvm::Type *ResultType = ConvertType(E->getType()); |
17125 | Value *X = EmitScalarExpr(E->getArg(0)); |
17126 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
17127 | return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg"); |
17128 | } |
17129 | case SystemZ::BI__builtin_s390_vfisb: |
17130 | case SystemZ::BI__builtin_s390_vfidb: { |
17131 | llvm::Type *ResultType = ConvertType(E->getType()); |
17132 | Value *X = EmitScalarExpr(E->getArg(0)); |
17133 | // Constant-fold the M4 and M5 mask arguments. |
17134 | llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext()); |
17135 | llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
17136 | // Check whether this instance can be represented via a LLVM standard |
17137 | // intrinsic. We only support some combinations of M4 and M5. |
17138 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
17139 | Intrinsic::ID CI; |
17140 | switch (M4.getZExtValue()) { |
17141 | default: break; |
17142 | case 0: // IEEE-inexact exception allowed |
17143 | switch (M5.getZExtValue()) { |
17144 | default: break; |
17145 | case 0: ID = Intrinsic::rint; |
17146 | CI = Intrinsic::experimental_constrained_rint; break; |
17147 | } |
17148 | break; |
17149 | case 4: // IEEE-inexact exception suppressed |
17150 | switch (M5.getZExtValue()) { |
17151 | default: break; |
17152 | case 0: ID = Intrinsic::nearbyint; |
17153 | CI = Intrinsic::experimental_constrained_nearbyint; break; |
17154 | case 1: ID = Intrinsic::round; |
17155 | CI = Intrinsic::experimental_constrained_round; break; |
17156 | case 5: ID = Intrinsic::trunc; |
17157 | CI = Intrinsic::experimental_constrained_trunc; break; |
17158 | case 6: ID = Intrinsic::ceil; |
17159 | CI = Intrinsic::experimental_constrained_ceil; break; |
17160 | case 7: ID = Intrinsic::floor; |
17161 | CI = Intrinsic::experimental_constrained_floor; break; |
17162 | } |
17163 | break; |
17164 | } |
17165 | if (ID != Intrinsic::not_intrinsic) { |
17166 | if (Builder.getIsFPConstrained()) { |
17167 | Function *F = CGM.getIntrinsic(CI, ResultType); |
17168 | return Builder.CreateConstrainedFPCall(F, X); |
17169 | } else { |
17170 | Function *F = CGM.getIntrinsic(ID, ResultType); |
17171 | return Builder.CreateCall(F, X); |
17172 | } |
17173 | } |
17174 | switch (BuiltinID) { // FIXME: constrained version? |
17175 | case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; |
17176 | case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; |
17177 | default: llvm_unreachable("Unknown BuiltinID");
17178 | } |
17179 | Function *F = CGM.getIntrinsic(ID); |
17180 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
17181 | Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); |
17182 | return Builder.CreateCall(F, {X, M4Value, M5Value}); |
17183 | } |
17184 | case SystemZ::BI__builtin_s390_vfmaxsb: |
17185 | case SystemZ::BI__builtin_s390_vfmaxdb: { |
17186 | llvm::Type *ResultType = ConvertType(E->getType()); |
17187 | Value *X = EmitScalarExpr(E->getArg(0)); |
17188 | Value *Y = EmitScalarExpr(E->getArg(1)); |
17189 | // Constant-fold the M4 mask argument. |
17190 | llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
17191 | // Check whether this instance can be represented via an LLVM standard
17192 | // intrinsic. We only support some values of M4.
17193 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
17194 | Intrinsic::ID CI; |
17195 | switch (M4.getZExtValue()) { |
17196 | default: break; |
17197 | case 4: ID = Intrinsic::maxnum; |
17198 | CI = Intrinsic::experimental_constrained_maxnum; break; |
17199 | } |
17200 | if (ID != Intrinsic::not_intrinsic) { |
17201 | if (Builder.getIsFPConstrained()) { |
17202 | Function *F = CGM.getIntrinsic(CI, ResultType); |
17203 | return Builder.CreateConstrainedFPCall(F, {X, Y}); |
17204 | } else { |
17205 | Function *F = CGM.getIntrinsic(ID, ResultType); |
17206 | return Builder.CreateCall(F, {X, Y}); |
17207 | } |
17208 | } |
17209 | switch (BuiltinID) { |
17210 | case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; |
17211 | case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; |
17212 | default: llvm_unreachable("Unknown BuiltinID");
17213 | } |
17214 | Function *F = CGM.getIntrinsic(ID); |
17215 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
17216 | return Builder.CreateCall(F, {X, Y, M4Value}); |
17217 | } |
17218 | case SystemZ::BI__builtin_s390_vfminsb: |
17219 | case SystemZ::BI__builtin_s390_vfmindb: { |
17220 | llvm::Type *ResultType = ConvertType(E->getType()); |
17221 | Value *X = EmitScalarExpr(E->getArg(0)); |
17222 | Value *Y = EmitScalarExpr(E->getArg(1)); |
17223 | // Constant-fold the M4 mask argument. |
17224 | llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
17226 | // Check whether this instance can be represented via an LLVM standard
17227 | // intrinsic. We only support some values of M4.
17227 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
17228 | Intrinsic::ID CI; |
17229 | switch (M4.getZExtValue()) { |
17230 | default: break; |
17231 | case 4: ID = Intrinsic::minnum; |
17232 | CI = Intrinsic::experimental_constrained_minnum; break; |
17233 | } |
17234 | if (ID != Intrinsic::not_intrinsic) { |
17235 | if (Builder.getIsFPConstrained()) { |
17236 | Function *F = CGM.getIntrinsic(CI, ResultType); |
17237 | return Builder.CreateConstrainedFPCall(F, {X, Y}); |
17238 | } else { |
17239 | Function *F = CGM.getIntrinsic(ID, ResultType); |
17240 | return Builder.CreateCall(F, {X, Y}); |
17241 | } |
17242 | } |
17243 | switch (BuiltinID) { |
17244 | case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; |
17245 | case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; |
17246 | default: llvm_unreachable("Unknown BuiltinID");
17247 | } |
17248 | Function *F = CGM.getIntrinsic(ID); |
17249 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
17250 | return Builder.CreateCall(F, {X, Y, M4Value}); |
17251 | } |
17252 | |
17253 | case SystemZ::BI__builtin_s390_vlbrh: |
17254 | case SystemZ::BI__builtin_s390_vlbrf: |
17255 | case SystemZ::BI__builtin_s390_vlbrg: { |
17256 | llvm::Type *ResultType = ConvertType(E->getType()); |
17257 | Value *X = EmitScalarExpr(E->getArg(0)); |
17258 | Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType); |
17259 | return Builder.CreateCall(F, X); |
17260 | } |
17261 | |
17262 | // Vector intrinsics that output the post-instruction CC value. |
17263 | |
17264 | #define INTRINSIC_WITH_CC(NAME) \ |
17265 | case SystemZ::BI__builtin_##NAME: \ |
17266 | return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) |
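| // For example, INTRINSIC_WITH_CC(s390_vpkshs) expands to:
| //   case SystemZ::BI__builtin_s390_vpkshs:
| //     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);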
17267 | |
17268 | INTRINSIC_WITH_CC(s390_vpkshs); |
17269 | INTRINSIC_WITH_CC(s390_vpksfs); |
17270 | INTRINSIC_WITH_CC(s390_vpksgs); |
17271 | |
17272 | INTRINSIC_WITH_CC(s390_vpklshs); |
17273 | INTRINSIC_WITH_CC(s390_vpklsfs); |
17274 | INTRINSIC_WITH_CC(s390_vpklsgs); |
17275 | |
17276 | INTRINSIC_WITH_CC(s390_vceqbs); |
17277 | INTRINSIC_WITH_CC(s390_vceqhs); |
17278 | INTRINSIC_WITH_CC(s390_vceqfs); |
17279 | INTRINSIC_WITH_CC(s390_vceqgs); |
17280 | |
17281 | INTRINSIC_WITH_CC(s390_vchbs); |
17282 | INTRINSIC_WITH_CC(s390_vchhs); |
17283 | INTRINSIC_WITH_CC(s390_vchfs); |
17284 | INTRINSIC_WITH_CC(s390_vchgs); |
17285 | |
17286 | INTRINSIC_WITH_CC(s390_vchlbs); |
17287 | INTRINSIC_WITH_CC(s390_vchlhs); |
17288 | INTRINSIC_WITH_CC(s390_vchlfs); |
17289 | INTRINSIC_WITH_CC(s390_vchlgs); |
17290 | |
17291 | INTRINSIC_WITH_CC(s390_vfaebs); |
17292 | INTRINSIC_WITH_CC(s390_vfaehs); |
17293 | INTRINSIC_WITH_CC(s390_vfaefs); |
17294 | |
17295 | INTRINSIC_WITH_CC(s390_vfaezbs); |
17296 | INTRINSIC_WITH_CC(s390_vfaezhs); |
17297 | INTRINSIC_WITH_CC(s390_vfaezfs); |
17298 | |
17299 | INTRINSIC_WITH_CC(s390_vfeebs); |
17300 | INTRINSIC_WITH_CC(s390_vfeehs); |
17301 | INTRINSIC_WITH_CC(s390_vfeefs); |
17302 | |
17303 | INTRINSIC_WITH_CC(s390_vfeezbs); |
17304 | INTRINSIC_WITH_CC(s390_vfeezhs); |
17305 | INTRINSIC_WITH_CC(s390_vfeezfs); |
17306 | |
17307 | INTRINSIC_WITH_CC(s390_vfenebs); |
17308 | INTRINSIC_WITH_CC(s390_vfenehs); |
17309 | INTRINSIC_WITH_CC(s390_vfenefs); |
17310 | |
17311 | INTRINSIC_WITH_CC(s390_vfenezbs); |
17312 | INTRINSIC_WITH_CC(s390_vfenezhs); |
17313 | INTRINSIC_WITH_CC(s390_vfenezfs); |
17314 | |
17315 | INTRINSIC_WITH_CC(s390_vistrbs); |
17316 | INTRINSIC_WITH_CC(s390_vistrhs); |
17317 | INTRINSIC_WITH_CC(s390_vistrfs); |
17318 | |
17319 | INTRINSIC_WITH_CC(s390_vstrcbs); |
17320 | INTRINSIC_WITH_CC(s390_vstrchs); |
17321 | INTRINSIC_WITH_CC(s390_vstrcfs); |
17322 | |
17323 | INTRINSIC_WITH_CC(s390_vstrczbs); |
17324 | INTRINSIC_WITH_CC(s390_vstrczhs); |
17325 | INTRINSIC_WITH_CC(s390_vstrczfs); |
17326 | |
17327 | INTRINSIC_WITH_CC(s390_vfcesbs); |
17328 | INTRINSIC_WITH_CC(s390_vfcedbs); |
17329 | INTRINSIC_WITH_CC(s390_vfchsbs); |
17330 | INTRINSIC_WITH_CC(s390_vfchdbs); |
17331 | INTRINSIC_WITH_CC(s390_vfchesbs); |
17332 | INTRINSIC_WITH_CC(s390_vfchedbs); |
17333 | |
17334 | INTRINSIC_WITH_CC(s390_vftcisb); |
17335 | INTRINSIC_WITH_CC(s390_vftcidb); |
17336 | |
17337 | INTRINSIC_WITH_CC(s390_vstrsb); |
17338 | INTRINSIC_WITH_CC(s390_vstrsh); |
17339 | INTRINSIC_WITH_CC(s390_vstrsf); |
17340 | |
17341 | INTRINSIC_WITH_CC(s390_vstrszb); |
17342 | INTRINSIC_WITH_CC(s390_vstrszh); |
17343 | INTRINSIC_WITH_CC(s390_vstrszf); |
17344 | |
17345 | #undef INTRINSIC_WITH_CC |
17346 | |
17347 | default: |
17348 | return nullptr; |
17349 | } |
17350 | } |
17351 | |
17352 | namespace { |
17353 | // Helper classes for mapping MMA builtins to the corresponding LLVM intrinsic variant.
17354 | struct NVPTXMmaLdstInfo { |
17355 | unsigned NumResults; // Number of elements to load/store |
17356 | // Intrinsic IDs for row/col variants. 0 if the particular layout is unsupported.
17357 | unsigned IID_col; |
17358 | unsigned IID_row; |
17359 | }; |
17360 | |
17361 | #define MMA_INTR(geom_op_type, layout) \ |
17362 | Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride |
17363 | #define MMA_LDST(n, geom_op_type) \ |
17364 | { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) } |
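| // For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
| //   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
| //        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }
| // i.e. eight result elements plus the col- and row-major intrinsic IDs.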
17365 | |
17366 | static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) { |
17367 | switch (BuiltinID) { |
17368 | // FP MMA loads |
17369 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
17370 | return MMA_LDST(8, m16n16k16_load_a_f16); |
17371 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
17372 | return MMA_LDST(8, m16n16k16_load_b_f16); |
17373 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
17374 | return MMA_LDST(4, m16n16k16_load_c_f16); |
17375 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
17376 | return MMA_LDST(8, m16n16k16_load_c_f32); |
17377 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
17378 | return MMA_LDST(8, m32n8k16_load_a_f16); |
17379 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
17380 | return MMA_LDST(8, m32n8k16_load_b_f16); |
17381 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
17382 | return MMA_LDST(4, m32n8k16_load_c_f16); |
17383 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
17384 | return MMA_LDST(8, m32n8k16_load_c_f32); |
17385 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
17386 | return MMA_LDST(8, m8n32k16_load_a_f16); |
17387 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
17388 | return MMA_LDST(8, m8n32k16_load_b_f16); |
17389 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
17390 | return MMA_LDST(4, m8n32k16_load_c_f16); |
17391 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
17392 | return MMA_LDST(8, m8n32k16_load_c_f32); |
17393 | |
17394 | // Integer MMA loads |
17395 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
17396 | return MMA_LDST(2, m16n16k16_load_a_s8); |
17397 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
17398 | return MMA_LDST(2, m16n16k16_load_a_u8); |
17399 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
17400 | return MMA_LDST(2, m16n16k16_load_b_s8); |
17401 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
17402 | return MMA_LDST(2, m16n16k16_load_b_u8); |
17403 | case NVPTX::BI__imma_m16n16k16_ld_c: |
17404 | return MMA_LDST(8, m16n16k16_load_c_s32); |
17405 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
17406 | return MMA_LDST(4, m32n8k16_load_a_s8); |
17407 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
17408 | return MMA_LDST(4, m32n8k16_load_a_u8); |
17409 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
17410 | return MMA_LDST(1, m32n8k16_load_b_s8); |
17411 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
17412 | return MMA_LDST(1, m32n8k16_load_b_u8); |
17413 | case NVPTX::BI__imma_m32n8k16_ld_c: |
17414 | return MMA_LDST(8, m32n8k16_load_c_s32); |
17415 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
17416 | return MMA_LDST(1, m8n32k16_load_a_s8); |
17417 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
17418 | return MMA_LDST(1, m8n32k16_load_a_u8); |
17419 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
17420 | return MMA_LDST(4, m8n32k16_load_b_s8); |
17421 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
17422 | return MMA_LDST(4, m8n32k16_load_b_u8); |
17423 | case NVPTX::BI__imma_m8n32k16_ld_c: |
17424 | return MMA_LDST(8, m8n32k16_load_c_s32); |
17425 | |
17426 | // Sub-integer MMA loads.
17427 | // Only the row layout is supported for A fragments and the col layout for B.
17428 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
17429 | return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)}; |
17430 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
17431 | return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)}; |
17432 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
17433 | return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0}; |
17434 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
17435 | return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0}; |
17436 | case NVPTX::BI__imma_m8n8k32_ld_c: |
17437 | return MMA_LDST(2, m8n8k32_load_c_s32); |
17438 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
17439 | return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)}; |
17440 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
17441 | return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0}; |
17442 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
17443 | return MMA_LDST(2, m8n8k128_load_c_s32); |
17444 | |
17445 | // Double MMA loads |
17446 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
17447 | return MMA_LDST(1, m8n8k4_load_a_f64); |
17448 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
17449 | return MMA_LDST(1, m8n8k4_load_b_f64); |
17450 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
17451 | return MMA_LDST(2, m8n8k4_load_c_f64); |
17452 | |
17453 | // Alternate float MMA loads |
17454 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
17455 | return MMA_LDST(4, m16n16k16_load_a_bf16); |
17456 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
17457 | return MMA_LDST(4, m16n16k16_load_b_bf16); |
17458 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
17459 | return MMA_LDST(2, m8n32k16_load_a_bf16); |
17460 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
17461 | return MMA_LDST(8, m8n32k16_load_b_bf16); |
17462 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
17463 | return MMA_LDST(8, m32n8k16_load_a_bf16); |
17464 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
17465 | return MMA_LDST(2, m32n8k16_load_b_bf16); |
17466 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
17467 | return MMA_LDST(4, m16n16k8_load_a_tf32); |
17468 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
17469 | return MMA_LDST(4, m16n16k8_load_b_tf32); |
17470 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: |
17471 | return MMA_LDST(8, m16n16k8_load_c_f32); |
17472 | |
17473 | // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
17474 | // PTX and LLVM IR where stores always use fragment D, NVCC builtins always |
17475 | // use fragment C for both loads and stores. |
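| // E.g. __hmma_m16n16k16_st_c_f16 below maps to the
| // nvvm_wmma_m16n16k16_store_d_f16 intrinsics despite the _c_ in its name.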
17476 | // FP MMA stores. |
17477 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
17478 | return MMA_LDST(4, m16n16k16_store_d_f16); |
17479 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
17480 | return MMA_LDST(8, m16n16k16_store_d_f32); |
17481 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
17482 | return MMA_LDST(4, m32n8k16_store_d_f16); |
17483 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
17484 | return MMA_LDST(8, m32n8k16_store_d_f32); |
17485 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
17486 | return MMA_LDST(4, m8n32k16_store_d_f16); |
17487 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
17488 | return MMA_LDST(8, m8n32k16_store_d_f32); |
17489 | |
17490 | // Integer and sub-integer MMA stores. |
17491 | // Another naming quirk. Unlike other MMA builtins that use PTX types in the |
17492 | // name, integer loads/stores use LLVM's i32. |
17493 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
17494 | return MMA_LDST(8, m16n16k16_store_d_s32); |
17495 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
17496 | return MMA_LDST(8, m32n8k16_store_d_s32); |
17497 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
17498 | return MMA_LDST(8, m8n32k16_store_d_s32); |
17499 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
17500 | return MMA_LDST(2, m8n8k32_store_d_s32); |
17501 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
17502 | return MMA_LDST(2, m8n8k128_store_d_s32); |
17503 | |
17504 | // Double MMA store |
17505 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
17506 | return MMA_LDST(2, m8n8k4_store_d_f64); |
17507 | |
17508 | // Alternate float MMA store |
17509 | case NVPTX::BI__mma_m16n16k8_st_c_f32: |
17510 | return MMA_LDST(8, m16n16k8_store_d_f32); |
17511 | |
17512 | default: |
17513 | llvm_unreachable("Unknown MMA builtin");
17514 | } |
17515 | } |
17516 | #undef MMA_LDST |
17517 | #undef MMA_INTR |
17518 | |
17519 | |
17520 | struct NVPTXMmaInfo { |
17521 | unsigned NumEltsA; |
17522 | unsigned NumEltsB; |
17523 | unsigned NumEltsC; |
17524 | unsigned NumEltsD; |
17525 | |
17526 | // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority |
17527 | // over 'col' for layout. The index of non-satf variants is expected to match |
17528 | // the undocumented layout constants used by CUDA's mma.hpp. |
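| // E.g. Layout == 1 (row-major A, col-major B) with Satf set selects
| // Variants[1 + 4 * 1] == Variants[5], the row_col satfinite intrinsic.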
17529 | std::array<unsigned, 8> Variants; |
17530 | |
17531 | unsigned getMMAIntrinsic(int Layout, bool Satf) { |
17532 | unsigned Index = Layout + 4 * Satf; |
17533 | if (Index >= Variants.size()) |
17534 | return 0; |
17535 | return Variants[Index]; |
17536 | } |
17537 | }; |
17538 | |
17539 | // Returns the NVPTXMmaInfo whose getMMAIntrinsic yields an intrinsic matching
17540 | // Layout and Satf for valid combinations, and 0 otherwise.
17541 | static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { |
17542 | // clang-format off |
17543 | #define MMA_VARIANTS(geom, type) \ |
17544 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \ |
17545 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
17546 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \ |
17547 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type |
17548 | #define MMA_SATF_VARIANTS(geom, type) \ |
17549 | MMA_VARIANTS(geom, type), \ |
17550 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \ |
17551 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
17552 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \ |
17553 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite |
17554 | // Sub-integer MMA only supports row.col layout. |
17555 | #define MMA_VARIANTS_I4(geom, type) \ |
17556 | 0, \ |
17557 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
17558 | 0, \ |
17559 | 0, \ |
17560 | 0, \ |
17561 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
17562 | 0, \ |
17563 | 0 |
17564 | // b1 MMA does not support .satfinite. |
17565 | #define MMA_VARIANTS_B1_XOR(geom, type) \ |
17566 | 0, \ |
17567 | Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \ |
17568 | 0, \ |
17569 | 0, \ |
17570 | 0, \ |
17571 | 0, \ |
17572 | 0, \ |
17573 | 0 |
17574 | #define MMA_VARIANTS_B1_AND(geom, type) \ |
17575 | 0, \ |
17576 | Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \ |
17577 | 0, \ |
17578 | 0, \ |
17579 | 0, \ |
17580 | 0, \ |
17581 | 0, \ |
17582 | 0 |
17583 | // clang-format on |
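| // With this layout, MMA_VARIANTS_I4 leaves every slot 0 except indices 1
| // (row_col) and 5 (row_col satfinite), so getMMAIntrinsic returns 0 for any
| // other layout/satf request on sub-integer MMA.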
17584 | switch (BuiltinID) { |
17585 | // FP MMA |
17586 | // Note that 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while |
17587 | // NumEltsN of return value are ordered as A,B,C,D. |
17588 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
17589 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}}; |
17590 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
17591 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}}; |
17592 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
17593 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}}; |
17594 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
17595 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}}; |
17596 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
17597 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}}; |
17598 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
17599 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}}; |
17600 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
17601 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}}; |
17602 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
17603 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}}; |
17604 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
17605 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}}; |
17606 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
17607 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}}; |
17608 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
17609 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}}; |
17610 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
17611 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}}; |
17612 | |
17613 | // Integer MMA |
17614 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
17615 | return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}}; |
17616 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
17617 | return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}}; |
17618 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
17619 | return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}}; |
17620 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
17621 | return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}}; |
17622 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
17623 | return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}}; |
17624 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
17625 | return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}}; |
17626 | |
17627 | // Sub-integer MMA |
17628 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
17629 | return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}}; |
17630 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
17631 | return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}}; |
17632 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
17633 | return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}}; |
17634 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
17635 | return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}}; |
17636 | |
17637 | // Double MMA |
17638 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
17639 | return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}}; |
17640 | |
17641 | // Alternate FP MMA |
17642 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
17643 | return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}}; |
17644 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
17645 | return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}}; |
17646 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
17647 | return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}}; |
17648 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: |
17649 | return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}}; |
17650 | default: |
17651 | llvm_unreachable("Unexpected builtin ID.");
17652 | } |
17653 | #undef MMA_VARIANTS |
17654 | #undef MMA_SATF_VARIANTS |
17655 | #undef MMA_VARIANTS_I4 |
17656 | #undef MMA_VARIANTS_B1_AND |
17657 | #undef MMA_VARIANTS_B1_XOR |
17658 | } |
17659 | |
17660 | } // namespace |
17661 | |
17662 | Value * |
17663 | CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { |
17664 | auto MakeLdg = [&](unsigned IntrinsicID) { |
17665 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17666 | QualType ArgType = E->getArg(0)->getType(); |
17667 | clang::CharUnits Align = CGM.getNaturalPointeeTypeAlignment(ArgType); |
17668 | llvm::Type *ElemTy = ConvertTypeForMem(ArgType->getPointeeType()); |
17669 | return Builder.CreateCall( |
17670 | CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}), |
17671 | {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())}); |
17672 | }; |
17673 | auto MakeScopedAtomic = [&](unsigned IntrinsicID) { |
17674 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17675 | llvm::Type *ElemTy = |
17676 | ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); |
17677 | return Builder.CreateCall( |
17678 | CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}), |
17679 | {Ptr, EmitScalarExpr(E->getArg(1))}); |
17680 | }; |
17681 | switch (BuiltinID) { |
17682 | case NVPTX::BI__nvvm_atom_add_gen_i: |
17683 | case NVPTX::BI__nvvm_atom_add_gen_l: |
17684 | case NVPTX::BI__nvvm_atom_add_gen_ll: |
17685 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E); |
17686 | |
17687 | case NVPTX::BI__nvvm_atom_sub_gen_i: |
17688 | case NVPTX::BI__nvvm_atom_sub_gen_l: |
17689 | case NVPTX::BI__nvvm_atom_sub_gen_ll: |
17690 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E); |
17691 | |
17692 | case NVPTX::BI__nvvm_atom_and_gen_i: |
17693 | case NVPTX::BI__nvvm_atom_and_gen_l: |
17694 | case NVPTX::BI__nvvm_atom_and_gen_ll: |
17695 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E); |
17696 | |
17697 | case NVPTX::BI__nvvm_atom_or_gen_i: |
17698 | case NVPTX::BI__nvvm_atom_or_gen_l: |
17699 | case NVPTX::BI__nvvm_atom_or_gen_ll: |
17700 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E); |
17701 | |
17702 | case NVPTX::BI__nvvm_atom_xor_gen_i: |
17703 | case NVPTX::BI__nvvm_atom_xor_gen_l: |
17704 | case NVPTX::BI__nvvm_atom_xor_gen_ll: |
17705 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E); |
17706 | |
17707 | case NVPTX::BI__nvvm_atom_xchg_gen_i: |
17708 | case NVPTX::BI__nvvm_atom_xchg_gen_l: |
17709 | case NVPTX::BI__nvvm_atom_xchg_gen_ll: |
17710 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E); |
17711 | |
17712 | case NVPTX::BI__nvvm_atom_max_gen_i: |
17713 | case NVPTX::BI__nvvm_atom_max_gen_l: |
17714 | case NVPTX::BI__nvvm_atom_max_gen_ll: |
17715 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E); |
17716 | |
17717 | case NVPTX::BI__nvvm_atom_max_gen_ui: |
17718 | case NVPTX::BI__nvvm_atom_max_gen_ul: |
17719 | case NVPTX::BI__nvvm_atom_max_gen_ull: |
17720 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E); |
17721 | |
17722 | case NVPTX::BI__nvvm_atom_min_gen_i: |
17723 | case NVPTX::BI__nvvm_atom_min_gen_l: |
17724 | case NVPTX::BI__nvvm_atom_min_gen_ll: |
17725 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E); |
17726 | |
17727 | case NVPTX::BI__nvvm_atom_min_gen_ui: |
17728 | case NVPTX::BI__nvvm_atom_min_gen_ul: |
17729 | case NVPTX::BI__nvvm_atom_min_gen_ull: |
17730 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E); |
17731 | |
17732 | case NVPTX::BI__nvvm_atom_cas_gen_i: |
17733 | case NVPTX::BI__nvvm_atom_cas_gen_l: |
17734 | case NVPTX::BI__nvvm_atom_cas_gen_ll: |
17735 | // __nvvm_atom_cas_gen_* should return the old value rather than the |
17736 | // success flag. |
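| // (The underlying IR cmpxchg yields a { value, i1 success } pair;
| // ReturnBool=false makes the helper extract the old-value element.)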
17737 | return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false); |
17738 | |
17739 | case NVPTX::BI__nvvm_atom_add_gen_f: |
17740 | case NVPTX::BI__nvvm_atom_add_gen_d: { |
17741 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17742 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17743 | return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val, |
17744 | AtomicOrdering::SequentiallyConsistent); |
17745 | } |
17746 | |
17747 | case NVPTX::BI__nvvm_atom_inc_gen_ui: { |
17748 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17749 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17750 | Function *FnALI32 = |
17751 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); |
17752 | return Builder.CreateCall(FnALI32, {Ptr, Val}); |
17753 | } |
17754 | |
17755 | case NVPTX::BI__nvvm_atom_dec_gen_ui: { |
17756 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17757 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17758 | Function *FnALD32 = |
17759 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); |
17760 | return Builder.CreateCall(FnALD32, {Ptr, Val}); |
17761 | } |
17762 | |
17763 | case NVPTX::BI__nvvm_ldg_c: |
17764 | case NVPTX::BI__nvvm_ldg_c2: |
17765 | case NVPTX::BI__nvvm_ldg_c4: |
17766 | case NVPTX::BI__nvvm_ldg_s: |
17767 | case NVPTX::BI__nvvm_ldg_s2: |
17768 | case NVPTX::BI__nvvm_ldg_s4: |
17769 | case NVPTX::BI__nvvm_ldg_i: |
17770 | case NVPTX::BI__nvvm_ldg_i2: |
17771 | case NVPTX::BI__nvvm_ldg_i4: |
17772 | case NVPTX::BI__nvvm_ldg_l: |
17773 | case NVPTX::BI__nvvm_ldg_ll: |
17774 | case NVPTX::BI__nvvm_ldg_ll2: |
17775 | case NVPTX::BI__nvvm_ldg_uc: |
17776 | case NVPTX::BI__nvvm_ldg_uc2: |
17777 | case NVPTX::BI__nvvm_ldg_uc4: |
17778 | case NVPTX::BI__nvvm_ldg_us: |
17779 | case NVPTX::BI__nvvm_ldg_us2: |
17780 | case NVPTX::BI__nvvm_ldg_us4: |
17781 | case NVPTX::BI__nvvm_ldg_ui: |
17782 | case NVPTX::BI__nvvm_ldg_ui2: |
17783 | case NVPTX::BI__nvvm_ldg_ui4: |
17784 | case NVPTX::BI__nvvm_ldg_ul: |
17785 | case NVPTX::BI__nvvm_ldg_ull: |
17786 | case NVPTX::BI__nvvm_ldg_ull2: |
17787 | // PTX Interoperability section 2.2: "For a vector with an even number of |
17788 | // elements, its alignment is set to number of elements times the alignment |
17789 | // of its member: n*alignof(t)." |
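| // E.g. for __nvvm_ldg_ui4 the pointee is a four-element unsigned int
| // vector, so the alignment operand passed to llvm.nvvm.ldg.global.i is
| // 4 * 4 = 16 bytes.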
17790 | return MakeLdg(Intrinsic::nvvm_ldg_global_i); |
17791 | case NVPTX::BI__nvvm_ldg_f: |
17792 | case NVPTX::BI__nvvm_ldg_f2: |
17793 | case NVPTX::BI__nvvm_ldg_f4: |
17794 | case NVPTX::BI__nvvm_ldg_d: |
17795 | case NVPTX::BI__nvvm_ldg_d2: |
17796 | return MakeLdg(Intrinsic::nvvm_ldg_global_f); |
17797 | |
17798 | case NVPTX::BI__nvvm_atom_cta_add_gen_i: |
17799 | case NVPTX::BI__nvvm_atom_cta_add_gen_l: |
17800 | case NVPTX::BI__nvvm_atom_cta_add_gen_ll: |
17801 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta); |
17802 | case NVPTX::BI__nvvm_atom_sys_add_gen_i: |
17803 | case NVPTX::BI__nvvm_atom_sys_add_gen_l: |
17804 | case NVPTX::BI__nvvm_atom_sys_add_gen_ll: |
17805 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys); |
17806 | case NVPTX::BI__nvvm_atom_cta_add_gen_f: |
17807 | case NVPTX::BI__nvvm_atom_cta_add_gen_d: |
17808 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta); |
17809 | case NVPTX::BI__nvvm_atom_sys_add_gen_f: |
17810 | case NVPTX::BI__nvvm_atom_sys_add_gen_d: |
17811 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys); |
17812 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_i: |
17813 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_l: |
17814 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll: |
17815 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta); |
17816 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_i: |
17817 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_l: |
17818 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll: |
17819 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys); |
17820 | case NVPTX::BI__nvvm_atom_cta_max_gen_i: |
17821 | case NVPTX::BI__nvvm_atom_cta_max_gen_ui: |
17822 | case NVPTX::BI__nvvm_atom_cta_max_gen_l: |
17823 | case NVPTX::BI__nvvm_atom_cta_max_gen_ul: |
17824 | case NVPTX::BI__nvvm_atom_cta_max_gen_ll: |
17825 | case NVPTX::BI__nvvm_atom_cta_max_gen_ull: |
17826 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta); |
17827 | case NVPTX::BI__nvvm_atom_sys_max_gen_i: |
17828 | case NVPTX::BI__nvvm_atom_sys_max_gen_ui: |
17829 | case NVPTX::BI__nvvm_atom_sys_max_gen_l: |
17830 | case NVPTX::BI__nvvm_atom_sys_max_gen_ul: |
17831 | case NVPTX::BI__nvvm_atom_sys_max_gen_ll: |
17832 | case NVPTX::BI__nvvm_atom_sys_max_gen_ull: |
17833 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys); |
17834 | case NVPTX::BI__nvvm_atom_cta_min_gen_i: |
17835 | case NVPTX::BI__nvvm_atom_cta_min_gen_ui: |
17836 | case NVPTX::BI__nvvm_atom_cta_min_gen_l: |
17837 | case NVPTX::BI__nvvm_atom_cta_min_gen_ul: |
17838 | case NVPTX::BI__nvvm_atom_cta_min_gen_ll: |
17839 | case NVPTX::BI__nvvm_atom_cta_min_gen_ull: |
17840 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta); |
17841 | case NVPTX::BI__nvvm_atom_sys_min_gen_i: |
17842 | case NVPTX::BI__nvvm_atom_sys_min_gen_ui: |
17843 | case NVPTX::BI__nvvm_atom_sys_min_gen_l: |
17844 | case NVPTX::BI__nvvm_atom_sys_min_gen_ul: |
17845 | case NVPTX::BI__nvvm_atom_sys_min_gen_ll: |
17846 | case NVPTX::BI__nvvm_atom_sys_min_gen_ull: |
17847 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys); |
17848 | case NVPTX::BI__nvvm_atom_cta_inc_gen_ui: |
17849 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta); |
17850 | case NVPTX::BI__nvvm_atom_cta_dec_gen_ui: |
17851 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta); |
17852 | case NVPTX::BI__nvvm_atom_sys_inc_gen_ui: |
17853 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys); |
17854 | case NVPTX::BI__nvvm_atom_sys_dec_gen_ui: |
17855 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys); |
17856 | case NVPTX::BI__nvvm_atom_cta_and_gen_i: |
17857 | case NVPTX::BI__nvvm_atom_cta_and_gen_l: |
17858 | case NVPTX::BI__nvvm_atom_cta_and_gen_ll: |
17859 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta); |
17860 | case NVPTX::BI__nvvm_atom_sys_and_gen_i: |
17861 | case NVPTX::BI__nvvm_atom_sys_and_gen_l: |
17862 | case NVPTX::BI__nvvm_atom_sys_and_gen_ll: |
17863 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys); |
17864 | case NVPTX::BI__nvvm_atom_cta_or_gen_i: |
17865 | case NVPTX::BI__nvvm_atom_cta_or_gen_l: |
17866 | case NVPTX::BI__nvvm_atom_cta_or_gen_ll: |
17867 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta); |
17868 | case NVPTX::BI__nvvm_atom_sys_or_gen_i: |
17869 | case NVPTX::BI__nvvm_atom_sys_or_gen_l: |
17870 | case NVPTX::BI__nvvm_atom_sys_or_gen_ll: |
17871 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys); |
17872 | case NVPTX::BI__nvvm_atom_cta_xor_gen_i: |
17873 | case NVPTX::BI__nvvm_atom_cta_xor_gen_l: |
17874 | case NVPTX::BI__nvvm_atom_cta_xor_gen_ll: |
17875 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta); |
17876 | case NVPTX::BI__nvvm_atom_sys_xor_gen_i: |
17877 | case NVPTX::BI__nvvm_atom_sys_xor_gen_l: |
17878 | case NVPTX::BI__nvvm_atom_sys_xor_gen_ll: |
17879 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys); |
17880 | case NVPTX::BI__nvvm_atom_cta_cas_gen_i: |
17881 | case NVPTX::BI__nvvm_atom_cta_cas_gen_l: |
17882 | case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: { |
17883 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17884 | llvm::Type *ElemTy = |
17885 | ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); |
17886 | return Builder.CreateCall( |
17887 | CGM.getIntrinsic( |
17888 | Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}), |
17889 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
17890 | } |
17891 | case NVPTX::BI__nvvm_atom_sys_cas_gen_i: |
17892 | case NVPTX::BI__nvvm_atom_sys_cas_gen_l: |
17893 | case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: { |
17894 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17895 | llvm::Type *ElemTy = |
17896 | ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); |
17897 | return Builder.CreateCall( |
17898 | CGM.getIntrinsic( |
17899 | Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}), |
17900 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
17901 | } |
17902 | case NVPTX::BI__nvvm_match_all_sync_i32p: |
17903 | case NVPTX::BI__nvvm_match_all_sync_i64p: { |
17904 | Value *Mask = EmitScalarExpr(E->getArg(0)); |
17905 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17906 | Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
17907 | Value *ResultPair = Builder.CreateCall( |
17908 | CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p |
17909 | ? Intrinsic::nvvm_match_all_sync_i32p |
17910 | : Intrinsic::nvvm_match_all_sync_i64p), |
17911 | {Mask, Val}); |
17912 | Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1), |
17913 | PredOutPtr.getElementType()); |
17914 | Builder.CreateStore(Pred, PredOutPtr); |
17915 | return Builder.CreateExtractValue(ResultPair, 0); |
17916 | } |
17917 | |
17918 | // FP MMA loads |
17919 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
17920 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
17921 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
17922 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
17923 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
17924 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
17925 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
17926 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
17927 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
17928 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
17929 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
17930 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
17931 | // Integer MMA loads. |
17932 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
17933 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
17934 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
17935 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
17936 | case NVPTX::BI__imma_m16n16k16_ld_c: |
17937 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
17938 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
17939 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
17940 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
17941 | case NVPTX::BI__imma_m32n8k16_ld_c: |
17942 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
17943 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
17944 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
17945 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
17946 | case NVPTX::BI__imma_m8n32k16_ld_c: |
17947 | // Sub-integer MMA loads. |
17948 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
17949 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
17950 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
17951 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
17952 | case NVPTX::BI__imma_m8n8k32_ld_c: |
17953 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
17954 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
17955 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
17956 | // Double MMA loads. |
17957 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
17958 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
17959 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
17960 | // Alternate float MMA loads. |
17961 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
17962 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
17963 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
17964 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
17965 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
17966 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
17967 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
17968 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
17969 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: { |
17970 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
17971 | Value *Src = EmitScalarExpr(E->getArg(1)); |
17972 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
17973 | Optional<llvm::APSInt> isColMajorArg = |
17974 | E->getArg(3)->getIntegerConstantExpr(getContext()); |
17975 | if (!isColMajorArg) |
17976 | return nullptr; |
17977 | bool isColMajor = isColMajorArg->getSExtValue(); |
17978 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
17979 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
17980 | if (IID == 0) |
17981 | return nullptr; |
17982 | |
17983 | Value *Result = |
17984 | Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm}); |
17985 | |
17986 | // Save returned values. |
17987 | assert(II.NumResults);
17988 | if (II.NumResults == 1) { |
17989 | Builder.CreateAlignedStore(Result, Dst.getPointer(), |
17990 | CharUnits::fromQuantity(4)); |
17991 | } else { |
17992 | for (unsigned i = 0; i < II.NumResults; ++i) { |
17993 | Builder.CreateAlignedStore( |
17994 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), |
17995 | Dst.getElementType()), |
17996 | Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), |
17997 | llvm::ConstantInt::get(IntTy, i)), |
17998 | CharUnits::fromQuantity(4)); |
17999 | } |
18000 | } |
18001 | return Result; |
18002 | } |
18003 | |
18004 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
18005 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
18006 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
18007 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
18008 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
18009 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
18010 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
18011 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
18012 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
18013 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
18014 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
18015 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
18016 | case NVPTX::BI__mma_m16n16k8_st_c_f32: { |
18017 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
18018 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
18019 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
18020 | Optional<llvm::APSInt> isColMajorArg = |
18021 | E->getArg(3)->getIntegerConstantExpr(getContext()); |
18022 | if (!isColMajorArg) |
18023 | return nullptr; |
18024 | bool isColMajor = isColMajorArg->getSExtValue(); |
18025 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
18026 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
18027 | if (IID == 0) |
18028 | return nullptr; |
18029 | Function *Intrinsic = |
18030 | CGM.getIntrinsic(IID, Dst->getType()); |
18031 | llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); |
18032 | SmallVector<Value *, 10> Values = {Dst}; |
18033 | for (unsigned i = 0; i < II.NumResults; ++i) { |
18034 | Value *V = Builder.CreateAlignedLoad( |
18035 | Src.getElementType(), |
18036 | Builder.CreateGEP(Src.getElementType(), Src.getPointer(), |
18037 | llvm::ConstantInt::get(IntTy, i)), |
18038 | CharUnits::fromQuantity(4)); |
18039 | Values.push_back(Builder.CreateBitCast(V, ParamType)); |
18040 | } |
18041 | Values.push_back(Ldm); |
18042 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
18043 | return Result; |
18044 | } |
18045 | |
18046 | // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) --> |
18047 | // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf> |
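| // E.g. __hmma_m16n16k16_mma_f32f16(d, a, b, c, 3, 1) requests col_col
| // layout with satfinite and selects
| // Intrinsic::nvvm_wmma_m16n16k16_mma_col_col_f32_f16_satfinite.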
18048 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
18049 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
18050 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
18051 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
18052 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
18053 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
18054 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
18055 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
18056 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
18057 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
18058 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
18059 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
18060 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
18061 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
18062 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
18063 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
18064 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
18065 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
18066 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
18067 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
18068 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
18069 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
18070 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
18071 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
18072 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
18073 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
18074 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: { |
18075 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
18076 | Address SrcA = EmitPointerWithAlignment(E->getArg(1)); |
18077 | Address SrcB = EmitPointerWithAlignment(E->getArg(2)); |
18078 | Address SrcC = EmitPointerWithAlignment(E->getArg(3)); |
18079 | Optional<llvm::APSInt> LayoutArg = |
18080 | E->getArg(4)->getIntegerConstantExpr(getContext()); |
18081 | if (!LayoutArg) |
18082 | return nullptr; |
18083 | int Layout = LayoutArg->getSExtValue(); |
18084 | if (Layout < 0 || Layout > 3) |
18085 | return nullptr; |
18086 | llvm::APSInt SatfArg; |
18087 | if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 || |
18088 | BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1) |
18089 | SatfArg = 0; // .b1 does not have a satf argument.
18090 | else if (Optional<llvm::APSInt> OptSatfArg = |
18091 | E->getArg(5)->getIntegerConstantExpr(getContext())) |
18092 | SatfArg = *OptSatfArg; |
18093 | else |
18094 | return nullptr; |
18095 | bool Satf = SatfArg.getSExtValue(); |
18096 | NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID); |
18097 | unsigned IID = MI.getMMAIntrinsic(Layout, Satf); |
18098 | if (IID == 0) // Unsupported combination of Layout/Satf. |
18099 | return nullptr; |
18100 | |
18101 | SmallVector<Value *, 24> Values; |
18102 | Function *Intrinsic = CGM.getIntrinsic(IID); |
18103 | llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0); |
18104 | // Load A |
18105 | for (unsigned i = 0; i < MI.NumEltsA; ++i) { |
18106 | Value *V = Builder.CreateAlignedLoad( |
18107 | SrcA.getElementType(), |
18108 | Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(), |
18109 | llvm::ConstantInt::get(IntTy, i)), |
18110 | CharUnits::fromQuantity(4)); |
18111 | Values.push_back(Builder.CreateBitCast(V, AType)); |
18112 | } |
18113 | // Load B |
18114 | llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA); |
18115 | for (unsigned i = 0; i < MI.NumEltsB; ++i) { |
18116 | Value *V = Builder.CreateAlignedLoad( |
18117 | SrcB.getElementType(), |
18118 | Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(), |
18119 | llvm::ConstantInt::get(IntTy, i)), |
18120 | CharUnits::fromQuantity(4)); |
18121 | Values.push_back(Builder.CreateBitCast(V, BType)); |
18122 | } |
18123 | // Load C |
18124 | llvm::Type *CType = |
18125 | Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB); |
18126 | for (unsigned i = 0; i < MI.NumEltsC; ++i) { |
18127 | Value *V = Builder.CreateAlignedLoad( |
18128 | SrcC.getElementType(), |
18129 | Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(), |
18130 | llvm::ConstantInt::get(IntTy, i)), |
18131 | CharUnits::fromQuantity(4)); |
18132 | Values.push_back(Builder.CreateBitCast(V, CType)); |
18133 | } |
18134 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
18135 | llvm::Type *DType = Dst.getElementType(); |
18136 | for (unsigned i = 0; i < MI.NumEltsD; ++i) |
18137 | Builder.CreateAlignedStore( |
18138 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType), |
18139 | Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), |
18140 | llvm::ConstantInt::get(IntTy, i)), |
18141 | CharUnits::fromQuantity(4)); |
18142 | return Result; |
18143 | } |
18144 | default: |
18145 | return nullptr; |
18146 | } |
18147 | } |
18148 | |
18149 | namespace { |
18150 | struct BuiltinAlignArgs { |
18151 | llvm::Value *Src = nullptr; |
18152 | llvm::Type *SrcType = nullptr; |
18153 | llvm::Value *Alignment = nullptr; |
18154 | llvm::Value *Mask = nullptr; |
18155 | llvm::IntegerType *IntType = nullptr; |
18156 | |
18157 | BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) { |
18158 | QualType AstType = E->getArg(0)->getType(); |
18159 | if (AstType->isArrayType()) |
18160 | Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer(); |
18161 | else |
18162 | Src = CGF.EmitScalarExpr(E->getArg(0)); |
18163 | SrcType = Src->getType(); |
18164 | if (SrcType->isPointerTy()) { |
18165 | IntType = IntegerType::get( |
18166 | CGF.getLLVMContext(), |
18167 | CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType)); |
18168 | } else { |
18169 | assert(SrcType->isIntegerTy());
18170 | IntType = cast<llvm::IntegerType>(SrcType); |
18171 | } |
18172 | Alignment = CGF.EmitScalarExpr(E->getArg(1)); |
18173 | Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment"); |
18174 | auto *One = llvm::ConstantInt::get(IntType, 1); |
18175 | Mask = CGF.Builder.CreateSub(Alignment, One, "mask"); |
18176 | } |
18177 | }; |
18178 | } // namespace |
18179 | |
18180 | /// Generate (x & (y-1)) == 0. |
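| /// E.g. __builtin_is_aligned(p, 8) tests ((uintptr_t)p & 7) == 0; for
| /// p == 0x1004 the low bits are 0b100, so the result is false.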
18181 | RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) { |
18182 | BuiltinAlignArgs Args(E, *this); |
18183 | llvm::Value *SrcAddress = Args.Src; |
18184 | if (Args.SrcType->isPointerTy()) |
18185 | SrcAddress = |
18186 | Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr"); |
18187 | return RValue::get(Builder.CreateICmpEQ( |
18188 | Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"), |
18189 | llvm::Constant::getNullValue(Args.IntType), "is_aligned")); |
18190 | } |
18191 | |
18192 | /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up. |
18193 | /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
18194 | /// llvm.ptrmask intrinsic (preceded by a GEP in the align_up case).
18195 | /// TODO: actually use ptrmask once most optimization passes know about it. |
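| /// E.g. __builtin_align_up(13, 8) computes (13 + 7) & ~7 == 16, while
| /// __builtin_align_down(13, 8) computes 13 & ~7 == 8; an already aligned
| /// value such as 16 is returned unchanged by both.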
18196 | RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) { |
18197 | BuiltinAlignArgs Args(E, *this); |
18198 | llvm::Value *SrcAddr = Args.Src; |
18199 | if (Args.Src->getType()->isPointerTy()) |
18200 | SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr"); |
18201 | llvm::Value *SrcForMask = SrcAddr; |
18202 | if (AlignUp) { |
18203 | // When aligning up we have to first add the mask to ensure we go over the |
18204 | // next alignment value and then align down to the next valid multiple. |
18205 | // By adding the mask, we ensure that align_up on an already aligned |
18206 | // value will not change the value. |
18207 | SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary"); |
18208 | } |
18209 | // Invert the mask to only clear the lower bits. |
18210 | llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask"); |
18211 | llvm::Value *Result = |
18212 | Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result"); |
18213 | if (Args.Src->getType()->isPointerTy()) { |
18214 | /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well. |
18215 | // Result = Builder.CreateIntrinsic( |
18216 | // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType}, |
18217 | // {SrcForMask, NegatedMask}, nullptr, "aligned_result"); |
18218 | Result->setName("aligned_intptr"); |
18219 | llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff"); |
18220 | // The result must point to the same underlying allocation. This means we |
18221 | // can use an inbounds GEP to enable better optimization. |
18222 | Value *Base = EmitCastToVoidPtr(Args.Src); |
18223 | if (getLangOpts().isSignedOverflowDefined()) |
18224 | Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result"); |
18225 | else |
18226 | Result = EmitCheckedInBoundsGEP(Int8Ty, Base, Difference, |
18227 | /*SignedIndices=*/true, |
18228 | /*isSubtraction=*/!AlignUp, |
18229 | E->getExprLoc(), "aligned_result"); |
18230 | Result = Builder.CreatePointerCast(Result, Args.SrcType); |
18231 | // Emit an alignment assumption to ensure that the new alignment is |
18232 | // propagated to loads/stores, etc. |
18233 | emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment); |
18234 | } |
18235 | assert(Result->getType() == Args.SrcType);
18236 | return RValue::get(Result); |
18237 | } |
18238 | |
18239 | Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, |
18240 | const CallExpr *E) { |
18241 | switch (BuiltinID) { |
18242 | case WebAssembly::BI__builtin_wasm_memory_size: { |
18243 | llvm::Type *ResultType = ConvertType(E->getType()); |
18244 | Value *I = EmitScalarExpr(E->getArg(0)); |
18245 | Function *Callee = |
18246 | CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType); |
18247 | return Builder.CreateCall(Callee, I); |
18248 | } |
18249 | case WebAssembly::BI__builtin_wasm_memory_grow: { |
18250 | llvm::Type *ResultType = ConvertType(E->getType()); |
18251 | Value *Args[] = {EmitScalarExpr(E->getArg(0)), |
18252 | EmitScalarExpr(E->getArg(1))}; |
18253 | Function *Callee = |
18254 | CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType); |
18255 | return Builder.CreateCall(Callee, Args); |
18256 | } |
18257 | case WebAssembly::BI__builtin_wasm_tls_size: { |
18258 | llvm::Type *ResultType = ConvertType(E->getType()); |
18259 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType); |
18260 | return Builder.CreateCall(Callee); |
18261 | } |
18262 | case WebAssembly::BI__builtin_wasm_tls_align: { |
18263 | llvm::Type *ResultType = ConvertType(E->getType()); |
18264 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType); |
18265 | return Builder.CreateCall(Callee); |
18266 | } |
18267 | case WebAssembly::BI__builtin_wasm_tls_base: { |
18268 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base); |
18269 | return Builder.CreateCall(Callee); |
18270 | } |
18271 | case WebAssembly::BI__builtin_wasm_throw: { |
18272 | Value *Tag = EmitScalarExpr(E->getArg(0)); |
18273 | Value *Obj = EmitScalarExpr(E->getArg(1)); |
18274 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw); |
18275 | return Builder.CreateCall(Callee, {Tag, Obj}); |
18276 | } |
18277 | case WebAssembly::BI__builtin_wasm_rethrow: { |
18278 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow); |
18279 | return Builder.CreateCall(Callee); |
18280 | } |
18281 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: { |
18282 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
18283 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
18284 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
18285 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32); |
18286 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
18287 | } |
18288 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: { |
18289 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
18290 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
18291 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
18292 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64); |
18293 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
18294 | } |
18295 | case WebAssembly::BI__builtin_wasm_memory_atomic_notify: { |
18296 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
18297 | Value *Count = EmitScalarExpr(E->getArg(1)); |
18298 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify); |
18299 | return Builder.CreateCall(Callee, {Addr, Count}); |
18300 | } |
18301 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32: |
18302 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64: |
18303 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32: |
18304 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: { |
18305 | Value *Src = EmitScalarExpr(E->getArg(0)); |
18306 | llvm::Type *ResT = ConvertType(E->getType()); |
18307 | Function *Callee = |
18308 | CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()}); |
18309 | return Builder.CreateCall(Callee, {Src}); |
18310 | } |
18311 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32: |
18312 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64: |
18313 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32: |
18314 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: { |
18315 | Value *Src = EmitScalarExpr(E->getArg(0)); |
18316 | llvm::Type *ResT = ConvertType(E->getType()); |
18317 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned, |
18318 | {ResT, Src->getType()}); |
18319 | return Builder.CreateCall(Callee, {Src}); |
18320 | } |
18321 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32: |
18322 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64: |
18323 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32: |
18324 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64: |
18325 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: { |
18326 | Value *Src = EmitScalarExpr(E->getArg(0)); |
18327 | llvm::Type *ResT = ConvertType(E->getType()); |
18328 | Function *Callee = |
18329 | CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()}); |
18330 | return Builder.CreateCall(Callee, {Src}); |
18331 | } |
18332 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32: |
18333 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64: |
18334 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32: |
18335 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64: |
18336 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: { |
18337 | Value *Src = EmitScalarExpr(E->getArg(0)); |
18338 | llvm::Type *ResT = ConvertType(E->getType()); |
18339 | Function *Callee = |
18340 | CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()}); |
18341 | return Builder.CreateCall(Callee, {Src}); |
18342 | } |
18343 | case WebAssembly::BI__builtin_wasm_min_f32: |
18344 | case WebAssembly::BI__builtin_wasm_min_f64: |
18345 | case WebAssembly::BI__builtin_wasm_min_f32x4: |
18346 | case WebAssembly::BI__builtin_wasm_min_f64x2: { |
18347 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18348 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18349 | Function *Callee = |
18350 | CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType())); |
18351 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18352 | } |
18353 | case WebAssembly::BI__builtin_wasm_max_f32: |
18354 | case WebAssembly::BI__builtin_wasm_max_f64: |
18355 | case WebAssembly::BI__builtin_wasm_max_f32x4: |
18356 | case WebAssembly::BI__builtin_wasm_max_f64x2: { |
18357 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18358 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18359 | Function *Callee = |
18360 | CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType())); |
18361 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18362 | } |
18363 | case WebAssembly::BI__builtin_wasm_pmin_f32x4: |
18364 | case WebAssembly::BI__builtin_wasm_pmin_f64x2: { |
18365 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18366 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18367 | Function *Callee = |
18368 | CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType())); |
18369 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18370 | } |
18371 | case WebAssembly::BI__builtin_wasm_pmax_f32x4: |
18372 | case WebAssembly::BI__builtin_wasm_pmax_f64x2: { |
18373 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18374 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18375 | Function *Callee = |
18376 | CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType())); |
18377 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18378 | } |
18379 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
18380 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
18381 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
18382 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
18383 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
18384 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
18385 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
18386 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: { |
18387 | unsigned IntNo; |
18388 | switch (BuiltinID) { |
18389 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
18390 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
18391 | IntNo = Intrinsic::ceil; |
18392 | break; |
18393 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
18394 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
18395 | IntNo = Intrinsic::floor; |
18396 | break; |
18397 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
18398 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
18399 | IntNo = Intrinsic::trunc; |
18400 | break; |
18401 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
18402 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: |
18403 | IntNo = Intrinsic::nearbyint; |
18404 | break; |
18405 | default: |
18406 | llvm_unreachable("unexpected builtin ID");
18407 | } |
18408 | Value *Value = EmitScalarExpr(E->getArg(0)); |
18409 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
18410 | return Builder.CreateCall(Callee, Value); |
18411 | } |
18412 | case WebAssembly::BI__builtin_wasm_swizzle_i8x16: { |
18413 | Value *Src = EmitScalarExpr(E->getArg(0)); |
18414 | Value *Indices = EmitScalarExpr(E->getArg(1)); |
18415 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle); |
18416 | return Builder.CreateCall(Callee, {Src, Indices}); |
18417 | } |
18418 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
18419 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
18420 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
18421 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
18422 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
18423 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
18424 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
18425 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: { |
18426 | unsigned IntNo; |
18427 | switch (BuiltinID) { |
18428 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
18429 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
18430 | IntNo = Intrinsic::sadd_sat; |
18431 | break; |
18432 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
18433 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
18434 | IntNo = Intrinsic::uadd_sat; |
18435 | break; |
18436 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
18437 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
18438 | IntNo = Intrinsic::wasm_sub_sat_signed; |
18439 | break; |
18440 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
18441 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: |
18442 | IntNo = Intrinsic::wasm_sub_sat_unsigned; |
18443 | break; |
18444 | default: |
18445 | llvm_unreachable("unexpected builtin ID");
18446 | } |
18447 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18448 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18449 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
18450 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18451 | } |
18452 | case WebAssembly::BI__builtin_wasm_abs_i8x16: |
18453 | case WebAssembly::BI__builtin_wasm_abs_i16x8: |
18454 | case WebAssembly::BI__builtin_wasm_abs_i32x4: |
18455 | case WebAssembly::BI__builtin_wasm_abs_i64x2: { |
18456 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18457 | Value *Neg = Builder.CreateNeg(Vec, "neg"); |
18458 | Constant *Zero = llvm::Constant::getNullValue(Vec->getType()); |
18459 | Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond"); |
18460 | return Builder.CreateSelect(ICmp, Neg, Vec, "abs"); |
18461 | } |
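// Editor's note (illustrative sketch of the IR emitted above, i8x16 case):
//   %neg     = sub <16 x i8> zeroinitializer, %vec
//   %abscond = icmp slt <16 x i8> %vec, zeroinitializer
//   %abs     = select <16 x i1> %abscond, <16 x i8> %neg, <16 x i8> %vec
// i.e. integer abs is open-coded as neg/compare/select rather than an
// intrinsic, leaving the backend free to pattern-match it to one instruction.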
18462 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
18463 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
18464 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
18465 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
18466 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
18467 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
18468 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
18469 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
18470 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
18471 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
18472 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
18473 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: { |
18474 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18475 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18476 | Value *ICmp; |
18477 | switch (BuiltinID) { |
18478 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
18479 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
18480 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
18481 | ICmp = Builder.CreateICmpSLT(LHS, RHS); |
18482 | break; |
18483 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
18484 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
18485 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
18486 | ICmp = Builder.CreateICmpULT(LHS, RHS); |
18487 | break; |
18488 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
18489 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
18490 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
18491 | ICmp = Builder.CreateICmpSGT(LHS, RHS); |
18492 | break; |
18493 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
18494 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
18495 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: |
18496 | ICmp = Builder.CreateICmpUGT(LHS, RHS); |
18497 | break; |
18498 | default: |
18499 | llvm_unreachable("unexpected builtin ID");
18500 | } |
18501 | return Builder.CreateSelect(ICmp, LHS, RHS); |
18502 | } |
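// Editor's note (illustrative): integer min/max are likewise open-coded as a
// compare plus select, e.g. for __builtin_wasm_min_s_i8x16:
//   %cmp = icmp slt <16 x i8> %lhs, %rhs
//   %r   = select <16 x i1> %cmp, <16 x i8> %lhs, <16 x i8> %rhs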
18503 | case WebAssembly::BI__builtin_wasm_avgr_u_i8x16: |
18504 | case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: { |
18505 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18506 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18507 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned, |
18508 | ConvertType(E->getType())); |
18509 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18510 | } |
18511 | case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: { |
18512 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18513 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18514 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed); |
18515 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18516 | } |
18517 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
18518 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
18519 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
18520 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: { |
18521 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18522 | unsigned IntNo; |
18523 | switch (BuiltinID) { |
18524 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
18525 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
18526 | IntNo = Intrinsic::wasm_extadd_pairwise_signed; |
18527 | break; |
18528 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
18529 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: |
18530 | IntNo = Intrinsic::wasm_extadd_pairwise_unsigned; |
18531 | break; |
18532 | default: |
18533 | llvm_unreachable("unexpected builtin ID");
18534 | } |
18535 | |
18536 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
18537 | return Builder.CreateCall(Callee, Vec); |
18538 | } |
18539 | case WebAssembly::BI__builtin_wasm_bitselect: { |
18540 | Value *V1 = EmitScalarExpr(E->getArg(0)); |
18541 | Value *V2 = EmitScalarExpr(E->getArg(1)); |
18542 | Value *C = EmitScalarExpr(E->getArg(2)); |
18543 | Function *Callee = |
18544 | CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType())); |
18545 | return Builder.CreateCall(Callee, {V1, V2, C}); |
18546 | } |
18547 | case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: { |
18548 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18549 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18550 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot); |
18551 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18552 | } |
18553 | case WebAssembly::BI__builtin_wasm_popcnt_i8x16: { |
18554 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18555 | Function *Callee = |
18556 | CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType())); |
18557 | return Builder.CreateCall(Callee, {Vec}); |
18558 | } |
18559 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
18560 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
18561 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
18562 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
18563 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: { |
18564 | unsigned IntNo; |
18565 | switch (BuiltinID) { |
18566 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
18567 | IntNo = Intrinsic::wasm_anytrue; |
18568 | break; |
18569 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
18570 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
18571 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
18572 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: |
18573 | IntNo = Intrinsic::wasm_alltrue; |
18574 | break; |
18575 | default: |
18576 | llvm_unreachable("unexpected builtin ID");
18577 | } |
18578 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18579 | Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType()); |
18580 | return Builder.CreateCall(Callee, {Vec}); |
18581 | } |
18582 | case WebAssembly::BI__builtin_wasm_bitmask_i8x16: |
18583 | case WebAssembly::BI__builtin_wasm_bitmask_i16x8: |
18584 | case WebAssembly::BI__builtin_wasm_bitmask_i32x4: |
18585 | case WebAssembly::BI__builtin_wasm_bitmask_i64x2: { |
18586 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18587 | Function *Callee = |
18588 | CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType()); |
18589 | return Builder.CreateCall(Callee, {Vec}); |
18590 | } |
18591 | case WebAssembly::BI__builtin_wasm_abs_f32x4: |
18592 | case WebAssembly::BI__builtin_wasm_abs_f64x2: { |
18593 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18594 | Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType()); |
18595 | return Builder.CreateCall(Callee, {Vec}); |
18596 | } |
18597 | case WebAssembly::BI__builtin_wasm_sqrt_f32x4: |
18598 | case WebAssembly::BI__builtin_wasm_sqrt_f64x2: { |
18599 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18600 | Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType()); |
18601 | return Builder.CreateCall(Callee, {Vec}); |
18602 | } |
18603 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
18604 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
18605 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
18606 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: { |
18607 | Value *Low = EmitScalarExpr(E->getArg(0)); |
18608 | Value *High = EmitScalarExpr(E->getArg(1)); |
18609 | unsigned IntNo; |
18610 | switch (BuiltinID) { |
18611 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
18612 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
18613 | IntNo = Intrinsic::wasm_narrow_signed; |
18614 | break; |
18615 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
18616 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: |
18617 | IntNo = Intrinsic::wasm_narrow_unsigned; |
18618 | break; |
18619 | default: |
18620 | llvm_unreachable("unexpected builtin ID");
18621 | } |
18622 | Function *Callee = |
18623 | CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()}); |
18624 | return Builder.CreateCall(Callee, {Low, High}); |
18625 | } |
18626 | case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4: |
18627 | case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: { |
18628 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18629 | unsigned IntNo; |
18630 | switch (BuiltinID) { |
18631 | case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4: |
18632 | IntNo = Intrinsic::fptosi_sat; |
18633 | break; |
18634 | case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: |
18635 | IntNo = Intrinsic::fptoui_sat; |
18636 | break; |
18637 | default: |
18638 | llvm_unreachable("unexpected builtin ID");
18639 | } |
18640 | llvm::Type *SrcT = Vec->getType(); |
18641 | llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty()); |
18642 | Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT}); |
18643 | Value *Trunc = Builder.CreateCall(Callee, Vec); |
18644 | Value *Splat = Constant::getNullValue(TruncT); |
18645 | return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3}); |
18646 | } |
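// Editor's note (illustrative): the two f64x2 lanes saturate to i32, and the
// shuffle widens the result to i32x4 with zeroed high lanes, roughly:
//   %trunc = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> %vec)
//   %r = shufflevector <2 x i32> %trunc, <2 x i32> zeroinitializer,
//                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>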
18647 | case WebAssembly::BI__builtin_wasm_shuffle_i8x16: { |
18648 | Value *Ops[18]; |
18649 | size_t OpIdx = 0; |
18650 | Ops[OpIdx++] = EmitScalarExpr(E->getArg(0)); |
18651 | Ops[OpIdx++] = EmitScalarExpr(E->getArg(1)); |
18652 | while (OpIdx < 18) { |
18653 | Optional<llvm::APSInt> LaneConst = |
18654 | E->getArg(OpIdx)->getIntegerConstantExpr(getContext()); |
18655 | assert(LaneConst && "Constant arg isn't actually constant?");
18656 | Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst); |
18657 | } |
18658 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle); |
18659 | return Builder.CreateCall(Callee, Ops); |
18660 | } |
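// Editor's note: the 18 operands are the two input vectors followed by the
// 16 lane indices, which must be integer constant expressions (hence the
// assert above); all 18 are passed straight through to @llvm.wasm.shuffle.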
18661 | case WebAssembly::BI__builtin_wasm_fma_f32x4: |
18662 | case WebAssembly::BI__builtin_wasm_fms_f32x4: |
18663 | case WebAssembly::BI__builtin_wasm_fma_f64x2: |
18664 | case WebAssembly::BI__builtin_wasm_fms_f64x2: { |
18665 | Value *A = EmitScalarExpr(E->getArg(0)); |
18666 | Value *B = EmitScalarExpr(E->getArg(1)); |
18667 | Value *C = EmitScalarExpr(E->getArg(2)); |
18668 | unsigned IntNo; |
18669 | switch (BuiltinID) { |
18670 | case WebAssembly::BI__builtin_wasm_fma_f32x4: |
18671 | case WebAssembly::BI__builtin_wasm_fma_f64x2: |
18672 | IntNo = Intrinsic::wasm_fma; |
18673 | break; |
18674 | case WebAssembly::BI__builtin_wasm_fms_f32x4: |
18675 | case WebAssembly::BI__builtin_wasm_fms_f64x2: |
18676 | IntNo = Intrinsic::wasm_fms; |
18677 | break; |
18678 | default: |
18679 | llvm_unreachable("unexpected builtin ID");
18680 | } |
18681 | Function *Callee = CGM.getIntrinsic(IntNo, A->getType()); |
18682 | return Builder.CreateCall(Callee, {A, B, C}); |
18683 | } |
18684 | case WebAssembly::BI__builtin_wasm_laneselect_i8x16: |
18685 | case WebAssembly::BI__builtin_wasm_laneselect_i16x8: |
18686 | case WebAssembly::BI__builtin_wasm_laneselect_i32x4: |
18687 | case WebAssembly::BI__builtin_wasm_laneselect_i64x2: { |
18688 | Value *A = EmitScalarExpr(E->getArg(0)); |
18689 | Value *B = EmitScalarExpr(E->getArg(1)); |
18690 | Value *C = EmitScalarExpr(E->getArg(2)); |
18691 | Function *Callee = |
18692 | CGM.getIntrinsic(Intrinsic::wasm_laneselect, A->getType()); |
18693 | return Builder.CreateCall(Callee, {A, B, C}); |
18694 | } |
18695 | case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: { |
18696 | Value *Src = EmitScalarExpr(E->getArg(0)); |
18697 | Value *Indices = EmitScalarExpr(E->getArg(1)); |
18698 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle); |
18699 | return Builder.CreateCall(Callee, {Src, Indices}); |
18700 | } |
18701 | case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4: |
18702 | case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4: |
18703 | case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2: |
18704 | case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: { |
18705 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18706 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18707 | unsigned IntNo; |
18708 | switch (BuiltinID) { |
18709 | case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4: |
18710 | case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2: |
18711 | IntNo = Intrinsic::wasm_relaxed_min; |
18712 | break; |
18713 | case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4: |
18714 | case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: |
18715 | IntNo = Intrinsic::wasm_relaxed_max; |
18716 | break; |
18717 | default: |
18718 | llvm_unreachable("unexpected builtin ID");
18719 | } |
18720 | Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType()); |
18721 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18722 | } |
18723 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4: |
18724 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4: |
18725 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2: |
18726 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: { |
18727 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18728 | unsigned IntNo; |
18729 | switch (BuiltinID) { |
18730 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4: |
18731 | IntNo = Intrinsic::wasm_relaxed_trunc_signed; |
18732 | break; |
18733 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4: |
18734 | IntNo = Intrinsic::wasm_relaxed_trunc_unsigned; |
18735 | break; |
18736 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2: |
18737 | IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero; |
18738 | break; |
18739 | case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: |
18740 | IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero; |
18741 | break; |
18742 | default: |
18743 | llvm_unreachable("unexpected builtin ID");
18744 | } |
18745 | Function *Callee = CGM.getIntrinsic(IntNo); |
18746 | return Builder.CreateCall(Callee, {Vec}); |
18747 | } |
18748 | default: |
18749 | return nullptr; |
18750 | } |
18751 | } |
18752 | |
18753 | static std::pair<Intrinsic::ID, unsigned> |
18754 | getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) { |
18755 | struct Info { |
18756 | unsigned BuiltinID; |
18757 | Intrinsic::ID IntrinsicID; |
18758 | unsigned VecLen; |
18759 | }; |
18760 | Info Infos[] = { |
18761 | #define CUSTOM_BUILTIN_MAPPING(x,s) \ |
18762 | { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s }, |
18763 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) |
18764 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0) |
18765 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0) |
18766 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0) |
18767 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) |
18768 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0) |
18769 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0) |
18770 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) |
18771 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0) |
18772 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0) |
18773 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0) |
18774 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0) |
18775 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0) |
18776 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0) |
18777 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0) |
18778 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0) |
18779 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0) |
18780 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0) |
18781 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0) |
18782 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0) |
18783 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0) |
18784 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0) |
18785 | // Legacy builtins that take a vector in place of a vector predicate. |
18786 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) |
18787 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64) |
18788 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64) |
18789 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64) |
18790 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128) |
18791 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128) |
18792 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128) |
18793 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128) |
18794 | #include "clang/Basic/BuiltinsHexagonMapCustomDep.def" |
18795 | #undef CUSTOM_BUILTIN_MAPPING |
18796 | }; |
18797 | |
18798 | auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; }; |
18799 | static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true); |
18800 | (void)SortOnce; |
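// Editor's note: the comma expression initializing the function-local static
// sorts Infos exactly once, on the first call, relying on C++11 thread-safe
// static initialization; subsequent calls go straight to the binary search.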
18801 | |
18802 | const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos), |
18803 | Info{BuiltinID, 0, 0}, CmpInfo); |
18804 | if (F == std::end(Infos) || F->BuiltinID != BuiltinID) |
18805 | return {Intrinsic::not_intrinsic, 0}; |
18806 | |
18807 | return {F->IntrinsicID, F->VecLen}; |
18808 | } |
18809 | |
18810 | Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, |
18811 | const CallExpr *E) { |
18812 | Intrinsic::ID ID; |
18813 | unsigned VecLen; |
18814 | std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID); |
18815 | |
18816 | auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) { |
18817 | // The base pointer is passed by address, so it needs to be loaded. |
18818 | Address A = EmitPointerWithAlignment(E->getArg(0)); |
18819 | Address BP = Address(Builder.CreateBitCast( |
18820 | A.getPointer(), Int8PtrPtrTy), Int8PtrTy, A.getAlignment()); |
18821 | llvm::Value *Base = Builder.CreateLoad(BP); |
18822 | // The treatment of both loads and stores is the same: the arguments for |
18823 | // the builtin are the same as the arguments for the intrinsic. |
18824 | // Load: |
18825 | // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start) |
18826 | // builtin(Base, Mod, Start) -> intr(Base, Mod, Start) |
18827 | // Store: |
18828 | // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start) |
18829 | // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start) |
18830 | SmallVector<llvm::Value*,5> Ops = { Base }; |
18831 | for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i) |
18832 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18833 | |
18834 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); |
18835 | // The load intrinsics generate two results (Value, NewBase), stores |
18836 | // generate one (NewBase). The new base address needs to be stored. |
18837 | llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1) |
18838 | : Result; |
18839 | llvm::Value *LV = Builder.CreateBitCast( |
18840 | EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo()); |
18841 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
18842 | llvm::Value *RetVal = |
18843 | Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); |
18844 | if (IsLoad) |
18845 | RetVal = Builder.CreateExtractValue(Result, 0); |
18846 | return RetVal; |
18847 | }; |
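// Editor's note (illustrative sketch; exact types assumed): a circular load
// such as L2_loadri_pci becomes a two-result intrinsic call,
//   %pair = call { i32, i8* } @llvm.hexagon.L2.loadri.pci(
//               i8* %base, i32 %inc, i32 %mod, i8* %start)
// where element 0 is the loaded value (returned by MakeCircOp) and element 1
// is the post-incremented base, stored back through the first argument.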
18848 | |
18849 | // Handle the conversion of bit-reverse load intrinsics to bitcode.
18850 | // The intrinsic call emitted below only reads from memory; the write to
18851 | // memory is handled by the separate store instruction.
18852 | auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) { |
18853 | // The intrinsic generates one result, which is the new value for the base |
18854 | // pointer. It needs to be returned. The result of the load instruction is |
18855 | // passed to intrinsic by address, so the value needs to be stored. |
18856 | llvm::Value *BaseAddress = |
18857 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
18858 | |
18859 | // Expressions like &(*pt++) are incremented on each evaluation;
18860 | // EmitPointerWithAlignment and EmitScalarExpr each evaluate the
18861 | // expression once per call.
18862 | Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); |
18863 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy), |
18864 | Int8Ty, DestAddr.getAlignment()); |
18865 | llvm::Value *DestAddress = DestAddr.getPointer(); |
18866 | |
18867 | // Operands are Base, Dest, Modifier. |
18868 | // The intrinsic format in LLVM IR is defined as |
18869 | // { ValueType, i8* } (i8*, i32). |
18870 | llvm::Value *Result = Builder.CreateCall( |
18871 | CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))}); |
18872 | |
18873 | // The value needs to be stored as the variable is passed by reference. |
18874 | llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0); |
18875 | |
18876 | // The stored value needs to be truncated to fit the destination type.
18877 | // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
18878 | // to be handled with stores of the respective destination type.
18879 | DestVal = Builder.CreateTrunc(DestVal, DestTy); |
18880 | |
18881 | llvm::Value *DestForStore = |
18882 | Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo()); |
18883 | Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment()); |
18884 | // The updated value of the base pointer is returned. |
18885 | return Builder.CreateExtractValue(Result, 1); |
18886 | }; |
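// Editor's note (illustrative sketch, signature assumed): for
// __builtin_brev_ldh this emits
//   %pair = call { i32, i8* } @llvm.hexagon.L2.loadrh.pbr(i8* %base, i32 %mod)
// then truncates element 0 to i16 and stores it through the second builtin
// argument, returning element 1, the updated base pointer.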
18887 | |
18888 | auto V2Q = [this, VecLen] (llvm::Value *Vec) { |
18889 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B |
18890 | : Intrinsic::hexagon_V6_vandvrt; |
18891 | return Builder.CreateCall(CGM.getIntrinsic(ID), |
18892 | {Vec, Builder.getInt32(-1)}); |
18893 | }; |
18894 | auto Q2V = [this, VecLen] (llvm::Value *Pred) { |
18895 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B |
18896 | : Intrinsic::hexagon_V6_vandqrt; |
18897 | return Builder.CreateCall(CGM.getIntrinsic(ID), |
18898 | {Pred, Builder.getInt32(-1)}); |
18899 | }; |
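// Editor's note: V2Q/Q2V bridge between the vector representation of a
// predicate and the HVX predicate (Q) register type, illustratively
// (64-byte mode; the _128B variants use correspondingly wider types):
//   %q = call <64 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v, i32 -1)
//   %v = call <16 x i32> @llvm.hexagon.V6.vandqrt(<64 x i1> %q, i32 -1)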
18900 | |
18901 | switch (BuiltinID) { |
18902 | // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR, |
18903 | // and the corresponding C/C++ builtins use loads/stores to update |
18904 | // the predicate. |
18905 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry: |
18906 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: |
18907 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry: |
18908 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: { |
18909 | // Get the type from the 0-th argument. |
18910 | llvm::Type *VecType = ConvertType(E->getArg(0)->getType()); |
18911 | Address PredAddr = Builder.CreateElementBitCast( |
18912 | EmitPointerWithAlignment(E->getArg(2)), VecType); |
18913 | llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr)); |
18914 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), |
18915 | {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn}); |
18916 | |
18917 | llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1); |
18918 | Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(), |
18919 | PredAddr.getAlignment()); |
18920 | return Builder.CreateExtractValue(Result, 0); |
18921 | } |
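// Editor's note (illustrative): at the C level the carry predicate travels
// through memory, e.g.
//   HVX_Vector sum = __builtin_HEXAGON_V6_vaddcarry(a, b, &carry);
// the load + V2Q above feeds the predicate into the intrinsic, and Q2V plus
// the store writes the produced carry back out.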
18922 | |
18923 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq: |
18924 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq: |
18925 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq: |
18926 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq: |
18927 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B: |
18928 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B: |
18929 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B: |
18930 | case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: { |
18931 | SmallVector<llvm::Value*,4> Ops; |
18932 | const Expr *PredOp = E->getArg(0); |
18933 | // There will be an implicit cast to a boolean vector. Strip it. |
18934 | if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) { |
18935 | if (Cast->getCastKind() == CK_BitCast) |
18936 | PredOp = Cast->getSubExpr(); |
18937 | Ops.push_back(V2Q(EmitScalarExpr(PredOp))); |
18938 | } |
18939 | for (int i = 1, e = E->getNumArgs(); i != e; ++i) |
18940 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18941 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
18942 | } |
18943 | |
18944 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci: |
18945 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci: |
18946 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci: |
18947 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci: |
18948 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci: |
18949 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci: |
18950 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr: |
18951 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr: |
18952 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr: |
18953 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr: |
18954 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr: |
18955 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr: |
18956 | return MakeCircOp(ID, /*IsLoad=*/true); |
18957 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci: |
18958 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci: |
18959 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci: |
18960 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci: |
18961 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci: |
18962 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr: |
18963 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr: |
18964 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr: |
18965 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr: |
18966 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr: |
18967 | return MakeCircOp(ID, /*IsLoad=*/false); |
18968 | case Hexagon::BI__builtin_brev_ldub: |
18969 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty); |
18970 | case Hexagon::BI__builtin_brev_ldb: |
18971 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty); |
18972 | case Hexagon::BI__builtin_brev_lduh: |
18973 | return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty); |
18974 | case Hexagon::BI__builtin_brev_ldh: |
18975 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty); |
18976 | case Hexagon::BI__builtin_brev_ldw: |
18977 | return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty); |
18978 | case Hexagon::BI__builtin_brev_ldd: |
18979 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty); |
18980 | } // switch |
18981 | |
18982 | return nullptr; |
18983 | } |
18984 | |
18985 | Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, |
18986 | const CallExpr *E, |
18987 | ReturnValueSlot ReturnValue) { |
18988 | SmallVector<Value *, 4> Ops; |
18989 | llvm::Type *ResultType = ConvertType(E->getType()); |
18990 | |
18991 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
18992 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18993 | |
18994 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
18995 | unsigned NF = 1; |
18996 | constexpr unsigned TAIL_UNDISTURBED = 0; |
18997 | |
18998 | // Required for overloaded intrinsics. |
18999 | llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes; |
19000 | switch (BuiltinID) { |
19001 | default: llvm_unreachable("unexpected builtin ID");
19002 | case RISCV::BI__builtin_riscv_orc_b_32: |
19003 | case RISCV::BI__builtin_riscv_orc_b_64: |
19004 | case RISCV::BI__builtin_riscv_clz_32: |
19005 | case RISCV::BI__builtin_riscv_clz_64: |
19006 | case RISCV::BI__builtin_riscv_clmul: |
19007 | case RISCV::BI__builtin_riscv_clmulh: |
19008 | case RISCV::BI__builtin_riscv_clmulr: |
19009 | case RISCV::BI__builtin_riscv_bcompress_32: |
19010 | case RISCV::BI__builtin_riscv_bcompress_64: |
19011 | case RISCV::BI__builtin_riscv_bdecompress_32: |
19012 | case RISCV::BI__builtin_riscv_bdecompress_64: |
19013 | case RISCV::BI__builtin_riscv_bfp_32: |
19014 | case RISCV::BI__builtin_riscv_bfp_64: |
19015 | case RISCV::BI__builtin_riscv_grev_32: |
19016 | case RISCV::BI__builtin_riscv_grev_64: |
19017 | case RISCV::BI__builtin_riscv_gorc_32: |
19018 | case RISCV::BI__builtin_riscv_gorc_64: |
19019 | case RISCV::BI__builtin_riscv_shfl_32: |
19020 | case RISCV::BI__builtin_riscv_shfl_64: |
19021 | case RISCV::BI__builtin_riscv_unshfl_32: |
19022 | case RISCV::BI__builtin_riscv_unshfl_64: |
19023 | case RISCV::BI__builtin_riscv_xperm4: |
19024 | case RISCV::BI__builtin_riscv_xperm8: |
19025 | case RISCV::BI__builtin_riscv_xperm_n: |
19026 | case RISCV::BI__builtin_riscv_xperm_b: |
19027 | case RISCV::BI__builtin_riscv_xperm_h: |
19028 | case RISCV::BI__builtin_riscv_xperm_w: |
19029 | case RISCV::BI__builtin_riscv_crc32_b: |
19030 | case RISCV::BI__builtin_riscv_crc32_h: |
19031 | case RISCV::BI__builtin_riscv_crc32_w: |
19032 | case RISCV::BI__builtin_riscv_crc32_d: |
19033 | case RISCV::BI__builtin_riscv_crc32c_b: |
19034 | case RISCV::BI__builtin_riscv_crc32c_h: |
19035 | case RISCV::BI__builtin_riscv_crc32c_w: |
19036 | case RISCV::BI__builtin_riscv_crc32c_d: |
19037 | case RISCV::BI__builtin_riscv_fsl_32: |
19038 | case RISCV::BI__builtin_riscv_fsr_32: |
19039 | case RISCV::BI__builtin_riscv_fsl_64: |
19040 | case RISCV::BI__builtin_riscv_fsr_64: |
19041 | case RISCV::BI__builtin_riscv_brev8: |
19042 | case RISCV::BI__builtin_riscv_zip_32: |
19043 | case RISCV::BI__builtin_riscv_unzip_32: { |
19044 | switch (BuiltinID) { |
19045 | default: llvm_unreachable("unexpected builtin ID");
19046 | // Zbb |
19047 | case RISCV::BI__builtin_riscv_orc_b_32: |
19048 | case RISCV::BI__builtin_riscv_orc_b_64: |
19049 | ID = Intrinsic::riscv_orc_b; |
19050 | break; |
19051 | case RISCV::BI__builtin_riscv_clz_32: |
19052 | case RISCV::BI__builtin_riscv_clz_64: { |
19053 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
19054 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
19055 | } |
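// Editor's note: the i1 false argument to @llvm.ctlz requests a fully
// defined result for a zero input (returning the bit width), matching the
// RISC-V clz semantics; illustratively:
//   %r = call i32 @llvm.ctlz.i32(i32 %a, i1 false)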
19056 | |
19057 | // Zbc |
19058 | case RISCV::BI__builtin_riscv_clmul: |
19059 | ID = Intrinsic::riscv_clmul; |
19060 | break; |
19061 | case RISCV::BI__builtin_riscv_clmulh: |
19062 | ID = Intrinsic::riscv_clmulh; |
19063 | break; |
19064 | case RISCV::BI__builtin_riscv_clmulr: |
19065 | ID = Intrinsic::riscv_clmulr; |
19066 | break; |
19067 | |
19068 | // Zbe |
19069 | case RISCV::BI__builtin_riscv_bcompress_32: |
19070 | case RISCV::BI__builtin_riscv_bcompress_64: |
19071 | ID = Intrinsic::riscv_bcompress; |
19072 | break; |
19073 | case RISCV::BI__builtin_riscv_bdecompress_32: |
19074 | case RISCV::BI__builtin_riscv_bdecompress_64: |
19075 | ID = Intrinsic::riscv_bdecompress; |
19076 | break; |
19077 | |
19078 | // Zbf |
19079 | case RISCV::BI__builtin_riscv_bfp_32: |
19080 | case RISCV::BI__builtin_riscv_bfp_64: |
19081 | ID = Intrinsic::riscv_bfp; |
19082 | break; |
19083 | |
19084 | // Zbp |
19085 | case RISCV::BI__builtin_riscv_grev_32: |
19086 | case RISCV::BI__builtin_riscv_grev_64: |
19087 | ID = Intrinsic::riscv_grev; |
19088 | break; |
19089 | case RISCV::BI__builtin_riscv_gorc_32: |
19090 | case RISCV::BI__builtin_riscv_gorc_64: |
19091 | ID = Intrinsic::riscv_gorc; |
19092 | break; |
19093 | case RISCV::BI__builtin_riscv_shfl_32: |
19094 | case RISCV::BI__builtin_riscv_shfl_64: |
19095 | ID = Intrinsic::riscv_shfl; |
19096 | break; |
19097 | case RISCV::BI__builtin_riscv_unshfl_32: |
19098 | case RISCV::BI__builtin_riscv_unshfl_64: |
19099 | ID = Intrinsic::riscv_unshfl; |
19100 | break; |
19101 | case RISCV::BI__builtin_riscv_xperm_n: |
19102 | ID = Intrinsic::riscv_xperm_n; |
19103 | break; |
19104 | case RISCV::BI__builtin_riscv_xperm_b: |
19105 | ID = Intrinsic::riscv_xperm_b; |
19106 | break; |
19107 | case RISCV::BI__builtin_riscv_xperm_h: |
19108 | ID = Intrinsic::riscv_xperm_h; |
19109 | break; |
19110 | case RISCV::BI__builtin_riscv_xperm_w: |
19111 | ID = Intrinsic::riscv_xperm_w; |
19112 | break; |
19113 | |
19114 | // Zbr |
19115 | case RISCV::BI__builtin_riscv_crc32_b: |
19116 | ID = Intrinsic::riscv_crc32_b; |
19117 | break; |
19118 | case RISCV::BI__builtin_riscv_crc32_h: |
19119 | ID = Intrinsic::riscv_crc32_h; |
19120 | break; |
19121 | case RISCV::BI__builtin_riscv_crc32_w: |
19122 | ID = Intrinsic::riscv_crc32_w; |
19123 | break; |
19124 | case RISCV::BI__builtin_riscv_crc32_d: |
19125 | ID = Intrinsic::riscv_crc32_d; |
19126 | break; |
19127 | case RISCV::BI__builtin_riscv_crc32c_b: |
19128 | ID = Intrinsic::riscv_crc32c_b; |
19129 | break; |
19130 | case RISCV::BI__builtin_riscv_crc32c_h: |
19131 | ID = Intrinsic::riscv_crc32c_h; |
19132 | break; |
19133 | case RISCV::BI__builtin_riscv_crc32c_w: |
19134 | ID = Intrinsic::riscv_crc32c_w; |
19135 | break; |
19136 | case RISCV::BI__builtin_riscv_crc32c_d: |
19137 | ID = Intrinsic::riscv_crc32c_d; |
19138 | break; |
19139 | |
19140 | // Zbt |
19141 | case RISCV::BI__builtin_riscv_fsl_32: |
19142 | case RISCV::BI__builtin_riscv_fsl_64: |
19143 | ID = Intrinsic::riscv_fsl; |
19144 | break; |
19145 | case RISCV::BI__builtin_riscv_fsr_32: |
19146 | case RISCV::BI__builtin_riscv_fsr_64: |
19147 | ID = Intrinsic::riscv_fsr; |
19148 | break; |
19149 | |
19150 | // Zbkx |
19151 | case RISCV::BI__builtin_riscv_xperm8: |
19152 | ID = Intrinsic::riscv_xperm8; |
19153 | break; |
19154 | case RISCV::BI__builtin_riscv_xperm4: |
19155 | ID = Intrinsic::riscv_xperm4; |
19156 | break; |
19157 | |
19158 | // Zbkb |
19159 | case RISCV::BI__builtin_riscv_brev8: |
19160 | ID = Intrinsic::riscv_brev8; |
19161 | break; |
19162 | case RISCV::BI__builtin_riscv_zip_32: |
19163 | ID = Intrinsic::riscv_zip; |
19164 | break; |
19165 | case RISCV::BI__builtin_riscv_unzip_32: |
19166 | ID = Intrinsic::riscv_unzip; |
19167 | break; |
19168 | } |
19169 | |
19170 | IntrinsicTypes = {ResultType}; |
19171 | break; |
19172 | } |
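// Editor's note: the Zb* intrinsics above are overloaded on the operand
// width, so IntrinsicTypes carries the result type (i32 or i64 to match
// XLen); most of the Zk* crypto intrinsics below are fixed-width and need
// no overload, except where IntrinsicTypes is set explicitly.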
19173 | |
19174 | // Zk builtins |
19175 | |
19176 | // Zknd |
19177 | case RISCV::BI__builtin_riscv_aes32dsi_32: |
19178 | ID = Intrinsic::riscv_aes32dsi; |
19179 | break; |
19180 | case RISCV::BI__builtin_riscv_aes32dsmi_32: |
19181 | ID = Intrinsic::riscv_aes32dsmi; |
19182 | break; |
19183 | case RISCV::BI__builtin_riscv_aes64ds_64: |
19184 | ID = Intrinsic::riscv_aes64ds; |
19185 | break; |
19186 | case RISCV::BI__builtin_riscv_aes64dsm_64: |
19187 | ID = Intrinsic::riscv_aes64dsm; |
19188 | break; |
19189 | case RISCV::BI__builtin_riscv_aes64im_64: |
19190 | ID = Intrinsic::riscv_aes64im; |
19191 | break; |
19192 | |
19193 | // Zkne |
19194 | case RISCV::BI__builtin_riscv_aes32esi_32: |
19195 | ID = Intrinsic::riscv_aes32esi; |
19196 | break; |
19197 | case RISCV::BI__builtin_riscv_aes32esmi_32: |
19198 | ID = Intrinsic::riscv_aes32esmi; |
19199 | break; |
19200 | case RISCV::BI__builtin_riscv_aes64es_64: |
19201 | ID = Intrinsic::riscv_aes64es; |
19202 | break; |
19203 | case RISCV::BI__builtin_riscv_aes64esm_64: |
19204 | ID = Intrinsic::riscv_aes64esm; |
19205 | break; |
19206 | |
19207 | // Zknd & Zkne |
19208 | case RISCV::BI__builtin_riscv_aes64ks1i_64: |
19209 | ID = Intrinsic::riscv_aes64ks1i; |
19210 | break; |
19211 | case RISCV::BI__builtin_riscv_aes64ks2_64: |
19212 | ID = Intrinsic::riscv_aes64ks2; |
19213 | break; |
19214 | |
19215 | // Zknh |
19216 | case RISCV::BI__builtin_riscv_sha256sig0: |
19217 | ID = Intrinsic::riscv_sha256sig0; |
19218 | IntrinsicTypes = {ResultType}; |
19219 | break; |
19220 | case RISCV::BI__builtin_riscv_sha256sig1: |
19221 | ID = Intrinsic::riscv_sha256sig1; |
19222 | IntrinsicTypes = {ResultType}; |
19223 | break; |
19224 | case RISCV::BI__builtin_riscv_sha256sum0: |
19225 | ID = Intrinsic::riscv_sha256sum0; |
19226 | IntrinsicTypes = {ResultType}; |
19227 | break; |
19228 | case RISCV::BI__builtin_riscv_sha256sum1: |
19229 | ID = Intrinsic::riscv_sha256sum1; |
19230 | IntrinsicTypes = {ResultType}; |
19231 | break; |
19232 | case RISCV::BI__builtin_riscv_sha512sig0_64: |
19233 | ID = Intrinsic::riscv_sha512sig0; |
19234 | break; |
19235 | case RISCV::BI__builtin_riscv_sha512sig0h_32: |
19236 | ID = Intrinsic::riscv_sha512sig0h; |
19237 | break; |
19238 | case RISCV::BI__builtin_riscv_sha512sig0l_32: |
19239 | ID = Intrinsic::riscv_sha512sig0l; |
19240 | break; |
19241 | case RISCV::BI__builtin_riscv_sha512sig1_64: |
19242 | ID = Intrinsic::riscv_sha512sig1; |
19243 | break; |
19244 | case RISCV::BI__builtin_riscv_sha512sig1h_32: |
19245 | ID = Intrinsic::riscv_sha512sig1h; |
19246 | break; |
19247 | case RISCV::BI__builtin_riscv_sha512sig1l_32: |
19248 | ID = Intrinsic::riscv_sha512sig1l; |
19249 | break; |
19250 | case RISCV::BI__builtin_riscv_sha512sum0_64: |
19251 | ID = Intrinsic::riscv_sha512sum0; |
19252 | break; |
19253 | case RISCV::BI__builtin_riscv_sha512sum0r_32: |
19254 | ID = Intrinsic::riscv_sha512sum0r; |
19255 | break; |
19256 | case RISCV::BI__builtin_riscv_sha512sum1_64: |
19257 | ID = Intrinsic::riscv_sha512sum1; |
19258 | break; |
19259 | case RISCV::BI__builtin_riscv_sha512sum1r_32: |
19260 | ID = Intrinsic::riscv_sha512sum1r; |
19261 | break; |
19262 | |
19263 | // Zksed |
19264 | case RISCV::BI__builtin_riscv_sm4ks: |
19265 | ID = Intrinsic::riscv_sm4ks; |
19266 | IntrinsicTypes = {ResultType}; |
19267 | break; |
19268 | case RISCV::BI__builtin_riscv_sm4ed: |
19269 | ID = Intrinsic::riscv_sm4ed; |
19270 | IntrinsicTypes = {ResultType}; |
19271 | break; |
19272 | |
19273 | // Zksh |
19274 | case RISCV::BI__builtin_riscv_sm3p0: |
19275 | ID = Intrinsic::riscv_sm3p0; |
19276 | IntrinsicTypes = {ResultType}; |
19277 | break; |
19278 | case RISCV::BI__builtin_riscv_sm3p1: |
19279 | ID = Intrinsic::riscv_sm3p1; |
19280 | IntrinsicTypes = {ResultType}; |
19281 | break; |
19282 | |
19283 | // Vector builtins are handled from here. |
19284 | #include "clang/Basic/riscv_vector_builtin_cg.inc" |
19285 | } |
19286 | |
19287 | assert(ID != Intrinsic::not_intrinsic);
19288 | |
19289 | llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); |
19290 | return Builder.CreateCall(F, Ops, ""); |
19291 | } |