File: clang/lib/CodeGen/CGBuiltin.cpp
Warning: line 1004, column 22: Value stored to 'RetType' during its initialization is never read
1 | //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This contains code to emit Builtin calls as LLVM code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CGCUDARuntime.h" |
14 | #include "CGCXXABI.h" |
15 | #include "CGObjCRuntime.h" |
16 | #include "CGOpenCLRuntime.h" |
17 | #include "CGRecordLayout.h" |
18 | #include "CodeGenFunction.h" |
19 | #include "CodeGenModule.h" |
20 | #include "ConstantEmitter.h" |
21 | #include "PatternInit.h" |
22 | #include "TargetInfo.h" |
23 | #include "clang/AST/ASTContext.h" |
24 | #include "clang/AST/Attr.h" |
25 | #include "clang/AST/Decl.h" |
26 | #include "clang/AST/OSLog.h" |
27 | #include "clang/Basic/TargetBuiltins.h" |
28 | #include "clang/Basic/TargetInfo.h" |
29 | #include "clang/CodeGen/CGFunctionInfo.h" |
30 | #include "llvm/ADT/APFloat.h" |
31 | #include "llvm/ADT/APInt.h" |
32 | #include "llvm/ADT/SmallPtrSet.h" |
33 | #include "llvm/ADT/StringExtras.h" |
34 | #include "llvm/Analysis/ValueTracking.h" |
35 | #include "llvm/IR/DataLayout.h" |
36 | #include "llvm/IR/InlineAsm.h" |
37 | #include "llvm/IR/Intrinsics.h" |
38 | #include "llvm/IR/IntrinsicsAArch64.h" |
39 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
40 | #include "llvm/IR/IntrinsicsARM.h" |
41 | #include "llvm/IR/IntrinsicsBPF.h" |
42 | #include "llvm/IR/IntrinsicsHexagon.h" |
43 | #include "llvm/IR/IntrinsicsNVPTX.h" |
44 | #include "llvm/IR/IntrinsicsPowerPC.h" |
45 | #include "llvm/IR/IntrinsicsR600.h" |
46 | #include "llvm/IR/IntrinsicsRISCV.h" |
47 | #include "llvm/IR/IntrinsicsS390.h" |
48 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
49 | #include "llvm/IR/IntrinsicsX86.h" |
50 | #include "llvm/IR/MDBuilder.h" |
51 | #include "llvm/IR/MatrixBuilder.h" |
52 | #include "llvm/Support/ConvertUTF.h" |
53 | #include "llvm/Support/ScopedPrinter.h" |
54 | #include "llvm/Support/X86TargetParser.h" |
55 | #include <sstream> |
56 | |
57 | using namespace clang; |
58 | using namespace CodeGen; |
59 | using namespace llvm; |
60 | |
61 | static |
62 | int64_t clamp(int64_t Value, int64_t Low, int64_t High) { |
63 | return std::min(High, std::max(Low, Value)); |
64 | } |
65 | |
66 | static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, |
67 | Align AlignmentInBytes) { |
68 | ConstantInt *Byte; |
69 | switch (CGF.getLangOpts().getTrivialAutoVarInit()) { |
70 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
71 | // Nothing to initialize. |
72 | return; |
73 | case LangOptions::TrivialAutoVarInitKind::Zero: |
74 | Byte = CGF.Builder.getInt8(0x00); |
75 | break; |
76 | case LangOptions::TrivialAutoVarInitKind::Pattern: { |
77 | llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext()); |
78 | Byte = llvm::dyn_cast<llvm::ConstantInt>( |
79 | initializationPatternFor(CGF.CGM, Int8)); |
80 | break; |
81 | } |
82 | } |
83 | if (CGF.CGM.stopAutoInit()) |
84 | return; |
85 | auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes); |
86 | I->addAnnotationMetadata("auto-init"); |
87 | } |
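Illustrative sketch (hypothetical example, zero-init mode assumed): under -ftrivial-auto-var-init=zero, an alloca builtin picks up a zeroing memset tagged with "auto-init" metadata.

    unsigned alloca_example(unsigned n) {
      char *buf = (char *)__builtin_alloca(n);
      // Emitted roughly as:
      //   %buf = alloca i8, i64 %n
      //   call void @llvm.memset.p0i8.i64(i8* %buf, i8 0, i64 %n, i1 false)  ; annotated "auto-init"
      return n ? (unsigned)buf[0] : 0;
    }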
88 | |
89 | /// getBuiltinLibFunction - Given a builtin id for a function like |
90 | /// "__builtin_fabsf", return a Function* for "fabsf". |
91 | llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
92 | unsigned BuiltinID) { |
93 | assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
94 | |
95 | // Get the name, skip over the __builtin_ prefix (if necessary). |
96 | StringRef Name; |
97 | GlobalDecl D(FD); |
98 | |
99 | // If the builtin has been declared explicitly with an assembler label, |
100 | // use the mangled name. This differs from the plain label on platforms |
101 | // that prefix labels. |
102 | if (FD->hasAttr<AsmLabelAttr>()) |
103 | Name = getMangledName(D); |
104 | else |
105 | Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
106 | |
107 | llvm::FunctionType *Ty = |
108 | cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
109 | |
110 | return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); |
111 | } |
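Illustrative sketch, mirroring the doc comment's own example: when a library builtin has no explicit asm label, the "+ 10" above skips the ten characters of "__builtin_", so the callee name is the plain libm symbol.

    float fabs_example(float x) {
      // The builtin id for __builtin_fabsf resolves to the library function:
      //   declare float @fabsf(float)
      return __builtin_fabsf(x);
    }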
112 | |
113 | /// Emit the conversions required to turn the given value into an |
114 | /// integer of the given size. |
115 | static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
116 | QualType T, llvm::IntegerType *IntType) { |
117 | V = CGF.EmitToMemory(V, T); |
118 | |
119 | if (V->getType()->isPointerTy()) |
120 | return CGF.Builder.CreatePtrToInt(V, IntType); |
121 | |
122 | assert(V->getType() == IntType); |
123 | return V; |
124 | } |
125 | |
126 | static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
127 | QualType T, llvm::Type *ResultType) { |
128 | V = CGF.EmitFromMemory(V, T); |
129 | |
130 | if (ResultType->isPointerTy()) |
131 | return CGF.Builder.CreateIntToPtr(V, ResultType); |
132 | |
133 | assert(V->getType() == ResultType); |
134 | return V; |
135 | } |
136 | |
137 | /// Utility to insert an atomic instruction based on Intrinsic::ID |
138 | /// and the expression node. |
139 | static Value *MakeBinaryAtomicValue( |
140 | CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, |
141 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
142 | QualType T = E->getType(); |
143 | assert(E->getArg(0)->getType()->isPointerType()); |
144 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
145 | E->getArg(0)->getType()->getPointeeType())); |
146 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
147 | |
148 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
149 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
150 | |
151 | llvm::IntegerType *IntType = |
152 | llvm::IntegerType::get(CGF.getLLVMContext(), |
153 | CGF.getContext().getTypeSize(T)); |
154 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
155 | |
156 | llvm::Value *Args[2]; |
157 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
158 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
159 | llvm::Type *ValueType = Args[1]->getType(); |
160 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
161 | |
162 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
163 | Kind, Args[0], Args[1], Ordering); |
164 | return EmitFromInt(CGF, Result, T, ValueType); |
165 | } |
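A sketch of this path, assuming a plain 32-bit int operand:

    int fetch_add_example(int *counter) {
      // MakeBinaryAtomicValue with Kind == AtomicRMWInst::Add emits roughly:
      //   %old = atomicrmw add i32* %counter, i32 1 seq_cst
      // The old value is returned (after EmitFromInt conversion).
      return __sync_fetch_and_add(counter, 1);
    }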
166 | |
167 | static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
168 | Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
169 | Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
170 | |
171 | // Convert the type of the pointer to a pointer to the stored type. |
172 | Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
173 | Value *BC = CGF.Builder.CreateBitCast( |
174 | Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); |
175 | LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
176 | LV.setNontemporal(true); |
177 | CGF.EmitStoreOfScalar(Val, LV, false); |
178 | return nullptr; |
179 | } |
180 | |
181 | static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
182 | Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
183 | |
184 | LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
185 | LV.setNontemporal(true); |
186 | return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
187 | } |
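Sketch of the builtins these two helpers serve; the only difference from an ordinary load/store is the !nontemporal metadata on the emitted instruction.

    void stream_store_example(int *dst, int v) {
      __builtin_nontemporal_store(v, dst);
      // store i32 %v, i32* %dst, align 4, !nontemporal !0   ; !0 = !{i32 1}
    }
    int stream_load_example(const int *src) {
      return __builtin_nontemporal_load(src);
    }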
188 | |
189 | static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
190 | llvm::AtomicRMWInst::BinOp Kind, |
191 | const CallExpr *E) { |
192 | return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
193 | } |
194 | |
195 | /// Utility to insert an atomic instruction based on Intrinsic::ID and |
196 | /// the expression node, where the return value is the result of the |
197 | /// operation. |
198 | static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
199 | llvm::AtomicRMWInst::BinOp Kind, |
200 | const CallExpr *E, |
201 | Instruction::BinaryOps Op, |
202 | bool Invert = false) { |
203 | QualType T = E->getType(); |
204 | assert(E->getArg(0)->getType()->isPointerType()); |
205 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
206 | E->getArg(0)->getType()->getPointeeType())); |
207 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
208 | |
209 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
210 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
211 | |
212 | llvm::IntegerType *IntType = |
213 | llvm::IntegerType::get(CGF.getLLVMContext(), |
214 | CGF.getContext().getTypeSize(T)); |
215 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
216 | |
217 | llvm::Value *Args[2]; |
218 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
219 | llvm::Type *ValueType = Args[1]->getType(); |
220 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
221 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
222 | |
223 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
224 | Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
225 | Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
226 | if (Invert) |
227 | Result = |
228 | CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
229 | llvm::ConstantInt::getAllOnesValue(IntType)); |
230 | Result = EmitFromInt(CGF, Result, T, ValueType); |
231 | return RValue::get(Result); |
232 | } |
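Sketch of the post-op variant, assuming int operands: atomicrmw yields the old value and the helper recomputes the new one (the Invert path, used for __sync_nand_and_fetch, additionally XORs with all-ones).

    int add_and_fetch_example(int *p, int n) {
      //   %old = atomicrmw add i32* %p, i32 %n seq_cst
      //   %new = add i32 %old, %n        ; CreateBinOp(Op, Result, Args[1])
      return __sync_add_and_fetch(p, n);
    }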
233 | |
234 | /// Utility to insert an atomic cmpxchg instruction. |
235 | /// |
236 | /// @param CGF The current codegen function. |
237 | /// @param E Builtin call expression to convert to cmpxchg. |
238 | /// arg0 - address to operate on |
239 | /// arg1 - value to compare with |
240 | /// arg2 - new value |
241 | /// @param ReturnBool Specifies whether to return success flag of |
242 | /// cmpxchg result or the old value. |
243 | /// |
244 | /// @returns result of cmpxchg, according to ReturnBool |
245 | /// |
246 | /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics, |
247 | /// invoke the function EmitAtomicCmpXchgForMSIntrin instead. |
248 | static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
249 | bool ReturnBool) { |
250 | QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
251 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
252 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
253 | |
254 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
255 | CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
256 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
257 | |
258 | Value *Args[3]; |
259 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
260 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
261 | llvm::Type *ValueType = Args[1]->getType(); |
262 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
263 | Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
264 | |
265 | Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
266 | Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
267 | llvm::AtomicOrdering::SequentiallyConsistent); |
268 | if (ReturnBool) |
269 | // Extract boolean success flag and zext it to int. |
270 | return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
271 | CGF.ConvertType(E->getType())); |
272 | else |
273 | // Extract old value and emit it using the same type as compare value. |
274 | return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
275 | ValueType); |
276 | } |
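Sketch showing both return modes of this helper, assuming int operands:

    int cas_example(int *p, int expected, int desired) {
      // Both forms emit:
      //   %pair = cmpxchg i32* %p, i32 %expected, i32 %desired seq_cst seq_cst
      // ReturnBool extracts field 1 (zext'ed); otherwise field 0 is returned.
      int ok  = __sync_bool_compare_and_swap(p, expected, desired);
      int old = __sync_val_compare_and_swap(p, expected, desired);
      return ok ? old : expected;
    }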
277 | |
278 | /// This function should be invoked to emit atomic cmpxchg for Microsoft's |
279 | /// _InterlockedCompareExchange* intrinsics which have the following signature: |
280 | /// T _InterlockedCompareExchange(T volatile *Destination, |
281 | /// T Exchange, |
282 | /// T Comparand); |
283 | /// |
284 | /// Whereas the llvm 'cmpxchg' instruction has the following syntax: |
285 | /// cmpxchg *Destination, Comparand, Exchange. |
286 | /// So we need to swap Comparand and Exchange when invoking |
287 | /// CreateAtomicCmpXchg. That is the reason we could not use the above utility |
288 | /// function MakeAtomicCmpXchgValue since it expects the arguments to be |
289 | /// already swapped. |
290 | |
291 | static |
292 | Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, |
293 | AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { |
294 | assert(E->getArg(0)->getType()->isPointerType()); |
295 | assert(CGF.getContext().hasSameUnqualifiedType( |
296 | E->getType(), E->getArg(0)->getType()->getPointeeType())); |
297 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
298 | E->getArg(1)->getType())); |
299 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
300 | E->getArg(2)->getType())); |
301 | |
302 | auto *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
303 | auto *Comparand = CGF.EmitScalarExpr(E->getArg(2)); |
304 | auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); |
305 | |
306 | // For Release ordering, the failure ordering should be Monotonic. |
307 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? |
308 | AtomicOrdering::Monotonic : |
309 | SuccessOrdering; |
310 | |
311 | // The atomic instruction is marked volatile for consistency with MSVC. This |
312 | // blocks the few atomics optimizations that LLVM has. If we want to optimize |
313 | // _Interlocked* operations in the future, we will have to remove the volatile |
314 | // marker. |
315 | auto *Result = CGF.Builder.CreateAtomicCmpXchg( |
316 | Destination, Comparand, Exchange, |
317 | SuccessOrdering, FailureOrdering); |
318 | Result->setVolatile(true); |
319 | return CGF.Builder.CreateExtractValue(Result, 0); |
320 | } |
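A sketch of the operand swap described above, assuming an MSVC target where long is 32 bits:

    // long _InterlockedCompareExchange(long volatile *Dest, long Exch, long Comp)
    // emits, with the comparand (arg 2) ahead of the exchange value (arg 1):
    //   %pair = cmpxchg volatile i32* %Dest, i32 %Comp, i32 %Exch seq_cst seq_cst
    //   %old  = extractvalue { i32, i1 } %pair, 0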
321 | |
322 | // 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are |
323 | // prototyped like this: |
324 | // |
325 | // unsigned char _InterlockedCompareExchange128...( |
326 | // __int64 volatile * _Destination, |
327 | // __int64 _ExchangeHigh, |
328 | // __int64 _ExchangeLow, |
329 | // __int64 * _ComparandResult); |
330 | static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, |
331 | const CallExpr *E, |
332 | AtomicOrdering SuccessOrdering) { |
333 | assert(E->getNumArgs() == 4); |
334 | llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
335 | llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1)); |
336 | llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2)); |
337 | llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3)); |
338 | |
339 | assert(Destination->getType()->isPointerTy()); |
340 | assert(!ExchangeHigh->getType()->isPointerTy()); |
341 | assert(!ExchangeLow->getType()->isPointerTy()); |
342 | assert(ComparandPtr->getType()->isPointerTy()); |
343 | |
344 | // For Release ordering, the failure ordering should be Monotonic. |
345 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release |
346 | ? AtomicOrdering::Monotonic |
347 | : SuccessOrdering; |
348 | |
349 | // Convert to i128 pointers and values. |
350 | llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128); |
351 | llvm::Type *Int128PtrTy = Int128Ty->getPointerTo(); |
352 | Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy); |
353 | Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy), |
354 | CGF.getContext().toCharUnitsFromBits(128)); |
355 | |
356 | // (((i128)hi) << 64) | ((i128)lo) |
357 | ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty); |
358 | ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty); |
359 | ExchangeHigh = |
360 | CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64)); |
361 | llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow); |
362 | |
363 | // Load the comparand for the instruction. |
364 | llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult); |
365 | |
366 | auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
367 | SuccessOrdering, FailureOrdering); |
368 | |
369 | // The atomic instruction is marked volatile for consistency with MSVC. This |
370 | // blocks the few atomics optimizations that LLVM has. If we want to optimize |
371 | // _Interlocked* operations in the future, we will have to remove the volatile |
372 | // marker. |
373 | CXI->setVolatile(true); |
374 | |
375 | // Store the result as an outparameter. |
376 | CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0), |
377 | ComparandResult); |
378 | |
379 | // Get the success boolean and zero extend it to i8. |
380 | Value *Success = CGF.Builder.CreateExtractValue(CXI, 1); |
381 | return CGF.Builder.CreateZExt(Success, CGF.Int8Ty); |
382 | } |
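Sketch of the 128-bit exchange value assembled above from the two halves:

    //   %hi       = zext i64 %ExchangeHigh to i128
    //   %shifted  = shl i128 %hi, 64
    //   %lo       = zext i64 %ExchangeLow to i128
    //   %exchange = or i128 %shifted, %lo
    //   %pair     = cmpxchg volatile i128* %Destination, i128 %cmp, i128 %exchange ...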
383 | |
384 | static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, |
385 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
386 | assert(E->getArg(0)->getType()->isPointerType()); |
387 | |
388 | auto *IntTy = CGF.ConvertType(E->getType()); |
389 | auto *Result = CGF.Builder.CreateAtomicRMW( |
390 | AtomicRMWInst::Add, |
391 | CGF.EmitScalarExpr(E->getArg(0)), |
392 | ConstantInt::get(IntTy, 1), |
393 | Ordering); |
394 | return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1)); |
395 | } |
396 | |
397 | static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, |
398 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
399 | assert(E->getArg(0)->getType()->isPointerType()); |
400 | |
401 | auto *IntTy = CGF.ConvertType(E->getType()); |
402 | auto *Result = CGF.Builder.CreateAtomicRMW( |
403 | AtomicRMWInst::Sub, |
404 | CGF.EmitScalarExpr(E->getArg(0)), |
405 | ConstantInt::get(IntTy, 1), |
406 | Ordering); |
407 | return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1)); |
408 | } |
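Sketch, assuming an MSVC target with 32-bit long: the _Interlocked increment and decrement intrinsics return the *new* value, hence the extra add/sub after the atomicrmw (which returns the old value).

    // long _InterlockedDecrement(long volatile *p):
    //   %old = atomicrmw sub i32* %p, i32 1 seq_cst
    //   %ret = sub i32 %old, 1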
409 | |
410 | // Build a plain volatile load. |
411 | static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { |
412 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
413 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
414 | CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy); |
415 | llvm::Type *ITy = |
416 | llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8); |
417 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
418 | llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize); |
419 | Load->setVolatile(true); |
420 | return Load; |
421 | } |
422 | |
423 | // Build a plain volatile store. |
424 | static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { |
425 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
426 | Value *Value = CGF.EmitScalarExpr(E->getArg(1)); |
427 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
428 | CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy); |
429 | llvm::Type *ITy = |
430 | llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8); |
431 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
432 | llvm::StoreInst *Store = |
433 | CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize); |
434 | Store->setVolatile(true); |
435 | return Store; |
436 | } |
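Sketch of the __iso_volatile_* builtins these helpers back (assuming an MSVC target that provides them):

    // int __iso_volatile_load32(const volatile int *p):
    //   %v = load volatile i32, i32* %p
    // void __iso_volatile_store32(volatile int *p, int v):
    //   store volatile i32 %v, i32* %p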
437 | |
438 | // Emit a simple mangled intrinsic that has 1 argument and a return type |
439 | // matching the argument type. Depending on mode, this may be a constrained |
440 | // floating-point intrinsic. |
441 | static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
442 | const CallExpr *E, unsigned IntrinsicID, |
443 | unsigned ConstrainedIntrinsicID) { |
444 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
445 | |
446 | if (CGF.Builder.getIsFPConstrained()) { |
447 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
448 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
449 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0 }); |
450 | } else { |
451 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
452 | return CGF.Builder.CreateCall(F, Src0); |
453 | } |
454 | } |
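Sketch of the two modes, using sqrt as a representative unary builtin; the constrained intrinsic is chosen when a strict FP environment is active (e.g. -ffp-exception-behavior=strict).

    double sqrt_example(double x) {
      // Default:     %r = call double @llvm.sqrt.f64(double %x)
      // Constrained: %r = call double @llvm.experimental.constrained.sqrt.f64(
      //                       double %x, metadata !"round.dynamic",
      //                       metadata !"fpexcept.strict")
      return __builtin_sqrt(x);
    }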
455 | |
456 | // Emit an intrinsic that has 2 operands of the same type as its result. |
457 | // Depending on mode, this may be a constrained floating-point intrinsic. |
458 | static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
459 | const CallExpr *E, unsigned IntrinsicID, |
460 | unsigned ConstrainedIntrinsicID) { |
461 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
462 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
463 | |
464 | if (CGF.Builder.getIsFPConstrained()) { |
465 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
466 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
467 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 }); |
468 | } else { |
469 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
470 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
471 | } |
472 | } |
473 | |
474 | // Emit an intrinsic that has 3 operands of the same type as its result. |
475 | // Depending on mode, this may be a constrained floating-point intrinsic. |
476 | static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
477 | const CallExpr *E, unsigned IntrinsicID, |
478 | unsigned ConstrainedIntrinsicID) { |
479 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
480 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
481 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
482 | |
483 | if (CGF.Builder.getIsFPConstrained()) { |
484 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
485 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
486 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 }); |
487 | } else { |
488 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
489 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
490 | } |
491 | } |
492 | |
493 | // Emit an intrinsic where all operands are of the same type as the result. |
494 | // Depending on mode, this may be a constrained floating-point intrinsic. |
495 | static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
496 | unsigned IntrinsicID, |
497 | unsigned ConstrainedIntrinsicID, |
498 | llvm::Type *Ty, |
499 | ArrayRef<Value *> Args) { |
500 | Function *F; |
501 | if (CGF.Builder.getIsFPConstrained()) |
502 | F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty); |
503 | else |
504 | F = CGF.CGM.getIntrinsic(IntrinsicID, Ty); |
505 | |
506 | if (CGF.Builder.getIsFPConstrained()) |
507 | return CGF.Builder.CreateConstrainedFPCall(F, Args); |
508 | else |
509 | return CGF.Builder.CreateCall(F, Args); |
510 | } |
511 | |
512 | // Emit a simple mangled intrinsic that has 1 argument and a return type |
513 | // matching the argument type. |
514 | static Value *emitUnaryBuiltin(CodeGenFunction &CGF, |
515 | const CallExpr *E, |
516 | unsigned IntrinsicID) { |
517 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
518 | |
519 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
520 | return CGF.Builder.CreateCall(F, Src0); |
521 | } |
522 | |
523 | // Emit an intrinsic that has 2 operands of the same type as its result. |
524 | static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
525 | const CallExpr *E, |
526 | unsigned IntrinsicID) { |
527 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
528 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
529 | |
530 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
531 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
532 | } |
533 | |
534 | // Emit an intrinsic that has 3 operands of the same type as its result. |
535 | static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
536 | const CallExpr *E, |
537 | unsigned IntrinsicID) { |
538 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
539 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
540 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
541 | |
542 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
543 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
544 | } |
545 | |
546 | // Emit an intrinsic that has 1 float or double operand, and 1 integer. |
547 | static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
548 | const CallExpr *E, |
549 | unsigned IntrinsicID) { |
550 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
551 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
552 | |
553 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
554 | return CGF.Builder.CreateCall(F, {Src0, Src1}); |
555 | } |
556 | |
557 | // Emit an intrinsic that has overloaded integer result and fp operand. |
558 | static Value * |
559 | emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
560 | unsigned IntrinsicID, |
561 | unsigned ConstrainedIntrinsicID) { |
562 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
563 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
564 | |
565 | if (CGF.Builder.getIsFPConstrained()) { |
566 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
567 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, |
568 | {ResultType, Src0->getType()}); |
569 | return CGF.Builder.CreateConstrainedFPCall(F, {Src0}); |
570 | } else { |
571 | Function *F = |
572 | CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()}); |
573 | return CGF.Builder.CreateCall(F, Src0); |
574 | } |
575 | } |
576 | |
577 | /// EmitFAbs - Emit a call to @llvm.fabs(). |
578 | static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
579 | Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
580 | llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
581 | Call->setDoesNotAccessMemory(); |
582 | return Call; |
583 | } |
584 | |
585 | /// Emit the computation of the sign bit for a floating point value. Returns |
586 | /// the i1 sign bit value. |
587 | static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
588 | LLVMContext &C = CGF.CGM.getLLVMContext(); |
589 | |
590 | llvm::Type *Ty = V->getType(); |
591 | int Width = Ty->getPrimitiveSizeInBits(); |
592 | llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
593 | V = CGF.Builder.CreateBitCast(V, IntTy); |
594 | if (Ty->isPPC_FP128Ty()) { |
595 | // We want the sign bit of the higher-order double. The bitcast we just |
596 | // did works as if the double-double was stored to memory and then |
597 | // read as an i128. The "store" will put the higher-order double in the |
598 | // lower address in both little- and big-Endian modes, but the "load" |
599 | // will treat those bits as a different part of the i128: the low bits in |
600 | // little-Endian, the high bits in big-Endian. Therefore, on big-Endian |
601 | // we need to shift the high bits down to the low before truncating. |
602 | Width >>= 1; |
603 | if (CGF.getTarget().isBigEndian()) { |
604 | Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
605 | V = CGF.Builder.CreateLShr(V, ShiftCst); |
606 | } |
607 | // We are truncating value in order to extract the higher-order |
608 | // double, which we will be using to extract the sign from. |
609 | IntTy = llvm::IntegerType::get(C, Width); |
610 | V = CGF.Builder.CreateTrunc(V, IntTy); |
611 | } |
612 | Value *Zero = llvm::Constant::getNullValue(IntTy); |
613 | return CGF.Builder.CreateICmpSLT(V, Zero); |
614 | } |
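Sketch of the common (non-PPC_FP128) case, assuming a double argument:

    int signbit_example(double x) {
      //   %bits = bitcast double %x to i64
      //   %sign = icmp slt i64 %bits, 0    ; the i1 this helper returns
      return __builtin_signbit(x);
    }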
615 | |
616 | static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
617 | const CallExpr *E, llvm::Constant *calleeValue) { |
618 | CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD)); |
619 | return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); |
620 | } |
621 | |
622 | /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
623 | /// depending on IntrinsicID. |
624 | /// |
625 | /// \arg CGF The current codegen function. |
626 | /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
627 | /// \arg X The first argument to the llvm.*.with.overflow.*. |
628 | /// \arg Y The second argument to the llvm.*.with.overflow.*. |
629 | /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
630 | /// \returns The result (i.e. sum/product) returned by the intrinsic. |
631 | static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
632 | const llvm::Intrinsic::ID IntrinsicID, |
633 | llvm::Value *X, llvm::Value *Y, |
634 | llvm::Value *&Carry) { |
635 | // Make sure we have integers of the same width. |
636 | assert(X->getType() == Y->getType() && |
637 | "Arguments must be the same type. (Did you forget to make sure both " |
638 | "arguments have the same integer width?)"); |
639 | |
640 | Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
641 | llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
642 | Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
643 | return CGF.Builder.CreateExtractValue(Tmp, 0); |
644 | } |
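Sketch, assuming 32-bit signed int operands:

    int overflow_example(int x, int y, int *sum) {
      //   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
      //   %sum   = extractvalue { i32, i1 } %pair, 0
      //   %carry = extractvalue { i32, i1 } %pair, 1
      return __builtin_sadd_overflow(x, y, sum);
    }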
645 | |
646 | static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
647 | unsigned IntrinsicID, |
648 | int low, int high) { |
649 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
650 | llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
651 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
652 | llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
653 | Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
654 | return Call; |
655 | } |
656 | |
657 | namespace { |
658 | struct WidthAndSignedness { |
659 | unsigned Width; |
660 | bool Signed; |
661 | }; |
662 | } |
663 | |
664 | static WidthAndSignedness |
665 | getIntegerWidthAndSignedness(const clang::ASTContext &context, |
666 | const clang::QualType Type) { |
667 | assert(Type->isIntegerType() && "Given type is not an integer."); |
668 | unsigned Width = Type->isBooleanType() ? 1 |
669 | : Type->isExtIntType() ? context.getIntWidth(Type) |
670 | : context.getTypeInfo(Type).Width; |
671 | bool Signed = Type->isSignedIntegerType(); |
672 | return {Width, Signed}; |
673 | } |
674 | |
675 | // Given one or more integer types, this function produces an integer type that |
676 | // encompasses them: any value in one of the given types could be expressed in |
677 | // the encompassing type. |
678 | static struct WidthAndSignedness |
679 | EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
680 | assert(Types.size() > 0 && "Empty list of types."); |
681 | |
682 | // If any of the given types is signed, we must return a signed type. |
683 | bool Signed = false; |
684 | for (const auto &Type : Types) { |
685 | Signed |= Type.Signed; |
686 | } |
687 | |
688 | // The encompassing type must have a width greater than or equal to the width |
689 | // of the specified types. Additionally, if the encompassing type is signed, |
690 | // its width must be strictly greater than the width of any unsigned types |
691 | // given. |
692 | unsigned Width = 0; |
693 | for (const auto &Type : Types) { |
694 | unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
695 | if (Width < MinWidth) { |
696 | Width = MinWidth; |
697 | } |
698 | } |
699 | |
700 | return {Width, Signed}; |
701 | } |
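A worked example of the rule above: for {signed 16-bit, unsigned 32-bit} the result must be signed (one input is signed), and the unsigned 32-bit input forces a minimum width of 32 + 1 = 33 bits so that all of its values stay representable, giving {Width = 33, Signed = true}.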
702 | |
703 | Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
704 | llvm::Type *DestType = Int8PtrTy; |
705 | if (ArgValue->getType() != DestType) |
706 | ArgValue = |
707 | Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
708 | |
709 | Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
710 | return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
711 | } |
712 | |
713 | /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
714 | /// __builtin_object_size(p, @p To) is correct |
715 | static bool areBOSTypesCompatible(int From, int To) { |
716 | // Note: Our __builtin_object_size implementation currently treats Type=0 and |
717 | // Type=2 identically. Encoding this implementation detail here may make |
718 | // improving __builtin_object_size difficult in the future, so it's omitted. |
719 | return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
720 | } |
721 | |
722 | static llvm::Value * |
723 | getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
724 | return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); |
725 | } |
726 | |
727 | llvm::Value * |
728 | CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
729 | llvm::IntegerType *ResType, |
730 | llvm::Value *EmittedE, |
731 | bool IsDynamic) { |
732 | uint64_t ObjectSize; |
733 | if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
734 | return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); |
735 | return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); |
736 | } |
737 | |
738 | /// Returns a Value corresponding to the size of the given expression. |
739 | /// This Value may be either of the following: |
740 | /// - A llvm::Argument (if E is a param with the pass_object_size attribute on |
741 | /// it) |
742 | /// - A call to the @llvm.objectsize intrinsic |
743 | /// |
744 | /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null |
745 | /// and we wouldn't otherwise try to reference a pass_object_size parameter, |
746 | /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. |
747 | llvm::Value * |
748 | CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
749 | llvm::IntegerType *ResType, |
750 | llvm::Value *EmittedE, bool IsDynamic) { |
751 | // We need to reference an argument if the pointer is a parameter with the |
752 | // pass_object_size attribute. |
753 | if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
754 | auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
755 | auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
756 | if (Param != nullptr && PS != nullptr && |
757 | areBOSTypesCompatible(PS->getType(), Type)) { |
758 | auto Iter = SizeArguments.find(Param); |
759 | assert(Iter != SizeArguments.end()); |
760 | |
761 | const ImplicitParamDecl *D = Iter->second; |
762 | auto DIter = LocalDeclMap.find(D); |
763 | assert(DIter != LocalDeclMap.end()); |
764 | |
765 | return EmitLoadOfScalar(DIter->second, /*Volatile=*/false, |
766 | getContext().getSizeType(), E->getBeginLoc()); |
767 | } |
768 | } |
769 | |
770 | // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
771 | // evaluate E for side-effects. In either case, we shouldn't lower to |
772 | // @llvm.objectsize. |
773 | if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) |
774 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
775 | |
776 | Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
777 | assert(Ptr->getType()->isPointerTy() && |
778 | "Non-pointer passed to __builtin_object_size?"); |
779 | |
780 | Function *F = |
781 | CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); |
782 | |
783 | // LLVM only supports 0 and 2, make sure that we pass along that as a boolean. |
784 | Value *Min = Builder.getInt1((Type & 2) != 0); |
785 | // For GCC compatibility, __builtin_object_size treats NULL as unknown size. |
786 | Value *NullIsUnknown = Builder.getTrue(); |
787 | Value *Dynamic = Builder.getInt1(IsDynamic); |
788 | return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}); |
789 | } |
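Sketch of the intrinsic call emitted when no constant size can be evaluated, assuming Type == 2 (minimum result, not dynamic):

    unsigned long size_example(void *p) {
      //   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true, i1 false)
      //   flags, in order: Min, NullIsUnknown, Dynamic
      return __builtin_object_size(p, 2);
    }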
790 | |
791 | namespace { |
792 | /// A struct to generically describe a bit test intrinsic. |
793 | struct BitTest { |
794 | enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; |
795 | enum InterlockingKind : uint8_t { |
796 | Unlocked, |
797 | Sequential, |
798 | Acquire, |
799 | Release, |
800 | NoFence |
801 | }; |
802 | |
803 | ActionKind Action; |
804 | InterlockingKind Interlocking; |
805 | bool Is64Bit; |
806 | |
807 | static BitTest decodeBitTestBuiltin(unsigned BuiltinID); |
808 | }; |
809 | } // namespace |
810 | |
811 | BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { |
812 | switch (BuiltinID) { |
813 | // Main portable variants. |
814 | case Builtin::BI_bittest: |
815 | return {TestOnly, Unlocked, false}; |
816 | case Builtin::BI_bittestandcomplement: |
817 | return {Complement, Unlocked, false}; |
818 | case Builtin::BI_bittestandreset: |
819 | return {Reset, Unlocked, false}; |
820 | case Builtin::BI_bittestandset: |
821 | return {Set, Unlocked, false}; |
822 | case Builtin::BI_interlockedbittestandreset: |
823 | return {Reset, Sequential, false}; |
824 | case Builtin::BI_interlockedbittestandset: |
825 | return {Set, Sequential, false}; |
826 | |
827 | // X86-specific 64-bit variants. |
828 | case Builtin::BI_bittest64: |
829 | return {TestOnly, Unlocked, true}; |
830 | case Builtin::BI_bittestandcomplement64: |
831 | return {Complement, Unlocked, true}; |
832 | case Builtin::BI_bittestandreset64: |
833 | return {Reset, Unlocked, true}; |
834 | case Builtin::BI_bittestandset64: |
835 | return {Set, Unlocked, true}; |
836 | case Builtin::BI_interlockedbittestandreset64: |
837 | return {Reset, Sequential, true}; |
838 | case Builtin::BI_interlockedbittestandset64: |
839 | return {Set, Sequential, true}; |
840 | |
841 | // ARM/AArch64-specific ordering variants. |
842 | case Builtin::BI_interlockedbittestandset_acq: |
843 | return {Set, Acquire, false}; |
844 | case Builtin::BI_interlockedbittestandset_rel: |
845 | return {Set, Release, false}; |
846 | case Builtin::BI_interlockedbittestandset_nf: |
847 | return {Set, NoFence, false}; |
848 | case Builtin::BI_interlockedbittestandreset_acq: |
849 | return {Reset, Acquire, false}; |
850 | case Builtin::BI_interlockedbittestandreset_rel: |
851 | return {Reset, Release, false}; |
852 | case Builtin::BI_interlockedbittestandreset_nf: |
853 | return {Reset, NoFence, false}; |
854 | } |
855 | llvm_unreachable("expected only bittest intrinsics")::llvm::llvm_unreachable_internal("expected only bittest intrinsics" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 855); |
856 | } |
857 | |
858 | static char bitActionToX86BTCode(BitTest::ActionKind A) { |
859 | switch (A) { |
860 | case BitTest::TestOnly: return '\0'; |
861 | case BitTest::Complement: return 'c'; |
862 | case BitTest::Reset: return 'r'; |
863 | case BitTest::Set: return 's'; |
864 | } |
865 | llvm_unreachable("invalid action")::llvm::llvm_unreachable_internal("invalid action", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 865); |
866 | } |
867 | |
868 | static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, |
869 | BitTest BT, |
870 | const CallExpr *E, Value *BitBase, |
871 | Value *BitPos) { |
872 | char Action = bitActionToX86BTCode(BT.Action); |
873 | char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; |
874 | |
875 | // Build the assembly. |
876 | SmallString<64> Asm; |
877 | raw_svector_ostream AsmOS(Asm); |
878 | if (BT.Interlocking != BitTest::Unlocked) |
879 | AsmOS << "lock "; |
880 | AsmOS << "bt"; |
881 | if (Action) |
882 | AsmOS << Action; |
883 | AsmOS << SizeSuffix << " $2, ($1)"; |
884 | |
885 | // Build the constraints. FIXME: We should support immediates when possible. |
886 | std::string Constraints = "={@ccc},r,r,~{cc},~{memory}"; |
887 | std::string MachineClobbers = CGF.getTarget().getClobbers(); |
888 | if (!MachineClobbers.empty()) { |
889 | Constraints += ','; |
890 | Constraints += MachineClobbers; |
891 | } |
892 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
893 | CGF.getLLVMContext(), |
894 | CGF.getContext().getTypeSize(E->getArg(1)->getType())); |
895 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
896 | llvm::FunctionType *FTy = |
897 | llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false); |
898 | |
899 | llvm::InlineAsm *IA = |
900 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
901 | return CGF.Builder.CreateCall(IA, {BitBase, BitPos}); |
902 | } |
903 | |
904 | static llvm::AtomicOrdering |
905 | getBitTestAtomicOrdering(BitTest::InterlockingKind I) { |
906 | switch (I) { |
907 | case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; |
908 | case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; |
909 | case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; |
910 | case BitTest::Release: return llvm::AtomicOrdering::Release; |
911 | case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; |
912 | } |
913 | llvm_unreachable("invalid interlocking")::llvm::llvm_unreachable_internal("invalid interlocking", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 913); |
914 | } |
915 | |
916 | /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of |
917 | /// bits and a bit position and read and optionally modify the bit at that |
918 | /// position. The position index can be arbitrarily large, i.e. it can be larger |
919 | /// than 31 or 63, so we need an indexed load in the general case. |
920 | static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, |
921 | unsigned BuiltinID, |
922 | const CallExpr *E) { |
923 | Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); |
924 | Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); |
925 | |
926 | BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); |
927 | |
928 | // X86 has special BT, BTC, BTR, and BTS instructions that handle the array |
929 | // indexing operation internally. Use them if possible. |
930 | if (CGF.getTarget().getTriple().isX86()) |
931 | return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); |
932 | |
933 | // Otherwise, use generic code to load one byte and test the bit. Use all but |
934 | // the bottom three bits as the array index, and the bottom three bits to form |
935 | // a mask. |
936 | // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0; |
937 | Value *ByteIndex = CGF.Builder.CreateAShr( |
938 | BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); |
939 | Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy); |
940 | Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8, |
941 | ByteIndex, "bittest.byteaddr"), |
942 | CharUnits::One()); |
943 | Value *PosLow = |
944 | CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty), |
945 | llvm::ConstantInt::get(CGF.Int8Ty, 0x7)); |
946 | |
947 | // The updating instructions will need a mask. |
948 | Value *Mask = nullptr; |
949 | if (BT.Action != BitTest::TestOnly) { |
950 | Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow, |
951 | "bittest.mask"); |
952 | } |
953 | |
954 | // Check the action and ordering of the interlocked intrinsics. |
955 | llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking); |
956 | |
957 | Value *OldByte = nullptr; |
958 | if (Ordering != llvm::AtomicOrdering::NotAtomic) { |
959 | // Emit a combined atomicrmw load/store operation for the interlocked |
960 | // intrinsics. |
961 | llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; |
962 | if (BT.Action == BitTest::Reset) { |
963 | Mask = CGF.Builder.CreateNot(Mask); |
964 | RMWOp = llvm::AtomicRMWInst::And; |
965 | } |
966 | OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask, |
967 | Ordering); |
968 | } else { |
969 | // Emit a plain load for the non-interlocked intrinsics. |
970 | OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte"); |
971 | Value *NewByte = nullptr; |
972 | switch (BT.Action) { |
973 | case BitTest::TestOnly: |
974 | // Don't store anything. |
975 | break; |
976 | case BitTest::Complement: |
977 | NewByte = CGF.Builder.CreateXor(OldByte, Mask); |
978 | break; |
979 | case BitTest::Reset: |
980 | NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask)); |
981 | break; |
982 | case BitTest::Set: |
983 | NewByte = CGF.Builder.CreateOr(OldByte, Mask); |
984 | break; |
985 | } |
986 | if (NewByte) |
987 | CGF.Builder.CreateStore(NewByte, ByteAddr); |
988 | } |
989 | |
990 | // However we loaded the old byte, either by plain load or atomicrmw, shift |
991 | // the bit into the low position and mask it to 0 or 1. |
992 | Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr"); |
993 | return CGF.Builder.CreateAnd( |
994 | ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"); |
995 | } |
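A sketch in C of the generic "set" path above; the interlocked variants do the read-modify-write with an atomicrmw instead of this load/store pair.

    unsigned char bittestandset_sketch(unsigned char *base, long pos) {
      unsigned char *byte = base + (pos >> 3);              // bittest.byteidx/byteaddr
      unsigned char mask = (unsigned char)(1 << (pos & 7)); // bittest.mask
      unsigned char old = *byte;                            // plain (non-atomic) load
      *byte = (unsigned char)(old | mask);                  // BitTest::Set action
      return (unsigned char)((old >> (pos & 7)) & 1);       // bittest.shr / bittest.res
    }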
996 | |
997 | static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF, |
998 | unsigned BuiltinID, |
999 | const CallExpr *E) { |
1000 | Value *Addr = CGF.EmitScalarExpr(E->getArg(0)); |
1001 | |
1002 | SmallString<64> Asm; |
1003 | raw_svector_ostream AsmOS(Asm); |
1004 | llvm::IntegerType *RetType = CGF.Int32Ty; |
Value stored to 'RetType' during its initialization is never read | |
1005 | |
1006 | switch (BuiltinID) { |
1007 | case clang::PPC::BI__builtin_ppc_ldarx: |
1008 | AsmOS << "ldarx "; |
1009 | RetType = CGF.Int64Ty; |
1010 | break; |
1011 | case clang::PPC::BI__builtin_ppc_lwarx: |
1012 | AsmOS << "lwarx "; |
1013 | RetType = CGF.Int32Ty; |
1014 | break; |
1015 | case clang::PPC::BI__builtin_ppc_lharx: |
1016 | AsmOS << "lharx "; |
1017 | RetType = CGF.Int16Ty; |
1018 | break; |
1019 | case clang::PPC::BI__builtin_ppc_lbarx: |
1020 | AsmOS << "lbarx "; |
1021 | RetType = CGF.Int8Ty; |
1022 | break; |
1023 | default: |
1024 | llvm_unreachable("Expected only PowerPC load reserve intrinsics")::llvm::llvm_unreachable_internal("Expected only PowerPC load reserve intrinsics" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 1024); |
1025 | } |
1026 | |
1027 | AsmOS << "$0, ${1:y}"; |
1028 | |
1029 | std::string Constraints = "=r,*Z,~{memory}"; |
1030 | std::string MachineClobbers = CGF.getTarget().getClobbers(); |
1031 | if (!MachineClobbers.empty()) { |
1032 | Constraints += ','; |
1033 | Constraints += MachineClobbers; |
1034 | } |
1035 | |
1036 | llvm::Type *IntPtrType = RetType->getPointerTo(); |
1037 | llvm::FunctionType *FTy = |
1038 | llvm::FunctionType::get(RetType, {IntPtrType}, false); |
1039 | |
1040 | llvm::InlineAsm *IA = |
1041 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
1042 | return CGF.Builder.CreateCall(IA, {Addr}); |
1043 | } |
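
A hedged usage sketch for one of the builtins this helper lowers, assuming the volatile-pointer signature of __builtin_ppc_lwarx; it only compiles when targeting PowerPC:

    // Emits inline asm of the form "lwarx $0, ${1:y}" with constraints
    // "=r,*Z,~{memory}" (plus any target clobbers), as built above.
    int loadReservedSketch(volatile int *p) {
      return __builtin_ppc_lwarx(p); // load word and set a reservation
    }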
1044 | |
1045 | namespace { |
1046 | enum class MSVCSetJmpKind { |
1047 | _setjmpex, |
1048 | _setjmp3, |
1049 | _setjmp |
1050 | }; |
1051 | } |
1052 | |
1053 | /// MSVC handles setjmp a bit differently on different platforms. On every |
1054 | /// architecture except 32-bit x86, the frame address is passed. On x86, extra |
1055 | /// parameters can be passed as variadic arguments, but we always pass none. |
1056 | static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, |
1057 | const CallExpr *E) { |
1058 | llvm::Value *Arg1 = nullptr; |
1059 | llvm::Type *Arg1Ty = nullptr; |
1060 | StringRef Name; |
1061 | bool IsVarArg = false; |
1062 | if (SJKind == MSVCSetJmpKind::_setjmp3) { |
1063 | Name = "_setjmp3"; |
1064 | Arg1Ty = CGF.Int32Ty; |
1065 | Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0); |
1066 | IsVarArg = true; |
1067 | } else { |
1068 | Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex"; |
1069 | Arg1Ty = CGF.Int8PtrTy; |
1070 | if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) { |
1071 | Arg1 = CGF.Builder.CreateCall( |
1072 | CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy)); |
1073 | } else |
1074 | Arg1 = CGF.Builder.CreateCall( |
1075 | CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy), |
1076 | llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
1077 | } |
1078 | |
1079 | // Mark the call site and declaration with ReturnsTwice. |
1080 | llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; |
1081 | llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( |
1082 | CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1083 | llvm::Attribute::ReturnsTwice); |
1084 | llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction( |
1085 | llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name, |
1086 | ReturnsTwiceAttr, /*Local=*/true); |
1087 | |
1088 | llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( |
1089 | CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); |
1090 | llvm::Value *Args[] = {Buf, Arg1}; |
1091 | llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args); |
1092 | CB->setAttributes(ReturnsTwiceAttr); |
1093 | return RValue::get(CB); |
1094 | } |
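
For orientation, a hedged summary of the runtime calls this helper emits; the declarations below are illustrative, not taken from a header:

    // int _setjmp3(void *buf, int extraArgCount, ...); // x86: called as (buf, 0)
    // int _setjmpex(void *buf, void *frame);           // elsewhere; frame is the
    //                                                   // frame address, or
    //                                                   // llvm.sponentry on AArch64
    // Both the declaration and the call site carry returns_twice.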
1095 | |
1096 | // Many of the MSVC builtins are available on x64, ARM, and AArch64; to avoid
1097 | // repeating code, we handle them here.
1098 | enum class CodeGenFunction::MSVCIntrin { |
1099 | _BitScanForward, |
1100 | _BitScanReverse, |
1101 | _InterlockedAnd, |
1102 | _InterlockedDecrement, |
1103 | _InterlockedExchange, |
1104 | _InterlockedExchangeAdd, |
1105 | _InterlockedExchangeSub, |
1106 | _InterlockedIncrement, |
1107 | _InterlockedOr, |
1108 | _InterlockedXor, |
1109 | _InterlockedExchangeAdd_acq, |
1110 | _InterlockedExchangeAdd_rel, |
1111 | _InterlockedExchangeAdd_nf, |
1112 | _InterlockedExchange_acq, |
1113 | _InterlockedExchange_rel, |
1114 | _InterlockedExchange_nf, |
1115 | _InterlockedCompareExchange_acq, |
1116 | _InterlockedCompareExchange_rel, |
1117 | _InterlockedCompareExchange_nf, |
1118 | _InterlockedCompareExchange128, |
1119 | _InterlockedCompareExchange128_acq, |
1120 | _InterlockedCompareExchange128_rel, |
1121 | _InterlockedCompareExchange128_nf, |
1122 | _InterlockedOr_acq, |
1123 | _InterlockedOr_rel, |
1124 | _InterlockedOr_nf, |
1125 | _InterlockedXor_acq, |
1126 | _InterlockedXor_rel, |
1127 | _InterlockedXor_nf, |
1128 | _InterlockedAnd_acq, |
1129 | _InterlockedAnd_rel, |
1130 | _InterlockedAnd_nf, |
1131 | _InterlockedIncrement_acq, |
1132 | _InterlockedIncrement_rel, |
1133 | _InterlockedIncrement_nf, |
1134 | _InterlockedDecrement_acq, |
1135 | _InterlockedDecrement_rel, |
1136 | _InterlockedDecrement_nf, |
1137 | __fastfail, |
1138 | }; |
1139 | |
1140 | static Optional<CodeGenFunction::MSVCIntrin> |
1141 | translateArmToMsvcIntrin(unsigned BuiltinID) { |
1142 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1143 | switch (BuiltinID) { |
1144 | default: |
1145 | return None; |
1146 | case ARM::BI_BitScanForward: |
1147 | case ARM::BI_BitScanForward64: |
1148 | return MSVCIntrin::_BitScanForward; |
1149 | case ARM::BI_BitScanReverse: |
1150 | case ARM::BI_BitScanReverse64: |
1151 | return MSVCIntrin::_BitScanReverse; |
1152 | case ARM::BI_InterlockedAnd64: |
1153 | return MSVCIntrin::_InterlockedAnd; |
1154 | case ARM::BI_InterlockedExchange64: |
1155 | return MSVCIntrin::_InterlockedExchange; |
1156 | case ARM::BI_InterlockedExchangeAdd64: |
1157 | return MSVCIntrin::_InterlockedExchangeAdd; |
1158 | case ARM::BI_InterlockedExchangeSub64: |
1159 | return MSVCIntrin::_InterlockedExchangeSub; |
1160 | case ARM::BI_InterlockedOr64: |
1161 | return MSVCIntrin::_InterlockedOr; |
1162 | case ARM::BI_InterlockedXor64: |
1163 | return MSVCIntrin::_InterlockedXor; |
1164 | case ARM::BI_InterlockedDecrement64: |
1165 | return MSVCIntrin::_InterlockedDecrement; |
1166 | case ARM::BI_InterlockedIncrement64: |
1167 | return MSVCIntrin::_InterlockedIncrement; |
1168 | case ARM::BI_InterlockedExchangeAdd8_acq: |
1169 | case ARM::BI_InterlockedExchangeAdd16_acq: |
1170 | case ARM::BI_InterlockedExchangeAdd_acq: |
1171 | case ARM::BI_InterlockedExchangeAdd64_acq: |
1172 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1173 | case ARM::BI_InterlockedExchangeAdd8_rel: |
1174 | case ARM::BI_InterlockedExchangeAdd16_rel: |
1175 | case ARM::BI_InterlockedExchangeAdd_rel: |
1176 | case ARM::BI_InterlockedExchangeAdd64_rel: |
1177 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1178 | case ARM::BI_InterlockedExchangeAdd8_nf: |
1179 | case ARM::BI_InterlockedExchangeAdd16_nf: |
1180 | case ARM::BI_InterlockedExchangeAdd_nf: |
1181 | case ARM::BI_InterlockedExchangeAdd64_nf: |
1182 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1183 | case ARM::BI_InterlockedExchange8_acq: |
1184 | case ARM::BI_InterlockedExchange16_acq: |
1185 | case ARM::BI_InterlockedExchange_acq: |
1186 | case ARM::BI_InterlockedExchange64_acq: |
1187 | return MSVCIntrin::_InterlockedExchange_acq; |
1188 | case ARM::BI_InterlockedExchange8_rel: |
1189 | case ARM::BI_InterlockedExchange16_rel: |
1190 | case ARM::BI_InterlockedExchange_rel: |
1191 | case ARM::BI_InterlockedExchange64_rel: |
1192 | return MSVCIntrin::_InterlockedExchange_rel; |
1193 | case ARM::BI_InterlockedExchange8_nf: |
1194 | case ARM::BI_InterlockedExchange16_nf: |
1195 | case ARM::BI_InterlockedExchange_nf: |
1196 | case ARM::BI_InterlockedExchange64_nf: |
1197 | return MSVCIntrin::_InterlockedExchange_nf; |
1198 | case ARM::BI_InterlockedCompareExchange8_acq: |
1199 | case ARM::BI_InterlockedCompareExchange16_acq: |
1200 | case ARM::BI_InterlockedCompareExchange_acq: |
1201 | case ARM::BI_InterlockedCompareExchange64_acq: |
1202 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1203 | case ARM::BI_InterlockedCompareExchange8_rel: |
1204 | case ARM::BI_InterlockedCompareExchange16_rel: |
1205 | case ARM::BI_InterlockedCompareExchange_rel: |
1206 | case ARM::BI_InterlockedCompareExchange64_rel: |
1207 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1208 | case ARM::BI_InterlockedCompareExchange8_nf: |
1209 | case ARM::BI_InterlockedCompareExchange16_nf: |
1210 | case ARM::BI_InterlockedCompareExchange_nf: |
1211 | case ARM::BI_InterlockedCompareExchange64_nf: |
1212 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1213 | case ARM::BI_InterlockedOr8_acq: |
1214 | case ARM::BI_InterlockedOr16_acq: |
1215 | case ARM::BI_InterlockedOr_acq: |
1216 | case ARM::BI_InterlockedOr64_acq: |
1217 | return MSVCIntrin::_InterlockedOr_acq; |
1218 | case ARM::BI_InterlockedOr8_rel: |
1219 | case ARM::BI_InterlockedOr16_rel: |
1220 | case ARM::BI_InterlockedOr_rel: |
1221 | case ARM::BI_InterlockedOr64_rel: |
1222 | return MSVCIntrin::_InterlockedOr_rel; |
1223 | case ARM::BI_InterlockedOr8_nf: |
1224 | case ARM::BI_InterlockedOr16_nf: |
1225 | case ARM::BI_InterlockedOr_nf: |
1226 | case ARM::BI_InterlockedOr64_nf: |
1227 | return MSVCIntrin::_InterlockedOr_nf; |
1228 | case ARM::BI_InterlockedXor8_acq: |
1229 | case ARM::BI_InterlockedXor16_acq: |
1230 | case ARM::BI_InterlockedXor_acq: |
1231 | case ARM::BI_InterlockedXor64_acq: |
1232 | return MSVCIntrin::_InterlockedXor_acq; |
1233 | case ARM::BI_InterlockedXor8_rel: |
1234 | case ARM::BI_InterlockedXor16_rel: |
1235 | case ARM::BI_InterlockedXor_rel: |
1236 | case ARM::BI_InterlockedXor64_rel: |
1237 | return MSVCIntrin::_InterlockedXor_rel; |
1238 | case ARM::BI_InterlockedXor8_nf: |
1239 | case ARM::BI_InterlockedXor16_nf: |
1240 | case ARM::BI_InterlockedXor_nf: |
1241 | case ARM::BI_InterlockedXor64_nf: |
1242 | return MSVCIntrin::_InterlockedXor_nf; |
1243 | case ARM::BI_InterlockedAnd8_acq: |
1244 | case ARM::BI_InterlockedAnd16_acq: |
1245 | case ARM::BI_InterlockedAnd_acq: |
1246 | case ARM::BI_InterlockedAnd64_acq: |
1247 | return MSVCIntrin::_InterlockedAnd_acq; |
1248 | case ARM::BI_InterlockedAnd8_rel: |
1249 | case ARM::BI_InterlockedAnd16_rel: |
1250 | case ARM::BI_InterlockedAnd_rel: |
1251 | case ARM::BI_InterlockedAnd64_rel: |
1252 | return MSVCIntrin::_InterlockedAnd_rel; |
1253 | case ARM::BI_InterlockedAnd8_nf: |
1254 | case ARM::BI_InterlockedAnd16_nf: |
1255 | case ARM::BI_InterlockedAnd_nf: |
1256 | case ARM::BI_InterlockedAnd64_nf: |
1257 | return MSVCIntrin::_InterlockedAnd_nf; |
1258 | case ARM::BI_InterlockedIncrement16_acq: |
1259 | case ARM::BI_InterlockedIncrement_acq: |
1260 | case ARM::BI_InterlockedIncrement64_acq: |
1261 | return MSVCIntrin::_InterlockedIncrement_acq; |
1262 | case ARM::BI_InterlockedIncrement16_rel: |
1263 | case ARM::BI_InterlockedIncrement_rel: |
1264 | case ARM::BI_InterlockedIncrement64_rel: |
1265 | return MSVCIntrin::_InterlockedIncrement_rel; |
1266 | case ARM::BI_InterlockedIncrement16_nf: |
1267 | case ARM::BI_InterlockedIncrement_nf: |
1268 | case ARM::BI_InterlockedIncrement64_nf: |
1269 | return MSVCIntrin::_InterlockedIncrement_nf; |
1270 | case ARM::BI_InterlockedDecrement16_acq: |
1271 | case ARM::BI_InterlockedDecrement_acq: |
1272 | case ARM::BI_InterlockedDecrement64_acq: |
1273 | return MSVCIntrin::_InterlockedDecrement_acq; |
1274 | case ARM::BI_InterlockedDecrement16_rel: |
1275 | case ARM::BI_InterlockedDecrement_rel: |
1276 | case ARM::BI_InterlockedDecrement64_rel: |
1277 | return MSVCIntrin::_InterlockedDecrement_rel; |
1278 | case ARM::BI_InterlockedDecrement16_nf: |
1279 | case ARM::BI_InterlockedDecrement_nf: |
1280 | case ARM::BI_InterlockedDecrement64_nf: |
1281 | return MSVCIntrin::_InterlockedDecrement_nf; |
1282 | } |
1283 | llvm_unreachable("must return from switch");
1284 | } |
1285 | |
1286 | static Optional<CodeGenFunction::MSVCIntrin> |
1287 | translateAarch64ToMsvcIntrin(unsigned BuiltinID) { |
1288 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1289 | switch (BuiltinID) { |
1290 | default: |
1291 | return None; |
1292 | case AArch64::BI_BitScanForward: |
1293 | case AArch64::BI_BitScanForward64: |
1294 | return MSVCIntrin::_BitScanForward; |
1295 | case AArch64::BI_BitScanReverse: |
1296 | case AArch64::BI_BitScanReverse64: |
1297 | return MSVCIntrin::_BitScanReverse; |
1298 | case AArch64::BI_InterlockedAnd64: |
1299 | return MSVCIntrin::_InterlockedAnd; |
1300 | case AArch64::BI_InterlockedExchange64: |
1301 | return MSVCIntrin::_InterlockedExchange; |
1302 | case AArch64::BI_InterlockedExchangeAdd64: |
1303 | return MSVCIntrin::_InterlockedExchangeAdd; |
1304 | case AArch64::BI_InterlockedExchangeSub64: |
1305 | return MSVCIntrin::_InterlockedExchangeSub; |
1306 | case AArch64::BI_InterlockedOr64: |
1307 | return MSVCIntrin::_InterlockedOr; |
1308 | case AArch64::BI_InterlockedXor64: |
1309 | return MSVCIntrin::_InterlockedXor; |
1310 | case AArch64::BI_InterlockedDecrement64: |
1311 | return MSVCIntrin::_InterlockedDecrement; |
1312 | case AArch64::BI_InterlockedIncrement64: |
1313 | return MSVCIntrin::_InterlockedIncrement; |
1314 | case AArch64::BI_InterlockedExchangeAdd8_acq: |
1315 | case AArch64::BI_InterlockedExchangeAdd16_acq: |
1316 | case AArch64::BI_InterlockedExchangeAdd_acq: |
1317 | case AArch64::BI_InterlockedExchangeAdd64_acq: |
1318 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1319 | case AArch64::BI_InterlockedExchangeAdd8_rel: |
1320 | case AArch64::BI_InterlockedExchangeAdd16_rel: |
1321 | case AArch64::BI_InterlockedExchangeAdd_rel: |
1322 | case AArch64::BI_InterlockedExchangeAdd64_rel: |
1323 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1324 | case AArch64::BI_InterlockedExchangeAdd8_nf: |
1325 | case AArch64::BI_InterlockedExchangeAdd16_nf: |
1326 | case AArch64::BI_InterlockedExchangeAdd_nf: |
1327 | case AArch64::BI_InterlockedExchangeAdd64_nf: |
1328 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1329 | case AArch64::BI_InterlockedExchange8_acq: |
1330 | case AArch64::BI_InterlockedExchange16_acq: |
1331 | case AArch64::BI_InterlockedExchange_acq: |
1332 | case AArch64::BI_InterlockedExchange64_acq: |
1333 | return MSVCIntrin::_InterlockedExchange_acq; |
1334 | case AArch64::BI_InterlockedExchange8_rel: |
1335 | case AArch64::BI_InterlockedExchange16_rel: |
1336 | case AArch64::BI_InterlockedExchange_rel: |
1337 | case AArch64::BI_InterlockedExchange64_rel: |
1338 | return MSVCIntrin::_InterlockedExchange_rel; |
1339 | case AArch64::BI_InterlockedExchange8_nf: |
1340 | case AArch64::BI_InterlockedExchange16_nf: |
1341 | case AArch64::BI_InterlockedExchange_nf: |
1342 | case AArch64::BI_InterlockedExchange64_nf: |
1343 | return MSVCIntrin::_InterlockedExchange_nf; |
1344 | case AArch64::BI_InterlockedCompareExchange8_acq: |
1345 | case AArch64::BI_InterlockedCompareExchange16_acq: |
1346 | case AArch64::BI_InterlockedCompareExchange_acq: |
1347 | case AArch64::BI_InterlockedCompareExchange64_acq: |
1348 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1349 | case AArch64::BI_InterlockedCompareExchange8_rel: |
1350 | case AArch64::BI_InterlockedCompareExchange16_rel: |
1351 | case AArch64::BI_InterlockedCompareExchange_rel: |
1352 | case AArch64::BI_InterlockedCompareExchange64_rel: |
1353 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1354 | case AArch64::BI_InterlockedCompareExchange8_nf: |
1355 | case AArch64::BI_InterlockedCompareExchange16_nf: |
1356 | case AArch64::BI_InterlockedCompareExchange_nf: |
1357 | case AArch64::BI_InterlockedCompareExchange64_nf: |
1358 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1359 | case AArch64::BI_InterlockedCompareExchange128: |
1360 | return MSVCIntrin::_InterlockedCompareExchange128; |
1361 | case AArch64::BI_InterlockedCompareExchange128_acq: |
1362 | return MSVCIntrin::_InterlockedCompareExchange128_acq; |
1363 | case AArch64::BI_InterlockedCompareExchange128_nf: |
1364 | return MSVCIntrin::_InterlockedCompareExchange128_nf; |
1365 | case AArch64::BI_InterlockedCompareExchange128_rel: |
1366 | return MSVCIntrin::_InterlockedCompareExchange128_rel; |
1367 | case AArch64::BI_InterlockedOr8_acq: |
1368 | case AArch64::BI_InterlockedOr16_acq: |
1369 | case AArch64::BI_InterlockedOr_acq: |
1370 | case AArch64::BI_InterlockedOr64_acq: |
1371 | return MSVCIntrin::_InterlockedOr_acq; |
1372 | case AArch64::BI_InterlockedOr8_rel: |
1373 | case AArch64::BI_InterlockedOr16_rel: |
1374 | case AArch64::BI_InterlockedOr_rel: |
1375 | case AArch64::BI_InterlockedOr64_rel: |
1376 | return MSVCIntrin::_InterlockedOr_rel; |
1377 | case AArch64::BI_InterlockedOr8_nf: |
1378 | case AArch64::BI_InterlockedOr16_nf: |
1379 | case AArch64::BI_InterlockedOr_nf: |
1380 | case AArch64::BI_InterlockedOr64_nf: |
1381 | return MSVCIntrin::_InterlockedOr_nf; |
1382 | case AArch64::BI_InterlockedXor8_acq: |
1383 | case AArch64::BI_InterlockedXor16_acq: |
1384 | case AArch64::BI_InterlockedXor_acq: |
1385 | case AArch64::BI_InterlockedXor64_acq: |
1386 | return MSVCIntrin::_InterlockedXor_acq; |
1387 | case AArch64::BI_InterlockedXor8_rel: |
1388 | case AArch64::BI_InterlockedXor16_rel: |
1389 | case AArch64::BI_InterlockedXor_rel: |
1390 | case AArch64::BI_InterlockedXor64_rel: |
1391 | return MSVCIntrin::_InterlockedXor_rel; |
1392 | case AArch64::BI_InterlockedXor8_nf: |
1393 | case AArch64::BI_InterlockedXor16_nf: |
1394 | case AArch64::BI_InterlockedXor_nf: |
1395 | case AArch64::BI_InterlockedXor64_nf: |
1396 | return MSVCIntrin::_InterlockedXor_nf; |
1397 | case AArch64::BI_InterlockedAnd8_acq: |
1398 | case AArch64::BI_InterlockedAnd16_acq: |
1399 | case AArch64::BI_InterlockedAnd_acq: |
1400 | case AArch64::BI_InterlockedAnd64_acq: |
1401 | return MSVCIntrin::_InterlockedAnd_acq; |
1402 | case AArch64::BI_InterlockedAnd8_rel: |
1403 | case AArch64::BI_InterlockedAnd16_rel: |
1404 | case AArch64::BI_InterlockedAnd_rel: |
1405 | case AArch64::BI_InterlockedAnd64_rel: |
1406 | return MSVCIntrin::_InterlockedAnd_rel; |
1407 | case AArch64::BI_InterlockedAnd8_nf: |
1408 | case AArch64::BI_InterlockedAnd16_nf: |
1409 | case AArch64::BI_InterlockedAnd_nf: |
1410 | case AArch64::BI_InterlockedAnd64_nf: |
1411 | return MSVCIntrin::_InterlockedAnd_nf; |
1412 | case AArch64::BI_InterlockedIncrement16_acq: |
1413 | case AArch64::BI_InterlockedIncrement_acq: |
1414 | case AArch64::BI_InterlockedIncrement64_acq: |
1415 | return MSVCIntrin::_InterlockedIncrement_acq; |
1416 | case AArch64::BI_InterlockedIncrement16_rel: |
1417 | case AArch64::BI_InterlockedIncrement_rel: |
1418 | case AArch64::BI_InterlockedIncrement64_rel: |
1419 | return MSVCIntrin::_InterlockedIncrement_rel; |
1420 | case AArch64::BI_InterlockedIncrement16_nf: |
1421 | case AArch64::BI_InterlockedIncrement_nf: |
1422 | case AArch64::BI_InterlockedIncrement64_nf: |
1423 | return MSVCIntrin::_InterlockedIncrement_nf; |
1424 | case AArch64::BI_InterlockedDecrement16_acq: |
1425 | case AArch64::BI_InterlockedDecrement_acq: |
1426 | case AArch64::BI_InterlockedDecrement64_acq: |
1427 | return MSVCIntrin::_InterlockedDecrement_acq; |
1428 | case AArch64::BI_InterlockedDecrement16_rel: |
1429 | case AArch64::BI_InterlockedDecrement_rel: |
1430 | case AArch64::BI_InterlockedDecrement64_rel: |
1431 | return MSVCIntrin::_InterlockedDecrement_rel; |
1432 | case AArch64::BI_InterlockedDecrement16_nf: |
1433 | case AArch64::BI_InterlockedDecrement_nf: |
1434 | case AArch64::BI_InterlockedDecrement64_nf: |
1435 | return MSVCIntrin::_InterlockedDecrement_nf; |
1436 | } |
1437 | llvm_unreachable("must return from switch");
1438 | } |
1439 | |
1440 | static Optional<CodeGenFunction::MSVCIntrin> |
1441 | translateX86ToMsvcIntrin(unsigned BuiltinID) { |
1442 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1443 | switch (BuiltinID) { |
1444 | default: |
1445 | return None; |
1446 | case clang::X86::BI_BitScanForward: |
1447 | case clang::X86::BI_BitScanForward64: |
1448 | return MSVCIntrin::_BitScanForward; |
1449 | case clang::X86::BI_BitScanReverse: |
1450 | case clang::X86::BI_BitScanReverse64: |
1451 | return MSVCIntrin::_BitScanReverse; |
1452 | case clang::X86::BI_InterlockedAnd64: |
1453 | return MSVCIntrin::_InterlockedAnd; |
1454 | case clang::X86::BI_InterlockedCompareExchange128: |
1455 | return MSVCIntrin::_InterlockedCompareExchange128; |
1456 | case clang::X86::BI_InterlockedExchange64: |
1457 | return MSVCIntrin::_InterlockedExchange; |
1458 | case clang::X86::BI_InterlockedExchangeAdd64: |
1459 | return MSVCIntrin::_InterlockedExchangeAdd; |
1460 | case clang::X86::BI_InterlockedExchangeSub64: |
1461 | return MSVCIntrin::_InterlockedExchangeSub; |
1462 | case clang::X86::BI_InterlockedOr64: |
1463 | return MSVCIntrin::_InterlockedOr; |
1464 | case clang::X86::BI_InterlockedXor64: |
1465 | return MSVCIntrin::_InterlockedXor; |
1466 | case clang::X86::BI_InterlockedDecrement64: |
1467 | return MSVCIntrin::_InterlockedDecrement; |
1468 | case clang::X86::BI_InterlockedIncrement64: |
1469 | return MSVCIntrin::_InterlockedIncrement; |
1470 | } |
1471 | llvm_unreachable("must return from switch");
1472 | } |
1473 | |
1474 | // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated. |
1475 | Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
1476 | const CallExpr *E) { |
1477 | switch (BuiltinID) { |
1478 | case MSVCIntrin::_BitScanForward: |
1479 | case MSVCIntrin::_BitScanReverse: { |
1480 | Address IndexAddress(EmitPointerWithAlignment(E->getArg(0))); |
1481 | Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
1482 | |
1483 | llvm::Type *ArgType = ArgValue->getType(); |
1484 | llvm::Type *IndexType = |
1485 | IndexAddress.getPointer()->getType()->getPointerElementType(); |
1486 | llvm::Type *ResultType = ConvertType(E->getType()); |
1487 | |
1488 | Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
1489 | Value *ResZero = llvm::Constant::getNullValue(ResultType); |
1490 | Value *ResOne = llvm::ConstantInt::get(ResultType, 1); |
1491 | |
1492 | BasicBlock *Begin = Builder.GetInsertBlock(); |
1493 | BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); |
1494 | Builder.SetInsertPoint(End); |
1495 | PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); |
1496 | |
1497 | Builder.SetInsertPoint(Begin); |
1498 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); |
1499 | BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); |
1500 | Builder.CreateCondBr(IsZero, End, NotZero); |
1501 | Result->addIncoming(ResZero, Begin); |
1502 | |
1503 | Builder.SetInsertPoint(NotZero); |
1504 | |
1505 | if (BuiltinID == MSVCIntrin::_BitScanForward) { |
1506 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
1507 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
1508 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
1509 | Builder.CreateStore(ZeroCount, IndexAddress, false); |
1510 | } else { |
1511 | unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
1512 | Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); |
1513 | |
1514 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
1515 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
1516 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
1517 | Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); |
1518 | Builder.CreateStore(Index, IndexAddress, false); |
1519 | } |
1520 | Builder.CreateBr(End); |
1521 | Result->addIncoming(ResOne, NotZero); |
1522 | |
1523 | Builder.SetInsertPoint(End); |
1524 | return Result; |
1525 | } |
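
A hedged reference model of this control flow in plain C++ (hypothetical names); _BitScanReverse differs only in storing ArgWidth - 1 - clz instead of ctz:

    unsigned char bitScanForwardSketch(unsigned long *Index, unsigned long Mask) {
      if (Mask == 0)
        return 0;                    // bitscan_end reached from Begin: ResZero
      *Index = (unsigned long)__builtin_ctzl(Mask); // cttz; zero ruled out above
      return 1;                      // bitscan_not_zero path: ResOne
    }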
1526 | case MSVCIntrin::_InterlockedAnd: |
1527 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); |
1528 | case MSVCIntrin::_InterlockedExchange: |
1529 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); |
1530 | case MSVCIntrin::_InterlockedExchangeAdd: |
1531 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); |
1532 | case MSVCIntrin::_InterlockedExchangeSub: |
1533 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); |
1534 | case MSVCIntrin::_InterlockedOr: |
1535 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); |
1536 | case MSVCIntrin::_InterlockedXor: |
1537 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); |
1538 | case MSVCIntrin::_InterlockedExchangeAdd_acq: |
1539 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1540 | AtomicOrdering::Acquire); |
1541 | case MSVCIntrin::_InterlockedExchangeAdd_rel: |
1542 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1543 | AtomicOrdering::Release); |
1544 | case MSVCIntrin::_InterlockedExchangeAdd_nf: |
1545 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1546 | AtomicOrdering::Monotonic); |
1547 | case MSVCIntrin::_InterlockedExchange_acq: |
1548 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1549 | AtomicOrdering::Acquire); |
1550 | case MSVCIntrin::_InterlockedExchange_rel: |
1551 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1552 | AtomicOrdering::Release); |
1553 | case MSVCIntrin::_InterlockedExchange_nf: |
1554 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1555 | AtomicOrdering::Monotonic); |
1556 | case MSVCIntrin::_InterlockedCompareExchange_acq: |
1557 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire); |
1558 | case MSVCIntrin::_InterlockedCompareExchange_rel: |
1559 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release); |
1560 | case MSVCIntrin::_InterlockedCompareExchange_nf: |
1561 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
1562 | case MSVCIntrin::_InterlockedCompareExchange128: |
1563 | return EmitAtomicCmpXchg128ForMSIntrin( |
1564 | *this, E, AtomicOrdering::SequentiallyConsistent); |
1565 | case MSVCIntrin::_InterlockedCompareExchange128_acq: |
1566 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire); |
1567 | case MSVCIntrin::_InterlockedCompareExchange128_rel: |
1568 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release); |
1569 | case MSVCIntrin::_InterlockedCompareExchange128_nf: |
1570 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
1571 | case MSVCIntrin::_InterlockedOr_acq: |
1572 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1573 | AtomicOrdering::Acquire); |
1574 | case MSVCIntrin::_InterlockedOr_rel: |
1575 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1576 | AtomicOrdering::Release); |
1577 | case MSVCIntrin::_InterlockedOr_nf: |
1578 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1579 | AtomicOrdering::Monotonic); |
1580 | case MSVCIntrin::_InterlockedXor_acq: |
1581 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1582 | AtomicOrdering::Acquire); |
1583 | case MSVCIntrin::_InterlockedXor_rel: |
1584 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1585 | AtomicOrdering::Release); |
1586 | case MSVCIntrin::_InterlockedXor_nf: |
1587 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1588 | AtomicOrdering::Monotonic); |
1589 | case MSVCIntrin::_InterlockedAnd_acq: |
1590 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1591 | AtomicOrdering::Acquire); |
1592 | case MSVCIntrin::_InterlockedAnd_rel: |
1593 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1594 | AtomicOrdering::Release); |
1595 | case MSVCIntrin::_InterlockedAnd_nf: |
1596 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1597 | AtomicOrdering::Monotonic); |
1598 | case MSVCIntrin::_InterlockedIncrement_acq: |
1599 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire); |
1600 | case MSVCIntrin::_InterlockedIncrement_rel: |
1601 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release); |
1602 | case MSVCIntrin::_InterlockedIncrement_nf: |
1603 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic); |
1604 | case MSVCIntrin::_InterlockedDecrement_acq: |
1605 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire); |
1606 | case MSVCIntrin::_InterlockedDecrement_rel: |
1607 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release); |
1608 | case MSVCIntrin::_InterlockedDecrement_nf: |
1609 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic); |
1610 | |
1611 | case MSVCIntrin::_InterlockedDecrement: |
1612 | return EmitAtomicDecrementValue(*this, E); |
1613 | case MSVCIntrin::_InterlockedIncrement: |
1614 | return EmitAtomicIncrementValue(*this, E); |
1615 | |
1616 | case MSVCIntrin::__fastfail: { |
1617 | // Request immediate process termination from the kernel. The instruction |
1618 | // sequences to do this are documented on MSDN: |
1619 | // https://msdn.microsoft.com/en-us/library/dn774154.aspx |
1620 | llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
1621 | StringRef Asm, Constraints; |
1622 | switch (ISA) { |
1623 | default: |
1624 | ErrorUnsupported(E, "__fastfail call for this architecture"); |
1625 | break; |
1626 | case llvm::Triple::x86: |
1627 | case llvm::Triple::x86_64: |
1628 | Asm = "int $$0x29"; |
1629 | Constraints = "{cx}"; |
1630 | break; |
1631 | case llvm::Triple::thumb: |
1632 | Asm = "udf #251"; |
1633 | Constraints = "{r0}"; |
1634 | break; |
1635 | case llvm::Triple::aarch64: |
1636 | Asm = "brk #0xF003"; |
1637 | Constraints = "{w0}"; |
1638 | } |
1639 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); |
1640 | llvm::InlineAsm *IA = |
1641 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true); |
1642 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
1643 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1644 | llvm::Attribute::NoReturn); |
1645 | llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); |
1646 | CI->setAttributes(NoReturnAttr); |
1647 | return CI; |
1648 | } |
1649 | } |
1650 | llvm_unreachable("Incorrect MSVC intrinsic!");
1651 | } |
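
For reference, a hedged expansion of the __fastfail table above:

    // x86 / x86-64: fail code in (e|r)cx, then "int $0x29"
    // thumb:        fail code in r0,      then "udf #251"
    // aarch64:      fail code in w0,      then "brk #0xF003"
    // The call is marked noreturn, so nothing is emitted after it.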
1652 | |
1653 | namespace { |
1654 | // ARC cleanup for __builtin_os_log_format |
1655 | struct CallObjCArcUse final : EHScopeStack::Cleanup { |
1656 | CallObjCArcUse(llvm::Value *object) : object(object) {} |
1657 | llvm::Value *object; |
1658 | |
1659 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1660 | CGF.EmitARCIntrinsicUse(object); |
1661 | } |
1662 | }; |
1663 | } |
1664 | |
1665 | Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
1666 | BuiltinCheckKind Kind) { |
1667 | assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1668 | && "Unsupported builtin check kind");
1669 | |
1670 | Value *ArgValue = EmitScalarExpr(E); |
1671 | if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) |
1672 | return ArgValue; |
1673 | |
1674 | SanitizerScope SanScope(this); |
1675 | Value *Cond = Builder.CreateICmpNE( |
1676 | ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); |
1677 | EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), |
1678 | SanitizerHandler::InvalidBuiltin, |
1679 | {EmitCheckSourceLocation(E->getExprLoc()), |
1680 | llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, |
1681 | None); |
1682 | return ArgValue; |
1683 | } |
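
A hedged example of what this guard catches when building with -fsanitize=builtin on a target where __builtin_clz(0) is undefined:

    int countLeadingSketch(unsigned v) {
      // v == 0 fails the icmp ne emitted above and reaches the
      // SanitizerHandler::InvalidBuiltin handler instead of returning.
      return __builtin_clz(v);
    }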
1684 | |
1685 | /// Get the argument type for arguments to os_log_helper. |
1686 | static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
1687 | QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false); |
1688 | return C.getCanonicalType(UnsignedTy); |
1689 | } |
1690 | |
1691 | llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
1692 | const analyze_os_log::OSLogBufferLayout &Layout, |
1693 | CharUnits BufferAlignment) { |
1694 | ASTContext &Ctx = getContext(); |
1695 | |
1696 | llvm::SmallString<64> Name; |
1697 | { |
1698 | raw_svector_ostream OS(Name); |
1699 | OS << "__os_log_helper"; |
1700 | OS << "_" << BufferAlignment.getQuantity(); |
1701 | OS << "_" << int(Layout.getSummaryByte()); |
1702 | OS << "_" << int(Layout.getNumArgsByte()); |
1703 | for (const auto &Item : Layout.Items) |
1704 | OS << "_" << int(Item.getSizeByte()) << "_" |
1705 | << int(Item.getDescriptorByte()); |
1706 | } |
1707 | |
1708 | if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
1709 | return F; |
1710 | |
1711 | llvm::SmallVector<QualType, 4> ArgTys; |
1712 | FunctionArgList Args; |
1713 | Args.push_back(ImplicitParamDecl::Create( |
1714 | Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy, |
1715 | ImplicitParamDecl::Other)); |
1716 | ArgTys.emplace_back(Ctx.VoidPtrTy); |
1717 | |
1718 | for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
1719 | char Size = Layout.Items[I].getSizeByte(); |
1720 | if (!Size) |
1721 | continue; |
1722 | |
1723 | QualType ArgTy = getOSLogArgType(Ctx, Size); |
1724 | Args.push_back(ImplicitParamDecl::Create( |
1725 | Ctx, nullptr, SourceLocation(), |
1726 | &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy, |
1727 | ImplicitParamDecl::Other)); |
1728 | ArgTys.emplace_back(ArgTy); |
1729 | } |
1730 | |
1731 | QualType ReturnTy = Ctx.VoidTy; |
1732 | |
1733 | // The helper function has linkonce_odr linkage to enable the linker to merge |
1734 | // identical functions. To ensure the merging always happens, 'noinline' is |
1735 | // attached to the function when compiling with -Oz. |
1736 | const CGFunctionInfo &FI = |
1737 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args); |
1738 | llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); |
1739 | llvm::Function *Fn = llvm::Function::Create( |
1740 | FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); |
1741 | Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
1742 | CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false); |
1743 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); |
1744 | Fn->setDoesNotThrow(); |
1745 | |
1746 | // Attach 'noinline' at -Oz. |
1747 | if (CGM.getCodeGenOpts().OptimizeSize == 2) |
1748 | Fn->addFnAttr(llvm::Attribute::NoInline); |
1749 | |
1750 | auto NL = ApplyDebugLocation::CreateEmpty(*this); |
1751 | StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args); |
1752 | |
1753 | // Create a scope with an artificial location for the body of this function. |
1754 | auto AL = ApplyDebugLocation::CreateArtificial(*this); |
1755 | |
1756 | CharUnits Offset; |
1757 | Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), |
1758 | BufferAlignment); |
1759 | Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), |
1760 | Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); |
1761 | Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), |
1762 | Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); |
1763 | |
1764 | unsigned I = 1; |
1765 | for (const auto &Item : Layout.Items) { |
1766 | Builder.CreateStore( |
1767 | Builder.getInt8(Item.getDescriptorByte()), |
1768 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); |
1769 | Builder.CreateStore( |
1770 | Builder.getInt8(Item.getSizeByte()), |
1771 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); |
1772 | |
1773 | CharUnits Size = Item.size(); |
1774 | if (!Size.getQuantity()) |
1775 | continue; |
1776 | |
1777 | Address Arg = GetAddrOfLocalVar(Args[I]); |
1778 | Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); |
1779 | Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(), |
1780 | "argDataCast"); |
1781 | Builder.CreateStore(Builder.CreateLoad(Arg), Addr); |
1782 | Offset += Size; |
1783 | ++I; |
1784 | } |
1785 | |
1786 | FinishFunction(); |
1787 | |
1788 | return Fn; |
1789 | } |
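
For orientation, the byte layout the helper writes, reconstructed from the CreateConstByteGEP sequence above (field names are informal):

    // buf[0]      summary byte                    ("summary" store)
    // buf[1]      number of arguments             ("numArgs" store)
    // per item:   descriptor byte, size byte, then <size> bytes of argument
    //             data ("argDescriptor", "argSize", "argData" stores)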
1790 | |
1791 | RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
1792 | assert(E.getNumArgs() >= 2 &&
1793 | "__builtin_os_log_format takes at least 2 arguments");
1794 | ASTContext &Ctx = getContext(); |
1795 | analyze_os_log::OSLogBufferLayout Layout; |
1796 | analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); |
1797 | Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); |
1798 | llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
1799 | |
1800 | // Ignore argument 1, the format string. It is not currently used. |
1801 | CallArgList Args; |
1802 | Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); |
1803 | |
1804 | for (const auto &Item : Layout.Items) { |
1805 | int Size = Item.getSizeByte(); |
1806 | if (!Size) |
1807 | continue; |
1808 | |
1809 | llvm::Value *ArgVal; |
1810 | |
1811 | if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) { |
1812 | uint64_t Val = 0; |
1813 | for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I) |
1814 | Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8; |
1815 | ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val)); |
1816 | } else if (const Expr *TheExpr = Item.getExpr()) { |
1817 | ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false); |
1818 | |
1819 | // If a temporary object that requires destruction after the full |
1820 | // expression is passed, push a lifetime-extended cleanup to extend its |
1821 | // lifetime to the end of the enclosing block scope. |
1822 | auto LifetimeExtendObject = [&](const Expr *E) { |
1823 | E = E->IgnoreParenCasts(); |
1824 | // Extend lifetimes of objects returned by function calls and message |
1825 | // sends. |
1826 | |
1827 | // FIXME: We should do this in other cases in which temporaries are |
1828 | // created including arguments of non-ARC types (e.g., C++ |
1829 | // temporaries). |
1830 | if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E)) |
1831 | return true; |
1832 | return false; |
1833 | }; |
1834 | |
1835 | if (TheExpr->getType()->isObjCRetainableType() && |
1836 | getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) { |
1837 | assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1838 | "Only scalar can be an ObjC retainable type");
1839 | if (!isa<Constant>(ArgVal)) { |
1840 | CleanupKind Cleanup = getARCCleanupKind(); |
1841 | QualType Ty = TheExpr->getType(); |
1842 | Address Alloca = Address::invalid(); |
1843 | Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca); |
1844 | ArgVal = EmitARCRetain(Ty, ArgVal); |
1845 | Builder.CreateStore(ArgVal, Addr); |
1846 | pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty, |
1847 | CodeGenFunction::destroyARCStrongPrecise, |
1848 | Cleanup & EHCleanup); |
1849 | |
1850 | // Push a clang.arc.use call to ensure ARC optimizer knows that the |
1851 | // argument has to be alive. |
1852 | if (CGM.getCodeGenOpts().OptimizationLevel != 0) |
1853 | pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal); |
1854 | } |
1855 | } |
1856 | } else { |
1857 | ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); |
1858 | } |
1859 | |
1860 | unsigned ArgValSize = |
1861 | CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); |
1862 | llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), |
1863 | ArgValSize); |
1864 | ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); |
1865 | CanQualType ArgTy = getOSLogArgType(Ctx, Size); |
1866 | // If ArgVal has type x86_fp80, zero-extend ArgVal. |
1867 | ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); |
1868 | Args.add(RValue::get(ArgVal), ArgTy); |
1869 | } |
1870 | |
1871 | const CGFunctionInfo &FI = |
1872 | CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); |
1873 | llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
1874 | Layout, BufAddr.getAlignment()); |
1875 | EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); |
1876 | return RValue::get(BufAddr.getPointer()); |
1877 | } |
1878 | |
1879 | static bool isSpecialUnsignedMultiplySignedResult( |
1880 | unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, |
1881 | WidthAndSignedness ResultInfo) { |
1882 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1883 | Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width && |
1884 | !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed; |
1885 | } |
1886 | |
1887 | static RValue EmitCheckedUnsignedMultiplySignedResult( |
1888 | CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, |
1889 | const clang::Expr *Op2, WidthAndSignedness Op2Info, |
1890 | const clang::Expr *ResultArg, QualType ResultQTy, |
1891 | WidthAndSignedness ResultInfo) { |
1892 | assert(isSpecialUnsignedMultiplySignedResult(
1893 | Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
1894 | "Cannot specialize this multiply");
1895 | |
1896 | llvm::Value *V1 = CGF.EmitScalarExpr(Op1); |
1897 | llvm::Value *V2 = CGF.EmitScalarExpr(Op2); |
1898 | |
1899 | llvm::Value *HasOverflow; |
1900 | llvm::Value *Result = EmitOverflowIntrinsic( |
1901 | CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow); |
1902 | |
1903 | // The intrinsic call will detect overflow when the value is > UINT_MAX;
1904 | // however, since the original builtin had a signed result, we need to report
1905 | // an overflow when the result is greater than INT_MAX.
1906 | auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width); |
1907 | llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax); |
1908 | |
1909 | llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue); |
1910 | HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow); |
1911 | |
1912 | bool isVolatile = |
1913 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
1914 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1915 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
1916 | isVolatile); |
1917 | return RValue::get(HasOverflow); |
1918 | } |
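
A worked example of the extra signed-result check, using hypothetical 32-bit values:

    bool demoUnsignedMulSignedResult() {
      int result;
      // 0x10000u * 0x8000u == 0x80000000: umul.with.overflow reports no
      // carry, but the product exceeds INT_MAX, so the IntMaxOverflow
      // comparison above still reports overflow.
      return __builtin_mul_overflow(0x10000u, 0x8000u, &result); // true
    }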
1919 | |
1920 | /// Determine if a binop is a checked mixed-sign multiply we can specialize. |
1921 | static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
1922 | WidthAndSignedness Op1Info, |
1923 | WidthAndSignedness Op2Info, |
1924 | WidthAndSignedness ResultInfo) { |
1925 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1926 | std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width && |
1927 | Op1Info.Signed != Op2Info.Signed; |
1928 | } |
1929 | |
1930 | /// Emit a checked mixed-sign multiply. This is a cheaper specialization of |
1931 | /// the generic checked-binop irgen. |
1932 | static RValue |
1933 | EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
1934 | WidthAndSignedness Op1Info, const clang::Expr *Op2, |
1935 | WidthAndSignedness Op2Info, |
1936 | const clang::Expr *ResultArg, QualType ResultQTy, |
1937 | WidthAndSignedness ResultInfo) { |
1938 | assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1939 | Op2Info, ResultInfo) &&
1940 | "Not a mixed-sign multiplication we can specialize");
1941 | |
1942 | // Emit the signed and unsigned operands. |
1943 | const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
1944 | const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
1945 | llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); |
1946 | llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); |
1947 | unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width; |
1948 | unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width; |
1949 | |
1950 | // One of the operands may be smaller than the other. If so, [s|z]ext it. |
1951 | if (SignedOpWidth < UnsignedOpWidth) |
1952 | Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext"); |
1953 | if (UnsignedOpWidth < SignedOpWidth) |
1954 | Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext"); |
1955 | |
1956 | llvm::Type *OpTy = Signed->getType(); |
1957 | llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); |
1958 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1959 | llvm::Type *ResTy = ResultPtr.getElementType(); |
1960 | unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width); |
1961 | |
1962 | // Take the absolute value of the signed operand. |
1963 | llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); |
1964 | llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); |
1965 | llvm::Value *AbsSigned = |
1966 | CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); |
1967 | |
1968 | // Perform a checked unsigned multiplication. |
1969 | llvm::Value *UnsignedOverflow; |
1970 | llvm::Value *UnsignedResult = |
1971 | EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, |
1972 | Unsigned, UnsignedOverflow); |
1973 | |
1974 | llvm::Value *Overflow, *Result; |
1975 | if (ResultInfo.Signed) { |
1976 | // Signed overflow occurs if the result is greater than INT_MAX or less
1977 | // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1978 | auto IntMax = |
1979 | llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth); |
1980 | llvm::Value *MaxResult = |
1981 | CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), |
1982 | CGF.Builder.CreateZExt(IsNegative, OpTy)); |
1983 | llvm::Value *SignedOverflow = |
1984 | CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); |
1985 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); |
1986 | |
1987 | // Prepare the signed result (possibly by negating it). |
1988 | llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); |
1989 | llvm::Value *SignedResult = |
1990 | CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); |
1991 | Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); |
1992 | } else { |
1993 | // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. |
1994 | llvm::Value *Underflow = CGF.Builder.CreateAnd( |
1995 | IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); |
1996 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); |
1997 | if (ResultInfo.Width < OpWidth) { |
1998 | auto IntMax = |
1999 | llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); |
2000 | llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
2001 | UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); |
2002 | Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); |
2003 | } |
2004 | |
2005 | // Negate the product if it would be negative in infinite precision. |
2006 | Result = CGF.Builder.CreateSelect( |
2007 | IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); |
2008 | |
2009 | Result = CGF.Builder.CreateTrunc(Result, ResTy); |
2010 | } |
2011 | assert(Overflow && Result && "Missing overflow or result");
2012 | |
2013 | bool isVolatile = |
2014 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
2015 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
2016 | isVolatile); |
2017 | return RValue::get(Overflow); |
2018 | } |
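
A worked example of the abs/negate scheme, with hypothetical operands:

    bool demoMixedSignMul() {
      int result;
      // -3 * 5u: AbsSigned == 3 and the unsigned product is 15 with no carry.
      // IsNegative is true, so MaxResult == INT_MAX + 1 and 15 fits; the final
      // select negates the product back, leaving result == -15.
      return __builtin_mul_overflow(-3, 5u, &result); // false
    }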
2019 | |
2020 | static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType, |
2021 | Value *&RecordPtr, CharUnits Align, |
2022 | llvm::FunctionCallee Func, int Lvl) { |
2023 | ASTContext &Context = CGF.getContext(); |
2024 | RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition(); |
2025 | std::string Pad = std::string(Lvl * 4, ' '); |
2026 | |
2027 | Value *GString = |
2028 | CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n"); |
2029 | Value *Res = CGF.Builder.CreateCall(Func, {GString}); |
2030 | |
2031 | static llvm::DenseMap<QualType, const char *> Types; |
2032 | if (Types.empty()) { |
2033 | Types[Context.CharTy] = "%c"; |
2034 | Types[Context.BoolTy] = "%d"; |
2035 | Types[Context.SignedCharTy] = "%hhd"; |
2036 | Types[Context.UnsignedCharTy] = "%hhu"; |
2037 | Types[Context.IntTy] = "%d"; |
2038 | Types[Context.UnsignedIntTy] = "%u"; |
2039 | Types[Context.LongTy] = "%ld"; |
2040 | Types[Context.UnsignedLongTy] = "%lu"; |
2041 | Types[Context.LongLongTy] = "%lld"; |
2042 | Types[Context.UnsignedLongLongTy] = "%llu"; |
2043 | Types[Context.ShortTy] = "%hd"; |
2044 | Types[Context.UnsignedShortTy] = "%hu"; |
2045 | Types[Context.VoidPtrTy] = "%p"; |
2046 | Types[Context.FloatTy] = "%f"; |
2047 | Types[Context.DoubleTy] = "%f"; |
2048 | Types[Context.LongDoubleTy] = "%Lf"; |
2049 | Types[Context.getPointerType(Context.CharTy)] = "%s"; |
2050 | Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s"; |
2051 | } |
2052 | |
2053 | for (const auto *FD : RD->fields()) { |
2054 | Value *FieldPtr = RecordPtr; |
2055 | if (RD->isUnion()) |
2056 | FieldPtr = CGF.Builder.CreatePointerCast( |
2057 | FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType()))); |
2058 | else |
2059 | FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr, |
2060 | FD->getFieldIndex()); |
2061 | |
2062 | GString = CGF.Builder.CreateGlobalStringPtr( |
2063 | llvm::Twine(Pad) |
2064 | .concat(FD->getType().getAsString()) |
2065 | .concat(llvm::Twine(' ')) |
2066 | .concat(FD->getNameAsString()) |
2067 | .concat(" : ") |
2068 | .str()); |
2069 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
2070 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2071 | |
2072 | QualType CanonicalType = |
2073 | FD->getType().getUnqualifiedType().getCanonicalType(); |
2074 | |
2075 | // Check whether the field is itself a (possibly recursive) record type.
2076 | if (CanonicalType->isRecordType()) { |
2077 | TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1); |
2078 | Res = CGF.Builder.CreateAdd(TmpRes, Res); |
2079 | continue; |
2080 | } |
2081 | |
2082 | // Try to determine the best printf format for the current field.
2083 | llvm::Twine Format = Types.find(CanonicalType) == Types.end() |
2084 | ? Types[Context.VoidPtrTy] |
2085 | : Types[CanonicalType]; |
2086 | |
2087 | Address FieldAddress = Address(FieldPtr, Align); |
2088 | FieldPtr = CGF.Builder.CreateLoad(FieldAddress); |
2089 | |
2090 | // FIXME: Need to handle bitfields here.
2091 | GString = CGF.Builder.CreateGlobalStringPtr( |
2092 | Format.concat(llvm::Twine('\n')).str()); |
2093 | TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr}); |
2094 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2095 | } |
2096 | |
2097 | GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n"); |
2098 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
2099 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2100 | return Res; |
2101 | } |
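
A hedged usage example of the builtin this helper implements, __builtin_dump_struct; the exact output format follows the string construction above:

    #include <cstdio>
    struct Point { int x; int y; };
    int demoDumpStruct(Point *p) {
      // Prints the record type and "{", one "type name : value" line per
      // field (recursing into nested records with 4-space padding), then "}".
      return __builtin_dump_struct(p, &printf); // sum of printf return values
    }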
2102 | |
2103 | static bool |
2104 | TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, |
2105 | llvm::SmallPtrSetImpl<const Decl *> &Seen) { |
2106 | if (const auto *Arr = Ctx.getAsArrayType(Ty)) |
2107 | Ty = Ctx.getBaseElementType(Arr); |
2108 | |
2109 | const auto *Record = Ty->getAsCXXRecordDecl(); |
2110 | if (!Record) |
2111 | return false; |
2112 | |
2113 | // We've already checked this type, or are in the process of checking it. |
2114 | if (!Seen.insert(Record).second) |
2115 | return false; |
2116 | |
2117 | assert(Record->hasDefinition() &&
2118 | "Incomplete types should already be diagnosed");
2119 | |
2120 | if (Record->isDynamicClass()) |
2121 | return true; |
2122 | |
2123 | for (FieldDecl *F : Record->fields()) { |
2124 | if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen)) |
2125 | return true; |
2126 | } |
2127 | return false; |
2128 | } |
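 | // Example: with -fstrict-vtable-pointers, both of the following require
 | // laundering, since B's array field has a dynamic class element type:
 | //   struct A { virtual void f(); };
 | //   struct B { A arr[2]; };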
2129 | |
2130 | /// Determine if the specified type requires laundering by checking if it is a |
2131 | /// dynamic class type or contains a subobject which is a dynamic class type. |
2132 | static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { |
2133 | if (!CGM.getCodeGenOpts().StrictVTablePointers) |
2134 | return false; |
2135 | llvm::SmallPtrSet<const Decl *, 16> Seen; |
2136 | return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen); |
2137 | } |
2138 | |
2139 | RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { |
2140 | llvm::Value *Src = EmitScalarExpr(E->getArg(0)); |
2141 | llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1)); |
2142 | |
2143 | // The builtin's shift arg may have a different type than the source arg and |
2144 | // result, but the LLVM intrinsic uses the same type for all values. |
2145 | llvm::Type *Ty = Src->getType(); |
2146 | ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false); |
2147 | |
2148 | // Rotate is a special case of LLVM funnel shift - the first two args are the same.
2149 | unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; |
2150 | Function *F = CGM.getIntrinsic(IID, Ty); |
2151 | return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt })); |
2152 | } |
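 | // For example, __builtin_rotateleft32(x, n) lowers to
 | //   %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)
 | // and the right-rotate variants use @llvm.fshr instead.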
2153 | |
2154 | // Map long-double math builtins to their f128 versions.
2155 | static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) { |
2156 | switch (BuiltinID) { |
2157 | #define MUTATE_LDBL(func) \ |
2158 | case Builtin::BI__builtin_##func##l: \ |
2159 | return Builtin::BI__builtin_##func##f128; |
2160 | MUTATE_LDBL(sqrt) |
2161 | MUTATE_LDBL(cbrt) |
2162 | MUTATE_LDBL(fabs) |
2163 | MUTATE_LDBL(log) |
2164 | MUTATE_LDBL(log2) |
2165 | MUTATE_LDBL(log10) |
2166 | MUTATE_LDBL(log1p) |
2167 | MUTATE_LDBL(logb) |
2168 | MUTATE_LDBL(exp) |
2169 | MUTATE_LDBL(exp2) |
2170 | MUTATE_LDBL(expm1) |
2171 | MUTATE_LDBL(fdim) |
2172 | MUTATE_LDBL(hypot) |
2173 | MUTATE_LDBL(ilogb) |
2174 | MUTATE_LDBL(pow) |
2175 | MUTATE_LDBL(fmin) |
2176 | MUTATE_LDBL(fmax) |
2177 | MUTATE_LDBL(ceil) |
2178 | MUTATE_LDBL(trunc) |
2179 | MUTATE_LDBL(rint) |
2180 | MUTATE_LDBL(nearbyint) |
2181 | MUTATE_LDBL(round) |
2182 | MUTATE_LDBL(floor) |
2183 | MUTATE_LDBL(lround) |
2184 | MUTATE_LDBL(llround) |
2185 | MUTATE_LDBL(lrint) |
2186 | MUTATE_LDBL(llrint) |
2187 | MUTATE_LDBL(fmod) |
2188 | MUTATE_LDBL(modf) |
2189 | MUTATE_LDBL(nan) |
2190 | MUTATE_LDBL(nans) |
2191 | MUTATE_LDBL(inf) |
2192 | MUTATE_LDBL(fma) |
2193 | MUTATE_LDBL(sin) |
2194 | MUTATE_LDBL(cos) |
2195 | MUTATE_LDBL(tan) |
2196 | MUTATE_LDBL(sinh) |
2197 | MUTATE_LDBL(cosh) |
2198 | MUTATE_LDBL(tanh) |
2199 | MUTATE_LDBL(asin) |
2200 | MUTATE_LDBL(acos) |
2201 | MUTATE_LDBL(atan) |
2202 | MUTATE_LDBL(asinh) |
2203 | MUTATE_LDBL(acosh) |
2204 | MUTATE_LDBL(atanh) |
2205 | MUTATE_LDBL(atan2) |
2206 | MUTATE_LDBL(erf) |
2207 | MUTATE_LDBL(erfc) |
2208 | MUTATE_LDBL(ldexp) |
2209 | MUTATE_LDBL(frexp) |
2210 | MUTATE_LDBL(huge_val) |
2211 | MUTATE_LDBL(copysign) |
2212 | MUTATE_LDBL(nextafter) |
2213 | MUTATE_LDBL(nexttoward) |
2214 | MUTATE_LDBL(remainder) |
2215 | MUTATE_LDBL(remquo) |
2216 | MUTATE_LDBL(scalbln) |
2217 | MUTATE_LDBL(scalbn) |
2218 | MUTATE_LDBL(tgamma) |
2219 | MUTATE_LDBL(lgamma) |
2220 | #undef MUTATE_LDBL |
2221 | default: |
2222 | return BuiltinID; |
2223 | } |
2224 | } |
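 | // For example, on PPC64 with IEEE quad long double (-mabi=ieeelongdouble),
 | // a __builtin_sqrtl call is rewritten to __builtin_sqrtf128 here so that
 | // lowering emits f128-style libcalls.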
2225 | |
2226 | RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, |
2227 | const CallExpr *E, |
2228 | ReturnValueSlot ReturnValue) { |
2229 | const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
2230 | // See if we can constant fold this builtin. If so, don't emit it at all. |
2231 | Expr::EvalResult Result; |
2232 | if (E->EvaluateAsRValue(Result, CGM.getContext()) && |
2233 | !Result.hasSideEffects()) { |
2234 | if (Result.Val.isInt()) |
2235 | return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
2236 | Result.Val.getInt())); |
2237 | if (Result.Val.isFloat()) |
2238 | return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
2239 | Result.Val.getFloat())); |
2240 | } |
2241 | |
2242 | // If current long-double semantics is IEEE 128-bit, replace math builtins |
2243 | // of long-double with f128 equivalent. |
2244 | // TODO: This mutation should also be applied to targets other than PPC,
2245 | // once the backend supports IEEE 128-bit style libcalls.
2246 | if (getTarget().getTriple().isPPC64() && |
2247 | &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) |
2248 | BuiltinID = mutateLongDoubleBuiltin(BuiltinID); |
2249 | |
2250 | // If the builtin has been declared explicitly with an assembler label, |
2251 | // disable the specialized emitting below. Ideally we should communicate the |
2252 | // rename in IR, or at least avoid generating the intrinsic calls that are |
2253 | // likely to get lowered to the renamed library functions. |
2254 | const unsigned BuiltinIDIfNoAsmLabel = |
2255 | FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID; |
2256 | |
2257 | // There are LLVM math intrinsics/instructions corresponding to math library
2258 | // functions, except that the LLVM op never sets errno while the math library
2259 | // might. Also, math builtins have the same semantics as their math library
2260 | // twins. Thus, we can transform math library and builtin calls to their
2261 | // LLVM counterparts if the call is marked 'const' (known to never set errno).
2262 | if (FD->hasAttr<ConstAttr>()) { |
2263 | switch (BuiltinIDIfNoAsmLabel) { |
2264 | case Builtin::BIceil: |
2265 | case Builtin::BIceilf: |
2266 | case Builtin::BIceill: |
2267 | case Builtin::BI__builtin_ceil: |
2268 | case Builtin::BI__builtin_ceilf: |
2269 | case Builtin::BI__builtin_ceilf16: |
2270 | case Builtin::BI__builtin_ceill: |
2271 | case Builtin::BI__builtin_ceilf128: |
2272 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2273 | Intrinsic::ceil, |
2274 | Intrinsic::experimental_constrained_ceil)); |
2275 | |
2276 | case Builtin::BIcopysign: |
2277 | case Builtin::BIcopysignf: |
2278 | case Builtin::BIcopysignl: |
2279 | case Builtin::BI__builtin_copysign: |
2280 | case Builtin::BI__builtin_copysignf: |
2281 | case Builtin::BI__builtin_copysignf16: |
2282 | case Builtin::BI__builtin_copysignl: |
2283 | case Builtin::BI__builtin_copysignf128: |
2284 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
2285 | |
2286 | case Builtin::BIcos: |
2287 | case Builtin::BIcosf: |
2288 | case Builtin::BIcosl: |
2289 | case Builtin::BI__builtin_cos: |
2290 | case Builtin::BI__builtin_cosf: |
2291 | case Builtin::BI__builtin_cosf16: |
2292 | case Builtin::BI__builtin_cosl: |
2293 | case Builtin::BI__builtin_cosf128: |
2294 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2295 | Intrinsic::cos, |
2296 | Intrinsic::experimental_constrained_cos)); |
2297 | |
2298 | case Builtin::BIexp: |
2299 | case Builtin::BIexpf: |
2300 | case Builtin::BIexpl: |
2301 | case Builtin::BI__builtin_exp: |
2302 | case Builtin::BI__builtin_expf: |
2303 | case Builtin::BI__builtin_expf16: |
2304 | case Builtin::BI__builtin_expl: |
2305 | case Builtin::BI__builtin_expf128: |
2306 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2307 | Intrinsic::exp, |
2308 | Intrinsic::experimental_constrained_exp)); |
2309 | |
2310 | case Builtin::BIexp2: |
2311 | case Builtin::BIexp2f: |
2312 | case Builtin::BIexp2l: |
2313 | case Builtin::BI__builtin_exp2: |
2314 | case Builtin::BI__builtin_exp2f: |
2315 | case Builtin::BI__builtin_exp2f16: |
2316 | case Builtin::BI__builtin_exp2l: |
2317 | case Builtin::BI__builtin_exp2f128: |
2318 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2319 | Intrinsic::exp2, |
2320 | Intrinsic::experimental_constrained_exp2)); |
2321 | |
2322 | case Builtin::BIfabs: |
2323 | case Builtin::BIfabsf: |
2324 | case Builtin::BIfabsl: |
2325 | case Builtin::BI__builtin_fabs: |
2326 | case Builtin::BI__builtin_fabsf: |
2327 | case Builtin::BI__builtin_fabsf16: |
2328 | case Builtin::BI__builtin_fabsl: |
2329 | case Builtin::BI__builtin_fabsf128: |
2330 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
2331 | |
2332 | case Builtin::BIfloor: |
2333 | case Builtin::BIfloorf: |
2334 | case Builtin::BIfloorl: |
2335 | case Builtin::BI__builtin_floor: |
2336 | case Builtin::BI__builtin_floorf: |
2337 | case Builtin::BI__builtin_floorf16: |
2338 | case Builtin::BI__builtin_floorl: |
2339 | case Builtin::BI__builtin_floorf128: |
2340 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2341 | Intrinsic::floor, |
2342 | Intrinsic::experimental_constrained_floor)); |
2343 | |
2344 | case Builtin::BIfma: |
2345 | case Builtin::BIfmaf: |
2346 | case Builtin::BIfmal: |
2347 | case Builtin::BI__builtin_fma: |
2348 | case Builtin::BI__builtin_fmaf: |
2349 | case Builtin::BI__builtin_fmaf16: |
2350 | case Builtin::BI__builtin_fmal: |
2351 | case Builtin::BI__builtin_fmaf128: |
2352 | return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, |
2353 | Intrinsic::fma, |
2354 | Intrinsic::experimental_constrained_fma)); |
2355 | |
2356 | case Builtin::BIfmax: |
2357 | case Builtin::BIfmaxf: |
2358 | case Builtin::BIfmaxl: |
2359 | case Builtin::BI__builtin_fmax: |
2360 | case Builtin::BI__builtin_fmaxf: |
2361 | case Builtin::BI__builtin_fmaxf16: |
2362 | case Builtin::BI__builtin_fmaxl: |
2363 | case Builtin::BI__builtin_fmaxf128: |
2364 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2365 | Intrinsic::maxnum, |
2366 | Intrinsic::experimental_constrained_maxnum)); |
2367 | |
2368 | case Builtin::BIfmin: |
2369 | case Builtin::BIfminf: |
2370 | case Builtin::BIfminl: |
2371 | case Builtin::BI__builtin_fmin: |
2372 | case Builtin::BI__builtin_fminf: |
2373 | case Builtin::BI__builtin_fminf16: |
2374 | case Builtin::BI__builtin_fminl: |
2375 | case Builtin::BI__builtin_fminf128: |
2376 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2377 | Intrinsic::minnum, |
2378 | Intrinsic::experimental_constrained_minnum)); |
2379 | |
2380 | // fmod() is a special case. It maps to the frem instruction rather than an
2381 | // LLVM intrinsic.
2382 | case Builtin::BIfmod: |
2383 | case Builtin::BIfmodf: |
2384 | case Builtin::BIfmodl: |
2385 | case Builtin::BI__builtin_fmod: |
2386 | case Builtin::BI__builtin_fmodf: |
2387 | case Builtin::BI__builtin_fmodf16: |
2388 | case Builtin::BI__builtin_fmodl: |
2389 | case Builtin::BI__builtin_fmodf128: { |
2390 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2391 | Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
2392 | Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
2393 | return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); |
2394 | } |
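 | // For example, fmod(x, y) on doubles lowers directly to
 | //   %fmod = frem double %x, %y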
2395 | |
2396 | case Builtin::BIlog: |
2397 | case Builtin::BIlogf: |
2398 | case Builtin::BIlogl: |
2399 | case Builtin::BI__builtin_log: |
2400 | case Builtin::BI__builtin_logf: |
2401 | case Builtin::BI__builtin_logf16: |
2402 | case Builtin::BI__builtin_logl: |
2403 | case Builtin::BI__builtin_logf128: |
2404 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2405 | Intrinsic::log, |
2406 | Intrinsic::experimental_constrained_log)); |
2407 | |
2408 | case Builtin::BIlog10: |
2409 | case Builtin::BIlog10f: |
2410 | case Builtin::BIlog10l: |
2411 | case Builtin::BI__builtin_log10: |
2412 | case Builtin::BI__builtin_log10f: |
2413 | case Builtin::BI__builtin_log10f16: |
2414 | case Builtin::BI__builtin_log10l: |
2415 | case Builtin::BI__builtin_log10f128: |
2416 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2417 | Intrinsic::log10, |
2418 | Intrinsic::experimental_constrained_log10)); |
2419 | |
2420 | case Builtin::BIlog2: |
2421 | case Builtin::BIlog2f: |
2422 | case Builtin::BIlog2l: |
2423 | case Builtin::BI__builtin_log2: |
2424 | case Builtin::BI__builtin_log2f: |
2425 | case Builtin::BI__builtin_log2f16: |
2426 | case Builtin::BI__builtin_log2l: |
2427 | case Builtin::BI__builtin_log2f128: |
2428 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2429 | Intrinsic::log2, |
2430 | Intrinsic::experimental_constrained_log2)); |
2431 | |
2432 | case Builtin::BInearbyint: |
2433 | case Builtin::BInearbyintf: |
2434 | case Builtin::BInearbyintl: |
2435 | case Builtin::BI__builtin_nearbyint: |
2436 | case Builtin::BI__builtin_nearbyintf: |
2437 | case Builtin::BI__builtin_nearbyintl: |
2438 | case Builtin::BI__builtin_nearbyintf128: |
2439 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2440 | Intrinsic::nearbyint, |
2441 | Intrinsic::experimental_constrained_nearbyint)); |
2442 | |
2443 | case Builtin::BIpow: |
2444 | case Builtin::BIpowf: |
2445 | case Builtin::BIpowl: |
2446 | case Builtin::BI__builtin_pow: |
2447 | case Builtin::BI__builtin_powf: |
2448 | case Builtin::BI__builtin_powf16: |
2449 | case Builtin::BI__builtin_powl: |
2450 | case Builtin::BI__builtin_powf128: |
2451 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2452 | Intrinsic::pow, |
2453 | Intrinsic::experimental_constrained_pow)); |
2454 | |
2455 | case Builtin::BIrint: |
2456 | case Builtin::BIrintf: |
2457 | case Builtin::BIrintl: |
2458 | case Builtin::BI__builtin_rint: |
2459 | case Builtin::BI__builtin_rintf: |
2460 | case Builtin::BI__builtin_rintf16: |
2461 | case Builtin::BI__builtin_rintl: |
2462 | case Builtin::BI__builtin_rintf128: |
2463 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2464 | Intrinsic::rint, |
2465 | Intrinsic::experimental_constrained_rint)); |
2466 | |
2467 | case Builtin::BIround: |
2468 | case Builtin::BIroundf: |
2469 | case Builtin::BIroundl: |
2470 | case Builtin::BI__builtin_round: |
2471 | case Builtin::BI__builtin_roundf: |
2472 | case Builtin::BI__builtin_roundf16: |
2473 | case Builtin::BI__builtin_roundl: |
2474 | case Builtin::BI__builtin_roundf128: |
2475 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2476 | Intrinsic::round, |
2477 | Intrinsic::experimental_constrained_round)); |
2478 | |
2479 | case Builtin::BIsin: |
2480 | case Builtin::BIsinf: |
2481 | case Builtin::BIsinl: |
2482 | case Builtin::BI__builtin_sin: |
2483 | case Builtin::BI__builtin_sinf: |
2484 | case Builtin::BI__builtin_sinf16: |
2485 | case Builtin::BI__builtin_sinl: |
2486 | case Builtin::BI__builtin_sinf128: |
2487 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2488 | Intrinsic::sin, |
2489 | Intrinsic::experimental_constrained_sin)); |
2490 | |
2491 | case Builtin::BIsqrt: |
2492 | case Builtin::BIsqrtf: |
2493 | case Builtin::BIsqrtl: |
2494 | case Builtin::BI__builtin_sqrt: |
2495 | case Builtin::BI__builtin_sqrtf: |
2496 | case Builtin::BI__builtin_sqrtf16: |
2497 | case Builtin::BI__builtin_sqrtl: |
2498 | case Builtin::BI__builtin_sqrtf128: |
2499 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2500 | Intrinsic::sqrt, |
2501 | Intrinsic::experimental_constrained_sqrt)); |
2502 | |
2503 | case Builtin::BItrunc: |
2504 | case Builtin::BItruncf: |
2505 | case Builtin::BItruncl: |
2506 | case Builtin::BI__builtin_trunc: |
2507 | case Builtin::BI__builtin_truncf: |
2508 | case Builtin::BI__builtin_truncf16: |
2509 | case Builtin::BI__builtin_truncl: |
2510 | case Builtin::BI__builtin_truncf128: |
2511 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2512 | Intrinsic::trunc, |
2513 | Intrinsic::experimental_constrained_trunc)); |
2514 | |
2515 | case Builtin::BIlround: |
2516 | case Builtin::BIlroundf: |
2517 | case Builtin::BIlroundl: |
2518 | case Builtin::BI__builtin_lround: |
2519 | case Builtin::BI__builtin_lroundf: |
2520 | case Builtin::BI__builtin_lroundl: |
2521 | case Builtin::BI__builtin_lroundf128: |
2522 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2523 | *this, E, Intrinsic::lround, |
2524 | Intrinsic::experimental_constrained_lround)); |
2525 | |
2526 | case Builtin::BIllround: |
2527 | case Builtin::BIllroundf: |
2528 | case Builtin::BIllroundl: |
2529 | case Builtin::BI__builtin_llround: |
2530 | case Builtin::BI__builtin_llroundf: |
2531 | case Builtin::BI__builtin_llroundl: |
2532 | case Builtin::BI__builtin_llroundf128: |
2533 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2534 | *this, E, Intrinsic::llround, |
2535 | Intrinsic::experimental_constrained_llround)); |
2536 | |
2537 | case Builtin::BIlrint: |
2538 | case Builtin::BIlrintf: |
2539 | case Builtin::BIlrintl: |
2540 | case Builtin::BI__builtin_lrint: |
2541 | case Builtin::BI__builtin_lrintf: |
2542 | case Builtin::BI__builtin_lrintl: |
2543 | case Builtin::BI__builtin_lrintf128: |
2544 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2545 | *this, E, Intrinsic::lrint, |
2546 | Intrinsic::experimental_constrained_lrint)); |
2547 | |
2548 | case Builtin::BIllrint: |
2549 | case Builtin::BIllrintf: |
2550 | case Builtin::BIllrintl: |
2551 | case Builtin::BI__builtin_llrint: |
2552 | case Builtin::BI__builtin_llrintf: |
2553 | case Builtin::BI__builtin_llrintl: |
2554 | case Builtin::BI__builtin_llrintf128: |
2555 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2556 | *this, E, Intrinsic::llrint, |
2557 | Intrinsic::experimental_constrained_llrint)); |
2558 | |
2559 | default: |
2560 | break; |
2561 | } |
2562 | } |
2563 | |
2564 | switch (BuiltinIDIfNoAsmLabel) { |
2565 | default: break; |
2566 | case Builtin::BI__builtin___CFStringMakeConstantString: |
2567 | case Builtin::BI__builtin___NSStringMakeConstantString: |
2568 | return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); |
2569 | case Builtin::BI__builtin_stdarg_start: |
2570 | case Builtin::BI__builtin_va_start: |
2571 | case Builtin::BI__va_start: |
2572 | case Builtin::BI__builtin_va_end: |
2573 | return RValue::get( |
2574 | EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
2575 | ? EmitScalarExpr(E->getArg(0)) |
2576 | : EmitVAListRef(E->getArg(0)).getPointer(), |
2577 | BuiltinID != Builtin::BI__builtin_va_end)); |
2578 | case Builtin::BI__builtin_va_copy: { |
2579 | Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
2580 | Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
2581 | |
2582 | llvm::Type *Type = Int8PtrTy; |
2583 | |
2584 | DstPtr = Builder.CreateBitCast(DstPtr, Type); |
2585 | SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
2586 | return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
2587 | {DstPtr, SrcPtr})); |
2588 | } |
2589 | case Builtin::BI__builtin_abs: |
2590 | case Builtin::BI__builtin_labs: |
2591 | case Builtin::BI__builtin_llabs: { |
2592 | // X < 0 ? -X : X |
2593 | // The negation has 'nsw' because abs of INT_MIN is undefined. |
2594 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2595 | Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg"); |
2596 | Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType()); |
2597 | Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond"); |
2598 | Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs"); |
2599 | return RValue::get(Result); |
2600 | } |
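 | // The emitted IR for a 32-bit __builtin_abs(x) is roughly:
 | //   %neg = sub nsw i32 0, %x
 | //   %abscond = icmp slt i32 %x, 0
 | //   %abs = select i1 %abscond, i32 %neg, i32 %x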
2601 | case Builtin::BI__builtin_complex: { |
2602 | Value *Real = EmitScalarExpr(E->getArg(0)); |
2603 | Value *Imag = EmitScalarExpr(E->getArg(1)); |
2604 | return RValue::getComplex({Real, Imag}); |
2605 | } |
2606 | case Builtin::BI__builtin_conj: |
2607 | case Builtin::BI__builtin_conjf: |
2608 | case Builtin::BI__builtin_conjl: |
2609 | case Builtin::BIconj: |
2610 | case Builtin::BIconjf: |
2611 | case Builtin::BIconjl: { |
2612 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2613 | Value *Real = ComplexVal.first; |
2614 | Value *Imag = ComplexVal.second; |
2615 | Imag = Builder.CreateFNeg(Imag, "neg"); |
2616 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2617 | } |
2618 | case Builtin::BI__builtin_creal: |
2619 | case Builtin::BI__builtin_crealf: |
2620 | case Builtin::BI__builtin_creall: |
2621 | case Builtin::BIcreal: |
2622 | case Builtin::BIcrealf: |
2623 | case Builtin::BIcreall: { |
2624 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2625 | return RValue::get(ComplexVal.first); |
2626 | } |
2627 | |
2628 | case Builtin::BI__builtin_dump_struct: { |
2629 | llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy); |
2630 | llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get( |
2631 | LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true); |
2632 | |
2633 | Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts()); |
2634 | CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment(); |
2635 | |
2636 | const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts(); |
2637 | QualType Arg0Type = Arg0->getType()->getPointeeType(); |
2638 | |
2639 | Value *RecordPtr = EmitScalarExpr(Arg0); |
2640 | Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, |
2641 | {LLVMFuncType, Func}, 0); |
2642 | return RValue::get(Res); |
2643 | } |
2644 | |
2645 | case Builtin::BI__builtin_preserve_access_index: { |
2646 | // Only enable the preserved access index region when debuginfo
2647 | // is available, as debuginfo is needed to preserve the user-level
2648 | // access pattern.
2649 | if (!getDebugInfo()) { |
2650 | CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g"); |
2651 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2652 | } |
2653 | |
2654 | // Nested builtin_preserve_access_index() not supported |
2655 | if (IsInPreservedAIRegion) { |
2656 | CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported"); |
2657 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2658 | } |
2659 | |
2660 | IsInPreservedAIRegion = true; |
2661 | Value *Res = EmitScalarExpr(E->getArg(0)); |
2662 | IsInPreservedAIRegion = false; |
2663 | return RValue::get(Res); |
2664 | } |
2665 | |
2666 | case Builtin::BI__builtin_cimag: |
2667 | case Builtin::BI__builtin_cimagf: |
2668 | case Builtin::BI__builtin_cimagl: |
2669 | case Builtin::BIcimag: |
2670 | case Builtin::BIcimagf: |
2671 | case Builtin::BIcimagl: { |
2672 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2673 | return RValue::get(ComplexVal.second); |
2674 | } |
2675 | |
2676 | case Builtin::BI__builtin_clrsb: |
2677 | case Builtin::BI__builtin_clrsbl: |
2678 | case Builtin::BI__builtin_clrsbll: { |
2679 | // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2680 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2681 | |
2682 | llvm::Type *ArgType = ArgValue->getType(); |
2683 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2684 | |
2685 | llvm::Type *ResultType = ConvertType(E->getType()); |
2686 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
2687 | Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg"); |
2688 | Value *Inverse = Builder.CreateNot(ArgValue, "not"); |
2689 | Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue); |
2690 | Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()}); |
2691 | Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1)); |
2692 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2693 | "cast"); |
2694 | return RValue::get(Result); |
2695 | } |
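 | // Worked example: for 32-bit x = -1, x is negative, so Tmp = ~x = 0;
 | // ctlz(0, /*ZeroUndef=*/false) = 32, and the result is 32 - 1 = 31
 | // redundant sign bits.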
2696 | case Builtin::BI__builtin_ctzs: |
2697 | case Builtin::BI__builtin_ctz: |
2698 | case Builtin::BI__builtin_ctzl: |
2699 | case Builtin::BI__builtin_ctzll: { |
2700 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); |
2701 | |
2702 | llvm::Type *ArgType = ArgValue->getType(); |
2703 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
2704 | |
2705 | llvm::Type *ResultType = ConvertType(E->getType()); |
2706 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
2707 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
2708 | if (Result->getType() != ResultType) |
2709 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2710 | "cast"); |
2711 | return RValue::get(Result); |
2712 | } |
2713 | case Builtin::BI__builtin_clzs: |
2714 | case Builtin::BI__builtin_clz: |
2715 | case Builtin::BI__builtin_clzl: |
2716 | case Builtin::BI__builtin_clzll: { |
2717 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); |
2718 | |
2719 | llvm::Type *ArgType = ArgValue->getType(); |
2720 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2721 | |
2722 | llvm::Type *ResultType = ConvertType(E->getType()); |
2723 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
2724 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
2725 | if (Result->getType() != ResultType) |
2726 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2727 | "cast"); |
2728 | return RValue::get(Result); |
2729 | } |
2730 | case Builtin::BI__builtin_ffs: |
2731 | case Builtin::BI__builtin_ffsl: |
2732 | case Builtin::BI__builtin_ffsll: { |
2733 | // ffs(x) -> x ? cttz(x) + 1 : 0 |
2734 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2735 | |
2736 | llvm::Type *ArgType = ArgValue->getType(); |
2737 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
2738 | |
2739 | llvm::Type *ResultType = ConvertType(E->getType()); |
2740 | Value *Tmp = |
2741 | Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
2742 | llvm::ConstantInt::get(ArgType, 1)); |
2743 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
2744 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
2745 | Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
2746 | if (Result->getType() != ResultType) |
2747 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2748 | "cast"); |
2749 | return RValue::get(Result); |
2750 | } |
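 | // Worked example: ffs(8) = cttz(8) + 1 = 3 + 1 = 4 (the lowest set bit,
 | // counted from 1); for ffs(0) the select yields 0.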
2751 | case Builtin::BI__builtin_parity: |
2752 | case Builtin::BI__builtin_parityl: |
2753 | case Builtin::BI__builtin_parityll: { |
2754 | // parity(x) -> ctpop(x) & 1 |
2755 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2756 | |
2757 | llvm::Type *ArgType = ArgValue->getType(); |
2758 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
2759 | |
2760 | llvm::Type *ResultType = ConvertType(E->getType()); |
2761 | Value *Tmp = Builder.CreateCall(F, ArgValue); |
2762 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
2763 | if (Result->getType() != ResultType) |
2764 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2765 | "cast"); |
2766 | return RValue::get(Result); |
2767 | } |
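 | // Worked example: parity(0b1011) = ctpop(0b1011) & 1 = 3 & 1 = 1 (odd).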
2768 | case Builtin::BI__lzcnt16: |
2769 | case Builtin::BI__lzcnt: |
2770 | case Builtin::BI__lzcnt64: { |
2771 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2772 | |
2773 | llvm::Type *ArgType = ArgValue->getType(); |
2774 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2775 | |
2776 | llvm::Type *ResultType = ConvertType(E->getType()); |
2777 | Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()}); |
2778 | if (Result->getType() != ResultType) |
2779 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2780 | "cast"); |
2781 | return RValue::get(Result); |
2782 | } |
2783 | case Builtin::BI__popcnt16: |
2784 | case Builtin::BI__popcnt: |
2785 | case Builtin::BI__popcnt64: |
2786 | case Builtin::BI__builtin_popcount: |
2787 | case Builtin::BI__builtin_popcountl: |
2788 | case Builtin::BI__builtin_popcountll: { |
2789 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2790 | |
2791 | llvm::Type *ArgType = ArgValue->getType(); |
2792 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
2793 | |
2794 | llvm::Type *ResultType = ConvertType(E->getType()); |
2795 | Value *Result = Builder.CreateCall(F, ArgValue); |
2796 | if (Result->getType() != ResultType) |
2797 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2798 | "cast"); |
2799 | return RValue::get(Result); |
2800 | } |
2801 | case Builtin::BI__builtin_unpredictable: { |
2802 | // Always return the argument of __builtin_unpredictable. LLVM does not |
2803 | // handle this builtin. Metadata for this builtin should be added directly |
2804 | // to instructions such as branches or switches that use it. |
2805 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2806 | } |
2807 | case Builtin::BI__builtin_expect: { |
2808 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2809 | llvm::Type *ArgType = ArgValue->getType(); |
2810 | |
2811 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
2812 | // Don't generate llvm.expect on -O0 as the backend won't use it for |
2813 | // anything. |
2814 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
2815 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2816 | return RValue::get(ArgValue); |
2817 | |
2818 | Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
2819 | Value *Result = |
2820 | Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
2821 | return RValue::get(Result); |
2822 | } |
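 | // Typical use (sketch): on an LP64 target, `if (__builtin_expect(err, 0))`
 | // emits
 | //   %expval = call i64 @llvm.expect.i64(i64 %err, i64 0)
 | // at -O1 and above; at -O0 only %err is evaluated.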
2823 | case Builtin::BI__builtin_expect_with_probability: { |
2824 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2825 | llvm::Type *ArgType = ArgValue->getType(); |
2826 | |
2827 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
2828 | llvm::APFloat Probability(0.0); |
2829 | const Expr *ProbArg = E->getArg(2); |
2830 | bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext()); |
2831 | assert(EvalSucceed && "probability should be able to evaluate as float");
2832 | (void)EvalSucceed; |
2833 | bool LoseInfo = false; |
2834 | Probability.convert(llvm::APFloat::IEEEdouble(), |
2835 | llvm::RoundingMode::Dynamic, &LoseInfo); |
2836 | llvm::Type *Ty = ConvertType(ProbArg->getType()); |
2837 | Constant *Confidence = ConstantFP::get(Ty, Probability); |
2838 | // Don't generate llvm.expect.with.probability on -O0 as the backend |
2839 | // won't use it for anything. |
2840 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
2841 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2842 | return RValue::get(ArgValue); |
2843 | |
2844 | Function *FnExpect = |
2845 | CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType); |
2846 | Value *Result = Builder.CreateCall( |
2847 | FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval"); |
2848 | return RValue::get(Result); |
2849 | } |
2850 | case Builtin::BI__builtin_assume_aligned: { |
2851 | const Expr *Ptr = E->getArg(0); |
2852 | Value *PtrValue = EmitScalarExpr(Ptr); |
2853 | Value *OffsetValue = |
2854 | (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
2855 | |
2856 | Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
2857 | ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
2858 | if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
2859 | AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
2860 | llvm::Value::MaximumAlignment); |
2861 | |
2862 | emitAlignmentAssumption(PtrValue, Ptr, |
2863 | /*The expr loc is sufficient.*/ SourceLocation(), |
2864 | AlignmentCI, OffsetValue); |
2865 | return RValue::get(PtrValue); |
2866 | } |
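 | // Typical use (sketch): p = __builtin_assume_aligned(p, 64); this emits an
 | // alignment assumption (in recent LLVM, an llvm.assume carrying an "align"
 | // operand bundle) and returns the pointer value unchanged.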
2867 | case Builtin::BI__assume: |
2868 | case Builtin::BI__builtin_assume: { |
2869 | if (E->getArg(0)->HasSideEffects(getContext())) |
2870 | return RValue::get(nullptr); |
2871 | |
2872 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2873 | Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
2874 | return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
2875 | } |
2876 | case Builtin::BI__arithmetic_fence: { |
2877 | // Create the builtin call if fast math is enabled and the target
2878 | // supports the builtin; otherwise just return the argument.
2879 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2880 | llvm::FastMathFlags FMF = Builder.getFastMathFlags(); |
2881 | bool isArithmeticFenceEnabled = |
2882 | FMF.allowReassoc() && |
2883 | getContext().getTargetInfo().checkArithmeticFenceSupported(); |
2884 | QualType ArgType = E->getArg(0)->getType(); |
2885 | if (ArgType->isComplexType()) { |
2886 | if (isArithmeticFenceEnabled) { |
2887 | QualType ElementType = ArgType->castAs<ComplexType>()->getElementType(); |
2888 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2889 | Value *Real = Builder.CreateArithmeticFence(ComplexVal.first, |
2890 | ConvertType(ElementType)); |
2891 | Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second, |
2892 | ConvertType(ElementType)); |
2893 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2894 | } |
2895 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2896 | Value *Real = ComplexVal.first; |
2897 | Value *Imag = ComplexVal.second; |
2898 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2899 | } |
2900 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2901 | if (isArithmeticFenceEnabled) |
2902 | return RValue::get( |
2903 | Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType))); |
2904 | return RValue::get(ArgValue); |
2905 | } |
2906 | case Builtin::BI__builtin_bswap16: |
2907 | case Builtin::BI__builtin_bswap32: |
2908 | case Builtin::BI__builtin_bswap64: { |
2909 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
2910 | } |
2911 | case Builtin::BI__builtin_bitreverse8: |
2912 | case Builtin::BI__builtin_bitreverse16: |
2913 | case Builtin::BI__builtin_bitreverse32: |
2914 | case Builtin::BI__builtin_bitreverse64: { |
2915 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
2916 | } |
2917 | case Builtin::BI__builtin_rotateleft8: |
2918 | case Builtin::BI__builtin_rotateleft16: |
2919 | case Builtin::BI__builtin_rotateleft32: |
2920 | case Builtin::BI__builtin_rotateleft64: |
2921 | case Builtin::BI_rotl8: // Microsoft variants of rotate left |
2922 | case Builtin::BI_rotl16: |
2923 | case Builtin::BI_rotl: |
2924 | case Builtin::BI_lrotl: |
2925 | case Builtin::BI_rotl64: |
2926 | return emitRotate(E, false); |
2927 | |
2928 | case Builtin::BI__builtin_rotateright8: |
2929 | case Builtin::BI__builtin_rotateright16: |
2930 | case Builtin::BI__builtin_rotateright32: |
2931 | case Builtin::BI__builtin_rotateright64: |
2932 | case Builtin::BI_rotr8: // Microsoft variants of rotate right |
2933 | case Builtin::BI_rotr16: |
2934 | case Builtin::BI_rotr: |
2935 | case Builtin::BI_lrotr: |
2936 | case Builtin::BI_rotr64: |
2937 | return emitRotate(E, true); |
2938 | |
2939 | case Builtin::BI__builtin_constant_p: { |
2940 | llvm::Type *ResultType = ConvertType(E->getType()); |
2941 | |
2942 | const Expr *Arg = E->getArg(0); |
2943 | QualType ArgType = Arg->getType(); |
2944 | // FIXME: The allowance for Obj-C pointers and block pointers is historical |
2945 | // and likely a mistake. |
2946 | if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && |
2947 | !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) |
2948 | // Per the GCC documentation, only numeric constants are recognized after |
2949 | // inlining. |
2950 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2951 | |
2952 | if (Arg->HasSideEffects(getContext())) |
2953 | // The argument is unevaluated, so be conservative if it might have |
2954 | // side-effects. |
2955 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2956 | |
2957 | Value *ArgValue = EmitScalarExpr(Arg); |
2958 | if (ArgType->isObjCObjectPointerType()) { |
2959 | // Convert Objective-C objects to id because we cannot distinguish between |
2960 | // LLVM types for Obj-C classes as they are opaque. |
2961 | ArgType = CGM.getContext().getObjCIdType(); |
2962 | ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType)); |
2963 | } |
2964 | Function *F = |
2965 | CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType)); |
2966 | Value *Result = Builder.CreateCall(F, ArgValue); |
2967 | if (Result->getType() != ResultType) |
2968 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false); |
2969 | return RValue::get(Result); |
2970 | } |
2971 | case Builtin::BI__builtin_dynamic_object_size: |
2972 | case Builtin::BI__builtin_object_size: { |
2973 | unsigned Type = |
2974 | E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
2975 | auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
2976 | |
2977 | // We pass this builtin onto the optimizer so that it can figure out the |
2978 | // object size in more complex cases. |
2979 | bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; |
2980 | return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, |
2981 | /*EmittedE=*/nullptr, IsDynamic)); |
2982 | } |
2983 | case Builtin::BI__builtin_prefetch: { |
2984 | Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
2985 | // FIXME: Technically these constants should be of type 'int', yes?
2986 | RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
2987 | llvm::ConstantInt::get(Int32Ty, 0); |
2988 | Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
2989 | llvm::ConstantInt::get(Int32Ty, 3); |
2990 | Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
2991 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
2992 | return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
2993 | } |
2994 | case Builtin::BI__builtin_readcyclecounter: { |
2995 | Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
2996 | return RValue::get(Builder.CreateCall(F)); |
2997 | } |
2998 | case Builtin::BI__builtin___clear_cache: { |
2999 | Value *Begin = EmitScalarExpr(E->getArg(0)); |
3000 | Value *End = EmitScalarExpr(E->getArg(1)); |
3001 | Function *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
3002 | return RValue::get(Builder.CreateCall(F, {Begin, End})); |
3003 | } |
3004 | case Builtin::BI__builtin_trap: |
3005 | return RValue::get(EmitTrapCall(Intrinsic::trap)); |
3006 | case Builtin::BI__debugbreak: |
3007 | return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
3008 | case Builtin::BI__builtin_unreachable: { |
3009 | EmitUnreachable(E->getExprLoc()); |
3010 | |
3011 | // We do need to preserve an insertion point. |
3012 | EmitBlock(createBasicBlock("unreachable.cont")); |
3013 | |
3014 | return RValue::get(nullptr); |
3015 | } |
3016 | |
3017 | case Builtin::BI__builtin_powi: |
3018 | case Builtin::BI__builtin_powif: |
3019 | case Builtin::BI__builtin_powil: { |
3020 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
3021 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
3022 | |
3023 | if (Builder.getIsFPConstrained()) { |
3024 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3025 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi, |
3026 | Src0->getType()); |
3027 | return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 })); |
3028 | } |
3029 | |
3030 | Function *F = CGM.getIntrinsic(Intrinsic::powi, |
3031 | { Src0->getType(), Src1->getType() }); |
3032 | return RValue::get(Builder.CreateCall(F, { Src0, Src1 })); |
3033 | } |
3034 | case Builtin::BI__builtin_isgreater: |
3035 | case Builtin::BI__builtin_isgreaterequal: |
3036 | case Builtin::BI__builtin_isless: |
3037 | case Builtin::BI__builtin_islessequal: |
3038 | case Builtin::BI__builtin_islessgreater: |
3039 | case Builtin::BI__builtin_isunordered: { |
3040 | // Ordered comparisons: we know the arguments to these are matching scalar |
3041 | // floating point values. |
3042 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3043 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3044 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
3045 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
3046 | |
3047 | switch (BuiltinID) { |
3048 | default: llvm_unreachable("Unknown ordered comparison");
3049 | case Builtin::BI__builtin_isgreater: |
3050 | LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
3051 | break; |
3052 | case Builtin::BI__builtin_isgreaterequal: |
3053 | LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
3054 | break; |
3055 | case Builtin::BI__builtin_isless: |
3056 | LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
3057 | break; |
3058 | case Builtin::BI__builtin_islessequal: |
3059 | LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
3060 | break; |
3061 | case Builtin::BI__builtin_islessgreater: |
3062 | LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
3063 | break; |
3064 | case Builtin::BI__builtin_isunordered: |
3065 | LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
3066 | break; |
3067 | } |
3068 | // ZExt bool to int type. |
3069 | return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
3070 | } |
3071 | |
3072 | case Builtin::BI__builtin_isnan: { |
3073 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3074 | Value *V = EmitScalarExpr(E->getArg(0)); |
3075 | |
3076 | if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM)) |
3077 | return RValue::get(Result); |
3078 | |
3079 | Function *F = CGM.getIntrinsic(Intrinsic::isnan, V->getType()); |
3080 | Value *Call = Builder.CreateCall(F, V); |
3081 | return RValue::get(Builder.CreateZExt(Call, ConvertType(E->getType()))); |
3082 | } |
3083 | |
3084 | case Builtin::BI__builtin_matrix_transpose: { |
3085 | const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); |
3086 | Value *MatValue = EmitScalarExpr(E->getArg(0)); |
3087 | MatrixBuilder<CGBuilderTy> MB(Builder); |
3088 | Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(), |
3089 | MatrixTy->getNumColumns()); |
3090 | return RValue::get(Result); |
3091 | } |
3092 | |
3093 | case Builtin::BI__builtin_matrix_column_major_load: { |
3094 | MatrixBuilder<CGBuilderTy> MB(Builder); |
3095 | // Emit everything that isn't dependent on the first parameter type |
3096 | Value *Stride = EmitScalarExpr(E->getArg(3)); |
3097 | const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>(); |
3098 | auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>(); |
3099 | assert(PtrTy && "arg0 must be of pointer type");
3100 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
3101 | |
3102 | Address Src = EmitPointerWithAlignment(E->getArg(0)); |
3103 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(), |
3104 | E->getArg(0)->getExprLoc(), FD, 0); |
3105 | Value *Result = MB.CreateColumnMajorLoad( |
3106 | Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride, |
3107 | IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(), |
3108 | "matrix"); |
3109 | return RValue::get(Result); |
3110 | } |
3111 | |
3112 | case Builtin::BI__builtin_matrix_column_major_store: { |
3113 | MatrixBuilder<CGBuilderTy> MB(Builder); |
3114 | Value *Matrix = EmitScalarExpr(E->getArg(0)); |
3115 | Address Dst = EmitPointerWithAlignment(E->getArg(1)); |
3116 | Value *Stride = EmitScalarExpr(E->getArg(2)); |
3117 | |
3118 | const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); |
3119 | auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>(); |
3120 | assert(PtrTy && "arg1 must be of pointer type");
3121 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
3122 | |
3123 | EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(), |
3124 | E->getArg(1)->getExprLoc(), FD, 0); |
3125 | Value *Result = MB.CreateColumnMajorStore( |
3126 | Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()), |
3127 | Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns()); |
3128 | return RValue::get(Result); |
3129 | } |
3130 | |
3131 | case Builtin::BIfinite: |
3132 | case Builtin::BI__finite: |
3133 | case Builtin::BIfinitef: |
3134 | case Builtin::BI__finitef: |
3135 | case Builtin::BIfinitel: |
3136 | case Builtin::BI__finitel: |
3137 | case Builtin::BI__builtin_isinf: |
3138 | case Builtin::BI__builtin_isfinite: { |
3139 | // isinf(x) --> fabs(x) == infinity |
3140 | // isfinite(x) --> fabs(x) != infinity |
3141 | // x != NaN via the ordered compare in either case. |
3142 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3143 | Value *V = EmitScalarExpr(E->getArg(0)); |
3144 | llvm::Type *Ty = V->getType(); |
3145 | if (!Builder.getIsFPConstrained() || |
3146 | Builder.getDefaultConstrainedExcept() == fp::ebIgnore || |
3147 | !Ty->isIEEE()) { |
3148 | Value *Fabs = EmitFAbs(*this, V); |
3149 | Constant *Infinity = ConstantFP::getInfinity(V->getType()); |
3150 | CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) |
3151 | ? CmpInst::FCMP_OEQ |
3152 | : CmpInst::FCMP_ONE; |
3153 | Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); |
3154 | return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); |
3155 | } |
3156 | |
3157 | if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM)) |
3158 | return RValue::get(Result); |
3159 | |
3160 | // Inf values have all exp bits set and a zero significand. Therefore: |
3161 | // isinf(V) == ((V << 1) == ((exp mask) << 1)) |
3162 | // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison |
3163 | unsigned bitsize = Ty->getScalarSizeInBits(); |
3164 | llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize); |
3165 | Value *IntV = Builder.CreateBitCast(V, IntTy); |
3166 | Value *Shl1 = Builder.CreateShl(IntV, 1); |
3167 | const llvm::fltSemantics &Semantics = Ty->getFltSemantics(); |
3168 | APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt(); |
3169 | Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1)); |
3170 | if (BuiltinID == Builtin::BI__builtin_isinf) |
3171 | V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1); |
3172 | else |
3173 | V = Builder.CreateICmpULT(Shl1, ExpMaskShl1); |
3174 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3175 | } |
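 | // Worked example for float: the exponent mask is 0x7F800000 (the bit
 | // pattern of +inf), so after shifting out the sign bit:
 | //   isinf(V)    <=> (bits(V) << 1) == 0xFF000000
 | //   isfinite(V) <=> (bits(V) << 1) <  0xFF000000  (unsigned compare)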
3176 | |
3177 | case Builtin::BI__builtin_isinf_sign: { |
3178 | // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 |
3179 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3180 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3181 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
3182 | Value *AbsArg = EmitFAbs(*this, Arg); |
3183 | Value *IsInf = Builder.CreateFCmpOEQ( |
3184 | AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); |
3185 | Value *IsNeg = EmitSignBit(*this, Arg); |
3186 | |
3187 | llvm::Type *IntTy = ConvertType(E->getType()); |
3188 | Value *Zero = Constant::getNullValue(IntTy); |
3189 | Value *One = ConstantInt::get(IntTy, 1); |
3190 | Value *NegativeOne = ConstantInt::get(IntTy, -1); |
3191 | Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); |
3192 | Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); |
3193 | return RValue::get(Result); |
3194 | } |
3195 | |
3196 | case Builtin::BI__builtin_isnormal: { |
3197 | // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min |
3198 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3199 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3200 | Value *V = EmitScalarExpr(E->getArg(0)); |
3201 | Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); |
3202 | |
3203 | Value *Abs = EmitFAbs(*this, V); |
3204 | Value *IsLessThanInf = |
3205 | Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); |
3206 | APFloat Smallest = APFloat::getSmallestNormalized( |
3207 | getContext().getFloatTypeSemantics(E->getArg(0)->getType())); |
3208 | Value *IsNormal = |
3209 | Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), |
3210 | "isnormal"); |
3211 | V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); |
3212 | V = Builder.CreateAnd(V, IsNormal, "and"); |
3213 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3214 | } |
3215 | |
3216 | case Builtin::BI__builtin_flt_rounds: { |
3217 | Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds); |
3218 | |
3219 | llvm::Type *ResultType = ConvertType(E->getType()); |
3220 | Value *Result = Builder.CreateCall(F); |
3221 | if (Result->getType() != ResultType) |
3222 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
3223 | "cast"); |
3224 | return RValue::get(Result); |
3225 | } |
3226 | |
3227 | case Builtin::BI__builtin_fpclassify: { |
3228 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3229 | // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. |
3230 | Value *V = EmitScalarExpr(E->getArg(5)); |
3231 | llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); |
3232 | |
3233 | // Create Result |
3234 | BasicBlock *Begin = Builder.GetInsertBlock(); |
3235 | BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); |
3236 | Builder.SetInsertPoint(End); |
3237 | PHINode *Result = |
3238 | Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, |
3239 | "fpclassify_result"); |
3240 | |
3241 | // if (V==0) return FP_ZERO |
3242 | Builder.SetInsertPoint(Begin); |
3243 | Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), |
3244 | "iszero"); |
3245 | Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); |
3246 | BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); |
3247 | Builder.CreateCondBr(IsZero, End, NotZero); |
3248 | Result->addIncoming(ZeroLiteral, Begin); |
3249 | |
3250 | // if (V != V) return FP_NAN |
3251 | Builder.SetInsertPoint(NotZero); |
3252 | Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); |
3253 | Value *NanLiteral = EmitScalarExpr(E->getArg(0)); |
3254 | BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); |
3255 | Builder.CreateCondBr(IsNan, End, NotNan); |
3256 | Result->addIncoming(NanLiteral, NotZero); |
3257 | |
3258 | // if (fabs(V) == infinity) return FP_INFINITY |
3259 | Builder.SetInsertPoint(NotNan); |
3260 | Value *VAbs = EmitFAbs(*this, V); |
3261 | Value *IsInf = |
3262 | Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), |
3263 | "isinf"); |
3264 | Value *InfLiteral = EmitScalarExpr(E->getArg(1)); |
3265 | BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); |
3266 | Builder.CreateCondBr(IsInf, End, NotInf); |
3267 | Result->addIncoming(InfLiteral, NotNan); |
3268 | |
3269 | // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL |
3270 | Builder.SetInsertPoint(NotInf); |
3271 | APFloat Smallest = APFloat::getSmallestNormalized( |
3272 | getContext().getFloatTypeSemantics(E->getArg(5)->getType())); |
3273 | Value *IsNormal = |
3274 | Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), |
3275 | "isnormal"); |
3276 | Value *NormalResult = |
3277 | Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), |
3278 | EmitScalarExpr(E->getArg(3))); |
3279 | Builder.CreateBr(End); |
3280 | Result->addIncoming(NormalResult, NotInf); |
3281 | |
3282 | // return Result |
3283 | Builder.SetInsertPoint(End); |
3284 | return RValue::get(Result); |
3285 | } |
3286 | |
3287 | case Builtin::BIalloca: |
3288 | case Builtin::BI_alloca: |
3289 | case Builtin::BI__builtin_alloca: { |
3290 | Value *Size = EmitScalarExpr(E->getArg(0)); |
3291 | const TargetInfo &TI = getContext().getTargetInfo(); |
3292 | // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. |
3293 | const Align SuitableAlignmentInBytes = |
3294 | CGM.getContext() |
3295 | .toCharUnitsFromBits(TI.getSuitableAlign()) |
3296 | .getAsAlign(); |
3297 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
3298 | AI->setAlignment(SuitableAlignmentInBytes); |
3299 | initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes); |
3300 | return RValue::get(AI); |
3301 | } |
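 | // For example, on x86-64, where __BIGGEST_ALIGNMENT__ is 16,
 | // __builtin_alloca(n) emits roughly:
 | //   %1 = alloca i8, i64 %n, align 16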
3302 | |
3303 | case Builtin::BI__builtin_alloca_with_align: { |
3304 | Value *Size = EmitScalarExpr(E->getArg(0)); |
3305 | Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); |
3306 | auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue); |
3307 | unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); |
3308 | const Align AlignmentInBytes = |
3309 | CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign(); |
3310 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
3311 | AI->setAlignment(AlignmentInBytes); |
3312 | initializeAlloca(*this, AI, Size, AlignmentInBytes); |
3313 | return RValue::get(AI); |
3314 | } |
3315 | |
3316 | case Builtin::BIbzero: |
3317 | case Builtin::BI__builtin_bzero: { |
3318 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3319 | Value *SizeVal = EmitScalarExpr(E->getArg(1)); |
3320 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3321 | E->getArg(0)->getExprLoc(), FD, 0); |
3322 | Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); |
3323 | return RValue::get(nullptr); |
3324 | } |
3325 | case Builtin::BImemcpy: |
3326 | case Builtin::BI__builtin_memcpy: |
3327 | case Builtin::BImempcpy: |
3328 | case Builtin::BI__builtin_mempcpy: { |
3329 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3330 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3331 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3332 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3333 | E->getArg(0)->getExprLoc(), FD, 0); |
3334 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3335 | E->getArg(1)->getExprLoc(), FD, 1); |
3336 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
3337 | if (BuiltinID == Builtin::BImempcpy || |
3338 | BuiltinID == Builtin::BI__builtin_mempcpy) |
3339 | return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(), |
3340 | Dest.getPointer(), SizeVal)); |
3341 | else |
3342 | return RValue::get(Dest.getPointer()); |
3343 | } |
3344 | |
3345 | case Builtin::BI__builtin_memcpy_inline: { |
3346 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3347 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3348 | uint64_t Size = |
3349 | E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
3350 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3351 | E->getArg(0)->getExprLoc(), FD, 0); |
3352 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3353 | E->getArg(1)->getExprLoc(), FD, 1); |
3354 | Builder.CreateMemCpyInline(Dest, Src, Size); |
3355 | return RValue::get(nullptr); |
3356 | } |
3357 | |
3358 | case Builtin::BI__builtin_char_memchr: |
3359 | BuiltinID = Builtin::BI__builtin_memchr; |
3360 | break; |
3361 | |
3362 | case Builtin::BI__builtin___memcpy_chk: { |
3363 | // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. |
3364 | Expr::EvalResult SizeResult, DstSizeResult; |
3365 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3366 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3367 | break; |
3368 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3369 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3370 | if (Size.ugt(DstSize)) |
3371 | break; |
3372 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3373 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3374 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3375 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
3376 | return RValue::get(Dest.getPointer()); |
3377 | } |
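     | // An illustrative (hypothetical) instance of the fold above: given
     | //   char buf[32];
     | //   __builtin___memcpy_chk(buf, src, 16, __builtin_object_size(buf, 0));
     | // both size operands fold to constants and 16 <= 32, so this lowers to a
     | // plain 16-byte memcpy with no runtime check.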
3378 | |
3379 | case Builtin::BI__builtin_objc_memmove_collectable: { |
3380 | Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); |
3381 | Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); |
3382 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3383 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, |
3384 | DestAddr, SrcAddr, SizeVal); |
3385 | return RValue::get(DestAddr.getPointer()); |
3386 | } |
3387 | |
3388 | case Builtin::BI__builtin___memmove_chk: { |
3389 | // Fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1 <= cst2.
3390 | Expr::EvalResult SizeResult, DstSizeResult; |
3391 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3392 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3393 | break; |
3394 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3395 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3396 | if (Size.ugt(DstSize)) |
3397 | break; |
3398 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3399 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3400 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3401 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
3402 | return RValue::get(Dest.getPointer()); |
3403 | } |
3404 | |
3405 | case Builtin::BImemmove: |
3406 | case Builtin::BI__builtin_memmove: { |
3407 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3408 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3409 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3410 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3411 | E->getArg(0)->getExprLoc(), FD, 0); |
3412 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3413 | E->getArg(1)->getExprLoc(), FD, 1); |
3414 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
3415 | return RValue::get(Dest.getPointer()); |
3416 | } |
3417 | case Builtin::BImemset: |
3418 | case Builtin::BI__builtin_memset: { |
3419 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3420 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
3421 | Builder.getInt8Ty()); |
3422 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3423 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3424 | E->getArg(0)->getExprLoc(), FD, 0); |
3425 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
3426 | return RValue::get(Dest.getPointer()); |
3427 | } |
3428 | case Builtin::BI__builtin___memset_chk: { |
3429 | // Fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1 <= cst2.
3430 | Expr::EvalResult SizeResult, DstSizeResult; |
3431 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3432 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3433 | break; |
3434 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3435 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3436 | if (Size.ugt(DstSize)) |
3437 | break; |
3438 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3439 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
3440 | Builder.getInt8Ty()); |
3441 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3442 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
3443 | return RValue::get(Dest.getPointer()); |
3444 | } |
3445 | case Builtin::BI__builtin_wmemchr: { |
3446 | // The MSVC runtime library does not provide a definition of wmemchr, so we |
3447 | // need an inline implementation. |
3448 | if (!getTarget().getTriple().isOSMSVCRT()) |
3449 | break; |
3450 | |
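     | // The blocks built below are the IR equivalent of this C sketch
     | // (for illustration only):
     | //   while (size) {
     | //     if (*str == chr) return (wchar_t *)str;
     | //     ++str; --size;
     | //   }
     | //   return 0;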
3451 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
3452 | Value *Str = EmitScalarExpr(E->getArg(0)); |
3453 | Value *Chr = EmitScalarExpr(E->getArg(1)); |
3454 | Value *Size = EmitScalarExpr(E->getArg(2)); |
3455 | |
3456 | BasicBlock *Entry = Builder.GetInsertBlock(); |
3457 | BasicBlock *CmpEq = createBasicBlock("wmemchr.eq"); |
3458 | BasicBlock *Next = createBasicBlock("wmemchr.next"); |
3459 | BasicBlock *Exit = createBasicBlock("wmemchr.exit"); |
3460 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
3461 | Builder.CreateCondBr(SizeEq0, Exit, CmpEq); |
3462 | |
3463 | EmitBlock(CmpEq); |
3464 | PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2); |
3465 | StrPhi->addIncoming(Str, Entry); |
3466 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
3467 | SizePhi->addIncoming(Size, Entry); |
3468 | CharUnits WCharAlign = |
3469 | getContext().getTypeAlignInChars(getContext().WCharTy); |
3470 | Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign); |
3471 | Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0); |
3472 | Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr); |
3473 | Builder.CreateCondBr(StrEqChr, Exit, Next); |
3474 | |
3475 | EmitBlock(Next); |
3476 | Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1); |
3477 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
3478 | Value *NextSizeEq0 = |
3479 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
3480 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq); |
3481 | StrPhi->addIncoming(NextStr, Next); |
3482 | SizePhi->addIncoming(NextSize, Next); |
3483 | |
3484 | EmitBlock(Exit); |
3485 | PHINode *Ret = Builder.CreatePHI(Str->getType(), 3); |
3486 | Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry); |
3487 | Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next); |
3488 | Ret->addIncoming(FoundChr, CmpEq); |
3489 | return RValue::get(Ret); |
3490 | } |
3491 | case Builtin::BI__builtin_wmemcmp: { |
3492 | // The MSVC runtime library does not provide a definition of wmemcmp, so we |
3493 | // need an inline implementation. |
3494 | if (!getTarget().getTriple().isOSMSVCRT()) |
3495 | break; |
3496 | |
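     | // The blocks built below implement this C sketch (illustration only); the
     | // element compares are unsigned, matching wchar_t on MSVC targets:
     | //   while (size) {
     | //     if (*dst > *src) return 1;
     | //     if (*dst < *src) return -1;
     | //     ++dst; ++src; --size;
     | //   }
     | //   return 0;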
3497 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
3498 | |
3499 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
3500 | Value *Src = EmitScalarExpr(E->getArg(1)); |
3501 | Value *Size = EmitScalarExpr(E->getArg(2)); |
3502 | |
3503 | BasicBlock *Entry = Builder.GetInsertBlock(); |
3504 | BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt"); |
3505 | BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt"); |
3506 | BasicBlock *Next = createBasicBlock("wmemcmp.next"); |
3507 | BasicBlock *Exit = createBasicBlock("wmemcmp.exit"); |
3508 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
3509 | Builder.CreateCondBr(SizeEq0, Exit, CmpGT); |
3510 | |
3511 | EmitBlock(CmpGT); |
3512 | PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2); |
3513 | DstPhi->addIncoming(Dst, Entry); |
3514 | PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2); |
3515 | SrcPhi->addIncoming(Src, Entry); |
3516 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
3517 | SizePhi->addIncoming(Size, Entry); |
3518 | CharUnits WCharAlign = |
3519 | getContext().getTypeAlignInChars(getContext().WCharTy); |
3520 | Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign); |
3521 | Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign); |
3522 | Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh); |
3523 | Builder.CreateCondBr(DstGtSrc, Exit, CmpLT); |
3524 | |
3525 | EmitBlock(CmpLT); |
3526 | Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh); |
3527 | Builder.CreateCondBr(DstLtSrc, Exit, Next); |
3528 | |
3529 | EmitBlock(Next); |
3530 | Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1); |
3531 | Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1); |
3532 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
3533 | Value *NextSizeEq0 = |
3534 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
3535 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT); |
3536 | DstPhi->addIncoming(NextDst, Next); |
3537 | SrcPhi->addIncoming(NextSrc, Next); |
3538 | SizePhi->addIncoming(NextSize, Next); |
3539 | |
3540 | EmitBlock(Exit); |
3541 | PHINode *Ret = Builder.CreatePHI(IntTy, 4); |
3542 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry); |
3543 | Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT); |
3544 | Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT); |
3545 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Next); |
3546 | return RValue::get(Ret); |
3547 | } |
3548 | case Builtin::BI__builtin_dwarf_cfa: { |
3549 | // The offset in bytes from the first argument to the CFA. |
3550 | // |
3551 | // Why on earth is this in the frontend? Is there any reason at |
3552 | // all that the backend can't reasonably determine this while |
3553 | // lowering llvm.eh.dwarf.cfa()? |
3554 | // |
3555 | // TODO: If there's a satisfactory reason, add a target hook for |
3556 | // this instead of hard-coding 0, which is correct for most targets. |
3557 | int32_t Offset = 0; |
3558 | |
3559 | Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); |
3560 | return RValue::get(Builder.CreateCall(F, |
3561 | llvm::ConstantInt::get(Int32Ty, Offset))); |
3562 | } |
3563 | case Builtin::BI__builtin_return_address: { |
3564 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
3565 | getContext().UnsignedIntTy); |
3566 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
3567 | return RValue::get(Builder.CreateCall(F, Depth)); |
3568 | } |
3569 | case Builtin::BI_ReturnAddress: { |
3570 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
3571 | return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); |
3572 | } |
3573 | case Builtin::BI__builtin_frame_address: { |
3574 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
3575 | getContext().UnsignedIntTy); |
3576 | Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy); |
3577 | return RValue::get(Builder.CreateCall(F, Depth)); |
3578 | } |
3579 | case Builtin::BI__builtin_extract_return_addr: { |
3580 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3581 | Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); |
3582 | return RValue::get(Result); |
3583 | } |
3584 | case Builtin::BI__builtin_frob_return_addr: { |
3585 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3586 | Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); |
3587 | return RValue::get(Result); |
3588 | } |
3589 | case Builtin::BI__builtin_dwarf_sp_column: { |
3590 | llvm::IntegerType *Ty |
3591 | = cast<llvm::IntegerType>(ConvertType(E->getType())); |
3592 | int Column = getTargetHooks().getDwarfEHStackPointer(CGM); |
3593 | if (Column == -1) { |
3594 | CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); |
3595 | return RValue::get(llvm::UndefValue::get(Ty)); |
3596 | } |
3597 | return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); |
3598 | } |
3599 | case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
3600 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3601 | if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) |
3602 | CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); |
3603 | return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); |
3604 | } |
3605 | case Builtin::BI__builtin_eh_return: { |
3606 | Value *Int = EmitScalarExpr(E->getArg(0)); |
3607 | Value *Ptr = EmitScalarExpr(E->getArg(1)); |
3608 | |
3609 | llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); |
3610 | assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
3611 | "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3612 | Function *F = |
3613 | CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 |
3614 | : Intrinsic::eh_return_i64); |
3615 | Builder.CreateCall(F, {Int, Ptr}); |
3616 | Builder.CreateUnreachable(); |
3617 | |
3618 | // We do need to preserve an insertion point. |
3619 | EmitBlock(createBasicBlock("builtin_eh_return.cont")); |
3620 | |
3621 | return RValue::get(nullptr); |
3622 | } |
3623 | case Builtin::BI__builtin_unwind_init: { |
3624 | Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); |
3625 | return RValue::get(Builder.CreateCall(F)); |
3626 | } |
3627 | case Builtin::BI__builtin_extend_pointer: { |
3628 | // Extends a pointer to the size of an _Unwind_Word, which is |
3629 | // uint64_t on all platforms. Generally this gets poked into a |
3630 | // register and eventually used as an address, so if the |
3631 | // addressing registers are wider than pointers and the platform |
3632 | // doesn't implicitly ignore high-order bits when doing |
3633 | // addressing, we need to make sure we zext / sext based on |
3634 | // the platform's expectations. |
3635 | // |
3636 | // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html |
3637 | |
3638 | // Cast the pointer to intptr_t. |
3639 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3640 | Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); |
3641 | |
3642 | // If that's 64 bits, we're done. |
3643 | if (IntPtrTy->getBitWidth() == 64) |
3644 | return RValue::get(Result); |
3645 | |
3646 | // Otherwise, ask the target codegen hooks what to do.
3647 | if (getTargetHooks().extendPointerWithSExt()) |
3648 | return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); |
3649 | else |
3650 | return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); |
3651 | } |
3652 | case Builtin::BI__builtin_setjmp: { |
3653 | // Buffer is a void**. |
3654 | Address Buf = EmitPointerWithAlignment(E->getArg(0)); |
3655 | |
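     | // Buffer layout assumed for llvm.eh.sjlj.setjmp (a five-pointer block):
     | // word 0 holds the frame address, word 1 is filled in by the intrinsic
     | // with the resume address, and word 2 holds the saved stack pointer,
     | // which is why the two stores below target slots 0 and 2.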
3656 | // Store the frame pointer to the setjmp buffer. |
3657 | Value *FrameAddr = Builder.CreateCall( |
3658 | CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy), |
3659 | ConstantInt::get(Int32Ty, 0)); |
3660 | Builder.CreateStore(FrameAddr, Buf); |
3661 | |
3662 | // Store the stack pointer to the setjmp buffer. |
3663 | Value *StackAddr = |
3664 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); |
3665 | Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); |
3666 | Builder.CreateStore(StackAddr, StackSaveSlot); |
3667 | |
3668 | // Call LLVM's EH setjmp, which is lightweight. |
3669 | Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); |
3670 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
3671 | return RValue::get(Builder.CreateCall(F, Buf.getPointer())); |
3672 | } |
3673 | case Builtin::BI__builtin_longjmp: { |
3674 | Value *Buf = EmitScalarExpr(E->getArg(0)); |
3675 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
3676 | |
3677 | // Call LLVM's EH longjmp, which is lightweight. |
3678 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); |
3679 | |
3680 | // longjmp doesn't return; mark this as unreachable. |
3681 | Builder.CreateUnreachable(); |
3682 | |
3683 | // We do need to preserve an insertion point. |
3684 | EmitBlock(createBasicBlock("longjmp.cont")); |
3685 | |
3686 | return RValue::get(nullptr); |
3687 | } |
3688 | case Builtin::BI__builtin_launder: { |
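     | // __builtin_launder only needs real work when the pointee type carries
     | // data that invariant-group optimizations may cache (e.g. a vtable pointer
     | // under -fstrict-vtable-pointers); otherwise it is a no-op and the pointer
     | // is returned unchanged.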
3689 | const Expr *Arg = E->getArg(0); |
3690 | QualType ArgTy = Arg->getType()->getPointeeType(); |
3691 | Value *Ptr = EmitScalarExpr(Arg); |
3692 | if (TypeRequiresBuiltinLaunder(CGM, ArgTy)) |
3693 | Ptr = Builder.CreateLaunderInvariantGroup(Ptr); |
3694 | |
3695 | return RValue::get(Ptr); |
3696 | } |
3697 | case Builtin::BI__sync_fetch_and_add: |
3698 | case Builtin::BI__sync_fetch_and_sub: |
3699 | case Builtin::BI__sync_fetch_and_or: |
3700 | case Builtin::BI__sync_fetch_and_and: |
3701 | case Builtin::BI__sync_fetch_and_xor: |
3702 | case Builtin::BI__sync_fetch_and_nand: |
3703 | case Builtin::BI__sync_add_and_fetch: |
3704 | case Builtin::BI__sync_sub_and_fetch: |
3705 | case Builtin::BI__sync_and_and_fetch: |
3706 | case Builtin::BI__sync_or_and_fetch: |
3707 | case Builtin::BI__sync_xor_and_fetch: |
3708 | case Builtin::BI__sync_nand_and_fetch: |
3709 | case Builtin::BI__sync_val_compare_and_swap: |
3710 | case Builtin::BI__sync_bool_compare_and_swap: |
3711 | case Builtin::BI__sync_lock_test_and_set: |
3712 | case Builtin::BI__sync_lock_release: |
3713 | case Builtin::BI__sync_swap: |
3714 | llvm_unreachable("Shouldn't make it through sema");
3715 | case Builtin::BI__sync_fetch_and_add_1: |
3716 | case Builtin::BI__sync_fetch_and_add_2: |
3717 | case Builtin::BI__sync_fetch_and_add_4: |
3718 | case Builtin::BI__sync_fetch_and_add_8: |
3719 | case Builtin::BI__sync_fetch_and_add_16: |
3720 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); |
3721 | case Builtin::BI__sync_fetch_and_sub_1: |
3722 | case Builtin::BI__sync_fetch_and_sub_2: |
3723 | case Builtin::BI__sync_fetch_and_sub_4: |
3724 | case Builtin::BI__sync_fetch_and_sub_8: |
3725 | case Builtin::BI__sync_fetch_and_sub_16: |
3726 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); |
3727 | case Builtin::BI__sync_fetch_and_or_1: |
3728 | case Builtin::BI__sync_fetch_and_or_2: |
3729 | case Builtin::BI__sync_fetch_and_or_4: |
3730 | case Builtin::BI__sync_fetch_and_or_8: |
3731 | case Builtin::BI__sync_fetch_and_or_16: |
3732 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); |
3733 | case Builtin::BI__sync_fetch_and_and_1: |
3734 | case Builtin::BI__sync_fetch_and_and_2: |
3735 | case Builtin::BI__sync_fetch_and_and_4: |
3736 | case Builtin::BI__sync_fetch_and_and_8: |
3737 | case Builtin::BI__sync_fetch_and_and_16: |
3738 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); |
3739 | case Builtin::BI__sync_fetch_and_xor_1: |
3740 | case Builtin::BI__sync_fetch_and_xor_2: |
3741 | case Builtin::BI__sync_fetch_and_xor_4: |
3742 | case Builtin::BI__sync_fetch_and_xor_8: |
3743 | case Builtin::BI__sync_fetch_and_xor_16: |
3744 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); |
3745 | case Builtin::BI__sync_fetch_and_nand_1: |
3746 | case Builtin::BI__sync_fetch_and_nand_2: |
3747 | case Builtin::BI__sync_fetch_and_nand_4: |
3748 | case Builtin::BI__sync_fetch_and_nand_8: |
3749 | case Builtin::BI__sync_fetch_and_nand_16: |
3750 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); |
3751 | |
3752 | // Clang extensions: not overloaded yet. |
3753 | case Builtin::BI__sync_fetch_and_min: |
3754 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); |
3755 | case Builtin::BI__sync_fetch_and_max: |
3756 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); |
3757 | case Builtin::BI__sync_fetch_and_umin: |
3758 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); |
3759 | case Builtin::BI__sync_fetch_and_umax: |
3760 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); |
3761 | |
3762 | case Builtin::BI__sync_add_and_fetch_1: |
3763 | case Builtin::BI__sync_add_and_fetch_2: |
3764 | case Builtin::BI__sync_add_and_fetch_4: |
3765 | case Builtin::BI__sync_add_and_fetch_8: |
3766 | case Builtin::BI__sync_add_and_fetch_16: |
3767 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, |
3768 | llvm::Instruction::Add); |
3769 | case Builtin::BI__sync_sub_and_fetch_1: |
3770 | case Builtin::BI__sync_sub_and_fetch_2: |
3771 | case Builtin::BI__sync_sub_and_fetch_4: |
3772 | case Builtin::BI__sync_sub_and_fetch_8: |
3773 | case Builtin::BI__sync_sub_and_fetch_16: |
3774 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, |
3775 | llvm::Instruction::Sub); |
3776 | case Builtin::BI__sync_and_and_fetch_1: |
3777 | case Builtin::BI__sync_and_and_fetch_2: |
3778 | case Builtin::BI__sync_and_and_fetch_4: |
3779 | case Builtin::BI__sync_and_and_fetch_8: |
3780 | case Builtin::BI__sync_and_and_fetch_16: |
3781 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, |
3782 | llvm::Instruction::And); |
3783 | case Builtin::BI__sync_or_and_fetch_1: |
3784 | case Builtin::BI__sync_or_and_fetch_2: |
3785 | case Builtin::BI__sync_or_and_fetch_4: |
3786 | case Builtin::BI__sync_or_and_fetch_8: |
3787 | case Builtin::BI__sync_or_and_fetch_16: |
3788 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, |
3789 | llvm::Instruction::Or); |
3790 | case Builtin::BI__sync_xor_and_fetch_1: |
3791 | case Builtin::BI__sync_xor_and_fetch_2: |
3792 | case Builtin::BI__sync_xor_and_fetch_4: |
3793 | case Builtin::BI__sync_xor_and_fetch_8: |
3794 | case Builtin::BI__sync_xor_and_fetch_16: |
3795 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, |
3796 | llvm::Instruction::Xor); |
3797 | case Builtin::BI__sync_nand_and_fetch_1: |
3798 | case Builtin::BI__sync_nand_and_fetch_2: |
3799 | case Builtin::BI__sync_nand_and_fetch_4: |
3800 | case Builtin::BI__sync_nand_and_fetch_8: |
3801 | case Builtin::BI__sync_nand_and_fetch_16: |
3802 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, |
3803 | llvm::Instruction::And, true); |
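     | // The trailing 'true' requests inversion: __sync_nand_and_fetch returns
     | // ~(*ptr & value), so the post-operation And must have its result
     | // complemented. Illustrative equivalent:
     | //   tmp = ~(*ptr & value); *ptr = tmp; return tmp;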
3804 | |
3805 | case Builtin::BI__sync_val_compare_and_swap_1: |
3806 | case Builtin::BI__sync_val_compare_and_swap_2: |
3807 | case Builtin::BI__sync_val_compare_and_swap_4: |
3808 | case Builtin::BI__sync_val_compare_and_swap_8: |
3809 | case Builtin::BI__sync_val_compare_and_swap_16: |
3810 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); |
3811 | |
3812 | case Builtin::BI__sync_bool_compare_and_swap_1: |
3813 | case Builtin::BI__sync_bool_compare_and_swap_2: |
3814 | case Builtin::BI__sync_bool_compare_and_swap_4: |
3815 | case Builtin::BI__sync_bool_compare_and_swap_8: |
3816 | case Builtin::BI__sync_bool_compare_and_swap_16: |
3817 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); |
3818 | |
3819 | case Builtin::BI__sync_swap_1: |
3820 | case Builtin::BI__sync_swap_2: |
3821 | case Builtin::BI__sync_swap_4: |
3822 | case Builtin::BI__sync_swap_8: |
3823 | case Builtin::BI__sync_swap_16: |
3824 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
3825 | |
3826 | case Builtin::BI__sync_lock_test_and_set_1: |
3827 | case Builtin::BI__sync_lock_test_and_set_2: |
3828 | case Builtin::BI__sync_lock_test_and_set_4: |
3829 | case Builtin::BI__sync_lock_test_and_set_8: |
3830 | case Builtin::BI__sync_lock_test_and_set_16: |
3831 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
3832 | |
3833 | case Builtin::BI__sync_lock_release_1: |
3834 | case Builtin::BI__sync_lock_release_2: |
3835 | case Builtin::BI__sync_lock_release_4: |
3836 | case Builtin::BI__sync_lock_release_8: |
3837 | case Builtin::BI__sync_lock_release_16: { |
3838 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3839 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
3840 | CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); |
3841 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), |
3842 | StoreSize.getQuantity() * 8); |
3843 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
3844 | llvm::StoreInst *Store = |
3845 | Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, |
3846 | StoreSize); |
3847 | Store->setAtomic(llvm::AtomicOrdering::Release); |
3848 | return RValue::get(nullptr); |
3849 | } |
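     | // __sync_lock_release is specified as writing the constant 0 with release
     | // semantics, which is exactly the atomic store emitted above. Typical
     | // (illustrative) pairing:
     | //   while (__sync_lock_test_and_set(&flag, 1)) /* spin */; // acquire
     | //   __sync_lock_release(&flag);                            // release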
3850 | |
3851 | case Builtin::BI__sync_synchronize: { |
3852 | // We assume this is supposed to correspond to a C++0x-style |
3853 | // sequentially-consistent fence (i.e. this is only usable for |
3854 | // synchronization, not device I/O or anything like that). This intrinsic |
3855 | // is really badly designed in the sense that in theory, there isn't |
3856 | // any way to safely use it... but in practice, it mostly works |
3857 | // to use it with non-atomic loads and stores to get acquire/release |
3858 | // semantics. |
3859 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); |
3860 | return RValue::get(nullptr); |
3861 | } |
3862 | |
3863 | case Builtin::BI__builtin_nontemporal_load: |
3864 | return RValue::get(EmitNontemporalLoad(*this, E)); |
3865 | case Builtin::BI__builtin_nontemporal_store: |
3866 | return RValue::get(EmitNontemporalStore(*this, E)); |
3867 | case Builtin::BI__c11_atomic_is_lock_free: |
3868 | case Builtin::BI__atomic_is_lock_free: { |
3869 | // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the |
3870 | // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since |
3871 | // _Atomic(T) is always properly-aligned. |
3872 | const char *LibCallName = "__atomic_is_lock_free"; |
3873 | CallArgList Args; |
3874 | Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), |
3875 | getContext().getSizeType()); |
3876 | if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
3877 | Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), |
3878 | getContext().VoidPtrTy); |
3879 | else |
3880 | Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), |
3881 | getContext().VoidPtrTy); |
3882 | const CGFunctionInfo &FuncInfo = |
3883 | CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); |
3884 | llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); |
3885 | llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName); |
3886 | return EmitCall(FuncInfo, CGCallee::forDirect(Func), |
3887 | ReturnValueSlot(), Args); |
3888 | } |
3889 | |
3890 | case Builtin::BI__atomic_test_and_set: { |
3891 | // Determine volatility from the argument type before implicit casts: the
3892 | // builtin's own parameter type is always volatile-qualified.
3893 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
3894 | bool Volatile = |
3895 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
3896 | |
3897 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3898 | unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); |
3899 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
3900 | Value *NewVal = Builder.getInt8(1); |
3901 | Value *Order = EmitScalarExpr(E->getArg(1)); |
3902 | if (isa<llvm::ConstantInt>(Order)) { |
3903 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
3904 | AtomicRMWInst *Result = nullptr; |
3905 | switch (ord) { |
3906 | case 0: // memory_order_relaxed |
3907 | default: // invalid order |
3908 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3909 | llvm::AtomicOrdering::Monotonic); |
3910 | break; |
3911 | case 1: // memory_order_consume |
3912 | case 2: // memory_order_acquire |
3913 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3914 | llvm::AtomicOrdering::Acquire); |
3915 | break; |
3916 | case 3: // memory_order_release |
3917 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3918 | llvm::AtomicOrdering::Release); |
3919 | break; |
3920 | case 4: // memory_order_acq_rel |
3921 | |
3922 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3923 | llvm::AtomicOrdering::AcquireRelease); |
3924 | break; |
3925 | case 5: // memory_order_seq_cst |
3926 | Result = Builder.CreateAtomicRMW( |
3927 | llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3928 | llvm::AtomicOrdering::SequentiallyConsistent); |
3929 | break; |
3930 | } |
3931 | Result->setVolatile(Volatile); |
3932 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
3933 | } |
3934 | |
3935 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
3936 | |
3937 | llvm::BasicBlock *BBs[5] = { |
3938 | createBasicBlock("monotonic", CurFn), |
3939 | createBasicBlock("acquire", CurFn), |
3940 | createBasicBlock("release", CurFn), |
3941 | createBasicBlock("acqrel", CurFn), |
3942 | createBasicBlock("seqcst", CurFn) |
3943 | }; |
3944 | llvm::AtomicOrdering Orders[5] = { |
3945 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
3946 | llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
3947 | llvm::AtomicOrdering::SequentiallyConsistent}; |
3948 | |
3949 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
3950 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
3951 | |
3952 | Builder.SetInsertPoint(ContBB); |
3953 | PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); |
3954 | |
3955 | for (unsigned i = 0; i < 5; ++i) { |
3956 | Builder.SetInsertPoint(BBs[i]); |
3957 | AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, |
3958 | Ptr, NewVal, Orders[i]); |
3959 | RMW->setVolatile(Volatile); |
3960 | Result->addIncoming(RMW, BBs[i]); |
3961 | Builder.CreateBr(ContBB); |
3962 | } |
3963 | |
3964 | SI->addCase(Builder.getInt32(0), BBs[0]); |
3965 | SI->addCase(Builder.getInt32(1), BBs[1]); |
3966 | SI->addCase(Builder.getInt32(2), BBs[1]); |
3967 | SI->addCase(Builder.getInt32(3), BBs[2]); |
3968 | SI->addCase(Builder.getInt32(4), BBs[3]); |
3969 | SI->addCase(Builder.getInt32(5), BBs[4]); |
3970 | |
3971 | Builder.SetInsertPoint(ContBB); |
3972 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
3973 | } |
3974 | |
3975 | case Builtin::BI__atomic_clear: { |
3976 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
3977 | bool Volatile = |
3978 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
3979 | |
3980 | Address Ptr = EmitPointerWithAlignment(E->getArg(0)); |
3981 | unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); |
3982 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
3983 | Value *NewVal = Builder.getInt8(0); |
3984 | Value *Order = EmitScalarExpr(E->getArg(1)); |
3985 | if (isa<llvm::ConstantInt>(Order)) { |
3986 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
3987 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
3988 | switch (ord) { |
3989 | case 0: // memory_order_relaxed |
3990 | default: // invalid order |
3991 | Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
3992 | break; |
3993 | case 3: // memory_order_release |
3994 | Store->setOrdering(llvm::AtomicOrdering::Release); |
3995 | break; |
3996 | case 5: // memory_order_seq_cst |
3997 | Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
3998 | break; |
3999 | } |
4000 | return RValue::get(nullptr); |
4001 | } |
4002 | |
4003 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4004 | |
4005 | llvm::BasicBlock *BBs[3] = { |
4006 | createBasicBlock("monotonic", CurFn), |
4007 | createBasicBlock("release", CurFn), |
4008 | createBasicBlock("seqcst", CurFn) |
4009 | }; |
4010 | llvm::AtomicOrdering Orders[3] = { |
4011 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
4012 | llvm::AtomicOrdering::SequentiallyConsistent}; |
4013 | |
4014 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4015 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
4016 | |
4017 | for (unsigned i = 0; i < 3; ++i) { |
4018 | Builder.SetInsertPoint(BBs[i]); |
4019 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
4020 | Store->setOrdering(Orders[i]); |
4021 | Builder.CreateBr(ContBB); |
4022 | } |
4023 | |
4024 | SI->addCase(Builder.getInt32(0), BBs[0]); |
4025 | SI->addCase(Builder.getInt32(3), BBs[1]); |
4026 | SI->addCase(Builder.getInt32(5), BBs[2]); |
4027 | |
4028 | Builder.SetInsertPoint(ContBB); |
4029 | return RValue::get(nullptr); |
4030 | } |
4031 | |
4032 | case Builtin::BI__atomic_thread_fence: |
4033 | case Builtin::BI__atomic_signal_fence: |
4034 | case Builtin::BI__c11_atomic_thread_fence: |
4035 | case Builtin::BI__c11_atomic_signal_fence: { |
4036 | llvm::SyncScope::ID SSID; |
4037 | if (BuiltinID == Builtin::BI__atomic_signal_fence || |
4038 | BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
4039 | SSID = llvm::SyncScope::SingleThread; |
4040 | else |
4041 | SSID = llvm::SyncScope::System; |
4042 | Value *Order = EmitScalarExpr(E->getArg(0)); |
4043 | if (isa<llvm::ConstantInt>(Order)) { |
4044 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
4045 | switch (ord) { |
4046 | case 0: // memory_order_relaxed |
4047 | default: // invalid order |
4048 | break; |
4049 | case 1: // memory_order_consume |
4050 | case 2: // memory_order_acquire |
4051 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
4052 | break; |
4053 | case 3: // memory_order_release |
4054 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
4055 | break; |
4056 | case 4: // memory_order_acq_rel |
4057 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
4058 | break; |
4059 | case 5: // memory_order_seq_cst |
4060 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4061 | break; |
4062 | } |
4063 | return RValue::get(nullptr); |
4064 | } |
4065 | |
4066 | llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
4067 | AcquireBB = createBasicBlock("acquire", CurFn); |
4068 | ReleaseBB = createBasicBlock("release", CurFn); |
4069 | AcqRelBB = createBasicBlock("acqrel", CurFn); |
4070 | SeqCstBB = createBasicBlock("seqcst", CurFn); |
4071 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4072 | |
4073 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4074 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); |
4075 | |
4076 | Builder.SetInsertPoint(AcquireBB); |
4077 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
4078 | Builder.CreateBr(ContBB); |
4079 | SI->addCase(Builder.getInt32(1), AcquireBB); |
4080 | SI->addCase(Builder.getInt32(2), AcquireBB); |
4081 | |
4082 | Builder.SetInsertPoint(ReleaseBB); |
4083 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
4084 | Builder.CreateBr(ContBB); |
4085 | SI->addCase(Builder.getInt32(3), ReleaseBB); |
4086 | |
4087 | Builder.SetInsertPoint(AcqRelBB); |
4088 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
4089 | Builder.CreateBr(ContBB); |
4090 | SI->addCase(Builder.getInt32(4), AcqRelBB); |
4091 | |
4092 | Builder.SetInsertPoint(SeqCstBB); |
4093 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4094 | Builder.CreateBr(ContBB); |
4095 | SI->addCase(Builder.getInt32(5), SeqCstBB); |
4096 | |
4097 | Builder.SetInsertPoint(ContBB); |
4098 | return RValue::get(nullptr); |
4099 | } |
4100 | |
4101 | case Builtin::BI__builtin_signbit: |
4102 | case Builtin::BI__builtin_signbitf: |
4103 | case Builtin::BI__builtin_signbitl: { |
4104 | return RValue::get( |
4105 | Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), |
4106 | ConvertType(E->getType()))); |
4107 | } |
4108 | case Builtin::BI__warn_memset_zero_len: |
4109 | return RValue::getIgnored(); |
4110 | case Builtin::BI__annotation: { |
4111 | // Re-encode each wide string to UTF8 and make an MDString. |
4112 | SmallVector<Metadata *, 1> Strings; |
4113 | for (const Expr *Arg : E->arguments()) { |
4114 | const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); |
4115 | assert(Str->getCharByteWidth() == 2);
4116 | StringRef WideBytes = Str->getBytes(); |
4117 | std::string StrUtf8; |
4118 | if (!convertUTF16ToUTF8String( |
4119 | makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) { |
4120 | CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); |
4121 | continue; |
4122 | } |
4123 | Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8)); |
4124 | } |
4125 | |
4126 | // Build an MDTuple of MDStrings and emit the intrinsic call.
4127 | llvm::Function *F = |
4128 | CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {}); |
4129 | MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings); |
4130 | Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple)); |
4131 | return RValue::getIgnored(); |
4132 | } |
4133 | case Builtin::BI__builtin_annotation: { |
4134 | llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); |
4135 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, |
4136 | AnnVal->getType()); |
4137 | |
4138 | // Get the annotation string, go through casts. Sema requires this to be a |
4139 | // non-wide string literal, potentially cast, so the cast<> is safe.
4140 | const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); |
4141 | StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); |
4142 | return RValue::get( |
4143 | EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr)); |
4144 | } |
4145 | case Builtin::BI__builtin_addcb: |
4146 | case Builtin::BI__builtin_addcs: |
4147 | case Builtin::BI__builtin_addc: |
4148 | case Builtin::BI__builtin_addcl: |
4149 | case Builtin::BI__builtin_addcll: |
4150 | case Builtin::BI__builtin_subcb: |
4151 | case Builtin::BI__builtin_subcs: |
4152 | case Builtin::BI__builtin_subc: |
4153 | case Builtin::BI__builtin_subcl: |
4154 | case Builtin::BI__builtin_subcll: { |
4155 | |
4156 | // We translate all of these builtins from expressions of the form: |
4157 | // int x = ..., y = ..., carryin = ..., carryout, result; |
4158 | // result = __builtin_addc(x, y, carryin, &carryout); |
4159 | // |
4160 | // to LLVM IR of the form: |
4161 | // |
4162 | // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) |
4163 | // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 |
4164 | // %carry1 = extractvalue {i32, i1} %tmp1, 1 |
4165 | // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, |
4166 | // i32 %carryin) |
4167 | // %result = extractvalue {i32, i1} %tmp2, 0 |
4168 | // %carry2 = extractvalue {i32, i1} %tmp2, 1 |
4169 | // %tmp3 = or i1 %carry1, %carry2 |
4170 | // %tmp4 = zext i1 %tmp3 to i32 |
4171 | // store i32 %tmp4, i32* %carryout |
4172 | |
4173 | // Scalarize our inputs. |
4174 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
4175 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
4176 | llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); |
4177 | Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
4178 | |
4179 | // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. |
4180 | llvm::Intrinsic::ID IntrinsicId; |
4181 | switch (BuiltinID) { |
4182 | default: llvm_unreachable("Unknown multiprecision builtin id.");
4183 | case Builtin::BI__builtin_addcb: |
4184 | case Builtin::BI__builtin_addcs: |
4185 | case Builtin::BI__builtin_addc: |
4186 | case Builtin::BI__builtin_addcl: |
4187 | case Builtin::BI__builtin_addcll: |
4188 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
4189 | break; |
4190 | case Builtin::BI__builtin_subcb: |
4191 | case Builtin::BI__builtin_subcs: |
4192 | case Builtin::BI__builtin_subc: |
4193 | case Builtin::BI__builtin_subcl: |
4194 | case Builtin::BI__builtin_subcll: |
4195 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
4196 | break; |
4197 | } |
4198 | |
4199 | // Construct our resulting LLVM IR expression. |
4200 | llvm::Value *Carry1; |
4201 | llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, |
4202 | X, Y, Carry1); |
4203 | llvm::Value *Carry2; |
4204 | llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, |
4205 | Sum1, Carryin, Carry2); |
4206 | llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), |
4207 | X->getType()); |
4208 | Builder.CreateStore(CarryOut, CarryOutPtr); |
4209 | return RValue::get(Sum2); |
4210 | } |
4211 | |
4212 | case Builtin::BI__builtin_add_overflow: |
4213 | case Builtin::BI__builtin_sub_overflow: |
4214 | case Builtin::BI__builtin_mul_overflow: { |
4215 | const clang::Expr *LeftArg = E->getArg(0); |
4216 | const clang::Expr *RightArg = E->getArg(1); |
4217 | const clang::Expr *ResultArg = E->getArg(2); |
4218 | |
4219 | clang::QualType ResultQTy = |
4220 | ResultArg->getType()->castAs<PointerType>()->getPointeeType(); |
4221 | |
4222 | WidthAndSignedness LeftInfo = |
4223 | getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); |
4224 | WidthAndSignedness RightInfo = |
4225 | getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); |
4226 | WidthAndSignedness ResultInfo = |
4227 | getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy); |
4228 | |
4229 | // Handle mixed-sign multiplication as a special case, because adding |
4230 | // runtime or backend support for our generic irgen would be too expensive. |
4231 | if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo)) |
4232 | return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg, |
4233 | RightInfo, ResultArg, ResultQTy, |
4234 | ResultInfo); |
4235 | |
4236 | if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo, |
4237 | ResultInfo)) |
4238 | return EmitCheckedUnsignedMultiplySignedResult( |
4239 | *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy, |
4240 | ResultInfo); |
4241 | |
4242 | WidthAndSignedness EncompassingInfo = |
4243 | EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); |
4244 | |
4245 | llvm::Type *EncompassingLLVMTy = |
4246 | llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); |
4247 | |
4248 | llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy); |
4249 | |
4250 | llvm::Intrinsic::ID IntrinsicId; |
4251 | switch (BuiltinID) { |
4252 | default: |
4253 | llvm_unreachable("Unknown overflow builtin id.");
4254 | case Builtin::BI__builtin_add_overflow: |
4255 | IntrinsicId = EncompassingInfo.Signed |
4256 | ? llvm::Intrinsic::sadd_with_overflow |
4257 | : llvm::Intrinsic::uadd_with_overflow; |
4258 | break; |
4259 | case Builtin::BI__builtin_sub_overflow: |
4260 | IntrinsicId = EncompassingInfo.Signed |
4261 | ? llvm::Intrinsic::ssub_with_overflow |
4262 | : llvm::Intrinsic::usub_with_overflow; |
4263 | break; |
4264 | case Builtin::BI__builtin_mul_overflow: |
4265 | IntrinsicId = EncompassingInfo.Signed |
4266 | ? llvm::Intrinsic::smul_with_overflow |
4267 | : llvm::Intrinsic::umul_with_overflow; |
4268 | break; |
4269 | } |
4270 | |
4271 | llvm::Value *Left = EmitScalarExpr(LeftArg); |
4272 | llvm::Value *Right = EmitScalarExpr(RightArg); |
4273 | Address ResultPtr = EmitPointerWithAlignment(ResultArg); |
4274 | |
4275 | // Extend each operand to the encompassing type. |
4276 | Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed); |
4277 | Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed); |
4278 | |
4279 | // Perform the operation on the extended values. |
4280 | llvm::Value *Overflow, *Result; |
4281 | Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow); |
4282 | |
4283 | if (EncompassingInfo.Width > ResultInfo.Width) { |
4284 | // The encompassing type is wider than the result type, so we need to |
4285 | // truncate it. |
4286 | llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy); |
4287 | |
4288 | // To see if the truncation caused an overflow, we will extend |
4289 | // the result and then compare it to the original result. |
4290 | llvm::Value *ResultTruncExt = Builder.CreateIntCast( |
4291 | ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed); |
4292 | llvm::Value *TruncationOverflow = |
4293 | Builder.CreateICmpNE(Result, ResultTruncExt); |
4294 | |
4295 | Overflow = Builder.CreateOr(Overflow, TruncationOverflow); |
4296 | Result = ResultTrunc; |
4297 | } |
4298 | |
4299 | // Finally, store the result using the pointer. |
4300 | bool isVolatile = |
4301 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
4302 | Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); |
4303 | |
4304 | return RValue::get(Overflow); |
4305 | } |
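     | // A worked (illustrative) example of the encompassing-type path above:
     | //   int a; unsigned long b; short r;
     | //   bool ovf = __builtin_add_overflow(a, b, &r);
     | // selects a signed integer type wide enough for all three operand types,
     | // adds with sadd.with.overflow, truncates to short, and ORs the truncation
     | // check into the overflow flag.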
4306 | |
4307 | case Builtin::BI__builtin_uadd_overflow: |
4308 | case Builtin::BI__builtin_uaddl_overflow: |
4309 | case Builtin::BI__builtin_uaddll_overflow: |
4310 | case Builtin::BI__builtin_usub_overflow: |
4311 | case Builtin::BI__builtin_usubl_overflow: |
4312 | case Builtin::BI__builtin_usubll_overflow: |
4313 | case Builtin::BI__builtin_umul_overflow: |
4314 | case Builtin::BI__builtin_umull_overflow: |
4315 | case Builtin::BI__builtin_umulll_overflow: |
4316 | case Builtin::BI__builtin_sadd_overflow: |
4317 | case Builtin::BI__builtin_saddl_overflow: |
4318 | case Builtin::BI__builtin_saddll_overflow: |
4319 | case Builtin::BI__builtin_ssub_overflow: |
4320 | case Builtin::BI__builtin_ssubl_overflow: |
4321 | case Builtin::BI__builtin_ssubll_overflow: |
4322 | case Builtin::BI__builtin_smul_overflow: |
4323 | case Builtin::BI__builtin_smull_overflow: |
4324 | case Builtin::BI__builtin_smulll_overflow: { |
4325 | |
4326 | // We translate all of these builtins directly to the relevant llvm IR node. |
4327 | |
4328 | // Scalarize our inputs. |
4329 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
4330 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
4331 | Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
4332 | |
4333 | // Decide which of the overflow intrinsics we are lowering to: |
4334 | llvm::Intrinsic::ID IntrinsicId; |
4335 | switch (BuiltinID) { |
4336 | default: llvm_unreachable("Unknown overflow builtin id.");
4337 | case Builtin::BI__builtin_uadd_overflow: |
4338 | case Builtin::BI__builtin_uaddl_overflow: |
4339 | case Builtin::BI__builtin_uaddll_overflow: |
4340 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
4341 | break; |
4342 | case Builtin::BI__builtin_usub_overflow: |
4343 | case Builtin::BI__builtin_usubl_overflow: |
4344 | case Builtin::BI__builtin_usubll_overflow: |
4345 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
4346 | break; |
4347 | case Builtin::BI__builtin_umul_overflow: |
4348 | case Builtin::BI__builtin_umull_overflow: |
4349 | case Builtin::BI__builtin_umulll_overflow: |
4350 | IntrinsicId = llvm::Intrinsic::umul_with_overflow; |
4351 | break; |
4352 | case Builtin::BI__builtin_sadd_overflow: |
4353 | case Builtin::BI__builtin_saddl_overflow: |
4354 | case Builtin::BI__builtin_saddll_overflow: |
4355 | IntrinsicId = llvm::Intrinsic::sadd_with_overflow; |
4356 | break; |
4357 | case Builtin::BI__builtin_ssub_overflow: |
4358 | case Builtin::BI__builtin_ssubl_overflow: |
4359 | case Builtin::BI__builtin_ssubll_overflow: |
4360 | IntrinsicId = llvm::Intrinsic::ssub_with_overflow; |
4361 | break; |
4362 | case Builtin::BI__builtin_smul_overflow: |
4363 | case Builtin::BI__builtin_smull_overflow: |
4364 | case Builtin::BI__builtin_smulll_overflow: |
4365 | IntrinsicId = llvm::Intrinsic::smul_with_overflow; |
4366 | break; |
4367 | } |
4368 | |
4369 | |
4370 | llvm::Value *Carry; |
4371 | llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); |
4372 | Builder.CreateStore(Sum, SumOutPtr); |
4373 | |
4374 | return RValue::get(Carry); |
4375 | } |
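     | // Illustrative use of the fixed-signature forms handled above: the wrapped
     | // result is stored through the pointer and the carry/overflow bit is the
     | // return value.
     | //   unsigned sum;
     | //   if (__builtin_uadd_overflow(a, b, &sum)) { /* overflow occurred */ }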
4376 | case Builtin::BI__builtin_addressof: |
4377 | return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); |
4378 | case Builtin::BI__builtin_operator_new: |
4379 | return EmitBuiltinNewDeleteCall( |
4380 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); |
4381 | case Builtin::BI__builtin_operator_delete: |
4382 | return EmitBuiltinNewDeleteCall( |
4383 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true); |
4384 | |
4385 | case Builtin::BI__builtin_is_aligned: |
4386 | return EmitBuiltinIsAligned(E); |
4387 | case Builtin::BI__builtin_align_up: |
4388 | return EmitBuiltinAlignTo(E, true); |
4389 | case Builtin::BI__builtin_align_down: |
4390 | return EmitBuiltinAlignTo(E, false); |
4391 | |
4392 | case Builtin::BI__noop: |
4393 | // __noop always evaluates to an integer literal zero. |
4394 | return RValue::get(ConstantInt::get(IntTy, 0)); |
4395 | case Builtin::BI__builtin_call_with_static_chain: { |
4396 | const CallExpr *Call = cast<CallExpr>(E->getArg(0)); |
4397 | const Expr *Chain = E->getArg(1); |
4398 | return EmitCall(Call->getCallee()->getType(), |
4399 | EmitCallee(Call->getCallee()), Call, ReturnValue, |
4400 | EmitScalarExpr(Chain)); |
4401 | } |
4402 | case Builtin::BI_InterlockedExchange8: |
4403 | case Builtin::BI_InterlockedExchange16: |
4404 | case Builtin::BI_InterlockedExchange: |
4405 | case Builtin::BI_InterlockedExchangePointer: |
4406 | return RValue::get( |
4407 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E)); |
4408 | case Builtin::BI_InterlockedCompareExchangePointer: |
4409 | case Builtin::BI_InterlockedCompareExchangePointer_nf: { |
4410 | llvm::Type *RTy; |
4411 | llvm::IntegerType *IntType = |
4412 | IntegerType::get(getLLVMContext(), |
4413 | getContext().getTypeSize(E->getType())); |
4414 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
4415 | |
4416 | llvm::Value *Destination = |
4417 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); |
4418 | |
4419 | llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); |
4420 | RTy = Exchange->getType(); |
4421 | Exchange = Builder.CreatePtrToInt(Exchange, IntType); |
4422 | |
4423 | llvm::Value *Comparand = |
4424 | Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); |
4425 | |
4426 | auto Ordering = |
4427 | BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ? |
4428 | AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent; |
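     | // The _nf ("no fence") flavor only guarantees atomicity, hence monotonic
     | // ordering; the plain intrinsic keeps MSVC's full-barrier behavior,
     | // modeled as sequentially consistent.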
4429 | |
4430 | auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
4431 | Ordering, Ordering); |
4432 | Result->setVolatile(true); |
4433 | |
4434 | return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, |
4435 | 0), |
4436 | RTy)); |
4437 | } |
4438 | case Builtin::BI_InterlockedCompareExchange8: |
4439 | case Builtin::BI_InterlockedCompareExchange16: |
4440 | case Builtin::BI_InterlockedCompareExchange: |
4441 | case Builtin::BI_InterlockedCompareExchange64: |
4442 | return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E)); |
4443 | case Builtin::BI_InterlockedIncrement16: |
4444 | case Builtin::BI_InterlockedIncrement: |
4445 | return RValue::get( |
4446 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E)); |
4447 | case Builtin::BI_InterlockedDecrement16: |
4448 | case Builtin::BI_InterlockedDecrement: |
4449 | return RValue::get( |
4450 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E)); |
4451 | case Builtin::BI_InterlockedAnd8: |
4452 | case Builtin::BI_InterlockedAnd16: |
4453 | case Builtin::BI_InterlockedAnd: |
4454 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E)); |
4455 | case Builtin::BI_InterlockedExchangeAdd8: |
4456 | case Builtin::BI_InterlockedExchangeAdd16: |
4457 | case Builtin::BI_InterlockedExchangeAdd: |
4458 | return RValue::get( |
4459 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E)); |
4460 | case Builtin::BI_InterlockedExchangeSub8: |
4461 | case Builtin::BI_InterlockedExchangeSub16: |
4462 | case Builtin::BI_InterlockedExchangeSub: |
4463 | return RValue::get( |
4464 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E)); |
4465 | case Builtin::BI_InterlockedOr8: |
4466 | case Builtin::BI_InterlockedOr16: |
4467 | case Builtin::BI_InterlockedOr: |
4468 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E)); |
4469 | case Builtin::BI_InterlockedXor8: |
4470 | case Builtin::BI_InterlockedXor16: |
4471 | case Builtin::BI_InterlockedXor: |
4472 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E)); |
4473 | |
4474 | case Builtin::BI_bittest64: |
4475 | case Builtin::BI_bittest: |
4476 | case Builtin::BI_bittestandcomplement64: |
4477 | case Builtin::BI_bittestandcomplement: |
4478 | case Builtin::BI_bittestandreset64: |
4479 | case Builtin::BI_bittestandreset: |
4480 | case Builtin::BI_bittestandset64: |
4481 | case Builtin::BI_bittestandset: |
4482 | case Builtin::BI_interlockedbittestandreset: |
4483 | case Builtin::BI_interlockedbittestandreset64: |
4484 | case Builtin::BI_interlockedbittestandset64: |
4485 | case Builtin::BI_interlockedbittestandset: |
4486 | case Builtin::BI_interlockedbittestandset_acq: |
4487 | case Builtin::BI_interlockedbittestandset_rel: |
4488 | case Builtin::BI_interlockedbittestandset_nf: |
4489 | case Builtin::BI_interlockedbittestandreset_acq: |
4490 | case Builtin::BI_interlockedbittestandreset_rel: |
4491 | case Builtin::BI_interlockedbittestandreset_nf: |
4492 | return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E)); |
4493 | |
4494 | // These builtins exist to emit regular volatile loads and stores not |
4495 | // affected by the -fms-volatile setting. |
4496 | case Builtin::BI__iso_volatile_load8: |
4497 | case Builtin::BI__iso_volatile_load16: |
4498 | case Builtin::BI__iso_volatile_load32: |
4499 | case Builtin::BI__iso_volatile_load64: |
4500 | return RValue::get(EmitISOVolatileLoad(*this, E)); |
4501 | case Builtin::BI__iso_volatile_store8: |
4502 | case Builtin::BI__iso_volatile_store16: |
4503 | case Builtin::BI__iso_volatile_store32: |
4504 | case Builtin::BI__iso_volatile_store64: |
4505 | return RValue::get(EmitISOVolatileStore(*this, E)); |
4506 | |
4507 | case Builtin::BI__exception_code: |
4508 | case Builtin::BI_exception_code: |
4509 | return RValue::get(EmitSEHExceptionCode()); |
4510 | case Builtin::BI__exception_info: |
4511 | case Builtin::BI_exception_info: |
4512 | return RValue::get(EmitSEHExceptionInfo()); |
4513 | case Builtin::BI__abnormal_termination: |
4514 | case Builtin::BI_abnormal_termination: |
4515 | return RValue::get(EmitSEHAbnormalTermination()); |
4516 | case Builtin::BI_setjmpex: |
4517 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
4518 | E->getArg(0)->getType()->isPointerType()) |
4519 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
4520 | break; |
4521 | case Builtin::BI_setjmp: |
4522 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
4523 | E->getArg(0)->getType()->isPointerType()) { |
4524 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) |
4525 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E); |
4526 | else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64) |
4527 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
4528 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E); |
4529 | } |
4530 | break; |
4531 | |
4532 | case Builtin::BI__GetExceptionInfo: { |
4533 | if (llvm::GlobalVariable *GV = |
4534 | CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) |
4535 | return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); |
4536 | break; |
4537 | } |
4538 | |
4539 | case Builtin::BI__fastfail: |
4540 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E)); |
4541 | |
4542 | case Builtin::BI__builtin_coro_size: { |
4543 | auto &Context = getContext();
4544 | auto SizeTy = Context.getSizeType(); |
4545 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
4546 | Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T); |
4547 | return RValue::get(Builder.CreateCall(F)); |
4548 | } |
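// For illustration: on an LP64 target, where size_t is 64 bits wide,
//   size_t n = __builtin_coro_size();
// becomes a call to the `llvm.coro.size.i64` intrinsic.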
4549 | |
4550 | case Builtin::BI__builtin_coro_id: |
4551 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_id); |
4552 | case Builtin::BI__builtin_coro_promise: |
4553 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise); |
4554 | case Builtin::BI__builtin_coro_resume: |
4555 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume); |
4556 | case Builtin::BI__builtin_coro_frame: |
4557 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame); |
4558 | case Builtin::BI__builtin_coro_noop: |
4559 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop); |
4560 | case Builtin::BI__builtin_coro_free: |
4561 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_free); |
4562 | case Builtin::BI__builtin_coro_destroy: |
4563 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy); |
4564 | case Builtin::BI__builtin_coro_done: |
4565 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_done); |
4566 | case Builtin::BI__builtin_coro_alloc: |
4567 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc); |
4568 | case Builtin::BI__builtin_coro_begin: |
4569 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin); |
4570 | case Builtin::BI__builtin_coro_end: |
4571 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_end); |
4572 | case Builtin::BI__builtin_coro_suspend: |
4573 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend); |
4574 | case Builtin::BI__builtin_coro_param: |
4575 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_param); |
4576 | |
4577 | // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions |
4578 | case Builtin::BIread_pipe: |
4579 | case Builtin::BIwrite_pipe: { |
4580 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4581 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4582 | CGOpenCLRuntime OpenCLRT(CGM); |
4583 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4584 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4585 | |
4586 | // Type of the generic packet parameter. |
4587 | unsigned GenericAS = |
4588 | getContext().getTargetAddressSpace(LangAS::opencl_generic); |
4589 | llvm::Type *I8PTy = llvm::PointerType::get( |
4590 | llvm::Type::getInt8Ty(getLLVMContext()), GenericAS); |
4591 | |
4592 | // Testing which overloaded version we should generate the call for. |
4593 | if (2U == E->getNumArgs()) { |
4594 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" |
4595 | : "__write_pipe_2"; |
4596 | // Creating a generic function type to be able to call with any builtin or |
4597 | // user-defined type. |
4598 | llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; |
4599 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4600 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4601 | Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy); |
4602 | return RValue::get( |
4603 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4604 | {Arg0, BCast, PacketSize, PacketAlign})); |
4605 | } else { |
4606 | assert(4 == E->getNumArgs() && |
4607 | "Illegal number of parameters to pipe function"); |
4608 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" |
4609 | : "__write_pipe_4"; |
4610 | |
4611 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, |
4612 | Int32Ty, Int32Ty}; |
4613 | Value *Arg2 = EmitScalarExpr(E->getArg(2)), |
4614 | *Arg3 = EmitScalarExpr(E->getArg(3)); |
4615 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4616 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4617 | Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy); |
4618 | // We know the third argument is an integer type, but we may need to cast |
4619 | // it to i32. |
4620 | if (Arg2->getType() != Int32Ty) |
4621 | Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty); |
4622 | return RValue::get( |
4623 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4624 | {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign})); |
4625 | } |
4626 | } |
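// For illustration, in a hypothetical OpenCL 2.0 kernel:
//   read_pipe(p, &v);            // -> __read_pipe_2(p, &v, size, align)
//   read_pipe(p, rid, idx, &v);  // -> __read_pipe_4(p, rid, (i32)idx, &v, size, align)
// `size` and `align` are the packet element size and alignment in bytes,
// and the data pointer is first cast to the generic address space.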
4627 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write |
4628 | // functions |
4629 | case Builtin::BIreserve_read_pipe: |
4630 | case Builtin::BIreserve_write_pipe: |
4631 | case Builtin::BIwork_group_reserve_read_pipe: |
4632 | case Builtin::BIwork_group_reserve_write_pipe: |
4633 | case Builtin::BIsub_group_reserve_read_pipe: |
4634 | case Builtin::BIsub_group_reserve_write_pipe: { |
4635 | // Composing the mangled name for the function. |
4636 | const char *Name; |
4637 | if (BuiltinID == Builtin::BIreserve_read_pipe) |
4638 | Name = "__reserve_read_pipe"; |
4639 | else if (BuiltinID == Builtin::BIreserve_write_pipe) |
4640 | Name = "__reserve_write_pipe"; |
4641 | else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) |
4642 | Name = "__work_group_reserve_read_pipe"; |
4643 | else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) |
4644 | Name = "__work_group_reserve_write_pipe"; |
4645 | else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) |
4646 | Name = "__sub_group_reserve_read_pipe"; |
4647 | else |
4648 | Name = "__sub_group_reserve_write_pipe"; |
4649 | |
4650 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4651 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4652 | llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy); |
4653 | CGOpenCLRuntime OpenCLRT(CGM); |
4654 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4655 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4656 | |
4657 | // Building the generic function prototype. |
4658 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; |
4659 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4660 | ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4661 | // We know the second argument is an integer type, but we may need to cast |
4662 | // it to i32. |
4663 | if (Arg1->getType() != Int32Ty) |
4664 | Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty); |
4665 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4666 | {Arg0, Arg1, PacketSize, PacketAlign})); |
4667 | } |
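// For illustration, assuming a read pipe:
//   reserve_id_t r = reserve_read_pipe(p, n);
// becomes
//   __reserve_read_pipe(p, (i32)n, size, align)
// with `n` zero-extended or truncated to i32 as needed.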
4668 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write |
4669 | // functions |
4670 | case Builtin::BIcommit_read_pipe: |
4671 | case Builtin::BIcommit_write_pipe: |
4672 | case Builtin::BIwork_group_commit_read_pipe: |
4673 | case Builtin::BIwork_group_commit_write_pipe: |
4674 | case Builtin::BIsub_group_commit_read_pipe: |
4675 | case Builtin::BIsub_group_commit_write_pipe: { |
4676 | const char *Name; |
4677 | if (BuiltinID == Builtin::BIcommit_read_pipe) |
4678 | Name = "__commit_read_pipe"; |
4679 | else if (BuiltinID == Builtin::BIcommit_write_pipe) |
4680 | Name = "__commit_write_pipe"; |
4681 | else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) |
4682 | Name = "__work_group_commit_read_pipe"; |
4683 | else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) |
4684 | Name = "__work_group_commit_write_pipe"; |
4685 | else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) |
4686 | Name = "__sub_group_commit_read_pipe"; |
4687 | else |
4688 | Name = "__sub_group_commit_write_pipe"; |
4689 | |
4690 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4691 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4692 | CGOpenCLRuntime OpenCLRT(CGM); |
4693 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4694 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4695 | |
4696 | // Building the generic function prototype. |
4697 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; |
4698 | llvm::FunctionType *FTy = |
4699 | llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), |
4700 | llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4701 | |
4702 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4703 | {Arg0, Arg1, PacketSize, PacketAlign})); |
4704 | } |
4705 | // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions |
4706 | case Builtin::BIget_pipe_num_packets: |
4707 | case Builtin::BIget_pipe_max_packets: { |
4708 | const char *BaseName; |
4709 | const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>(); |
4710 | if (BuiltinID == Builtin::BIget_pipe_num_packets) |
4711 | BaseName = "__get_pipe_num_packets"; |
4712 | else |
4713 | BaseName = "__get_pipe_max_packets"; |
4714 | std::string Name = std::string(BaseName) + |
4715 | std::string(PipeTy->isReadOnly() ? "_ro" : "_wo"); |
4716 | |
4717 | // Building the generic function prototype. |
4718 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
4719 | CGOpenCLRuntime OpenCLRT(CGM); |
4720 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4721 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4722 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; |
4723 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4724 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4725 | |
4726 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4727 | {Arg0, PacketSize, PacketAlign})); |
4728 | } |
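// For illustration of the _ro/_wo mangling above: for a read-only pipe,
//   get_pipe_num_packets(p) -> __get_pipe_num_packets_ro(p, size, align)
// while a write-only pipe selects the `_wo` variant instead.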
4729 | |
4730 | // OpenCL v2.0 s6.13.9 - Address space qualifier functions. |
4731 | case Builtin::BIto_global: |
4732 | case Builtin::BIto_local: |
4733 | case Builtin::BIto_private: { |
4734 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
4735 | auto NewArgT = llvm::PointerType::get(Int8Ty, |
4736 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4737 | auto NewRetT = llvm::PointerType::get(Int8Ty, |
4738 | CGM.getContext().getTargetAddressSpace( |
4739 | E->getType()->getPointeeType().getAddressSpace())); |
4740 | auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); |
4741 | llvm::Value *NewArg; |
4742 | if (Arg0->getType()->getPointerAddressSpace() != |
4743 | NewArgT->getPointerAddressSpace()) |
4744 | NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT); |
4745 | else |
4746 | NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT); |
4747 | auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); |
4748 | auto NewCall = |
4749 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); |
4750 | return RValue::get(Builder.CreateBitOrPointerCast(NewCall, |
4751 | ConvertType(E->getType()))); |
4752 | } |
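// For illustration: a hypothetical `to_global(p)` call passes `p` as a
// generic-address-space i8* (inserting an address space cast only when
// the address spaces differ), calls `__to_global`, and casts the result
// back to the source-level return type.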
4753 | |
4754 | // OpenCL v2.0, s6.13.17 - Enqueue kernel function. |
4755 | // It contains four different overload formats specified in Table 6.13.17.1. |
4756 | case Builtin::BIenqueue_kernel: { |
4757 | StringRef Name; // Generated function call name |
4758 | unsigned NumArgs = E->getNumArgs(); |
4759 | |
4760 | llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy); |
4761 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4762 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4763 | |
4764 | llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); |
4765 | llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); |
4766 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); |
4767 | llvm::Value *Range = NDRangeL.getAddress(*this).getPointer(); |
4768 | llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType(); |
4769 | |
4770 | if (NumArgs == 4) { |
4771 | // The most basic form of the call with parameters: |
4772 | // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void) |
4773 | Name = "__enqueue_kernel_basic"; |
4774 | llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy, |
4775 | GenericVoidPtrTy}; |
4776 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4777 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4778 | |
4779 | auto Info = |
4780 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
4781 | llvm::Value *Kernel = |
4782 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4783 | llvm::Value *Block = |
4784 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4785 | |
4786 | AttrBuilder B; |
4787 | B.addByValAttr(NDRangeL.getAddress(*this).getElementType()); |
4788 | llvm::AttributeList ByValAttrSet = |
4789 | llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B); |
4790 | |
4791 | auto RTCall = |
4792 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet), |
4793 | {Queue, Flags, Range, Kernel, Block}); |
4794 | RTCall->setAttributes(ByValAttrSet); |
4795 | return RValue::get(RTCall); |
4796 | } |
4797 | assert(NumArgs >= 5 && "Invalid enqueue_kernel signature"); |
4798 | |
4799 | // Create a temporary array to hold the sizes of local pointer arguments |
4800 | // for the block. \p First is the position of the first size argument. |
4801 | auto CreateArrayForSizeVar = [=](unsigned First) |
4802 | -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> { |
4803 | llvm::APInt ArraySize(32, NumArgs - First); |
4804 | QualType SizeArrayTy = getContext().getConstantArrayType( |
4805 | getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal, |
4806 | /*IndexTypeQuals=*/0); |
4807 | auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes"); |
4808 | llvm::Value *TmpPtr = Tmp.getPointer(); |
4809 | llvm::Value *TmpSize = EmitLifetimeStart( |
4810 | CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr); |
4811 | llvm::Value *ElemPtr; |
4812 | // Each of the following arguments specifies the size of the corresponding |
4813 | // argument passed to the enqueued block. |
4814 | auto *Zero = llvm::ConstantInt::get(IntTy, 0); |
4815 | for (unsigned I = First; I < NumArgs; ++I) { |
4816 | auto *Index = llvm::ConstantInt::get(IntTy, I - First); |
4817 | auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr, |
4818 | {Zero, Index}); |
4819 | if (I == First) |
4820 | ElemPtr = GEP; |
4821 | auto *V = |
4822 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); |
4823 | Builder.CreateAlignedStore( |
4824 | V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy)); |
4825 | } |
4826 | return std::tie(ElemPtr, TmpSize, TmpPtr); |
4827 | }; |
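// For example, with three trailing size arguments the lambda above
// materializes a local `block_sizes` array of three size_t elements,
// stores each (possibly truncated or extended) size into it, and returns
// a pointer to its first element together with the lifetime marker for
// the temporary.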
4828 | |
4829 | // Could have events and/or varargs. |
4830 | if (E->getArg(3)->getType()->isBlockPointerType()) { |
4831 | // No events passed, but has variadic arguments. |
4832 | Name = "__enqueue_kernel_varargs"; |
4833 | auto Info = |
4834 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
4835 | llvm::Value *Kernel = |
4836 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4837 | auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4838 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
4839 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4); |
4840 | |
4841 | // Create a vector of the arguments, as well as a constant value to |
4842 | // express to the runtime the number of variadic arguments. |
4843 | llvm::Value *const Args[] = {Queue, Flags, |
4844 | Range, Kernel, |
4845 | Block, ConstantInt::get(IntTy, NumArgs - 4), |
4846 | ElemPtr}; |
4847 | llvm::Type *const ArgTys[] = { |
4848 | QueueTy, IntTy, RangeTy, GenericVoidPtrTy, |
4849 | GenericVoidPtrTy, IntTy, ElemPtr->getType()}; |
4850 | |
4851 | llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false); |
4852 | auto Call = RValue::get( |
4853 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args)); |
4854 | if (TmpSize) |
4855 | EmitLifetimeEnd(TmpSize, TmpPtr); |
4856 | return Call; |
4857 | } |
4858 | // Any call that reaches this point has event arguments. |
4859 | if (NumArgs >= 7) { |
4860 | llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy); |
4861 | llvm::PointerType *EventPtrTy = EventTy->getPointerTo( |
4862 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4863 | |
4864 | llvm::Value *NumEvents = |
4865 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty); |
4866 | |
4867 | // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth |
4868 | // arguments to be null pointer constants (including a `0` literal), we |
4869 | // detect that case and emit a null pointer directly. |
4870 | llvm::Value *EventWaitList = nullptr; |
4871 | if (E->getArg(4)->isNullPointerConstant( |
4872 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
4873 | EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy); |
4874 | } else { |
4875 | EventWaitList = E->getArg(4)->getType()->isArrayType() |
4876 | ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() |
4877 | : EmitScalarExpr(E->getArg(4)); |
4878 | // Convert to generic address space. |
4879 | EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy); |
4880 | } |
4881 | llvm::Value *EventRet = nullptr; |
4882 | if (E->getArg(5)->isNullPointerConstant( |
4883 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
4884 | EventRet = llvm::ConstantPointerNull::get(EventPtrTy); |
4885 | } else { |
4886 | EventRet = |
4887 | Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy); |
4888 | } |
4889 | |
4890 | auto Info = |
4891 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6)); |
4892 | llvm::Value *Kernel = |
4893 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4894 | llvm::Value *Block = |
4895 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4896 | |
4897 | std::vector<llvm::Type *> ArgTys = { |
4898 | QueueTy, Int32Ty, RangeTy, Int32Ty, |
4899 | EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy}; |
4900 | |
4901 | std::vector<llvm::Value *> Args = {Queue, Flags, Range, |
4902 | NumEvents, EventWaitList, EventRet, |
4903 | Kernel, Block}; |
4904 | |
4905 | if (NumArgs == 7) { |
4906 | // Has events but no variadics. |
4907 | Name = "__enqueue_kernel_basic_events"; |
4908 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4909 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4910 | return RValue::get( |
4911 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4912 | llvm::ArrayRef<llvm::Value *>(Args))); |
4913 | } |
4914 | // Has event info and variadics. |
4915 | // Pass the number of variadics to the runtime function too. |
4916 | Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); |
4917 | ArgTys.push_back(Int32Ty); |
4918 | Name = "__enqueue_kernel_events_varargs"; |
4919 | |
4920 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
4921 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7); |
4922 | Args.push_back(ElemPtr); |
4923 | ArgTys.push_back(ElemPtr->getType()); |
4924 | |
4925 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4926 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4927 | auto Call = |
4928 | RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4929 | llvm::ArrayRef<llvm::Value *>(Args))); |
4930 | if (TmpSize) |
4931 | EmitLifetimeEnd(TmpSize, TmpPtr); |
4932 | return Call; |
4933 | } |
4934 | LLVM_FALLTHROUGH; |
4935 | } |
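// Summary of the overload lowering above (sketch):
//   enqueue_kernel(q, f, nd, blk)                      -> __enqueue_kernel_basic
//   enqueue_kernel(q, f, nd, blk, sizes...)            -> __enqueue_kernel_varargs
//   enqueue_kernel(q, f, nd, n, wait, ret, blk)        -> __enqueue_kernel_basic_events
//   enqueue_kernel(q, f, nd, n, wait, ret, blk, sz...) -> __enqueue_kernel_events_varargs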
4936 | // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block |
4937 | // parameter. |
4938 | case Builtin::BIget_kernel_work_group_size: { |
4939 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4940 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4941 | auto Info = |
4942 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
4943 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4944 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4945 | return RValue::get(EmitRuntimeCall( |
4946 | CGM.CreateRuntimeFunction( |
4947 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
4948 | false), |
4949 | "__get_kernel_work_group_size_impl"), |
4950 | {Kernel, Arg})); |
4951 | } |
4952 | case Builtin::BIget_kernel_preferred_work_group_size_multiple: { |
4953 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4954 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4955 | auto Info = |
4956 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
4957 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4958 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4959 | return RValue::get(EmitRuntimeCall( |
4960 | CGM.CreateRuntimeFunction( |
4961 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
4962 | false), |
4963 | "__get_kernel_preferred_work_group_size_multiple_impl"), |
4964 | {Kernel, Arg})); |
4965 | } |
4966 | case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: |
4967 | case Builtin::BIget_kernel_sub_group_count_for_ndrange: { |
4968 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4969 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4970 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); |
4971 | llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer(); |
4972 | auto Info = |
4973 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); |
4974 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4975 | Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4976 | const char *Name = |
4977 | BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange |
4978 | ? "__get_kernel_max_sub_group_size_for_ndrange_impl" |
4979 | : "__get_kernel_sub_group_count_for_ndrange_impl"; |
4980 | return RValue::get(EmitRuntimeCall( |
4981 | CGM.CreateRuntimeFunction( |
4982 | llvm::FunctionType::get( |
4983 | IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, |
4984 | false), |
4985 | Name), |
4986 | {NDRange, Kernel, Block})); |
4987 | } |
4988 | |
4989 | case Builtin::BI__builtin_store_half: |
4990 | case Builtin::BI__builtin_store_halff: { |
4991 | Value *Val = EmitScalarExpr(E->getArg(0)); |
4992 | Address Address = EmitPointerWithAlignment(E->getArg(1)); |
4993 | Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy()); |
4994 | return RValue::get(Builder.CreateStore(HalfVal, Address)); |
4995 | } |
4996 | case Builtin::BI__builtin_load_half: { |
4997 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
4998 | Value *HalfVal = Builder.CreateLoad(Address); |
4999 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy())); |
5000 | } |
5001 | case Builtin::BI__builtin_load_halff: { |
5002 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
5003 | Value *HalfVal = Builder.CreateLoad(Address); |
5004 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy())); |
5005 | } |
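// For illustration of the three half-precision helpers above:
//   __builtin_store_halff(f, p);          // fptrunc float -> half, store
//   double d = __builtin_load_half(p);    // load half, fpext -> double
//   float  g = __builtin_load_halff(p);   // load half, fpext -> float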
5006 | case Builtin::BIprintf: |
5007 | if (getTarget().getTriple().isNVPTX()) |
5008 | return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue); |
5009 | if (getTarget().getTriple().getArch() == Triple::amdgcn && |
5010 | getLangOpts().HIP) |
5011 | return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue); |
5012 | break; |
5013 | case Builtin::BI__builtin_canonicalize: |
5014 | case Builtin::BI__builtin_canonicalizef: |
5015 | case Builtin::BI__builtin_canonicalizef16: |
5016 | case Builtin::BI__builtin_canonicalizel: |
5017 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize)); |
5018 | |
5019 | case Builtin::BI__builtin_thread_pointer: { |
5020 | if (!getContext().getTargetInfo().isTLSSupported()) |
5021 | CGM.ErrorUnsupported(E, "__builtin_thread_pointer"); |
5022 | // Fall through - it's already mapped to the intrinsic by GCCBuiltin. |
5023 | break; |
5024 | } |
5025 | case Builtin::BI__builtin_os_log_format: |
5026 | return emitBuiltinOSLogFormat(*E); |
5027 | |
5028 | case Builtin::BI__xray_customevent: { |
5029 | if (!ShouldXRayInstrumentFunction()) |
5030 | return RValue::getIgnored(); |
5031 | |
5032 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
5033 | XRayInstrKind::Custom)) |
5034 | return RValue::getIgnored(); |
5035 | |
5036 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
5037 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) |
5038 | return RValue::getIgnored(); |
5039 | |
5040 | Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent); |
5041 | auto FTy = F->getFunctionType(); |
5042 | auto Arg0 = E->getArg(0); |
5043 | auto Arg0Val = EmitScalarExpr(Arg0); |
5044 | auto Arg0Ty = Arg0->getType(); |
5045 | auto PTy0 = FTy->getParamType(0); |
5046 | if (PTy0 != Arg0Val->getType()) { |
5047 | if (Arg0Ty->isArrayType()) |
5048 | Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer(); |
5049 | else |
5050 | Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0); |
5051 | } |
5052 | auto Arg1 = EmitScalarExpr(E->getArg(1)); |
5053 | auto PTy1 = FTy->getParamType(1); |
5054 | if (PTy1 != Arg1->getType()) |
5055 | Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1); |
5056 | return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1})); |
5057 | } |
5058 | |
5059 | case Builtin::BI__xray_typedevent: { |
5060 | // TODO: There should be a way to always emit events even if the current |
5061 | // function is not instrumented. Losing events in a stream can cripple |
5062 | // a trace. |
5063 | if (!ShouldXRayInstrumentFunction()) |
5064 | return RValue::getIgnored(); |
5065 | |
5066 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
5067 | XRayInstrKind::Typed)) |
5068 | return RValue::getIgnored(); |
5069 | |
5070 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
5071 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) |
5072 | return RValue::getIgnored(); |
5073 | |
5074 | Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent); |
5075 | auto FTy = F->getFunctionType(); |
5076 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
5077 | auto PTy0 = FTy->getParamType(0); |
5078 | if (PTy0 != Arg0->getType()) |
5079 | Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0); |
5080 | auto Arg1 = E->getArg(1); |
5081 | auto Arg1Val = EmitScalarExpr(Arg1); |
5082 | auto Arg1Ty = Arg1->getType(); |
5083 | auto PTy1 = FTy->getParamType(1); |
5084 | if (PTy1 != Arg1Val->getType()) { |
5085 | if (Arg1Ty->isArrayType()) |
5086 | Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer(); |
5087 | else |
5088 | Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1); |
5089 | } |
5090 | auto Arg2 = EmitScalarExpr(E->getArg(2)); |
5091 | auto PTy2 = FTy->getParamType(2); |
5092 | if (PTy2 != Arg2->getType()) |
5093 | Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2); |
5094 | return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2})); |
5095 | } |
5096 | |
5097 | case Builtin::BI__builtin_ms_va_start: |
5098 | case Builtin::BI__builtin_ms_va_end: |
5099 | return RValue::get( |
5100 | EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), |
5101 | BuiltinID == Builtin::BI__builtin_ms_va_start)); |
5102 | |
5103 | case Builtin::BI__builtin_ms_va_copy: { |
5104 | // Lower this manually. We can't reliably determine whether or not any |
5105 | // given va_copy() is for a Win64 va_list from the calling convention |
5106 | // alone, because it's legal to do this from a System V ABI function. |
5107 | // With opaque pointer types, we won't have enough information in LLVM |
5108 | // IR to determine this from the argument types, either. Best to do it |
5109 | // now, while we have enough information. |
5110 | Address DestAddr = EmitMSVAListRef(E->getArg(0)); |
5111 | Address SrcAddr = EmitMSVAListRef(E->getArg(1)); |
5112 | |
5113 | llvm::Type *BPP = Int8PtrPtrTy; |
5114 | |
5115 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"), |
5116 | DestAddr.getAlignment()); |
5117 | SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"), |
5118 | SrcAddr.getAlignment()); |
5119 | |
5120 | Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val"); |
5121 | return RValue::get(Builder.CreateStore(ArgPtr, DestAddr)); |
5122 | } |
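// On Win64 a va_list is a single character pointer, so the copy above is
// just a pointer-sized load from the source list followed by a store to
// the destination; both lists then point at the same next argument.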
5123 | |
5124 | case Builtin::BI__builtin_get_device_side_mangled_name: { |
5125 | auto Name = CGM.getCUDARuntime().getDeviceSideName( |
5126 | cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl()); |
5127 | auto Str = CGM.GetAddrOfConstantCString(Name, ""); |
5128 | llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0), |
5129 | llvm::ConstantInt::get(SizeTy, 0)}; |
5130 | auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(), |
5131 | Str.getPointer(), Zeros); |
5132 | return RValue::get(Ptr); |
5133 | } |
5134 | } |
5135 | |
5136 | // If this is an alias for a lib function (e.g. __builtin_sin), emit |
5137 | // the call using the normal call path, but using the unmangled |
5138 | // version of the function name. |
5139 | if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) |
5140 | return emitLibraryCall(*this, FD, E, |
5141 | CGM.getBuiltinLibFunction(FD, BuiltinID)); |
5142 | |
5143 | // If this is a predefined lib function (e.g. malloc), emit the call |
5144 | // using exactly the normal call path. |
5145 | if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) |
5146 | return emitLibraryCall(*this, FD, E, |
5147 | cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); |
5148 | |
5149 | // Check that a call to a target specific builtin has the correct target |
5150 | // features. |
5151 | // This is down here to avoid checking non-target-specific builtins; |
5152 | // however, if generic builtins start to require generic target features, |
5153 | // then we can move this up to the beginning of the function. |
5154 | checkTargetFeatures(E, FD); |
5155 | |
5156 | if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) |
5157 | LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth); |
5158 | |
5159 | // See if we have a target specific intrinsic. |
5160 | const char *Name = getContext().BuiltinInfo.getName(BuiltinID); |
5161 | Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; |
5162 | StringRef Prefix = |
5163 | llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); |
5164 | if (!Prefix.empty()) { |
5165 | IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name); |
5166 | // NOTE: we don't need to perform a compatibility flag check here, since |
5167 | // the MS builtins are declared in Builtins*.def via LANGBUILTIN with |
5168 | // ALL_MS_LANGUAGES and have already been filtered out earlier. |
5169 | if (IntrinsicID == Intrinsic::not_intrinsic) |
5170 | IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); |
5171 | } |
5172 | |
5173 | if (IntrinsicID != Intrinsic::not_intrinsic) { |
5174 | SmallVector<Value*, 16> Args; |
5175 | |
5176 | // Find out if any arguments are required to be integer constant |
5177 | // expressions. |
5178 | unsigned ICEArguments = 0; |
5179 | ASTContext::GetBuiltinTypeError Error; |
5180 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
5181 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
5182 | |
5183 | Function *F = CGM.getIntrinsic(IntrinsicID); |
5184 | llvm::FunctionType *FTy = F->getFunctionType(); |
5185 | |
5186 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { |
5187 | Value *ArgValue; |
5188 | // If this is a normal argument, just emit it as a scalar. |
5189 | if ((ICEArguments & (1 << i)) == 0) { |
5190 | ArgValue = EmitScalarExpr(E->getArg(i)); |
5191 | } else { |
5192 | // If this is required to be a constant, constant fold it so that we |
5193 | // know that the generated intrinsic gets a ConstantInt. |
5194 | ArgValue = llvm::ConstantInt::get( |
5195 | getLLVMContext(), |
5196 | *E->getArg(i)->getIntegerConstantExpr(getContext())); |
5197 | } |
5198 | |
5199 | // If the intrinsic arg type is different from the builtin arg type |
5200 | // we need to do a bit cast. |
5201 | llvm::Type *PTy = FTy->getParamType(i); |
5202 | if (PTy != ArgValue->getType()) { |
5203 | // XXX - vector of pointers? |
5204 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) { |
5205 | if (PtrTy->getAddressSpace() != |
5206 | ArgValue->getType()->getPointerAddressSpace()) { |
5207 | ArgValue = Builder.CreateAddrSpaceCast( |
5208 | ArgValue, |
5209 | ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace())); |
5210 | } |
5211 | } |
5212 | |
5213 | assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && |
5214 | "Must be able to losslessly bit cast to param"); |
5215 | ArgValue = Builder.CreateBitCast(ArgValue, PTy); |
5216 | } |
5217 | |
5218 | Args.push_back(ArgValue); |
5219 | } |
5220 | |
5221 | Value *V = Builder.CreateCall(F, Args); |
5222 | QualType BuiltinRetType = E->getType(); |
5223 | |
5224 | llvm::Type *RetTy = VoidTy; |
5225 | if (!BuiltinRetType->isVoidType()) |
5226 | RetTy = ConvertType(BuiltinRetType); |
5227 | |
5228 | if (RetTy != V->getType()) { |
5229 | // XXX - vector of pointers? |
5230 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) { |
5231 | if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { |
5232 | V = Builder.CreateAddrSpaceCast( |
5233 | V, V->getType()->getPointerTo(PtrTy->getAddressSpace())); |
5234 | } |
5235 | } |
5236 | |
5237 | assert(V->getType()->canLosslesslyBitCastTo(RetTy) && |
5238 | "Must be able to losslessly bit cast result type"); |
5239 | V = Builder.CreateBitCast(V, RetTy); |
5240 | } |
5241 | |
5242 | return RValue::get(V); |
5243 | } |
5244 | |
5245 | // Some target-specific builtins can have aggregate return values, e.g. |
5246 | // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force |
5247 | // ReturnValue to be non-null, so that the target-specific emission code can |
5248 | // always just emit into it. |
5249 | TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); |
5250 | if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { |
5251 | Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp"); |
5252 | ReturnValue = ReturnValueSlot(DestPtr, false); |
5253 | } |
5254 | |
5255 | // Now see if we can emit a target-specific builtin. |
5256 | if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { |
5257 | switch (EvalKind) { |
5258 | case TEK_Scalar: |
5259 | return RValue::get(V); |
5260 | case TEK_Aggregate: |
5261 | return RValue::getAggregate(ReturnValue.getValue(), |
5262 | ReturnValue.isVolatile()); |
5263 | case TEK_Complex: |
5264 | llvm_unreachable("No current target builtin returns complex"); |
5265 | } |
5266 | llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr"); |
5267 | } |
5268 | |
5269 | ErrorUnsupported(E, "builtin function"); |
5270 | |
5271 | // Unknown builtin, for now just dump it out and return undef. |
5272 | return GetUndefRValue(E->getType()); |
5273 | } |
5274 | |
5275 | static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, |
5276 | unsigned BuiltinID, const CallExpr *E, |
5277 | ReturnValueSlot ReturnValue, |
5278 | llvm::Triple::ArchType Arch) { |
5279 | switch (Arch) { |
5280 | case llvm::Triple::arm: |
5281 | case llvm::Triple::armeb: |
5282 | case llvm::Triple::thumb: |
5283 | case llvm::Triple::thumbeb: |
5284 | return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch); |
5285 | case llvm::Triple::aarch64: |
5286 | case llvm::Triple::aarch64_32: |
5287 | case llvm::Triple::aarch64_be: |
5288 | return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); |
5289 | case llvm::Triple::bpfeb: |
5290 | case llvm::Triple::bpfel: |
5291 | return CGF->EmitBPFBuiltinExpr(BuiltinID, E); |
5292 | case llvm::Triple::x86: |
5293 | case llvm::Triple::x86_64: |
5294 | return CGF->EmitX86BuiltinExpr(BuiltinID, E); |
5295 | case llvm::Triple::ppc: |
5296 | case llvm::Triple::ppcle: |
5297 | case llvm::Triple::ppc64: |
5298 | case llvm::Triple::ppc64le: |
5299 | return CGF->EmitPPCBuiltinExpr(BuiltinID, E); |
5300 | case llvm::Triple::r600: |
5301 | case llvm::Triple::amdgcn: |
5302 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); |
5303 | case llvm::Triple::systemz: |
5304 | return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); |
5305 | case llvm::Triple::nvptx: |
5306 | case llvm::Triple::nvptx64: |
5307 | return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); |
5308 | case llvm::Triple::wasm32: |
5309 | case llvm::Triple::wasm64: |
5310 | return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); |
5311 | case llvm::Triple::hexagon: |
5312 | return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); |
5313 | case llvm::Triple::riscv32: |
5314 | case llvm::Triple::riscv64: |
5315 | return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue); |
5316 | default: |
5317 | return nullptr; |
5318 | } |
5319 | } |
5320 | |
5321 | Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, |
5322 | const CallExpr *E, |
5323 | ReturnValueSlot ReturnValue) { |
5324 | if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { |
5325 | assert(getContext().getAuxTargetInfo() && "Missing aux target info"); |
5326 | return EmitTargetArchBuiltinExpr( |
5327 | this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, |
5328 | ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); |
5329 | } |
5330 | |
5331 | return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, |
5332 | getTarget().getTriple().getArch()); |
5333 | } |
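// For illustration: in a CUDA or HIP compilation, a builtin belonging to
// the other half of the host/device pair carries an aux builtin ID and is
// dispatched above against the aux target's architecture instead of the
// current triple.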
5334 | |
5335 | static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF, |
5336 | NeonTypeFlags TypeFlags, |
5337 | bool HasLegalHalfType = true, |
5338 | bool V1Ty = false, |
5339 | bool AllowBFloatArgsAndRet = true) { |
5340 | int IsQuad = TypeFlags.isQuad(); |
5341 | switch (TypeFlags.getEltType()) { |
5342 | case NeonTypeFlags::Int8: |
5343 | case NeonTypeFlags::Poly8: |
5344 | return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); |
5345 | case NeonTypeFlags::Int16: |
5346 | case NeonTypeFlags::Poly16: |
5347 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5348 | case NeonTypeFlags::BFloat16: |
5349 | if (AllowBFloatArgsAndRet) |
5350 | return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad)); |
5351 | else |
5352 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5353 | case NeonTypeFlags::Float16: |
5354 | if (HasLegalHalfType) |
5355 | return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad)); |
5356 | else |
5357 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5358 | case NeonTypeFlags::Int32: |
5359 | return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); |
5360 | case NeonTypeFlags::Int64: |
5361 | case NeonTypeFlags::Poly64: |
5362 | return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); |
5363 | case NeonTypeFlags::Poly128: |
5364 | // FIXME: i128 and f128 are not fully supported in Clang and LLVM; |
5365 | // much of the i128 and f128 API is missing, so we use v16i8 to |
5366 | // represent poly128 and rely on pattern matching. |
5367 | return llvm::FixedVectorType::get(CGF->Int8Ty, 16); |
5368 | case NeonTypeFlags::Float32: |
5369 | return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); |
5370 | case NeonTypeFlags::Float64: |
5371 | return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); |
5372 | } |
5373 | llvm_unreachable("Unknown vector element type!"); |
5374 | } |
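// For illustration: NeonTypeFlags{Int32, quad} maps to <4 x i32>, while
// the 64-bit (non-quad) form halves the lane count to <2 x i32>; Float16
// falls back to the matching integer vector when half is not legal.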
5375 | |
5376 | static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF, |
5377 | NeonTypeFlags IntTypeFlags) { |
5378 | int IsQuad = IntTypeFlags.isQuad(); |
5379 | switch (IntTypeFlags.getEltType()) { |
5380 | case NeonTypeFlags::Int16: |
5381 | return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad)); |
5382 | case NeonTypeFlags::Int32: |
5383 | return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad)); |
5384 | case NeonTypeFlags::Int64: |
5385 | return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad)); |
5386 | default: |
5387 | llvm_unreachable("Type can't be converted to floating-point!"); |
5388 | } |
5389 | } |
5390 | |
5391 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, |
5392 | const ElementCount &Count) { |
5393 | Value *SV = llvm::ConstantVector::getSplat(Count, C); |
5394 | return Builder.CreateShuffleVector(V, V, SV, "lane"); |
5395 | } |
5396 | |
5397 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { |
5398 | ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount(); |
5399 | return EmitNeonSplat(V, C, EC); |
5400 | } |
5401 | |
5402 | Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, |
5403 | const char *name, |
5404 | unsigned shift, bool rightshift) { |
5405 | unsigned j = 0; |
5406 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
5407 | ai != ae; ++ai, ++j) { |
5408 | if (F->isConstrainedFPIntrinsic()) |
5409 | if (ai->getType()->isMetadataTy()) |
5410 | continue; |
5411 | if (shift > 0 && shift == j) |
5412 | Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); |
5413 | else |
5414 | Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); |
5415 | } |
5416 | |
5417 | if (F->isConstrainedFPIntrinsic()) |
5418 | return Builder.CreateConstrainedFPCall(F, Ops, name); |
5419 | else |
5420 | return Builder.CreateCall(F, Ops, name); |
5421 | } |
5422 | |
5423 | Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, |
5424 | bool neg) { |
5425 | int SV = cast<ConstantInt>(V)->getSExtValue(); |
5426 | return ConstantInt::get(Ty, neg ? -SV : SV); |
5427 | } |
5428 | |
5429 | // Right-shift a vector by a constant. |
5430 | Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift, |
5431 | llvm::Type *Ty, bool usgn, |
5432 | const char *name) { |
5433 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); |
5434 | |
5435 | int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); |
5436 | int EltSize = VTy->getScalarSizeInBits(); |
5437 | |
5438 | Vec = Builder.CreateBitCast(Vec, Ty); |
5439 | |
5440 | // lshr/ashr are undefined when the shift amount is equal to the vector |
5441 | // element size. |
5442 | if (ShiftAmt == EltSize) { |
5443 | if (usgn) { |
5444 | // Right-shifting an unsigned value by its size yields 0. |
5445 | return llvm::ConstantAggregateZero::get(VTy); |
5446 | } else { |
5447 | // Right-shifting a signed value by its size is equivalent |
5448 | // to a shift of size-1. |
5449 | --ShiftAmt; |
5450 | Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); |
5451 | } |
5452 | } |
5453 | |
5454 | Shift = EmitNeonShiftVector(Shift, Ty, false); |
5455 | if (usgn) |
5456 | return Builder.CreateLShr(Vec, Shift, name); |
5457 | else |
5458 | return Builder.CreateAShr(Vec, Shift, name); |
5459 | } |
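// For example, right-shifting a <8 x i8> vector by the constant 8 (the
// element width) folds to an all-zero vector for unsigned shifts, and is
// clamped to a shift by 7 for signed shifts, so the emitted IR stays
// well defined.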
5460 | |
5461 | enum { |
5462 | AddRetType = (1 << 0), |
5463 | Add1ArgType = (1 << 1), |
5464 | Add2ArgTypes = (1 << 2), |
5465 | |
5466 | VectorizeRetType = (1 << 3), |
5467 | VectorizeArgTypes = (1 << 4), |
5468 | |
5469 | InventFloatType = (1 << 5), |
5470 | UnsignedAlts = (1 << 6), |
5471 | |
5472 | Use64BitVectors = (1 << 7), |
5473 | Use128BitVectors = (1 << 8), |
5474 | |
5475 | Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, |
5476 | VectorRet = AddRetType | VectorizeRetType, |
5477 | VectorRetGetArgs01 = |
5478 | AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, |
5479 | FpCmpzModifiers = |
5480 | AddRetType | VectorizeRetType | Add1ArgType | InventFloatType |
5481 | }; |
5482 | |
5483 | namespace { |
5484 | struct ARMVectorIntrinsicInfo { |
5485 | const char *NameHint; |
5486 | unsigned BuiltinID; |
5487 | unsigned LLVMIntrinsic; |
5488 | unsigned AltLLVMIntrinsic; |
5489 | uint64_t TypeModifier; |
5490 | |
5491 | bool operator<(unsigned RHSBuiltinID) const { |
5492 | return BuiltinID < RHSBuiltinID; |
5493 | } |
5494 | bool operator<(const ARMVectorIntrinsicInfo &TE) const { |
5495 | return BuiltinID < TE.BuiltinID; |
5496 | } |
5497 | }; |
5498 | } // end anonymous namespace |
5499 | |
5500 | #define NEONMAP0(NameBase) \ |
5501 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } |
5502 | |
5503 | #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
5504 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \ |
5505 | Intrinsic::LLVMIntrinsic, 0, TypeModifier } |
5506 | |
5507 | #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ |
5508 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \ |
5509 | Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ |
5510 | TypeModifier } |
5511 | |
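// For reference, a sketch of what one NEONMAP1 entry expands to:
//   NEONMAP1(vabs_v, arm_neon_vabs, 0)
// becomes
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }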
5512 | static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = { |
5513 | NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0), |
5514 | NEONMAP0(splat_lane_v), |
5515 | NEONMAP0(splat_laneq_v), |
5516 | NEONMAP0(splatq_lane_v), |
5517 | NEONMAP0(splatq_laneq_v), |
5518 | NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
5519 | NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
5520 | NEONMAP1(vabs_v, arm_neon_vabs, 0), |
5521 | NEONMAP1(vabsq_v, arm_neon_vabs, 0), |
5522 | NEONMAP0(vadd_v), |
5523 | NEONMAP0(vaddhn_v), |
5524 | NEONMAP0(vaddq_v), |
5525 | NEONMAP1(vaesdq_v, arm_neon_aesd, 0), |
5526 | NEONMAP1(vaeseq_v, arm_neon_aese, 0), |
5527 | NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), |
5528 | NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), |
5529 | NEONMAP1(vbfdot_v, arm_neon_bfdot, 0), |
5530 | NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0), |
5531 | NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0), |
5532 | NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0), |
5533 | NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0), |
5534 | NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), |
5535 | NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), |
5536 | NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), |
5537 | NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), |
5538 | NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), |
5539 | NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), |
5540 | NEONMAP1(vcage_v, arm_neon_vacge, 0), |
5541 | NEONMAP1(vcageq_v, arm_neon_vacge, 0), |
5542 | NEONMAP1(vcagt_v, arm_neon_vacgt, 0), |
5543 | NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), |
5544 | NEONMAP1(vcale_v, arm_neon_vacge, 0), |
5545 | NEONMAP1(vcaleq_v, arm_neon_vacge, 0), |
5546 | NEONMAP1(vcalt_v, arm_neon_vacgt, 0), |
5547 | NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), |
5548 | NEONMAP0(vceqz_v), |
5549 | NEONMAP0(vceqzq_v), |
5550 | NEONMAP0(vcgez_v), |
5551 | NEONMAP0(vcgezq_v), |
5552 | NEONMAP0(vcgtz_v), |
5553 | NEONMAP0(vcgtzq_v), |
5554 | NEONMAP0(vclez_v), |
5555 | NEONMAP0(vclezq_v), |
5556 | NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), |
5557 | NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), |
5558 | NEONMAP0(vcltz_v), |
5559 | NEONMAP0(vcltzq_v), |
5560 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
5561 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
5562 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
5563 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
5564 | NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), |
5565 | NEONMAP0(vcvt_f16_v), |
5566 | NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), |
5567 | NEONMAP0(vcvt_f32_v), |
5568 | NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5569 | NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5570 | NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
5571 | NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
5572 | NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
5573 | NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
5574 | NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
5575 | NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
5576 | NEONMAP0(vcvt_s16_v), |
5577 | NEONMAP0(vcvt_s32_v), |
5578 | NEONMAP0(vcvt_s64_v), |
5579 | NEONMAP0(vcvt_u16_v), |
5580 | NEONMAP0(vcvt_u32_v), |
5581 | NEONMAP0(vcvt_u64_v), |
5582 | NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), |
5583 | NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), |
5584 | NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), |
5585 | NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), |
5586 | NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), |
5587 | NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), |
5588 | NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), |
5589 | NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), |
5590 | NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), |
5591 | NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), |
5592 | NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), |
5593 | NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), |
5594 | NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0), |
5595 | NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), |
5596 | NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), |
5597 | NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), |
5598 | NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), |
5599 | NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), |
5600 | NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), |
5601 | NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), |
5602 | NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), |
5603 | NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), |
5604 | NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), |
5605 | NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), |
5606 | NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), |
5607 | NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), |
5608 | NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), |
5609 | NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), |
5610 | NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), |
5611 | NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), |
5612 | NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), |
5613 | NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), |
5614 | NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), |
5615 | NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), |
5616 | NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), |
5617 | NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), |
5618 | NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), |
5619 | NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), |
5620 | NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), |
5621 | NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), |
5622 | NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), |
5623 | NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), |
5624 | NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), |
5625 | NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), |
5626 | NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), |
5627 | NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), |
5628 | NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), |
5629 | NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), |
5630 | NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), |
5631 | NEONMAP0(vcvtq_f16_v), |
5632 | NEONMAP0(vcvtq_f32_v), |
5633 | NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5634 | NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5635 | NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
5636 | NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
5637 | NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
5638 | NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
5639 | NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
5640 | NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
5641 | NEONMAP0(vcvtq_s16_v), |
5642 | NEONMAP0(vcvtq_s32_v), |
5643 | NEONMAP0(vcvtq_s64_v), |
5644 | NEONMAP0(vcvtq_u16_v), |
5645 | NEONMAP0(vcvtq_u32_v), |
5646 | NEONMAP0(vcvtq_u64_v), |
5647 | NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), |
5648 | NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), |
5649 | NEONMAP0(vext_v), |
5650 | NEONMAP0(vextq_v), |
5651 | NEONMAP0(vfma_v), |
5652 | NEONMAP0(vfmaq_v), |
5653 | NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
5654 | NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
5655 | NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
5656 | NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
5657 | NEONMAP0(vld1_dup_v), |
5658 | NEONMAP1(vld1_v, arm_neon_vld1, 0), |
5659 | NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), |
5660 | NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), |
5661 | NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), |
5662 | NEONMAP0(vld1q_dup_v), |
5663 | NEONMAP1(vld1q_v, arm_neon_vld1, 0), |
5664 | NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), |
5665 | NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), |
5666 | NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), |
5667 | NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), |
5668 | NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), |
5669 | NEONMAP1(vld2_v, arm_neon_vld2, 0), |
5670 | NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), |
5671 | NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), |
5672 | NEONMAP1(vld2q_v, arm_neon_vld2, 0), |
5673 | NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), |
5674 | NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), |
5675 | NEONMAP1(vld3_v, arm_neon_vld3, 0), |
5676 | NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), |
5677 | NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), |
5678 | NEONMAP1(vld3q_v, arm_neon_vld3, 0), |
5679 | NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), |
5680 | NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), |
5681 | NEONMAP1(vld4_v, arm_neon_vld4, 0), |
5682 | NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), |
5683 | NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), |
5684 | NEONMAP1(vld4q_v, arm_neon_vld4, 0), |
5685 | NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
5686 | NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), |
5687 | NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), |
5688 | NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
5689 | NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
5690 | NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), |
5691 | NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), |
5692 | NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
5693 | NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0), |
5694 | NEONMAP0(vmovl_v), |
5695 | NEONMAP0(vmovn_v), |
5696 | NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), |
5697 | NEONMAP0(vmull_v), |
5698 | NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), |
5699 | NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
5700 | NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
5701 | NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), |
5702 | NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
5703 | NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
5704 | NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), |
5705 | NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), |
5706 | NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), |
5707 | NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), |
5708 | NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), |
5709 | NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
5710 | NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
5711 | NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0), |
5712 | NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0), |
5713 | NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), |
5714 | NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), |
5715 | NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), |
5716 | NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), |
5717 | NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), |
5718 | NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), |
5719 | NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), |
5720 | NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), |
5721 | NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), |
5722 | NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
5723 | NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
5724 | NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
5725 | NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
5726 | NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
5727 | NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
5728 | NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), |
5729 | NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), |
5730 | NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
5731 | NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
5732 | NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), |
5733 | NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
5734 | NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
5735 | NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), |
5736 | NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), |
5737 | NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
5738 | NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
5739 | NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), |
5740 | NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), |
5741 | NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), |
5742 | NEONMAP0(vrndi_v), |
5743 | NEONMAP0(vrndiq_v), |
5744 | NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), |
5745 | NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), |
5746 | NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), |
5747 | NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), |
5748 | NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), |
5749 | NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), |
5750 | NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), |
5751 | NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), |
5752 | NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), |
5753 | NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
5754 | NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
5755 | NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
5756 | NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
5757 | NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
5758 | NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
5759 | NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), |
5760 | NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), |
5761 | NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), |
5762 | NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), |
5763 | NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), |
5764 | NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), |
5765 | NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), |
5766 | NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), |
5767 | NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), |
5768 | NEONMAP0(vshl_n_v), |
5769 | NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
5770 | NEONMAP0(vshll_n_v), |
5771 | NEONMAP0(vshlq_n_v), |
5772 | NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
5773 | NEONMAP0(vshr_n_v), |
5774 | NEONMAP0(vshrn_n_v), |
5775 | NEONMAP0(vshrq_n_v), |
5776 | NEONMAP1(vst1_v, arm_neon_vst1, 0), |
5777 | NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), |
5778 | NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), |
5779 | NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), |
5780 | NEONMAP1(vst1q_v, arm_neon_vst1, 0), |
5781 | NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), |
5782 | NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), |
5783 | NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), |
5784 | NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), |
5785 | NEONMAP1(vst2_v, arm_neon_vst2, 0), |
5786 | NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), |
5787 | NEONMAP1(vst2q_v, arm_neon_vst2, 0), |
5788 | NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), |
5789 | NEONMAP1(vst3_v, arm_neon_vst3, 0), |
5790 | NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), |
5791 | NEONMAP1(vst3q_v, arm_neon_vst3, 0), |
5792 | NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), |
5793 | NEONMAP1(vst4_v, arm_neon_vst4, 0), |
5794 | NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), |
5795 | NEONMAP1(vst4q_v, arm_neon_vst4, 0), |
5796 | NEONMAP0(vsubhn_v), |
5797 | NEONMAP0(vtrn_v), |
5798 | NEONMAP0(vtrnq_v), |
5799 | NEONMAP0(vtst_v), |
5800 | NEONMAP0(vtstq_v), |
5801 | NEONMAP1(vusdot_v, arm_neon_usdot, 0), |
5802 | NEONMAP1(vusdotq_v, arm_neon_usdot, 0), |
5803 | NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0), |
5804 | NEONMAP0(vuzp_v), |
5805 | NEONMAP0(vuzpq_v), |
5806 | NEONMAP0(vzip_v), |
5807 | NEONMAP0(vzipq_v) |
5808 | }; |
5809 | |
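// Note: like the ARM map above, this table must stay sorted by builtin ID
// (kept alphabetical by name in the source); findARMVectorIntrinsicInMap
// below relies on that ordering for its binary search.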
5810 | static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { |
5811 | NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0), |
5812 | NEONMAP0(splat_lane_v), |
5813 | NEONMAP0(splat_laneq_v), |
5814 | NEONMAP0(splatq_lane_v), |
5815 | NEONMAP0(splatq_laneq_v), |
5816 | NEONMAP1(vabs_v, aarch64_neon_abs, 0), |
5817 | NEONMAP1(vabsq_v, aarch64_neon_abs, 0), |
5818 | NEONMAP0(vadd_v), |
5819 | NEONMAP0(vaddhn_v), |
5820 | NEONMAP0(vaddq_p128), |
5821 | NEONMAP0(vaddq_v), |
5822 | NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), |
5823 | NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), |
5824 | NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), |
5825 | NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), |
5826 | NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
5827 | NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0), |
5828 | NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0), |
5829 | NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0), |
5830 | NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0), |
5831 | NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0), |
5832 | NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), |
5833 | NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), |
5834 | NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), |
5835 | NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), |
5836 | NEONMAP1(vcage_v, aarch64_neon_facge, 0), |
5837 | NEONMAP1(vcageq_v, aarch64_neon_facge, 0), |
5838 | NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), |
5839 | NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), |
5840 | NEONMAP1(vcale_v, aarch64_neon_facge, 0), |
5841 | NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), |
5842 | NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), |
5843 | NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), |
5844 | NEONMAP0(vceqz_v), |
5845 | NEONMAP0(vceqzq_v), |
5846 | NEONMAP0(vcgez_v), |
5847 | NEONMAP0(vcgezq_v), |
5848 | NEONMAP0(vcgtz_v), |
5849 | NEONMAP0(vcgtzq_v), |
5850 | NEONMAP0(vclez_v), |
5851 | NEONMAP0(vclezq_v), |
5852 | NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), |
5853 | NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), |
5854 | NEONMAP0(vcltz_v), |
5855 | NEONMAP0(vcltzq_v), |
5856 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
5857 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
5858 | NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), |
5859 | NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), |
5860 | NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), |
5861 | NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType), |
5862 | NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), |
5863 | NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), |
5864 | NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), |
5865 | NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType), |
5866 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
5867 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
5868 | NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), |
5869 | NEONMAP0(vcvt_f16_v), |
5870 | NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), |
5871 | NEONMAP0(vcvt_f32_v), |
5872 | NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5873 | NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5874 | NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5875 | NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
5876 | NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
5877 | NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
5878 | NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
5879 | NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
5880 | NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
5881 | NEONMAP0(vcvtq_f16_v), |
5882 | NEONMAP0(vcvtq_f32_v), |
5883 | NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0), |
5884 | NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5885 | NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5886 | NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5887 | NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
5888 | NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
5889 | NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
5890 | NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
5891 | NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
5892 | NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
5893 | NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), |
5894 | NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
5895 | NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
5896 | NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
5897 | NEONMAP0(vext_v), |
5898 | NEONMAP0(vextq_v), |
5899 | NEONMAP0(vfma_v), |
5900 | NEONMAP0(vfmaq_v), |
5901 | NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0), |
5902 | NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0), |
5903 | NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0), |
5904 | NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0), |
5905 | NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0), |
5906 | NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0), |
5907 | NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0), |
5908 | NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0), |
5909 | NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
5910 | NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
5911 | NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
5912 | NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
5913 | NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), |
5914 | NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), |
5915 | NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), |
5916 | NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), |
5917 | NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), |
5918 | NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), |
5919 | NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0), |
5920 | NEONMAP0(vmovl_v), |
5921 | NEONMAP0(vmovn_v), |
5922 | NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), |
5923 | NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), |
5924 | NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), |
5925 | NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
5926 | NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
5927 | NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), |
5928 | NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), |
5929 | NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), |
5930 | NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
5931 | NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
5932 | NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), |
5933 | NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), |
5934 | NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), |
5935 | NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
5936 | NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), |
5937 | NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), |
5938 | NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
5939 | NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), |
5940 | NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), |
5941 | NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), |
5942 | NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), |
5943 | NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), |
5944 | NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), |
5945 | NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
5946 | NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
5947 | NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), |
5948 | NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
5949 | NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
5950 | NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), |
5951 | NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
5952 | NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
5953 | NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
5954 | NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
5955 | NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
5956 | NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
5957 | NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), |
5958 | NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), |
5959 | NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
5960 | NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
5961 | NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), |
5962 | NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0), |
5963 | NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
5964 | NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
5965 | NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), |
5966 | NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), |
5967 | NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
5968 | NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
5969 | NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType), |
5970 | NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType), |
5971 | NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType), |
5972 | NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType), |
5973 | NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType), |
5974 | NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType), |
5975 | NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType), |
5976 | NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType), |
5977 | NEONMAP0(vrndi_v), |
5978 | NEONMAP0(vrndiq_v), |
5979 | NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
5980 | NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
5981 | NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
5982 | NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
5983 | NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
5984 | NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
5985 | NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), |
5986 | NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), |
5987 | NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), |
5988 | NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), |
5989 | NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), |
5990 | NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), |
5991 | NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), |
5992 | NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), |
5993 | NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), |
5994 | NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0), |
5995 | NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0), |
5996 | NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0), |
5997 | NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0), |
5998 | NEONMAP0(vshl_n_v), |
5999 | NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
6000 | NEONMAP0(vshll_n_v), |
6001 | NEONMAP0(vshlq_n_v), |
6002 | NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
6003 | NEONMAP0(vshr_n_v), |
6004 | NEONMAP0(vshrn_n_v), |
6005 | NEONMAP0(vshrq_n_v), |
6006 | NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0), |
6007 | NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0), |
6008 | NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0), |
6009 | NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0), |
6010 | NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0), |
6011 | NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0), |
6012 | NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0), |
6013 | NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0), |
6014 | NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0), |
6015 | NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), |
6016 | NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), |
6017 | NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), |
6018 | NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), |
6019 | NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), |
6020 | NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), |
6021 | NEONMAP0(vsubhn_v), |
6022 | NEONMAP0(vtst_v), |
6023 | NEONMAP0(vtstq_v), |
6024 | NEONMAP1(vusdot_v, aarch64_neon_usdot, 0), |
6025 | NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0), |
6026 | NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0), |
6027 | NEONMAP1(vxarq_v, aarch64_crypto_xar, 0), |
6028 | }; |
6029 | |
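// Scalar (SISD) builtins: these operate on single elements rather than whole
// vectors. Many map onto vector intrinsics via Vectorize1ArgType/VectorRet,
// which wrap the scalar in a one-element-wide vector and later extract the
// result lane in EmitCommonNeonSISDBuiltinExpr.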
6030 | static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { |
6031 | NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), |
6032 | NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), |
6033 | NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), |
6034 | NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
6035 | NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
6036 | NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
6037 | NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
6038 | NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6039 | NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6040 | NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6041 | NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6042 | NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6043 | NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6044 | NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6045 | NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6046 | NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6047 | NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
6048 | NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
6049 | NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6050 | NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6051 | NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
6052 | NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
6053 | NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6054 | NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6055 | NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6056 | NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6057 | NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6058 | NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6059 | NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6060 | NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6061 | NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6062 | NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6063 | NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6064 | NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6065 | NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0), |
6066 | NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6067 | NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6068 | NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6069 | NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6070 | NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6071 | NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6072 | NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6073 | NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6074 | NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6075 | NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6076 | NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6077 | NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6078 | NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6079 | NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6080 | NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6081 | NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6082 | NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6083 | NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6084 | NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), |
6085 | NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6086 | NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6087 | NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6088 | NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6089 | NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
6090 | NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
6091 | NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6092 | NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6093 | NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
6094 | NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
6095 | NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6096 | NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6097 | NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6098 | NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6099 | NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
6100 | NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
6101 | NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6102 | NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6103 | NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
6104 | NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
6105 | NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), |
6106 | NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), |
6107 | NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), |
6108 | NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6109 | NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6110 | NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6111 | NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6112 | NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6113 | NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6114 | NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6115 | NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6116 | NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6117 | NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6118 | NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
6119 | NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), |
6120 | NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
6121 | NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), |
6122 | NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
6123 | NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
6124 | NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), |
6125 | NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), |
6126 | NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
6127 | NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
6128 | NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), |
6129 | NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), |
6130 | NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), |
6131 | NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), |
6132 | NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), |
6133 | NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), |
6134 | NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), |
6135 | NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), |
6136 | NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
6137 | NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
6138 | NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
6139 | NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
6140 | NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), |
6141 | NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
6142 | NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
6143 | NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
6144 | NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), |
6145 | NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
6146 | NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), |
6147 | NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), |
6148 | NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), |
6149 | NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
6150 | NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
6151 | NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), |
6152 | NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), |
6153 | NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
6154 | NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
6155 | NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), |
6156 | NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), |
6157 | NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), |
6158 | NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), |
6159 | NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
6160 | NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
6161 | NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
6162 | NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
6163 | NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), |
6164 | NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
6165 | NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
6166 | NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6167 | NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6168 | NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6169 | NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6170 | NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), |
6171 | NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), |
6172 | NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6173 | NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6174 | NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6175 | NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6176 | NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), |
6177 | NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), |
6178 | NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), |
6179 | NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), |
6180 | NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
6181 | NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
6182 | NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), |
6183 | NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), |
6184 | NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), |
6185 | NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
6186 | NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
6187 | NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
6188 | NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
6189 | NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), |
6190 | NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
6191 | NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
6192 | NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
6193 | NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
6194 | NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), |
6195 | NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), |
6196 | NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
6197 | NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
6198 | NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), |
6199 | NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), |
6200 | NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), |
6201 | NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), |
6202 | NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), |
6203 | NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), |
6204 | NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), |
6205 | NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), |
6206 | NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), |
6207 | NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), |
6208 | NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), |
6209 | NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), |
6210 | NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), |
6211 | NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), |
6212 | NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), |
6213 | NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), |
6214 | NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), |
6215 | NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), |
6216 | NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), |
6217 | NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), |
6218 | NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
6219 | NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), |
6220 | NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
6221 | NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), |
6222 | NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), |
6223 | NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), |
6224 | NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
6225 | NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), |
6226 | NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
6227 | NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), |
6228 | // FP16 scalar intrinsics go here.
6229 | NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), |
6230 | NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6231 | NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6232 | NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6233 | NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6234 | NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6235 | NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6236 | NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6237 | NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6238 | NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6239 | NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6240 | NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6241 | NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6242 | NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6243 | NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6244 | NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6245 | NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6246 | NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6247 | NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6248 | NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6249 | NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6250 | NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6251 | NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6252 | NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6253 | NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6254 | NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6255 | NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6256 | NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6257 | NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6258 | NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), |
6259 | NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), |
6260 | NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), |
6261 | NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), |
6262 | NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), |
6263 | }; |
6264 | |
6265 | #undef NEONMAP0 |
6266 | #undef NEONMAP1 |
6267 | #undef NEONMAP2 |
6268 | |
6269 | #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
6270 | { \ |
6271 | #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ |
6272 | TypeModifier \ |
6273 | } |
6274 | |
6275 | #define SVEMAP2(NameBase, TypeModifier) \ |
6276 | { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } |
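// SVEMAP1 ties a builtin directly to an LLVM intrinsic plus a type modifier;
// SVEMAP2 leaves the intrinsic ID as 0, marking builtins that are lowered by
// bespoke code instead. The table body is generated into
// arm_sve_builtin_cg.inc from the SVE builtin definitions.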
6277 | static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { |
6278 | #define GET_SVE_LLVM_INTRINSIC_MAP |
6279 | #include "clang/Basic/arm_sve_builtin_cg.inc" |
6280 | #undef GET_SVE_LLVM_INTRINSIC_MAP |
6281 | }; |
6282 | |
6283 | #undef SVEMAP1 |
6284 | #undef SVEMAP2 |
6285 | |
6286 | static bool NEONSIMDIntrinsicsProvenSorted = false; |
6287 | |
6288 | static bool AArch64SIMDIntrinsicsProvenSorted = false; |
6289 | static bool AArch64SISDIntrinsicsProvenSorted = false; |
6290 | static bool AArch64SVEIntrinsicsProvenSorted = false; |
6291 | |
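// Look up a builtin in one of the sorted maps above. The binary search via
// llvm::lower_bound requires sortedness; in asserts builds that property is
// verified once per map, with the *ProvenSorted flags caching the check.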
6292 | static const ARMVectorIntrinsicInfo * |
6293 | findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap, |
6294 | unsigned BuiltinID, bool &MapProvenSorted) { |
6295 | |
6296 | #ifndef NDEBUG |
6297 | if (!MapProvenSorted) { |
6298 | assert(llvm::is_sorted(IntrinsicMap));
6299 | MapProvenSorted = true; |
6300 | } |
6301 | #endif |
6302 | |
6303 | const ARMVectorIntrinsicInfo *Builtin = |
6304 | llvm::lower_bound(IntrinsicMap, BuiltinID); |
6305 | |
6306 | if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) |
6307 | return Builtin; |
6308 | |
6309 | return nullptr; |
6310 | } |
6311 | |
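// Build the overload type list for a NEON intrinsic from its TypeModifier
// flags. As an illustration: AddRetType | Add1ArgType yields
// Tys = { ReturnTy, ArgType }, so the intrinsic is overloaded on both the
// return and argument types; with VectorizeArgTypes plus Use64BitVectors, a
// scalar ArgType is first widened to a 64-bit vector of that element type.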
6312 | Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, |
6313 | unsigned Modifier, |
6314 | llvm::Type *ArgType, |
6315 | const CallExpr *E) { |
6316 | int VectorSize = 0; |
6317 | if (Modifier & Use64BitVectors) |
6318 | VectorSize = 64; |
6319 | else if (Modifier & Use128BitVectors) |
6320 | VectorSize = 128; |
6321 | |
6322 | // Return type. |
6323 | SmallVector<llvm::Type *, 3> Tys; |
6324 | if (Modifier & AddRetType) { |
6325 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
6326 | if (Modifier & VectorizeRetType) |
6327 | Ty = llvm::FixedVectorType::get( |
6328 | Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); |
6329 | |
6330 | Tys.push_back(Ty); |
6331 | } |
6332 | |
6333 | // Arguments. |
6334 | if (Modifier & VectorizeArgTypes) { |
6335 | int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; |
6336 | ArgType = llvm::FixedVectorType::get(ArgType, Elts); |
6337 | } |
6338 | |
6339 | if (Modifier & (Add1ArgType | Add2ArgTypes)) |
6340 | Tys.push_back(ArgType); |
6341 | |
6342 | if (Modifier & Add2ArgTypes) |
6343 | Tys.push_back(ArgType); |
6344 | |
6345 | if (Modifier & InventFloatType) |
6346 | Tys.push_back(FloatTy); |
6347 | |
6348 | return CGM.getIntrinsic(IntrinsicID, Tys); |
6349 | } |
6350 | |
6351 | static Value *EmitCommonNeonSISDBuiltinExpr( |
6352 | CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo, |
6353 | SmallVectorImpl<Value *> &Ops, const CallExpr *E) { |
6354 | unsigned BuiltinID = SISDInfo.BuiltinID; |
6355 | unsigned int Int = SISDInfo.LLVMIntrinsic; |
6356 | unsigned Modifier = SISDInfo.TypeModifier; |
6357 | const char *s = SISDInfo.NameHint; |
6358 | |
6359 | switch (BuiltinID) { |
6360 | case NEON::BI__builtin_neon_vcled_s64: |
6361 | case NEON::BI__builtin_neon_vcled_u64: |
6362 | case NEON::BI__builtin_neon_vcles_f32: |
6363 | case NEON::BI__builtin_neon_vcled_f64: |
6364 | case NEON::BI__builtin_neon_vcltd_s64: |
6365 | case NEON::BI__builtin_neon_vcltd_u64: |
6366 | case NEON::BI__builtin_neon_vclts_f32: |
6367 | case NEON::BI__builtin_neon_vcltd_f64: |
6368 | case NEON::BI__builtin_neon_vcales_f32: |
6369 | case NEON::BI__builtin_neon_vcaled_f64: |
6370 | case NEON::BI__builtin_neon_vcalts_f32: |
6371 | case NEON::BI__builtin_neon_vcaltd_f64: |
6372 | // Only one direction of comparisons actually exists: cmle is actually a cmge
6373 | // with swapped operands. The table gives us the right intrinsic, but we
6374 | // still need to do the swap.
6375 | std::swap(Ops[0], Ops[1]); |
6376 | break; |
6377 | } |
6378 | |
6379 | assert(Int && "Generic code assumes a valid intrinsic");
6380 | |
6381 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
6382 | const Expr *Arg = E->getArg(0); |
6383 | llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); |
6384 | Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E); |
6385 | |
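// Promote any scalar operand that is narrower than the matching vector
// parameter of the intrinsic: truncate it to the element type, then insert
// it into lane 0 of an undef vector of the parameter's type.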
6386 | int j = 0; |
6387 | ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0); |
6388 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
6389 | ai != ae; ++ai, ++j) { |
6390 | llvm::Type *ArgTy = ai->getType(); |
6391 | if (Ops[j]->getType()->getPrimitiveSizeInBits() == |
6392 | ArgTy->getPrimitiveSizeInBits()) |
6393 | continue; |
6394 | |
6395 | assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6396 | // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate |
6397 | // it before inserting. |
6398 | Ops[j] = CGF.Builder.CreateTruncOrBitCast( |
6399 | Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType()); |
6400 | Ops[j] = |
6401 | CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0); |
6402 | } |
6403 | |
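// If the intrinsic returned a vector wider than the scalar result the caller
// expects, the value of interest is in lane 0; otherwise a bitcast suffices.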
6404 | Value *Result = CGF.EmitNeonCall(F, Ops, s); |
6405 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
6406 | if (ResultType->getPrimitiveSizeInBits().getFixedSize() < |
6407 | Result->getType()->getPrimitiveSizeInBits().getFixedSize()) |
6408 | return CGF.Builder.CreateExtractElement(Result, C0); |
6409 | |
6410 | return CGF.Builder.CreateBitCast(Result, ResultType, s); |
6411 | } |
6412 | |
6413 | Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( |
6414 | unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, |
6415 | const char *NameHint, unsigned Modifier, const CallExpr *E, |
6416 | SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1, |
6417 | llvm::Triple::ArchType Arch) { |
6418 | // Get the last argument, which specifies the vector type. |
6419 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
6420 | Optional<llvm::APSInt> NeonTypeConst = |
6421 | Arg->getIntegerConstantExpr(getContext()); |
6422 | if (!NeonTypeConst) |
6423 | return nullptr; |
6424 | |
6425 | // Determine the type of this overloaded NEON intrinsic. |
6426 | NeonTypeFlags Type(NeonTypeConst->getZExtValue()); |
6427 | bool Usgn = Type.isUnsigned(); |
6428 | bool Quad = Type.isQuad(); |
6429 | const bool HasLegalHalfType = getTarget().hasLegalHalfType(); |
6430 | const bool AllowBFloatArgsAndRet = |
6431 | getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); |
6432 | |
6433 | llvm::FixedVectorType *VTy = |
6434 | GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet); |
6435 | llvm::Type *Ty = VTy; |
6436 | if (!Ty) |
6437 | return nullptr; |
6438 | |
6439 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
6440 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
6441 | }; |
6442 | |
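// NEONMAP2 entries list the unsigned intrinsic first and the signed one as
// the alternative; when UnsignedAlts is set and the operand type is signed,
// switch to the signed alternative here.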
6443 | unsigned Int = LLVMIntrinsic; |
6444 | if ((Modifier & UnsignedAlts) && !Usgn) |
6445 | Int = AltLLVMIntrinsic; |
6446 | |
6447 | switch (BuiltinID) { |
6448 | default: break; |
6449 | case NEON::BI__builtin_neon_splat_lane_v: |
6450 | case NEON::BI__builtin_neon_splat_laneq_v: |
6451 | case NEON::BI__builtin_neon_splatq_lane_v: |
6452 | case NEON::BI__builtin_neon_splatq_laneq_v: { |
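// The 'q' in the builtin name describes the result width and 'laneq' the
// source width, so the result element count is doubled (splatq_lane: 64-bit
// source, 128-bit result) or halved (splat_laneq: 128-bit source, 64-bit
// result) relative to the source vector.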
6453 | auto NumElements = VTy->getElementCount(); |
6454 | if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v) |
6455 | NumElements = NumElements * 2; |
6456 | if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v) |
6457 | NumElements = NumElements.divideCoefficientBy(2); |
6458 | |
6459 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
6460 | return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements); |
6461 | } |
6462 | case NEON::BI__builtin_neon_vpadd_v: |
6463 | case NEON::BI__builtin_neon_vpaddq_v: |
6464 | // We don't allow fp/int overloading of intrinsics. |
6465 | if (VTy->getElementType()->isFloatingPointTy() && |
6466 | Int == Intrinsic::aarch64_neon_addp) |
6467 | Int = Intrinsic::aarch64_neon_faddp; |
6468 | break; |
6469 | case NEON::BI__builtin_neon_vabs_v: |
6470 | case NEON::BI__builtin_neon_vabsq_v: |
6471 | if (VTy->getElementType()->isFloatingPointTy()) |
6472 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs"); |
6473 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs"); |
6474 | case NEON::BI__builtin_neon_vadd_v: |
6475 | case NEON::BI__builtin_neon_vaddq_v: { |
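// This path handles polynomial vectors: addition of polynomials over GF(2)
// is bitwise XOR, performed on the bits reinterpreted as <8 x i8>/<16 x i8>.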
6476 | llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8); |
6477 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
6478 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
6479 | Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); |
6480 | return Builder.CreateBitCast(Ops[0], Ty); |
6481 | } |
6482 | case NEON::BI__builtin_neon_vaddhn_v: { |
6483 | llvm::FixedVectorType *SrcTy = |
6484 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6485 | |
6486 | // %sum = add <4 x i32> %lhs, %rhs |
6487 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6488 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
6489 | Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn"); |
6490 | |
6491 | // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> |
6492 | Constant *ShiftAmt = |
6493 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
6494 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn"); |
6495 | |
6496 | // %res = trunc <4 x i32> %high to <4 x i16> |
6497 | return Builder.CreateTrunc(Ops[0], VTy, "vaddhn"); |
6498 | } |
6499 | case NEON::BI__builtin_neon_vcale_v: |
6500 | case NEON::BI__builtin_neon_vcaleq_v: |
6501 | case NEON::BI__builtin_neon_vcalt_v: |
6502 | case NEON::BI__builtin_neon_vcaltq_v: |
6503 | std::swap(Ops[0], Ops[1]); |
6504 | LLVM_FALLTHROUGH;
6505 | case NEON::BI__builtin_neon_vcage_v: |
6506 | case NEON::BI__builtin_neon_vcageq_v: |
6507 | case NEON::BI__builtin_neon_vcagt_v: |
6508 | case NEON::BI__builtin_neon_vcagtq_v: { |
6509 | llvm::Type *Ty; |
6510 | switch (VTy->getScalarSizeInBits()) { |
6511 | default: llvm_unreachable("unexpected type");
6512 | case 32: |
6513 | Ty = FloatTy; |
6514 | break; |
6515 | case 64: |
6516 | Ty = DoubleTy; |
6517 | break; |
6518 | case 16: |
6519 | Ty = HalfTy; |
6520 | break; |
6521 | } |
6522 | auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements()); |
6523 | llvm::Type *Tys[] = { VTy, VecFlt }; |
6524 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6525 | return EmitNeonCall(F, Ops, NameHint); |
6526 | } |
6527 | case NEON::BI__builtin_neon_vceqz_v: |
6528 | case NEON::BI__builtin_neon_vceqzq_v: |
6529 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ, |
6530 | ICmpInst::ICMP_EQ, "vceqz"); |
6531 | case NEON::BI__builtin_neon_vcgez_v: |
6532 | case NEON::BI__builtin_neon_vcgezq_v: |
6533 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE, |
6534 | ICmpInst::ICMP_SGE, "vcgez"); |
6535 | case NEON::BI__builtin_neon_vclez_v: |
6536 | case NEON::BI__builtin_neon_vclezq_v: |
6537 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE, |
6538 | ICmpInst::ICMP_SLE, "vclez"); |
6539 | case NEON::BI__builtin_neon_vcgtz_v: |
6540 | case NEON::BI__builtin_neon_vcgtzq_v: |
6541 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT, |
6542 | ICmpInst::ICMP_SGT, "vcgtz"); |
6543 | case NEON::BI__builtin_neon_vcltz_v: |
6544 | case NEON::BI__builtin_neon_vcltzq_v: |
6545 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT, |
6546 | ICmpInst::ICMP_SLT, "vcltz"); |
6547 | case NEON::BI__builtin_neon_vclz_v: |
6548 | case NEON::BI__builtin_neon_vclzq_v: |
6549 | // We generate a target-independent intrinsic, which needs a second argument
6550 | // for whether or not clz of zero is undefined; on ARM it isn't. |
6551 | Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef())); |
6552 | break; |
6553 | case NEON::BI__builtin_neon_vcvt_f32_v: |
6554 | case NEON::BI__builtin_neon_vcvtq_f32_v: |
6555 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6556 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), |
6557 | HasLegalHalfType); |
6558 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
6559 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
6560 | case NEON::BI__builtin_neon_vcvt_f16_v: |
6561 | case NEON::BI__builtin_neon_vcvtq_f16_v: |
6562 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6563 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), |
6564 | HasLegalHalfType); |
6565 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
6566 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
6567 | case NEON::BI__builtin_neon_vcvt_n_f16_v: |
6568 | case NEON::BI__builtin_neon_vcvt_n_f32_v: |
6569 | case NEON::BI__builtin_neon_vcvt_n_f64_v: |
6570 | case NEON::BI__builtin_neon_vcvtq_n_f16_v: |
6571 | case NEON::BI__builtin_neon_vcvtq_n_f32_v: |
6572 | case NEON::BI__builtin_neon_vcvtq_n_f64_v: { |
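// Fixed-point to floating-point conversion: the trailing _n_ operand is the
// number of fraction bits, passed through to the vcvtfx*2fp intrinsics.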
6573 | llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty }; |
6574 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
6575 | Function *F = CGM.getIntrinsic(Int, Tys); |
6576 | return EmitNeonCall(F, Ops, "vcvt_n"); |
6577 | } |
6578 | case NEON::BI__builtin_neon_vcvt_n_s16_v: |
6579 | case NEON::BI__builtin_neon_vcvt_n_s32_v: |
6580 | case NEON::BI__builtin_neon_vcvt_n_u16_v: |
6581 | case NEON::BI__builtin_neon_vcvt_n_u32_v: |
6582 | case NEON::BI__builtin_neon_vcvt_n_s64_v: |
6583 | case NEON::BI__builtin_neon_vcvt_n_u64_v: |
6584 | case NEON::BI__builtin_neon_vcvtq_n_s16_v: |
6585 | case NEON::BI__builtin_neon_vcvtq_n_s32_v: |
6586 | case NEON::BI__builtin_neon_vcvtq_n_u16_v: |
6587 | case NEON::BI__builtin_neon_vcvtq_n_u32_v: |
6588 | case NEON::BI__builtin_neon_vcvtq_n_s64_v: |
6589 | case NEON::BI__builtin_neon_vcvtq_n_u64_v: { |
6590 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
6591 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6592 | return EmitNeonCall(F, Ops, "vcvt_n"); |
6593 | } |
6594 | case NEON::BI__builtin_neon_vcvt_s32_v: |
6595 | case NEON::BI__builtin_neon_vcvt_u32_v: |
6596 | case NEON::BI__builtin_neon_vcvt_s64_v: |
6597 | case NEON::BI__builtin_neon_vcvt_u64_v: |
6598 | case NEON::BI__builtin_neon_vcvt_s16_v: |
6599 | case NEON::BI__builtin_neon_vcvt_u16_v: |
6600 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
6601 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
6602 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
6603 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
6604 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
6605 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
6606 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); |
6607 | return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") |
6608 | : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); |
6609 | } |
6610 | case NEON::BI__builtin_neon_vcvta_s16_v: |
6611 | case NEON::BI__builtin_neon_vcvta_s32_v: |
6612 | case NEON::BI__builtin_neon_vcvta_s64_v: |
6613 | case NEON::BI__builtin_neon_vcvta_u16_v: |
6614 | case NEON::BI__builtin_neon_vcvta_u32_v: |
6615 | case NEON::BI__builtin_neon_vcvta_u64_v: |
6616 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
6617 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
6618 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
6619 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
6620 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
6621 | case NEON::BI__builtin_neon_vcvtaq_u64_v: |
6622 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
6623 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
6624 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
6625 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
6626 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
6627 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
6628 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
6629 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
6630 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
6631 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
6632 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
6633 | case NEON::BI__builtin_neon_vcvtnq_u64_v: |
6634 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
6635 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
6636 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
6637 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
6638 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
6639 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
6640 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
6641 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
6642 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
6643 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
6644 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
6645 | case NEON::BI__builtin_neon_vcvtpq_u64_v: |
6646 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
6647 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
6648 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
6649 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
6650 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
6651 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
6652 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
6653 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
6654 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
6655 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
6656 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
6657 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
6658 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
6659 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
6660 | } |
6661 | case NEON::BI__builtin_neon_vcvtx_f32_v: { |
6662 | llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty };
6663 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
6664 | |
6665 | } |
6666 | case NEON::BI__builtin_neon_vext_v: |
6667 | case NEON::BI__builtin_neon_vextq_v: { |
6668 | int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); |
6669 | SmallVector<int, 16> Indices; |
6670 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
6671 | Indices.push_back(i+CV); |
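     | // e.g. vext(a, b, 3) on 8-lane vectors picks lanes 3..10 of the
     | // concatenation a:b via a single shufflevector.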
6672 | |
6673 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6674 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6675 | return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext"); |
6676 | } |
6677 | case NEON::BI__builtin_neon_vfma_v: |
6678 | case NEON::BI__builtin_neon_vfmaq_v: { |
6679 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6680 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6681 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
6682 | |
6683 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
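     | // e.g. vfmaq_f32(acc, m, n) becomes llvm.fma(m, n, acc), i.e. m*n + acc.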
6684 | return emitCallMaybeConstrainedFPBuiltin( |
6685 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
6686 | {Ops[1], Ops[2], Ops[0]}); |
6687 | } |
6688 | case NEON::BI__builtin_neon_vld1_v: |
6689 | case NEON::BI__builtin_neon_vld1q_v: { |
6690 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6691 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
6692 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1"); |
6693 | } |
6694 | case NEON::BI__builtin_neon_vld1_x2_v: |
6695 | case NEON::BI__builtin_neon_vld1q_x2_v: |
6696 | case NEON::BI__builtin_neon_vld1_x3_v: |
6697 | case NEON::BI__builtin_neon_vld1q_x3_v: |
6698 | case NEON::BI__builtin_neon_vld1_x4_v: |
6699 | case NEON::BI__builtin_neon_vld1q_x4_v: { |
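     | // The vld1_xN intrinsics return a struct of N vectors; emit the call,
     | // then store the whole aggregate through the sret pointer in Ops[0].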
6700 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
6701 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
6702 | llvm::Type *Tys[2] = { VTy, PTy }; |
6703 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6704 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN"); |
6705 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6706 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6707 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6708 | } |
6709 | case NEON::BI__builtin_neon_vld2_v: |
6710 | case NEON::BI__builtin_neon_vld2q_v: |
6711 | case NEON::BI__builtin_neon_vld3_v: |
6712 | case NEON::BI__builtin_neon_vld3q_v: |
6713 | case NEON::BI__builtin_neon_vld4_v: |
6714 | case NEON::BI__builtin_neon_vld4q_v: |
6715 | case NEON::BI__builtin_neon_vld2_dup_v: |
6716 | case NEON::BI__builtin_neon_vld2q_dup_v: |
6717 | case NEON::BI__builtin_neon_vld3_dup_v: |
6718 | case NEON::BI__builtin_neon_vld3q_dup_v: |
6719 | case NEON::BI__builtin_neon_vld4_dup_v: |
6720 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
6721 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6722 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6723 | Value *Align = getAlignmentValue32(PtrOp1); |
6724 | Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint); |
6725 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6726 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6727 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6728 | } |
6729 | case NEON::BI__builtin_neon_vld1_dup_v: |
6730 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
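     | // Load a single scalar element, insert it into lane 0 of an undef
     | // vector, then splat it across all lanes.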
6731 | Value *V = UndefValue::get(Ty); |
6732 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
6733 | PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty); |
6734 | LoadInst *Ld = Builder.CreateLoad(PtrOp0); |
6735 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
6736 | Ops[0] = Builder.CreateInsertElement(V, Ld, CI); |
6737 | return EmitNeonSplat(Ops[0], CI); |
6738 | } |
6739 | case NEON::BI__builtin_neon_vld2_lane_v: |
6740 | case NEON::BI__builtin_neon_vld2q_lane_v: |
6741 | case NEON::BI__builtin_neon_vld3_lane_v: |
6742 | case NEON::BI__builtin_neon_vld3q_lane_v: |
6743 | case NEON::BI__builtin_neon_vld4_lane_v: |
6744 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
6745 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6746 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6747 | for (unsigned I = 2; I < Ops.size() - 1; ++I) |
6748 | Ops[I] = Builder.CreateBitCast(Ops[I], Ty); |
6749 | Ops.push_back(getAlignmentValue32(PtrOp1)); |
6750 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint); |
6751 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6752 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6753 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6754 | } |
6755 | case NEON::BI__builtin_neon_vmovl_v: { |
6756 | llvm::FixedVectorType *DTy = |
6757 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
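     | // e.g. a v4i32 vmovl result widens a v4i16 source (zext if unsigned).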
6758 | Ops[0] = Builder.CreateBitCast(Ops[0], DTy); |
6759 | if (Usgn) |
6760 | return Builder.CreateZExt(Ops[0], Ty, "vmovl"); |
6761 | return Builder.CreateSExt(Ops[0], Ty, "vmovl"); |
6762 | } |
6763 | case NEON::BI__builtin_neon_vmovn_v: { |
6764 | llvm::FixedVectorType *QTy = |
6765 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6766 | Ops[0] = Builder.CreateBitCast(Ops[0], QTy); |
6767 | return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); |
6768 | } |
6769 | case NEON::BI__builtin_neon_vmull_v: |
6770 | // FIXME: the integer vmull operations could be emitted in terms of pure |
6771 | // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of |
6772 | // hoisting the exts outside loops. Until global ISel comes along that can |
6773 | // see through such movement this leads to bad CodeGen. So we need an |
6774 | // intrinsic for now. |
6775 | Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; |
6776 | Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; |
6777 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
6778 | case NEON::BI__builtin_neon_vpadal_v: |
6779 | case NEON::BI__builtin_neon_vpadalq_v: { |
6780 | // The source operand type has twice as many elements of half the size. |
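     | // e.g. a v4i32 vpadal result accumulates pairwise sums of a v8i16 source.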
6781 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
6782 | llvm::Type *EltTy = |
6783 | llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
6784 | auto *NarrowTy = |
6785 | llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); |
6786 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
6787 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
6788 | } |
6789 | case NEON::BI__builtin_neon_vpaddl_v: |
6790 | case NEON::BI__builtin_neon_vpaddlq_v: { |
6791 | // The source operand type has twice as many elements of half the size. |
6792 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
6793 | llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
6794 | auto *NarrowTy = |
6795 | llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); |
6796 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
6797 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); |
6798 | } |
6799 | case NEON::BI__builtin_neon_vqdmlal_v: |
6800 | case NEON::BI__builtin_neon_vqdmlsl_v: { |
6801 | SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end()); |
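     | // Emit the widening saturating-doubling multiply first, then feed its
     | // result to the saturating accumulate (add for vqdmlal, sub for vqdmlsl).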
6802 | Ops[1] = |
6803 | EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal"); |
6804 | Ops.resize(2); |
6805 | return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint); |
6806 | } |
6807 | case NEON::BI__builtin_neon_vqdmulhq_lane_v: |
6808 | case NEON::BI__builtin_neon_vqdmulh_lane_v: |
6809 | case NEON::BI__builtin_neon_vqrdmulhq_lane_v: |
6810 | case NEON::BI__builtin_neon_vqrdmulh_lane_v: { |
6811 | auto *RTy = cast<llvm::FixedVectorType>(Ty); |
6812 | if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || |
6813 | BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) |
6814 | RTy = llvm::FixedVectorType::get(RTy->getElementType(), |
6815 | RTy->getNumElements() * 2); |
6816 | llvm::Type *Tys[2] = { |
6817 | RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false, |
6818 | /*isQuad*/ false))}; |
6819 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
6820 | } |
6821 | case NEON::BI__builtin_neon_vqdmulhq_laneq_v: |
6822 | case NEON::BI__builtin_neon_vqdmulh_laneq_v: |
6823 | case NEON::BI__builtin_neon_vqrdmulhq_laneq_v: |
6824 | case NEON::BI__builtin_neon_vqrdmulh_laneq_v: { |
6825 | llvm::Type *Tys[2] = { |
6826 | Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false, |
6827 | /*isQuad*/ true))}; |
6828 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
6829 | } |
6830 | case NEON::BI__builtin_neon_vqshl_n_v: |
6831 | case NEON::BI__builtin_neon_vqshlq_n_v: |
6832 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", |
6833 | 1, false); |
6834 | case NEON::BI__builtin_neon_vqshlu_n_v: |
6835 | case NEON::BI__builtin_neon_vqshluq_n_v: |
6836 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", |
6837 | 1, false); |
6838 | case NEON::BI__builtin_neon_vrecpe_v: |
6839 | case NEON::BI__builtin_neon_vrecpeq_v: |
6840 | case NEON::BI__builtin_neon_vrsqrte_v: |
6841 | case NEON::BI__builtin_neon_vrsqrteq_v: |
6842 | Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; |
6843 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
6844 | case NEON::BI__builtin_neon_vrndi_v: |
6845 | case NEON::BI__builtin_neon_vrndiq_v: |
6846 | Int = Builder.getIsFPConstrained() |
6847 | ? Intrinsic::experimental_constrained_nearbyint |
6848 | : Intrinsic::nearbyint; |
6849 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
6850 | case NEON::BI__builtin_neon_vrshr_n_v: |
6851 | case NEON::BI__builtin_neon_vrshrq_n_v: |
6852 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", |
6853 | 1, true); |
6854 | case NEON::BI__builtin_neon_vsha512hq_v: |
6855 | case NEON::BI__builtin_neon_vsha512h2q_v: |
6856 | case NEON::BI__builtin_neon_vsha512su0q_v: |
6857 | case NEON::BI__builtin_neon_vsha512su1q_v: { |
6858 | Function *F = CGM.getIntrinsic(Int); |
6859 | return EmitNeonCall(F, Ops, ""); |
6860 | } |
6861 | case NEON::BI__builtin_neon_vshl_n_v: |
6862 | case NEON::BI__builtin_neon_vshlq_n_v: |
6863 | Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); |
6864 | return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], |
6865 | "vshl_n"); |
6866 | case NEON::BI__builtin_neon_vshll_n_v: { |
6867 | llvm::FixedVectorType *SrcTy = |
6868 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
6869 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6870 | if (Usgn) |
6871 | Ops[0] = Builder.CreateZExt(Ops[0], VTy); |
6872 | else |
6873 | Ops[0] = Builder.CreateSExt(Ops[0], VTy); |
6874 | Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false); |
6875 | return Builder.CreateShl(Ops[0], Ops[1], "vshll_n"); |
6876 | } |
6877 | case NEON::BI__builtin_neon_vshrn_n_v: { |
6878 | llvm::FixedVectorType *SrcTy = |
6879 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6880 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6881 | Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); |
6882 | if (Usgn) |
6883 | Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]); |
6884 | else |
6885 | Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]); |
6886 | return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n"); |
6887 | } |
6888 | case NEON::BI__builtin_neon_vshr_n_v: |
6889 | case NEON::BI__builtin_neon_vshrq_n_v: |
6890 | return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n"); |
6891 | case NEON::BI__builtin_neon_vst1_v: |
6892 | case NEON::BI__builtin_neon_vst1q_v: |
6893 | case NEON::BI__builtin_neon_vst2_v: |
6894 | case NEON::BI__builtin_neon_vst2q_v: |
6895 | case NEON::BI__builtin_neon_vst3_v: |
6896 | case NEON::BI__builtin_neon_vst3q_v: |
6897 | case NEON::BI__builtin_neon_vst4_v: |
6898 | case NEON::BI__builtin_neon_vst4q_v: |
6899 | case NEON::BI__builtin_neon_vst2_lane_v: |
6900 | case NEON::BI__builtin_neon_vst2q_lane_v: |
6901 | case NEON::BI__builtin_neon_vst3_lane_v: |
6902 | case NEON::BI__builtin_neon_vst3q_lane_v: |
6903 | case NEON::BI__builtin_neon_vst4_lane_v: |
6904 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
6905 | llvm::Type *Tys[] = {Int8PtrTy, Ty}; |
6906 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
6907 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, ""); |
6908 | } |
6909 | case NEON::BI__builtin_neon_vsm3partw1q_v: |
6910 | case NEON::BI__builtin_neon_vsm3partw2q_v: |
6911 | case NEON::BI__builtin_neon_vsm3ss1q_v: |
6912 | case NEON::BI__builtin_neon_vsm4ekeyq_v: |
6913 | case NEON::BI__builtin_neon_vsm4eq_v: { |
6914 | Function *F = CGM.getIntrinsic(Int); |
6915 | return EmitNeonCall(F, Ops, ""); |
6916 | } |
6917 | case NEON::BI__builtin_neon_vsm3tt1aq_v: |
6918 | case NEON::BI__builtin_neon_vsm3tt1bq_v: |
6919 | case NEON::BI__builtin_neon_vsm3tt2aq_v: |
6920 | case NEON::BI__builtin_neon_vsm3tt2bq_v: { |
6921 | Function *F = CGM.getIntrinsic(Int); |
6922 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
6923 | return EmitNeonCall(F, Ops, ""); |
6924 | } |
6925 | case NEON::BI__builtin_neon_vst1_x2_v: |
6926 | case NEON::BI__builtin_neon_vst1q_x2_v: |
6927 | case NEON::BI__builtin_neon_vst1_x3_v: |
6928 | case NEON::BI__builtin_neon_vst1q_x3_v: |
6929 | case NEON::BI__builtin_neon_vst1_x4_v: |
6930 | case NEON::BI__builtin_neon_vst1q_x4_v: { |
6931 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
6932 | // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6933 | // in AArch64 it comes last. We may want to standardize on one or the other.
6934 | if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be || |
6935 | Arch == llvm::Triple::aarch64_32) { |
6936 | llvm::Type *Tys[2] = { VTy, PTy }; |
6937 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
6938 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
6939 | } |
6940 | llvm::Type *Tys[2] = { PTy, VTy }; |
6941 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
6942 | } |
6943 | case NEON::BI__builtin_neon_vsubhn_v: { |
6944 | llvm::FixedVectorType *SrcTy = |
6945 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6946 | |
6947 | // %diff = sub <4 x i32> %lhs, %rhs
6948 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6949 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
6950 | Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn"); |
6951 | |
6952 | // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
6953 | Constant *ShiftAmt = |
6954 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
6955 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn"); |
6956 | |
6957 | // %res = trunc <4 x i32> %high to <4 x i16> |
6958 | return Builder.CreateTrunc(Ops[0], VTy, "vsubhn"); |
6959 | } |
6960 | case NEON::BI__builtin_neon_vtrn_v: |
6961 | case NEON::BI__builtin_neon_vtrnq_v: { |
6962 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
6963 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6964 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
6965 | Value *SV = nullptr; |
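     | // vtrn returns two vectors; each shuffle result is stored in turn
     | // through the sret pointer held in Ops[0].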
6966 | |
6967 | for (unsigned vi = 0; vi != 2; ++vi) { |
6968 | SmallVector<int, 16> Indices; |
6969 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
6970 | Indices.push_back(i+vi); |
6971 | Indices.push_back(i+e+vi); |
6972 | } |
6973 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
6974 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
6975 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
6976 | } |
6977 | return SV; |
6978 | } |
6979 | case NEON::BI__builtin_neon_vtst_v: |
6980 | case NEON::BI__builtin_neon_vtstq_v: { |
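     | // vtst computes (a & b) != 0 per lane, sign-extended to an
     | // all-ones/all-zeros lane mask.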
6981 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6982 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6983 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
6984 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
6985 | ConstantAggregateZero::get(Ty)); |
6986 | return Builder.CreateSExt(Ops[0], Ty, "vtst"); |
6987 | } |
6988 | case NEON::BI__builtin_neon_vuzp_v: |
6989 | case NEON::BI__builtin_neon_vuzpq_v: { |
6990 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
6991 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6992 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
6993 | Value *SV = nullptr; |
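     | // vuzp de-interleaves: vi=0 collects the even-numbered lanes of the
     | // concatenated input, vi=1 the odd-numbered ones.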
6994 | |
6995 | for (unsigned vi = 0; vi != 2; ++vi) { |
6996 | SmallVector<int, 16> Indices; |
6997 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
6998 | Indices.push_back(2*i+vi); |
6999 | |
7000 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7001 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
7002 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7003 | } |
7004 | return SV; |
7005 | } |
7006 | case NEON::BI__builtin_neon_vxarq_v: { |
7007 | Function *F = CGM.getIntrinsic(Int); |
7008 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
7009 | return EmitNeonCall(F, Ops, ""); |
7010 | } |
7011 | case NEON::BI__builtin_neon_vzip_v: |
7012 | case NEON::BI__builtin_neon_vzipq_v: { |
7013 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
7014 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7015 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
7016 | Value *SV = nullptr; |
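     | // vzip interleaves the two inputs: with 4 lanes, vi=0 yields mask
     | // <0,4,1,5> (low halves) and vi=1 yields <2,6,3,7> (high halves).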
7017 | |
7018 | for (unsigned vi = 0; vi != 2; ++vi) { |
7019 | SmallVector<int, 16> Indices; |
7020 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
7021 | Indices.push_back((i + vi*e) >> 1); |
7022 | Indices.push_back(((i + vi*e) >> 1)+e); |
7023 | } |
7024 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7025 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
7026 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7027 | } |
7028 | return SV; |
7029 | } |
7030 | case NEON::BI__builtin_neon_vdot_v: |
7031 | case NEON::BI__builtin_neon_vdotq_v: { |
7032 | auto *InputTy = |
7033 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7034 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7035 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7036 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot"); |
7037 | } |
7038 | case NEON::BI__builtin_neon_vfmlal_low_v: |
7039 | case NEON::BI__builtin_neon_vfmlalq_low_v: { |
7040 | auto *InputTy = |
7041 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7042 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7043 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low"); |
7044 | } |
7045 | case NEON::BI__builtin_neon_vfmlsl_low_v: |
7046 | case NEON::BI__builtin_neon_vfmlslq_low_v: { |
7047 | auto *InputTy = |
7048 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7049 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7050 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low"); |
7051 | } |
7052 | case NEON::BI__builtin_neon_vfmlal_high_v: |
7053 | case NEON::BI__builtin_neon_vfmlalq_high_v: { |
7054 | auto *InputTy = |
7055 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7056 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7057 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high"); |
7058 | } |
7059 | case NEON::BI__builtin_neon_vfmlsl_high_v: |
7060 | case NEON::BI__builtin_neon_vfmlslq_high_v: { |
7061 | auto *InputTy = |
7062 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7063 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7064 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high"); |
7065 | } |
7066 | case NEON::BI__builtin_neon_vmmlaq_v: { |
7067 | auto *InputTy = |
7068 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7069 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7070 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7071 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla"); |
7072 | } |
7073 | case NEON::BI__builtin_neon_vusmmlaq_v: { |
7074 | auto *InputTy = |
7075 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7076 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7077 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla"); |
7078 | } |
7079 | case NEON::BI__builtin_neon_vusdot_v: |
7080 | case NEON::BI__builtin_neon_vusdotq_v: { |
7081 | auto *InputTy = |
7082 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7083 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7084 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot"); |
7085 | } |
7086 | case NEON::BI__builtin_neon_vbfdot_v: |
7087 | case NEON::BI__builtin_neon_vbfdotq_v: { |
7088 | llvm::Type *InputTy = |
7089 | llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16); |
7090 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7091 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot"); |
7092 | } |
7093 | case NEON::BI__builtin_neon___a32_vcvt_bf16_v: { |
7094 | llvm::Type *Tys[1] = { Ty }; |
7095 | Function *F = CGM.getIntrinsic(Int, Tys); |
7096 | return EmitNeonCall(F, Ops, "vcvtfp2bf"); |
7097 | } |
7098 | |
7099 | } |
7100 | |
7101 | assert(Int && "Expected valid intrinsic number");
7102 | |
7103 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
7104 | Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E); |
7105 | |
7106 | Value *Result = EmitNeonCall(F, Ops, NameHint); |
7107 | llvm::Type *ResultType = ConvertType(E->getType()); |
7108 | // Cast the one-element vector result of an AArch64 intrinsic back to the
7109 | // scalar type expected by the builtin.
7110 | return Builder.CreateBitCast(Result, ResultType, NameHint); |
7111 | } |
7112 | |
7113 | Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr( |
7114 | Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp, |
7115 | const CmpInst::Predicate Ip, const Twine &Name) { |
7116 | llvm::Type *OTy = Op->getType(); |
7117 | |
7118 | // FIXME: this is utterly horrific. We should not be looking at previous |
7119 | // codegen context to find out what needs doing. Unfortunately TableGen |
7120 | // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32 |
7121 | // (etc). |
7122 | if (BitCastInst *BI = dyn_cast<BitCastInst>(Op)) |
7123 | OTy = BI->getOperand(0)->getType(); |
7124 | |
7125 | Op = Builder.CreateBitCast(Op, OTy); |
7126 | if (OTy->getScalarType()->isFloatingPointTy()) { |
7127 | Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy)); |
7128 | } else { |
7129 | Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy)); |
7130 | } |
7131 | return Builder.CreateSExt(Op, Ty, Name); |
7132 | } |
7133 | |
7134 | static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
7135 | Value *ExtOp, Value *IndexOp, |
7136 | llvm::Type *ResTy, unsigned IntID, |
7137 | const char *Name) { |
7138 | SmallVector<Value *, 2> TblOps; |
7139 | if (ExtOp) |
7140 | TblOps.push_back(ExtOp); |
7141 | |
7142 | // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
7143 | SmallVector<int, 16> Indices; |
7144 | auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
7145 | for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { |
7146 | Indices.push_back(2*i); |
7147 | Indices.push_back(2*i+1); |
7148 | } |
7149 | |
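     | // Each shuffle below concatenates a pair of 64-bit table registers
     | // into one 128-bit TBL operand using the sequential mask above.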
7150 | int PairPos = 0, End = Ops.size() - 1; |
7151 | while (PairPos < End) { |
7152 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
7153 | Ops[PairPos+1], Indices, |
7154 | Name)); |
7155 | PairPos += 2; |
7156 | } |
7157 | |
7158 | // If there's an odd number of 64-bit lookup-table vectors, fill the high
7159 | // 64 bits of the final 128-bit lookup table with zeros.
7160 | if (PairPos == End) { |
7161 | Value *ZeroTbl = ConstantAggregateZero::get(TblTy); |
7162 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
7163 | ZeroTbl, Indices, Name)); |
7164 | } |
7165 | |
7166 | Function *TblF; |
7167 | TblOps.push_back(IndexOp); |
7168 | TblF = CGF.CGM.getIntrinsic(IntID, ResTy); |
7169 | |
7170 | return CGF.EmitNeonCall(TblF, TblOps, Name); |
7171 | } |
7172 | |
7173 | Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { |
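     | // Map the hint builtins (nop/yield/wfe/wfi/sev/sevl) to the immediate
     | // operand of the llvm.arm.hint intrinsic.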
7174 | unsigned Value; |
7175 | switch (BuiltinID) { |
7176 | default: |
7177 | return nullptr; |
7178 | case ARM::BI__builtin_arm_nop: |
7179 | Value = 0; |
7180 | break; |
7181 | case ARM::BI__builtin_arm_yield: |
7182 | case ARM::BI__yield: |
7183 | Value = 1; |
7184 | break; |
7185 | case ARM::BI__builtin_arm_wfe: |
7186 | case ARM::BI__wfe: |
7187 | Value = 2; |
7188 | break; |
7189 | case ARM::BI__builtin_arm_wfi: |
7190 | case ARM::BI__wfi: |
7191 | Value = 3; |
7192 | break; |
7193 | case ARM::BI__builtin_arm_sev: |
7194 | case ARM::BI__sev: |
7195 | Value = 4; |
7196 | break; |
7197 | case ARM::BI__builtin_arm_sevl: |
7198 | case ARM::BI__sevl: |
7199 | Value = 5; |
7200 | break; |
7201 | } |
7202 | |
7203 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint), |
7204 | llvm::ConstantInt::get(Int32Ty, Value)); |
7205 | } |
7206 | |
7207 | enum SpecialRegisterAccessKind { |
7208 | NormalRead, |
7209 | VolatileRead, |
7210 | Write, |
7211 | }; |
7212 | |
7213 | // Generates the IR for the read/write special register builtin.
7214 | // ValueType is the type of the value that is to be written or read;
7215 | // RegisterType is the type of the register being written to or read from.
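     | // e.g. on AArch32, __builtin_arm_rsr64("sysreg") lowers to a call to
     | // llvm.read_volatile_register.i64 with "sysreg" attached as metadata.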
7216 | static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF, |
7217 | const CallExpr *E, |
7218 | llvm::Type *RegisterType, |
7219 | llvm::Type *ValueType, |
7220 | SpecialRegisterAccessKind AccessKind, |
7221 | StringRef SysReg = "") { |
7222 | // Write and read register intrinsics only support 32- and 64-bit operations.
7223 | assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
7224 |        && "Unsupported size for register.");
7225 | |
7226 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
7227 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
7228 | LLVMContext &Context = CGM.getLLVMContext(); |
7229 | |
7230 | if (SysReg.empty()) { |
7231 | const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); |
7232 | SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString(); |
7233 | } |
7234 | |
7235 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; |
7236 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
7237 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
7238 | |
7239 | llvm::Type *Types[] = { RegisterType }; |
7240 | |
7241 | bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); |
7242 | assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7243 |        && "Can't fit 64-bit value in 32-bit register");
7244 | |
7245 | if (AccessKind != Write) { |
7246 | assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7247 | llvm::Function *F = CGM.getIntrinsic( |
7248 | AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register |
7249 | : llvm::Intrinsic::read_register, |
7250 | Types); |
7251 | llvm::Value *Call = Builder.CreateCall(F, Metadata); |
7252 | |
7253 | if (MixedTypes) |
7254 | // Read into a 64-bit register, then truncate the result to 32 bits.
7255 | return Builder.CreateTrunc(Call, ValueType); |
7256 | |
7257 | if (ValueType->isPointerTy()) |
7258 | // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*). |
7259 | return Builder.CreateIntToPtr(Call, ValueType); |
7260 | |
7261 | return Call; |
7262 | } |
7263 | |
7264 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
7265 | llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); |
7266 | if (MixedTypes) { |
7267 | // Extend the 32-bit write value to 64 bits to pass to the write intrinsic.
7268 | ArgValue = Builder.CreateZExt(ArgValue, RegisterType); |
7269 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7270 | } |
7271 | |
7272 | if (ValueType->isPointerTy()) { |
7273 | // Have VoidPtrTy ArgValue but want to return an i32/i64. |
7274 | ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType); |
7275 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7276 | } |
7277 | |
7278 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7279 | } |
7280 | |
7281 | /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra |
7282 | /// argument that specifies the vector type. |
7283 | static bool HasExtraNeonArgument(unsigned BuiltinID) { |
7284 | switch (BuiltinID) { |
7285 | default: break; |
7286 | case NEON::BI__builtin_neon_vget_lane_i8: |
7287 | case NEON::BI__builtin_neon_vget_lane_i16: |
7288 | case NEON::BI__builtin_neon_vget_lane_bf16: |
7289 | case NEON::BI__builtin_neon_vget_lane_i32: |
7290 | case NEON::BI__builtin_neon_vget_lane_i64: |
7291 | case NEON::BI__builtin_neon_vget_lane_f32: |
7292 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
7293 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
7294 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
7295 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
7296 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
7297 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
7298 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
7299 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
7300 | case NEON::BI__builtin_neon_vset_lane_i8: |
7301 | case NEON::BI__builtin_neon_vset_lane_i16: |
7302 | case NEON::BI__builtin_neon_vset_lane_bf16: |
7303 | case NEON::BI__builtin_neon_vset_lane_i32: |
7304 | case NEON::BI__builtin_neon_vset_lane_i64: |
7305 | case NEON::BI__builtin_neon_vset_lane_f32: |
7306 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
7307 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
7308 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
7309 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
7310 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
7311 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
7312 | case NEON::BI__builtin_neon_vsha1h_u32: |
7313 | case NEON::BI__builtin_neon_vsha1cq_u32: |
7314 | case NEON::BI__builtin_neon_vsha1pq_u32: |
7315 | case NEON::BI__builtin_neon_vsha1mq_u32: |
7316 | case NEON::BI__builtin_neon_vcvth_bf16_f32: |
7317 | case clang::ARM::BI_MoveToCoprocessor: |
7318 | case clang::ARM::BI_MoveToCoprocessor2: |
7319 | return false; |
7320 | } |
7321 | return true; |
7322 | } |
7323 | |
7324 | Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, |
7325 | const CallExpr *E, |
7326 | ReturnValueSlot ReturnValue, |
7327 | llvm::Triple::ArchType Arch) { |
7328 | if (auto Hint = GetValueForARMHint(BuiltinID)) |
7329 | return Hint; |
7330 | |
7331 | if (BuiltinID == ARM::BI__emit) { |
7332 | bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb; |
7333 | llvm::FunctionType *FTy = |
7334 | llvm::FunctionType::get(VoidTy, /*Variadic=*/false); |
7335 | |
7336 | Expr::EvalResult Result; |
7337 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
7338 | llvm_unreachable("Sema will ensure that the parameter is constant");
7339 | |
7340 | llvm::APSInt Value = Result.Val.getInt(); |
7341 | uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue(); |
7342 | |
7343 | llvm::InlineAsm *Emit = |
7344 | IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "", |
7345 | /*hasSideEffects=*/true) |
7346 | : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", |
7347 | /*hasSideEffects=*/true); |
7348 | |
7349 | return Builder.CreateCall(Emit); |
7350 | } |
7351 | |
7352 | if (BuiltinID == ARM::BI__builtin_arm_dbg) { |
7353 | Value *Option = EmitScalarExpr(E->getArg(0)); |
7354 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option); |
7355 | } |
7356 | |
7357 | if (BuiltinID == ARM::BI__builtin_arm_prefetch) { |
7358 | Value *Address = EmitScalarExpr(E->getArg(0)); |
7359 | Value *RW = EmitScalarExpr(E->getArg(1)); |
7360 | Value *IsData = EmitScalarExpr(E->getArg(2)); |
7361 | |
7362 | // Locality is not supported on the ARM target, so pass the maximum (3).
7363 | Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); |
7364 | |
7365 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
7366 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
7367 | } |
7368 | |
7369 | if (BuiltinID == ARM::BI__builtin_arm_rbit) { |
7370 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7371 | return Builder.CreateCall( |
7372 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
7373 | } |
7374 | |
7375 | if (BuiltinID == ARM::BI__builtin_arm_cls) { |
7376 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7377 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls"); |
7378 | } |
7379 | if (BuiltinID == ARM::BI__builtin_arm_cls64) { |
7380 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7381 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg, |
7382 | "cls"); |
7383 | } |
7384 | |
7385 | if (BuiltinID == ARM::BI__clear_cache) { |
7386 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7387 | const FunctionDecl *FD = E->getDirectCallee(); |
7388 | Value *Ops[2]; |
7389 | for (unsigned i = 0; i < 2; i++) |
7390 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
7391 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
7392 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
7393 | StringRef Name = FD->getName(); |
7394 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
7395 | } |
7396 | |
7397 | if (BuiltinID == ARM::BI__builtin_arm_mcrr || |
7398 | BuiltinID == ARM::BI__builtin_arm_mcrr2) { |
7399 | Function *F; |
7400 | |
7401 | switch (BuiltinID) { |
7402 | default: llvm_unreachable("unexpected builtin");
7403 | case ARM::BI__builtin_arm_mcrr: |
7404 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr); |
7405 | break; |
7406 | case ARM::BI__builtin_arm_mcrr2: |
7407 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); |
7408 | break; |
7409 | } |
7410 | |
7411 | // The MCRR{2} instruction has 5 operands, but the
7412 | // intrinsic has only 4 because Rt and Rt2 are
7413 | // represented as a single unsigned 64-bit integer
7414 | // in the intrinsic definition, even though the
7415 | // instruction encodes them as two separate
7416 | // 32-bit registers.
7417 | |
7418 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
7419 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
7420 | Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); |
7421 | Value *CRm = EmitScalarExpr(E->getArg(3)); |
7422 | |
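     | // Split the i64 RtAndRt2 into Rt (low 32 bits) and Rt2 (high 32 bits).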
7423 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
7424 | Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty); |
7425 | Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1); |
7426 | Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty); |
7427 | |
7428 | return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm}); |
7429 | } |
7430 | |
7431 | if (BuiltinID == ARM::BI__builtin_arm_mrrc || |
7432 | BuiltinID == ARM::BI__builtin_arm_mrrc2) { |
7433 | Function *F; |
7434 | |
7435 | switch (BuiltinID) { |
7436 | default: llvm_unreachable("unexpected builtin");
7437 | case ARM::BI__builtin_arm_mrrc: |
7438 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc); |
7439 | break; |
7440 | case ARM::BI__builtin_arm_mrrc2: |
7441 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); |
7442 | break; |
7443 | } |
7444 | |
7445 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
7446 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
7447 | Value *CRm = EmitScalarExpr(E->getArg(2)); |
7448 | Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm}); |
7449 | |
7450 | // The intrinsic returns an unsigned 64-bit integer,
7451 | // represented as two 32-bit integers.
7452 | |
7453 | Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1); |
7454 | Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0); |
7455 | Rt = Builder.CreateZExt(Rt, Int64Ty); |
7456 | Rt1 = Builder.CreateZExt(Rt1, Int64Ty); |
7457 | |
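     | // Recombine the halves as (Rt << 32) | Rt1; Rt carries the high word.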
7458 | Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32); |
7459 | RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true); |
7460 | RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1); |
7461 | |
7462 | return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); |
7463 | } |
7464 | |
7465 | if (BuiltinID == ARM::BI__builtin_arm_ldrexd || |
7466 | ((BuiltinID == ARM::BI__builtin_arm_ldrex || |
7467 | BuiltinID == ARM::BI__builtin_arm_ldaex) && |
7468 | getContext().getTypeSize(E->getType()) == 64) || |
7469 | BuiltinID == ARM::BI__ldrexd) { |
7470 | Function *F; |
7471 | |
7472 | switch (BuiltinID) { |
7473 | default: llvm_unreachable("unexpected builtin");
7474 | case ARM::BI__builtin_arm_ldaex: |
7475 | F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); |
7476 | break; |
7477 | case ARM::BI__builtin_arm_ldrexd: |
7478 | case ARM::BI__builtin_arm_ldrex: |
7479 | case ARM::BI__ldrexd: |
7480 | F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); |
7481 | break; |
7482 | } |
7483 | |
7484 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
7485 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
7486 | "ldrexd"); |
7487 | |
7488 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
7489 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
7490 | Val0 = Builder.CreateZExt(Val0, Int64Ty); |
7491 | Val1 = Builder.CreateZExt(Val1, Int64Ty); |
7492 | |
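     | // Combine the two halves returned by ldrexd into one i64:
     | // (Val0 << 32) | Val1, with Val0 as the high word.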
7493 | Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); |
7494 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); |
7495 | Val = Builder.CreateOr(Val, Val1); |
7496 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
7497 | } |
7498 | |
7499 | if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
7500 | BuiltinID == ARM::BI__builtin_arm_ldaex) { |
7501 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
7502 | |
7503 | QualType Ty = E->getType(); |
7504 | llvm::Type *RealResTy = ConvertType(Ty); |
7505 | llvm::Type *PtrTy = llvm::IntegerType::get( |
7506 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); |
7507 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
7508 | |
7509 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex |
7510 | ? Intrinsic::arm_ldaex |
7511 | : Intrinsic::arm_ldrex, |
7512 | PtrTy); |
7513 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex"); |
7514 | |
7515 | if (RealResTy->isPointerTy()) |
7516 | return Builder.CreateIntToPtr(Val, RealResTy); |
7517 | else { |
7518 | llvm::Type *IntResTy = llvm::IntegerType::get( |
7519 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
7520 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); |
7521 | return Builder.CreateBitCast(Val, RealResTy); |
7522 | } |
7523 | } |
7524 | |
7525 | if (BuiltinID == ARM::BI__builtin_arm_strexd || |
7526 | ((BuiltinID == ARM::BI__builtin_arm_stlex || |
7527 | BuiltinID == ARM::BI__builtin_arm_strex) && |
7528 | getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { |
7529 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
7530 | ? Intrinsic::arm_stlexd |
7531 | : Intrinsic::arm_strexd); |
7532 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty); |
7533 | |
7534 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
7535 | Value *Val = EmitScalarExpr(E->getArg(0)); |
7536 | Builder.CreateStore(Val, Tmp); |
7537 | |
7538 | Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy)); |
7539 | Val = Builder.CreateLoad(LdPtr); |
7540 | |
7541 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
7542 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
7543 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); |
7544 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); |
7545 | } |
7546 | |
7547 | if (BuiltinID == ARM::BI__builtin_arm_strex || |
7548 | BuiltinID == ARM::BI__builtin_arm_stlex) { |
7549 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
7550 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
7551 | |
7552 | QualType Ty = E->getArg(0)->getType(); |
7553 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
7554 | getContext().getTypeSize(Ty)); |
7555 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
7556 | |
7557 | if (StoreVal->getType()->isPointerTy()) |
7558 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty); |
7559 | else { |
7560 | llvm::Type *IntTy = llvm::IntegerType::get( |
7561 | getLLVMContext(), |
7562 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
7563 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
7564 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty); |
7565 | } |
7566 | |
7567 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
7568 | ? Intrinsic::arm_stlex |
7569 | : Intrinsic::arm_strex, |
7570 | StoreAddr->getType()); |
7571 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex"); |
7572 | } |
7573 | |
7574 | if (BuiltinID == ARM::BI__builtin_arm_clrex) { |
7575 | Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); |
7576 | return Builder.CreateCall(F); |
7577 | } |
7578 | |
7579 | // CRC32 |
7580 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
7581 | switch (BuiltinID) { |
7582 | case ARM::BI__builtin_arm_crc32b: |
7583 | CRCIntrinsicID = Intrinsic::arm_crc32b; break; |
7584 | case ARM::BI__builtin_arm_crc32cb: |
7585 | CRCIntrinsicID = Intrinsic::arm_crc32cb; break; |
7586 | case ARM::BI__builtin_arm_crc32h: |
7587 | CRCIntrinsicID = Intrinsic::arm_crc32h; break; |
7588 | case ARM::BI__builtin_arm_crc32ch: |
7589 | CRCIntrinsicID = Intrinsic::arm_crc32ch; break; |
7590 | case ARM::BI__builtin_arm_crc32w: |
7591 | case ARM::BI__builtin_arm_crc32d: |
7592 | CRCIntrinsicID = Intrinsic::arm_crc32w; break; |
7593 | case ARM::BI__builtin_arm_crc32cw: |
7594 | case ARM::BI__builtin_arm_crc32cd: |
7595 | CRCIntrinsicID = Intrinsic::arm_crc32cw; break; |
7596 | } |
7597 | |
7598 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
7599 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
7600 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
7601 | |
7602 | // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
7603 | // intrinsics, hence we need different codegen for these cases.
7604 | if (BuiltinID == ARM::BI__builtin_arm_crc32d || |
7605 | BuiltinID == ARM::BI__builtin_arm_crc32cd) { |
7606 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
7607 | Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty); |
7608 | Value *Arg1b = Builder.CreateLShr(Arg1, C1); |
7609 | Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); |
7610 | |
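     | // i.e. crc32{c,}w(crc32{c,}w(Arg0, low32(Arg1)), high32(Arg1)).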
7611 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7612 | Value *Res = Builder.CreateCall(F, {Arg0, Arg1a}); |
7613 | return Builder.CreateCall(F, {Res, Arg1b}); |
7614 | } else { |
7615 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); |
7616 | |
7617 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7618 | return Builder.CreateCall(F, {Arg0, Arg1}); |
7619 | } |
7620 | } |
7621 | |
7622 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
7623 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7624 | BuiltinID == ARM::BI__builtin_arm_rsrp || |
7625 | BuiltinID == ARM::BI__builtin_arm_wsr || |
7626 | BuiltinID == ARM::BI__builtin_arm_wsr64 || |
7627 | BuiltinID == ARM::BI__builtin_arm_wsrp) { |
7628 | |
7629 | SpecialRegisterAccessKind AccessKind = Write; |
7630 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
7631 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7632 | BuiltinID == ARM::BI__builtin_arm_rsrp) |
7633 | AccessKind = VolatileRead; |
7634 | |
7635 | bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp || |
7636 | BuiltinID == ARM::BI__builtin_arm_wsrp; |
7637 | |
7638 | bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7639 | BuiltinID == ARM::BI__builtin_arm_wsr64; |
7640 | |
7641 | llvm::Type *ValueType; |
7642 | llvm::Type *RegisterType; |
7643 | if (IsPointerBuiltin) { |
7644 | ValueType = VoidPtrTy; |
7645 | RegisterType = Int32Ty; |
7646 | } else if (Is64Bit) { |
7647 | ValueType = RegisterType = Int64Ty; |
7648 | } else { |
7649 | ValueType = RegisterType = Int32Ty; |
7650 | } |
7651 | |
7652 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, |
7653 | AccessKind); |
7654 | } |
7655 | |
7656 | // Handle MSVC intrinsics before argument evaluation to prevent double |
7657 | // evaluation. |
7658 | if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID)) |
7659 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
7660 | |
7661 | // Deal with MVE builtins |
7662 | if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
7663 | return Result; |
7664 | // Handle CDE builtins |
7665 | if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
7666 | return Result; |
7667 | |
7668 | // Find out if any arguments are required to be integer constant |
7669 | // expressions. |
7670 | unsigned ICEArguments = 0; |
7671 | ASTContext::GetBuiltinTypeError Error; |
7672 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
7673 | assert(Error == ASTContext::GE_None && "Should not codegen an error");
7674 | |
7675 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
7676 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
7677 | }; |
7678 | |
7679 | Address PtrOp0 = Address::invalid(); |
7680 | Address PtrOp1 = Address::invalid(); |
7681 | SmallVector<Value*, 4> Ops; |
7682 | bool HasExtraArg = HasExtraNeonArgument(BuiltinID); |
7683 | unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); |
7684 | for (unsigned i = 0, e = NumArgs; i != e; i++) { |
7685 | if (i == 0) { |
7686 | switch (BuiltinID) { |
7687 | case NEON::BI__builtin_neon_vld1_v: |
7688 | case NEON::BI__builtin_neon_vld1q_v: |
7689 | case NEON::BI__builtin_neon_vld1q_lane_v: |
7690 | case NEON::BI__builtin_neon_vld1_lane_v: |
7691 | case NEON::BI__builtin_neon_vld1_dup_v: |
7692 | case NEON::BI__builtin_neon_vld1q_dup_v: |
7693 | case NEON::BI__builtin_neon_vst1_v: |
7694 | case NEON::BI__builtin_neon_vst1q_v: |
7695 | case NEON::BI__builtin_neon_vst1q_lane_v: |
7696 | case NEON::BI__builtin_neon_vst1_lane_v: |
7697 | case NEON::BI__builtin_neon_vst2_v: |
7698 | case NEON::BI__builtin_neon_vst2q_v: |
7699 | case NEON::BI__builtin_neon_vst2_lane_v: |
7700 | case NEON::BI__builtin_neon_vst2q_lane_v: |
7701 | case NEON::BI__builtin_neon_vst3_v: |
7702 | case NEON::BI__builtin_neon_vst3q_v: |
7703 | case NEON::BI__builtin_neon_vst3_lane_v: |
7704 | case NEON::BI__builtin_neon_vst3q_lane_v: |
7705 | case NEON::BI__builtin_neon_vst4_v: |
7706 | case NEON::BI__builtin_neon_vst4q_v: |
7707 | case NEON::BI__builtin_neon_vst4_lane_v: |
7708 | case NEON::BI__builtin_neon_vst4q_lane_v: |
7709 | // Get the alignment for the argument in addition to the value; |
7710 | // we'll use it later. |
7711 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
7712 | Ops.push_back(PtrOp0.getPointer()); |
7713 | continue; |
7714 | } |
7715 | } |
7716 | if (i == 1) { |
7717 | switch (BuiltinID) { |
7718 | case NEON::BI__builtin_neon_vld2_v: |
7719 | case NEON::BI__builtin_neon_vld2q_v: |
7720 | case NEON::BI__builtin_neon_vld3_v: |
7721 | case NEON::BI__builtin_neon_vld3q_v: |
7722 | case NEON::BI__builtin_neon_vld4_v: |
7723 | case NEON::BI__builtin_neon_vld4q_v: |
7724 | case NEON::BI__builtin_neon_vld2_lane_v: |
7725 | case NEON::BI__builtin_neon_vld2q_lane_v: |
7726 | case NEON::BI__builtin_neon_vld3_lane_v: |
7727 | case NEON::BI__builtin_neon_vld3q_lane_v: |
7728 | case NEON::BI__builtin_neon_vld4_lane_v: |
7729 | case NEON::BI__builtin_neon_vld4q_lane_v: |
7730 | case NEON::BI__builtin_neon_vld2_dup_v: |
7731 | case NEON::BI__builtin_neon_vld2q_dup_v: |
7732 | case NEON::BI__builtin_neon_vld3_dup_v: |
7733 | case NEON::BI__builtin_neon_vld3q_dup_v: |
7734 | case NEON::BI__builtin_neon_vld4_dup_v: |
7735 | case NEON::BI__builtin_neon_vld4q_dup_v: |
7736 | // Get the alignment for the argument in addition to the value; |
7737 | // we'll use it later. |
7738 | PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); |
7739 | Ops.push_back(PtrOp1.getPointer()); |
7740 | continue; |
7741 | } |
7742 | } |
7743 | |
7744 | if ((ICEArguments & (1 << i)) == 0) { |
7745 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
7746 | } else { |
7747 | // If this is required to be a constant, constant fold it so that we know |
7748 | // that the generated intrinsic gets a ConstantInt. |
7749 | Ops.push_back(llvm::ConstantInt::get( |
7750 | getLLVMContext(), |
7751 | *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
7752 | } |
7753 | } |
7754 | |
7755 | switch (BuiltinID) { |
7756 | default: break; |
7757 | |
7758 | case NEON::BI__builtin_neon_vget_lane_i8: |
7759 | case NEON::BI__builtin_neon_vget_lane_i16: |
7760 | case NEON::BI__builtin_neon_vget_lane_i32: |
7761 | case NEON::BI__builtin_neon_vget_lane_i64: |
7762 | case NEON::BI__builtin_neon_vget_lane_bf16: |
7763 | case NEON::BI__builtin_neon_vget_lane_f32: |
7764 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
7765 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
7766 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
7767 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
7768 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
7769 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
7770 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
7771 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
7772 | return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane"); |
7773 | |
7774 | case NEON::BI__builtin_neon_vrndns_f32: { |
7775 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
7776 | llvm::Type *Tys[] = {Arg->getType()}; |
7777 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys); |
7778 | return Builder.CreateCall(F, {Arg}, "vrndn"); } |
7779 | |
7780 | case NEON::BI__builtin_neon_vset_lane_i8: |
7781 | case NEON::BI__builtin_neon_vset_lane_i16: |
7782 | case NEON::BI__builtin_neon_vset_lane_i32: |
7783 | case NEON::BI__builtin_neon_vset_lane_i64: |
7784 | case NEON::BI__builtin_neon_vset_lane_bf16: |
7785 | case NEON::BI__builtin_neon_vset_lane_f32: |
7786 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
7787 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
7788 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
7789 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
7790 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
7791 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
7792 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
7793 | |
7794 | case NEON::BI__builtin_neon_vsha1h_u32: |
7795 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops, |
7796 | "vsha1h"); |
7797 | case NEON::BI__builtin_neon_vsha1cq_u32: |
7798 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops, |
7799 | "vsha1h"); |
7800 | case NEON::BI__builtin_neon_vsha1pq_u32: |
7801 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops, |
7802 | "vsha1h"); |
7803 | case NEON::BI__builtin_neon_vsha1mq_u32: |
7804 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops, |
7805 | "vsha1h"); |
7806 | |
7807 | case NEON::BI__builtin_neon_vcvth_bf16_f32: { |
7808 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops, |
7809 | "vcvtbfp2bf"); |
7810 | } |
7811 | |
7812 | // The ARM _MoveToCoprocessor builtins put the input register value as |
7813 | // the first argument, but the LLVM intrinsic expects it as the third one. |
7814 | case ARM::BI_MoveToCoprocessor: |
7815 | case ARM::BI_MoveToCoprocessor2: { |
7816 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ? |
7817 | Intrinsic::arm_mcr : Intrinsic::arm_mcr2); |
7818 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0], |
7819 | Ops[3], Ops[4], Ops[5]}); |
7820 | } |
7821 | } |
7822 | |
7823 | // Get the last argument, which specifies the vector type. |
7824 | assert(HasExtraArg);
7825 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
7826 | Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()); |
7827 | if (!Result) |
7828 | return nullptr; |
7829 | |
7830 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || |
7831 | BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { |
7832 | // Determine the overloaded type of this builtin. |
7833 | llvm::Type *Ty; |
7834 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) |
7835 | Ty = FloatTy; |
7836 | else |
7837 | Ty = DoubleTy; |
7838 | |
7839 | // Determine whether this is an unsigned conversion or not. |
7840 | bool usgn = Result->getZExtValue() == 1; |
7841 | unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; |
7842 | |
7843 | // Call the appropriate intrinsic. |
7844 | Function *F = CGM.getIntrinsic(Int, Ty); |
7845 | return Builder.CreateCall(F, Ops, "vcvtr"); |
7846 | } |
7847 | |
7848 | // Determine the type of this overloaded NEON intrinsic. |
7849 | NeonTypeFlags Type = Result->getZExtValue(); |
7850 | bool usgn = Type.isUnsigned(); |
7851 | bool rightShift = false; |
7852 | |
7853 | llvm::FixedVectorType *VTy = |
7854 | GetNeonType(this, Type, getTarget().hasLegalHalfType(), false, |
7855 | getTarget().hasBFloat16Type()); |
7856 | llvm::Type *Ty = VTy; |
7857 | if (!Ty) |
7858 | return nullptr; |
7859 | |
7860 | // Many NEON builtins have identical semantics and uses in ARM and |
7861 | // AArch64. Emit these in a single function. |
7862 | auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap); |
7863 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
7864 | IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted); |
7865 | if (Builtin) |
7866 | return EmitCommonNeonBuiltinExpr( |
7867 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
7868 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); |
7869 | |
7870 | unsigned Int; |
7871 | switch (BuiltinID) { |
7872 | default: return nullptr; |
7873 | case NEON::BI__builtin_neon_vld1q_lane_v: |
7874 | // Handle 64-bit integer elements as a special case. Use shuffles of |
7875 | // one-element vectors to avoid poor code for i64 in the backend. |
7876 | if (VTy->getElementType()->isIntegerTy(64)) { |
7877 | // Extract the other lane. |
7878 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7879 | int Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); |
7880 | Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); |
7881 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
7882 | // Load the value as a one-element vector. |
7883 | Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1); |
7884 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
7885 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys); |
7886 | Value *Align = getAlignmentValue32(PtrOp0); |
7887 | Value *Ld = Builder.CreateCall(F, {Ops[0], Align}); |
7888 | // Combine them. |
7889 | int Indices[] = {1 - Lane, Lane}; |
7890 | return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane"); |
7891 | } |
7892 | LLVM_FALLTHROUGH;
7893 | case NEON::BI__builtin_neon_vld1_lane_v: { |
7894 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7895 | PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); |
7896 | Value *Ld = Builder.CreateLoad(PtrOp0); |
7897 | return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane"); |
7898 | } |
7899 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
7900 | Int = |
7901 | usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; |
7902 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", |
7903 | 1, true); |
7904 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
7905 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), |
7906 | Ops, "vqrshrun_n", 1, true); |
7907 | case NEON::BI__builtin_neon_vqshrn_n_v: |
7908 | Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; |
7909 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", |
7910 | 1, true); |
7911 | case NEON::BI__builtin_neon_vqshrun_n_v: |
7912 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), |
7913 | Ops, "vqshrun_n", 1, true); |
7914 | case NEON::BI__builtin_neon_vrecpe_v: |
7915 | case NEON::BI__builtin_neon_vrecpeq_v: |
7916 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), |
7917 | Ops, "vrecpe"); |
7918 | case NEON::BI__builtin_neon_vrshrn_n_v: |
7919 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), |
7920 | Ops, "vrshrn_n", 1, true); |
7921 | case NEON::BI__builtin_neon_vrsra_n_v: |
7922 | case NEON::BI__builtin_neon_vrsraq_n_v: |
7923 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
7924 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7925 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); |
7926 | Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; |
7927 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]}); |
7928 | return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); |
7929 | case NEON::BI__builtin_neon_vsri_n_v: |
7930 | case NEON::BI__builtin_neon_vsriq_n_v: |
7931 | rightShift = true; |
7932 | LLVM_FALLTHROUGH;
7933 | case NEON::BI__builtin_neon_vsli_n_v: |
7934 | case NEON::BI__builtin_neon_vsliq_n_v: |
7935 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); |
7936 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), |
7937 | Ops, "vsli_n"); |
7938 | case NEON::BI__builtin_neon_vsra_n_v: |
7939 | case NEON::BI__builtin_neon_vsraq_n_v: |
7940 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
7941 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
7942 | return Builder.CreateAdd(Ops[0], Ops[1]); |
7943 | case NEON::BI__builtin_neon_vst1q_lane_v: |
7944 | // Handle 64-bit integer elements as a special case. Use a shuffle to get |
7945 | // a one-element vector and avoid poor code for i64 in the backend. |
7946 | if (VTy->getElementType()->isIntegerTy(64)) { |
7947 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7948 | Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2])); |
7949 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
7950 | Ops[2] = getAlignmentValue32(PtrOp0); |
7951 | llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; |
7952 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, |
7953 | Tys), Ops); |
7954 | } |
7955 | LLVM_FALLTHROUGH;
7956 | case NEON::BI__builtin_neon_vst1_lane_v: { |
7957 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7958 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
7959 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
7960 | auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty)); |
7961 | return St; |
7962 | } |
7963 | case NEON::BI__builtin_neon_vtbl1_v: |
7964 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), |
7965 | Ops, "vtbl1"); |
7966 | case NEON::BI__builtin_neon_vtbl2_v: |
7967 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), |
7968 | Ops, "vtbl2"); |
7969 | case NEON::BI__builtin_neon_vtbl3_v: |
7970 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), |
7971 | Ops, "vtbl3"); |
7972 | case NEON::BI__builtin_neon_vtbl4_v: |
7973 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), |
7974 | Ops, "vtbl4"); |
7975 | case NEON::BI__builtin_neon_vtbx1_v: |
7976 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), |
7977 | Ops, "vtbx1"); |
7978 | case NEON::BI__builtin_neon_vtbx2_v: |
7979 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), |
7980 | Ops, "vtbx2"); |
7981 | case NEON::BI__builtin_neon_vtbx3_v: |
7982 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), |
7983 | Ops, "vtbx3"); |
7984 | case NEON::BI__builtin_neon_vtbx4_v: |
7985 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), |
7986 | Ops, "vtbx4"); |
7987 | } |
7988 | } |
7989 | |
7990 | template<typename Integer> |
7991 | static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) { |
7992 | return E->getIntegerConstantExpr(Context)->getExtValue(); |
7993 | } |
7994 | |
7995 | static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V, |
7996 | llvm::Type *T, bool Unsigned) { |
7997 | // Helper function called by Tablegen-constructed ARM MVE builtin codegen, |
7998 | // which finds it convenient to specify signed/unsigned as a boolean flag. |
7999 | return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T); |
8000 | } |
8001 | |
8002 | static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V, |
8003 | uint32_t Shift, bool Unsigned) { |
8004 | // MVE helper function for integer shift right. This must handle signed vs |
8005 | // unsigned, and also deal specially with the case where the shift count is |
8006 | // equal to the lane size. In LLVM IR, an LShr with that parameter would be |
8007 | // undefined behavior, but in MVE it's legal, so we must convert it to code |
8008 | // that is not undefined in IR. |
8009 | unsigned LaneBits = cast<llvm::VectorType>(V->getType()) |
8010 | ->getElementType() |
8011 | ->getPrimitiveSizeInBits(); |
8012 | if (Shift == LaneBits) { |
8013 | // An unsigned shift of the full lane size always generates zero, so we can |
8014 | // simply emit a zero vector. A signed shift of the full lane size does the |
8015 | // same thing as shifting by one bit fewer. |
8016 | if (Unsigned) |
8017 | return llvm::Constant::getNullValue(V->getType()); |
8018 | else |
8019 | --Shift; |
8020 | } |
8021 | return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift); |
8022 | } |
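     | // For example, with <8 x i16> lanes (LaneBits == 16), a shift count of 16
     | // becomes a zero vector in the unsigned case, while the signed case is
     | // emitted as an ashr by 15, broadcasting the sign bit across each lane.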
8023 | |
8024 | static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) { |
8025 | // MVE-specific helper function for a vector splat, which infers the element |
8026 | // count of the output vector by knowing that MVE vectors are all 128 bits |
8027 | // wide. |
8028 | unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits(); |
8029 | return Builder.CreateVectorSplat(Elements, V); |
8030 | } |
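     | // For example, an i32 scalar is splatted to a <4 x i32> vector (128 / 32
     | // == 4 lanes), and an i8 scalar to a <16 x i8> vector.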
8031 | |
8032 | static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder, |
8033 | CodeGenFunction *CGF, |
8034 | llvm::Value *V, |
8035 | llvm::Type *DestType) { |
8036 | // Convert one MVE vector type into another by reinterpreting its in-register |
8037 | // format. |
8038 | // |
8039 | // Little-endian, this is identical to a bitcast (which reinterprets the |
8040 | // memory format). But big-endian, they're not necessarily the same, because |
8041 | // the register and memory formats map to each other differently depending on |
8042 | // the lane size. |
8043 | // |
8044 | // We generate a bitcast whenever we can (if we're little-endian, or if the |
8045 | // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic |
8046 | // that performs the different kind of reinterpretation. |
8047 | if (CGF->getTarget().isBigEndian() && |
8048 | V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) { |
8049 | return Builder.CreateCall( |
8050 | CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq, |
8051 | {DestType, V->getType()}), |
8052 | V); |
8053 | } else { |
8054 | return Builder.CreateBitCast(V, DestType); |
8055 | } |
8056 | } |
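     | // For example, reinterpreting <8 x i16> as <4 x i32> on a big-endian
     | // target emits the arm.mve.vreinterpretq intrinsic, while the same
     | // conversion on little-endian (or a same-lane-size conversion such as
     | // <8 x i16> to <8 x half>) lowers to a plain bitcast.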
8057 | |
8058 | static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { |
8059 | // Make a shufflevector that extracts every other element of a vector (evens |
8060 | // or odds, as desired). |
8061 | SmallVector<int, 16> Indices; |
8062 | unsigned InputElements = |
8063 | cast<llvm::FixedVectorType>(V->getType())->getNumElements(); |
8064 | for (unsigned i = 0; i < InputElements; i += 2) |
8065 | Indices.push_back(i + Odd); |
8066 | return Builder.CreateShuffleVector(V, Indices); |
8067 | } |
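     | // For example, unzipping an 8-element vector uses the shuffle mask
     | // {0, 2, 4, 6} for the even lanes (Odd == false) and {1, 3, 5, 7} for
     | // the odd lanes, halving the element count.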
8068 | |
8069 | static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0, |
8070 | llvm::Value *V1) { |
8071 | // Make a shufflevector that interleaves two vectors element by element. |
8072 | assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8073 | SmallVector<int, 16> Indices; |
8074 | unsigned InputElements = |
8075 | cast<llvm::FixedVectorType>(V0->getType())->getNumElements(); |
8076 | for (unsigned i = 0; i < InputElements; i++) { |
8077 | Indices.push_back(i); |
8078 | Indices.push_back(i + InputElements); |
8079 | } |
8080 | return Builder.CreateShuffleVector(V0, V1, Indices); |
8081 | } |
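     | // For example, zipping two 4-element vectors uses the shuffle mask
     | // {0, 4, 1, 5, 2, 6, 3, 7}, alternating lanes of V0 and V1 and doubling
     | // the element count.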
8082 | |
8083 | template<unsigned HighBit, unsigned OtherBits> |
8084 | static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { |
8085 | // MVE-specific helper function to make a vector splat of a constant such as |
8086 | // UINT_MAX or INT_MIN, in which all bits below the highest one are equal. |
8087 | llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType(); |
8088 | unsigned LaneBits = T->getPrimitiveSizeInBits(); |
8089 | uint32_t Value = HighBit << (LaneBits - 1); |
8090 | if (OtherBits) |
8091 | Value |= (1UL << (LaneBits - 1)) - 1; |
8092 | llvm::Value *Lane = llvm::ConstantInt::get(T, Value); |
8093 | return ARMMVEVectorSplat(Builder, Lane); |
8094 | } |
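     | // For example, with 16-bit lanes: HighBit == 1 and OtherBits == 0 splats
     | // 0x8000 (INT16_MIN), HighBit == 0 and OtherBits == 1 splats 0x7fff
     | // (INT16_MAX), and setting both splats 0xffff (UINT16_MAX).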
8095 | |
8096 | static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder, |
8097 | llvm::Value *V, |
8098 | unsigned ReverseWidth) { |
8099 | // MVE-specific helper function which reverses the elements of a |
8100 | // vector within every (ReverseWidth)-bit collection of lanes. |
8101 | SmallVector<int, 16> Indices; |
8102 | unsigned LaneSize = V->getType()->getScalarSizeInBits(); |
8103 | unsigned Elements = 128 / LaneSize; |
8104 | unsigned Mask = ReverseWidth / LaneSize - 1; |
8105 | for (unsigned i = 0; i < Elements; i++) |
8106 | Indices.push_back(i ^ Mask); |
8107 | return Builder.CreateShuffleVector(V, Indices); |
8108 | } |
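     | // For example, reversing <16 x i8> within 32-bit chunks (a vrev32.8)
     | // uses Mask == 3, giving the index sequence 3,2,1,0, 7,6,5,4, ...; the
     | // XOR flips only the bits that address lanes within each chunk.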
8109 | |
8110 | Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID, |
8111 | const CallExpr *E, |
8112 | ReturnValueSlot ReturnValue, |
8113 | llvm::Triple::ArchType Arch) { |
8114 | enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType; |
8115 | Intrinsic::ID IRIntr; |
8116 | unsigned NumVectors; |
8117 | |
8118 | // Code autogenerated by Tablegen will handle all the simple builtins. |
8119 | switch (BuiltinID) { |
8120 | #include "clang/Basic/arm_mve_builtin_cg.inc" |
8121 | |
8122 | // If we didn't match an MVE builtin id at all, go back to the |
8123 | // main EmitARMBuiltinExpr. |
8124 | default: |
8125 | return nullptr; |
8126 | } |
8127 | |
8128 | // Anything that breaks from that switch is an MVE builtin that |
8129 | // needs handwritten code to generate. |
8130 | |
8131 | switch (CustomCodeGenType) { |
8132 | |
8133 | case CustomCodeGen::VLD24: { |
8134 | llvm::SmallVector<Value *, 4> Ops; |
8135 | llvm::SmallVector<llvm::Type *, 4> Tys; |
8136 | |
8137 | auto MvecCType = E->getType(); |
8138 | auto MvecLType = ConvertType(MvecCType); |
8139 | assert(MvecLType->isStructTy() &&
8140 |        "Return type for vld[24]q should be a struct");
8141 | assert(MvecLType->getStructNumElements() == 1 &&
8142 |        "Return-type struct for vld[24]q should have one element");
8143 | auto MvecLTypeInner = MvecLType->getStructElementType(0); |
8144 | assert(MvecLTypeInner->isArrayTy() &&
8145 |        "Return-type struct for vld[24]q should contain an array");
8146 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8147 |        "Array member of return-type struct vld[24]q has wrong length");
8148 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
8149 | |
8150 | Tys.push_back(VecLType); |
8151 | |
8152 | auto Addr = E->getArg(0); |
8153 | Ops.push_back(EmitScalarExpr(Addr)); |
8154 | Tys.push_back(ConvertType(Addr->getType())); |
8155 | |
8156 | Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys)); |
8157 | Value *LoadResult = Builder.CreateCall(F, Ops); |
8158 | Value *MvecOut = UndefValue::get(MvecLType); |
8159 | for (unsigned i = 0; i < NumVectors; ++i) { |
8160 | Value *Vec = Builder.CreateExtractValue(LoadResult, i); |
8161 | MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i}); |
8162 | } |
8163 | |
8164 | if (ReturnValue.isNull()) |
8165 | return MvecOut; |
8166 | else |
8167 | return Builder.CreateStore(MvecOut, ReturnValue.getValue()); |
8168 | } |
8169 | |
8170 | case CustomCodeGen::VST24: { |
8171 | llvm::SmallVector<Value *, 4> Ops; |
8172 | llvm::SmallVector<llvm::Type *, 4> Tys; |
8173 | |
8174 | auto Addr = E->getArg(0); |
8175 | Ops.push_back(EmitScalarExpr(Addr)); |
8176 | Tys.push_back(ConvertType(Addr->getType())); |
8177 | |
8178 | auto MvecCType = E->getArg(1)->getType(); |
8179 | auto MvecLType = ConvertType(MvecCType); |
8180 | assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
8181 | assert(MvecLType->getStructNumElements() == 1 &&
8182 |        "Data-type struct for vst2q should have one element");
8183 | auto MvecLTypeInner = MvecLType->getStructElementType(0); |
8184 | assert(MvecLTypeInner->isArrayTy() &&
8185 |        "Data-type struct for vst2q should contain an array");
8186 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8187 |        "Array member of data-type struct vst2q has wrong length");
8188 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
8189 | |
8190 | Tys.push_back(VecLType); |
8191 | |
8192 | AggValueSlot MvecSlot = CreateAggTemp(MvecCType); |
8193 | EmitAggExpr(E->getArg(1), MvecSlot); |
8194 | auto Mvec = Builder.CreateLoad(MvecSlot.getAddress()); |
8195 | for (unsigned i = 0; i < NumVectors; i++) |
8196 | Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i})); |
8197 | |
8198 | Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys)); |
8199 | Value *ToReturn = nullptr; |
8200 | for (unsigned i = 0; i < NumVectors; i++) { |
8201 | Ops.push_back(llvm::ConstantInt::get(Int32Ty, i)); |
8202 | ToReturn = Builder.CreateCall(F, Ops); |
8203 | Ops.pop_back(); |
8204 | } |
8205 | return ToReturn; |
8206 | } |
8207 | } |
8208 | llvm_unreachable("unknown custom codegen type.")::llvm::llvm_unreachable_internal("unknown custom codegen type." , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8208); |
8209 | } |
8210 | |
8211 | Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID, |
8212 | const CallExpr *E, |
8213 | ReturnValueSlot ReturnValue, |
8214 | llvm::Triple::ArchType Arch) { |
8215 | switch (BuiltinID) { |
8216 | default: |
8217 | return nullptr; |
8218 | #include "clang/Basic/arm_cde_builtin_cg.inc" |
8219 | } |
8220 | } |
8221 | |
8222 | static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, |
8223 | const CallExpr *E, |
8224 | SmallVectorImpl<Value *> &Ops, |
8225 | llvm::Triple::ArchType Arch) { |
8226 | unsigned int Int = 0; |
8227 | const char *s = nullptr; |
8228 | |
8229 | switch (BuiltinID) { |
8230 | default: |
8231 | return nullptr; |
8232 | case NEON::BI__builtin_neon_vtbl1_v: |
8233 | case NEON::BI__builtin_neon_vqtbl1_v: |
8234 | case NEON::BI__builtin_neon_vqtbl1q_v: |
8235 | case NEON::BI__builtin_neon_vtbl2_v: |
8236 | case NEON::BI__builtin_neon_vqtbl2_v: |
8237 | case NEON::BI__builtin_neon_vqtbl2q_v: |
8238 | case NEON::BI__builtin_neon_vtbl3_v: |
8239 | case NEON::BI__builtin_neon_vqtbl3_v: |
8240 | case NEON::BI__builtin_neon_vqtbl3q_v: |
8241 | case NEON::BI__builtin_neon_vtbl4_v: |
8242 | case NEON::BI__builtin_neon_vqtbl4_v: |
8243 | case NEON::BI__builtin_neon_vqtbl4q_v: |
8244 | break; |
8245 | case NEON::BI__builtin_neon_vtbx1_v: |
8246 | case NEON::BI__builtin_neon_vqtbx1_v: |
8247 | case NEON::BI__builtin_neon_vqtbx1q_v: |
8248 | case NEON::BI__builtin_neon_vtbx2_v: |
8249 | case NEON::BI__builtin_neon_vqtbx2_v: |
8250 | case NEON::BI__builtin_neon_vqtbx2q_v: |
8251 | case NEON::BI__builtin_neon_vtbx3_v: |
8252 | case NEON::BI__builtin_neon_vqtbx3_v: |
8253 | case NEON::BI__builtin_neon_vqtbx3q_v: |
8254 | case NEON::BI__builtin_neon_vtbx4_v: |
8255 | case NEON::BI__builtin_neon_vqtbx4_v: |
8256 | case NEON::BI__builtin_neon_vqtbx4q_v: |
8257 | break; |
8258 | } |
8259 | |
8260 | assert(E->getNumArgs() >= 3);
8261 | |
8262 | // Get the last argument, which specifies the vector type. |
8263 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
8264 | Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext()); |
8265 | if (!Result) |
8266 | return nullptr; |
8267 | |
8268 | // Determine the type of this overloaded NEON intrinsic. |
8269 | NeonTypeFlags Type = Result->getZExtValue(); |
8270 | llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type); |
8271 | if (!Ty) |
8272 | return nullptr; |
8273 | |
8274 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
8275 | |
8276 | // AArch64 scalar builtins are not overloaded: they do not have an extra
8277 | // argument that specifies the vector type, so we need to handle each case.
8278 | switch (BuiltinID) { |
8279 | case NEON::BI__builtin_neon_vtbl1_v: { |
8280 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr, |
8281 | Ops[1], Ty, Intrinsic::aarch64_neon_tbl1, |
8282 | "vtbl1"); |
8283 | } |
8284 | case NEON::BI__builtin_neon_vtbl2_v: { |
8285 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr, |
8286 | Ops[2], Ty, Intrinsic::aarch64_neon_tbl1, |
8287 | "vtbl1"); |
8288 | } |
8289 | case NEON::BI__builtin_neon_vtbl3_v: { |
8290 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr, |
8291 | Ops[3], Ty, Intrinsic::aarch64_neon_tbl2, |
8292 | "vtbl2"); |
8293 | } |
8294 | case NEON::BI__builtin_neon_vtbl4_v: { |
8295 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr, |
8296 | Ops[4], Ty, Intrinsic::aarch64_neon_tbl2, |
8297 | "vtbl2"); |
8298 | } |
8299 | case NEON::BI__builtin_neon_vtbx1_v: { |
8300 | Value *TblRes = |
8301 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2], |
8302 | Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1"); |
8303 | |
8304 | llvm::Constant *EightV = ConstantInt::get(Ty, 8); |
8305 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV); |
8306 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
8307 | |
8308 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
8309 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
8310 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
8311 | } |
8312 | case NEON::BI__builtin_neon_vtbx2_v: { |
8313 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0], |
8314 | Ops[3], Ty, Intrinsic::aarch64_neon_tbx1, |
8315 | "vtbx1"); |
8316 | } |
8317 | case NEON::BI__builtin_neon_vtbx3_v: { |
8318 | Value *TblRes = |
8319 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4], |
8320 | Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); |
8321 | |
8322 | llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); |
8323 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], |
8324 | TwentyFourV); |
8325 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
8326 | |
8327 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
8328 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
8329 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
8330 | } |
8331 | case NEON::BI__builtin_neon_vtbx4_v: { |
8332 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0], |
8333 | Ops[5], Ty, Intrinsic::aarch64_neon_tbx2, |
8334 | "vtbx2"); |
8335 | } |
8336 | case NEON::BI__builtin_neon_vqtbl1_v: |
8337 | case NEON::BI__builtin_neon_vqtbl1q_v: |
8338 | Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; |
8339 | case NEON::BI__builtin_neon_vqtbl2_v: |
8340 | case NEON::BI__builtin_neon_vqtbl2q_v:
8341 | Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; |
8342 | case NEON::BI__builtin_neon_vqtbl3_v: |
8343 | case NEON::BI__builtin_neon_vqtbl3q_v: |
8344 | Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; |
8345 | case NEON::BI__builtin_neon_vqtbl4_v: |
8346 | case NEON::BI__builtin_neon_vqtbl4q_v: |
8347 | Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; |
8348 | case NEON::BI__builtin_neon_vqtbx1_v: |
8349 | case NEON::BI__builtin_neon_vqtbx1q_v: |
8350 | Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; |
8351 | case NEON::BI__builtin_neon_vqtbx2_v: |
8352 | case NEON::BI__builtin_neon_vqtbx2q_v: |
8353 | Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; |
8354 | case NEON::BI__builtin_neon_vqtbx3_v: |
8355 | case NEON::BI__builtin_neon_vqtbx3q_v: |
8356 | Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; |
8357 | case NEON::BI__builtin_neon_vqtbx4_v: |
8358 | case NEON::BI__builtin_neon_vqtbx4q_v: |
8359 | Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; |
8360 | }
8362 | |
8363 | if (!Int) |
8364 | return nullptr; |
8365 | |
8366 | Function *F = CGF.CGM.getIntrinsic(Int, Ty); |
8367 | return CGF.EmitNeonCall(F, Ops, s); |
8368 | } |
8369 | |
8370 | Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { |
8371 | auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
8372 | Op = Builder.CreateBitCast(Op, Int16Ty); |
8373 | Value *V = UndefValue::get(VTy); |
8374 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
8375 | Op = Builder.CreateInsertElement(V, Op, CI); |
8376 | return Op; |
8377 | } |
8378 | |
8379 | /// SVEBuiltinMemEltTy - Returns the memory element type for this memory |
8380 | /// access builtin. Only required if it can't be inferred from the base pointer |
8381 | /// operand. |
8382 | llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) { |
8383 | switch (TypeFlags.getMemEltType()) { |
8384 | case SVETypeFlags::MemEltTyDefault: |
8385 | return getEltType(TypeFlags); |
8386 | case SVETypeFlags::MemEltTyInt8: |
8387 | return Builder.getInt8Ty(); |
8388 | case SVETypeFlags::MemEltTyInt16: |
8389 | return Builder.getInt16Ty(); |
8390 | case SVETypeFlags::MemEltTyInt32: |
8391 | return Builder.getInt32Ty(); |
8392 | case SVETypeFlags::MemEltTyInt64: |
8393 | return Builder.getInt64Ty(); |
8394 | } |
8395 | llvm_unreachable("Unknown MemEltType")::llvm::llvm_unreachable_internal("Unknown MemEltType", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8395); |
8396 | } |
8397 | |
8398 | llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) { |
8399 | switch (TypeFlags.getEltType()) { |
8400 | default: |
8401 | llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8401); |
8402 | |
8403 | case SVETypeFlags::EltTyInt8: |
8404 | return Builder.getInt8Ty(); |
8405 | case SVETypeFlags::EltTyInt16: |
8406 | return Builder.getInt16Ty(); |
8407 | case SVETypeFlags::EltTyInt32: |
8408 | return Builder.getInt32Ty(); |
8409 | case SVETypeFlags::EltTyInt64: |
8410 | return Builder.getInt64Ty(); |
8411 | |
8412 | case SVETypeFlags::EltTyFloat16: |
8413 | return Builder.getHalfTy(); |
8414 | case SVETypeFlags::EltTyFloat32: |
8415 | return Builder.getFloatTy(); |
8416 | case SVETypeFlags::EltTyFloat64: |
8417 | return Builder.getDoubleTy(); |
8418 | |
8419 | case SVETypeFlags::EltTyBFloat16: |
8420 | return Builder.getBFloatTy(); |
8421 | |
8422 | case SVETypeFlags::EltTyBool8: |
8423 | case SVETypeFlags::EltTyBool16: |
8424 | case SVETypeFlags::EltTyBool32: |
8425 | case SVETypeFlags::EltTyBool64: |
8426 | return Builder.getInt1Ty(); |
8427 | } |
8428 | } |
8429 | |
8430 | // Return the llvm predicate vector type corresponding to the element type
8431 | // described by TypeFlags.
8432 | llvm::ScalableVectorType * |
8433 | CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) { |
8434 | switch (TypeFlags.getEltType()) { |
8435 | default: llvm_unreachable("Unhandled SVETypeFlag!");
8436 | |
8437 | case SVETypeFlags::EltTyInt8: |
8438 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8439 | case SVETypeFlags::EltTyInt16: |
8440 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8441 | case SVETypeFlags::EltTyInt32: |
8442 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8443 | case SVETypeFlags::EltTyInt64: |
8444 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8445 | |
8446 | case SVETypeFlags::EltTyBFloat16: |
8447 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8448 | case SVETypeFlags::EltTyFloat16: |
8449 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8450 | case SVETypeFlags::EltTyFloat32: |
8451 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8452 | case SVETypeFlags::EltTyFloat64: |
8453 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8454 | |
8455 | case SVETypeFlags::EltTyBool8: |
8456 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8457 | case SVETypeFlags::EltTyBool16: |
8458 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8459 | case SVETypeFlags::EltTyBool32: |
8460 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8461 | case SVETypeFlags::EltTyBool64: |
8462 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8463 | } |
8464 | } |
8465 | |
8466 | // Return the llvm vector type corresponding to the element type described by TypeFlags.
8467 | llvm::ScalableVectorType * |
8468 | CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) { |
8469 | switch (TypeFlags.getEltType()) { |
8470 | default: |
8471 | llvm_unreachable("Invalid SVETypeFlag!")::llvm::llvm_unreachable_internal("Invalid SVETypeFlag!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8471); |
8472 | |
8473 | case SVETypeFlags::EltTyInt8: |
8474 | return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16); |
8475 | case SVETypeFlags::EltTyInt16: |
8476 | return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8); |
8477 | case SVETypeFlags::EltTyInt32: |
8478 | return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4); |
8479 | case SVETypeFlags::EltTyInt64: |
8480 | return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2); |
8481 | |
8482 | case SVETypeFlags::EltTyFloat16: |
8483 | return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8); |
8484 | case SVETypeFlags::EltTyBFloat16: |
8485 | return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8); |
8486 | case SVETypeFlags::EltTyFloat32: |
8487 | return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4); |
8488 | case SVETypeFlags::EltTyFloat64: |
8489 | return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2); |
8490 | |
8491 | case SVETypeFlags::EltTyBool8: |
8492 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8493 | case SVETypeFlags::EltTyBool16: |
8494 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8495 | case SVETypeFlags::EltTyBool32: |
8496 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8497 | case SVETypeFlags::EltTyBool64: |
8498 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8499 | } |
8500 | } |
8501 | |
8502 | llvm::Value * |
8503 | CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) { |
8504 | Function *Ptrue = |
8505 | CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags)); |
8506 | return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)}); |
8507 | } |
8508 | |
8509 | constexpr unsigned SVEBitsPerBlock = 128; |
8510 | |
8511 | static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) { |
8512 | unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits(); |
8513 | return llvm::ScalableVectorType::get(EltTy, NumElts); |
8514 | } |
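     | // For example, an i16 element type maps to <vscale x 8 x i16> and double
     | // maps to <vscale x 2 x double>, since one 128-bit block holds 8 or 2
     | // such elements respectively.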
8515 | |
8516 | // Reinterpret the input predicate so that it can be used to correctly isolate |
8517 | // the elements of the specified datatype. |
8518 | Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred, |
8519 | llvm::ScalableVectorType *VTy) { |
8520 | auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy); |
8521 | if (Pred->getType() == RTy) |
8522 | return Pred; |
8523 | |
8524 | unsigned IntID; |
8525 | llvm::Type *IntrinsicTy; |
8526 | switch (VTy->getMinNumElements()) { |
8527 | default: |
8528 | llvm_unreachable("unsupported element count!")::llvm::llvm_unreachable_internal("unsupported element count!" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8528); |
8529 | case 2: |
8530 | case 4: |
8531 | case 8: |
8532 | IntID = Intrinsic::aarch64_sve_convert_from_svbool; |
8533 | IntrinsicTy = RTy; |
8534 | break; |
8535 | case 16: |
8536 | IntID = Intrinsic::aarch64_sve_convert_to_svbool; |
8537 | IntrinsicTy = Pred->getType(); |
8538 | break; |
8539 | } |
8540 | |
8541 | Function *F = CGM.getIntrinsic(IntID, IntrinsicTy); |
8542 | Value *C = Builder.CreateCall(F, Pred); |
8543 | assert(C->getType() == RTy && "Unexpected return type!");
8544 | return C; |
8545 | } |
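     | // For example, casting an svbool_t predicate (<vscale x 16 x i1>) for
     | // use with <vscale x 2 x double> data emits
     | // llvm.aarch64.sve.convert.from.svbool to produce a <vscale x 2 x i1>;
     | // the opposite direction goes through llvm.aarch64.sve.convert.to.svbool.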
8546 | |
8547 | Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags, |
8548 | SmallVectorImpl<Value *> &Ops, |
8549 | unsigned IntID) { |
8550 | auto *ResultTy = getSVEType(TypeFlags); |
8551 | auto *OverloadedTy = |
8552 | llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy); |
8553 | |
8554 | // At the ACLE level there's only one predicate type, svbool_t, which is |
8555 | // mapped to <n x 16 x i1>. However, this might be incompatible with the |
8556 | // actual type being loaded. For example, when loading doubles (f64) the
8557 | // predicate should be <n x 2 x i1> instead. At the IR level the type of
8558 | // the predicate and the data being loaded must match. Cast accordingly. |
8559 | Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy); |
8560 | |
8561 | Function *F = nullptr; |
8562 | if (Ops[1]->getType()->isVectorTy()) |
8563 | // This is the "vector base, scalar offset" case. In order to uniquely |
8564 | // map this built-in to an LLVM IR intrinsic, we need both the return type |
8565 | // and the type of the vector base. |
8566 | F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()}); |
8567 | else |
8568 | // This is the "scalar base, vector offset case". The type of the offset |
8569 | // is encoded in the name of the intrinsic. We only need to specify the |
8570 | // return type in order to uniquely map this built-in to an LLVM IR |
8571 | // intrinsic. |
8572 | F = CGM.getIntrinsic(IntID, OverloadedTy); |
8573 | |
8574 | // Pass 0 when the offset is missing. This can only be applied when using |
8575 | // the "vector base" addressing mode for which ACLE allows no offset. The |
8576 | // corresponding LLVM IR always requires an offset. |
8577 | if (Ops.size() == 2) { |
8578 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8579 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8580 | } |
8581 | |
8582 | // For "vector base, scalar index" scale the index so that it becomes a |
8583 | // scalar offset. |
8584 | if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) { |
8585 | unsigned BytesPerElt = |
8586 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
8587 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8588 | Ops[2] = Builder.CreateMul(Ops[2], Scale); |
8589 | } |
8590 | |
8591 | Value *Call = Builder.CreateCall(F, Ops); |
8592 | |
8593 | // The following sext/zext is only needed when ResultTy != OverloadedTy. In |
8594 | // other cases it's folded into a nop. |
8595 | return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy) |
8596 | : Builder.CreateSExt(Call, ResultTy); |
8597 | } |
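     | // For example, a "vector base, scalar index" gather of 64-bit elements
     | // multiplies the index by 8 (BytesPerElt) to turn it into a byte offset,
     | // and when the memory element type is narrower than the result type the
     | // trailing sext/zext widens each loaded element.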
8598 | |
8599 | Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags, |
8600 | SmallVectorImpl<Value *> &Ops, |
8601 | unsigned IntID) { |
8602 | auto *SrcDataTy = getSVEType(TypeFlags); |
8603 | auto *OverloadedTy = |
8604 | llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy); |
8605 | |
8606 | // In ACLE the source data is passed in the last argument, whereas in LLVM IR |
8607 | // it's the first argument. Move it accordingly. |
8608 | Ops.insert(Ops.begin(), Ops.pop_back_val()); |
8609 | |
8610 | Function *F = nullptr; |
8611 | if (Ops[2]->getType()->isVectorTy()) |
8612 | // This is the "vector base, scalar offset" case. In order to uniquely |
8613 | // map this built-in to an LLVM IR intrinsic, we need both the return type |
8614 | // and the type of the vector base. |
8615 | F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()}); |
8616 | else |
8617 | // This is the "scalar base, vector offset case". The type of the offset |
8618 | // is encoded in the name of the intrinsic. We only need to specify the |
8619 | // return type in order to uniquely map this built-in to an LLVM IR |
8620 | // intrinsic. |
8621 | F = CGM.getIntrinsic(IntID, OverloadedTy); |
8622 | |
8623 | // Pass 0 when the offset is missing. This can only be applied when using |
8624 | // the "vector base" addressing mode for which ACLE allows no offset. The |
8625 | // corresponding LLVM IR always requires an offset. |
8626 | if (Ops.size() == 3) { |
8627 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8628 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8629 | } |
8630 | |
8631 | // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's |
8632 | // folded into a nop. |
8633 | Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy); |
8634 | |
8635 | // At the ACLE level there's only one predicate type, svbool_t, which is |
8636 | // mapped to <n x 16 x i1>. However, this might be incompatible with the |
8637 | // actual type being stored. For example, when storing doubles (f64) the
8638 | // predicate should be <n x 2 x i1> instead. At the IR level the type of
8639 | // the predicate and the data being stored must match. Cast accordingly. |
8640 | Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy); |
8641 | |
8642 | // For "vector base, scalar index" scale the index so that it becomes a |
8643 | // scalar offset. |
8644 | if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) { |
8645 | unsigned BytesPerElt = |
8646 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
8647 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8648 | Ops[3] = Builder.CreateMul(Ops[3], Scale); |
8649 | } |
8650 | |
8651 | return Builder.CreateCall(F, Ops); |
8652 | } |
8653 | |
8654 | Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags, |
8655 | SmallVectorImpl<Value *> &Ops, |
8656 | unsigned IntID) { |
8657 | // The gather prefetches are overloaded on the vector input - this can either |
8658 | // be the vector of base addresses or vector of offsets. |
8659 | auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType()); |
8660 | if (!OverloadedTy) |
8661 | OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType()); |
8662 | |
8663 | // Cast the predicate from svbool_t to the right number of elements. |
8664 | Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy); |
8665 | |
8666 | // vector + imm addressing modes |
8667 | if (Ops[1]->getType()->isVectorTy()) { |
8668 | if (Ops.size() == 3) { |
8669 | // Pass 0 for 'vector+imm' when the index is omitted. |
8670 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8671 | |
8672 | // The sv_prfop is the last operand in the builtin and IR intrinsic. |
8673 | std::swap(Ops[2], Ops[3]); |
8674 | } else { |
8675 | // The index needs to be passed as a scaled offset.
8676 | llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
8677 | unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8; |
8678 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8679 | Ops[2] = Builder.CreateMul(Ops[2], Scale); |
8680 | } |
8681 | } |
8682 | |
8683 | Function *F = CGM.getIntrinsic(IntID, OverloadedTy); |
8684 | return Builder.CreateCall(F, Ops); |
8685 | } |
8686 | |
8687 | Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags, |
8688 | SmallVectorImpl<Value*> &Ops, |
8689 | unsigned IntID) { |
8690 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
8691 | auto VecPtrTy = llvm::PointerType::getUnqual(VTy); |
8692 | auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
8693 | |
8694 | unsigned N; |
8695 | switch (IntID) { |
8696 | case Intrinsic::aarch64_sve_ld2: |
8697 | N = 2; |
8698 | break; |
8699 | case Intrinsic::aarch64_sve_ld3: |
8700 | N = 3; |
8701 | break; |
8702 | case Intrinsic::aarch64_sve_ld4: |
8703 | N = 4; |
8704 | break; |
8705 | default: |
8706 | llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8706); |
8707 | } |
8708 | auto RetTy = llvm::VectorType::get(VTy->getElementType(), |
8709 | VTy->getElementCount() * N); |
8710 | |
8711 | Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); |
8712 | Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8713 | Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); |
8714 | BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); |
8715 | BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); |
8716 | |
8717 | Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()}); |
8718 | return Builder.CreateCall(F, { Predicate, BasePtr }); |
8719 | } |
8720 | |
8721 | Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags, |
8722 | SmallVectorImpl<Value*> &Ops, |
8723 | unsigned IntID) { |
8724 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
8725 | auto VecPtrTy = llvm::PointerType::getUnqual(VTy); |
8726 | auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
8727 | |
8728 | unsigned N; |
8729 | switch (IntID) { |
8730 | case Intrinsic::aarch64_sve_st2: |
8731 | N = 2; |
8732 | break; |
8733 | case Intrinsic::aarch64_sve_st3: |
8734 | N = 3; |
8735 | break; |
8736 | case Intrinsic::aarch64_sve_st4: |
8737 | N = 4; |
8738 | break; |
8739 | default: |
8740 | llvm_unreachable("unknown intrinsic!")::llvm::llvm_unreachable_internal("unknown intrinsic!", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8740); |
8741 | } |
8742 | auto TupleTy = |
8743 | llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N); |
8744 | |
8745 | Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); |
8746 | Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy); |
8747 | Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0); |
8748 | Value *Val = Ops.back(); |
8749 | BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); |
8750 | BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); |
8751 | |
8752 | // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we |
8753 | // need to break up the tuple vector. |
8754 | SmallVector<llvm::Value*, 5> Operands; |
8755 | Function *FExtr = |
8756 | CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy}); |
8757 | for (unsigned I = 0; I < N; ++I) |
8758 | Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)})); |
8759 | Operands.append({Predicate, BasePtr}); |
8760 | |
8761 | Function *F = CGM.getIntrinsic(IntID, { VTy }); |
8762 | return Builder.CreateCall(F, Operands); |
8763 | } |
8764 | |
8765 | // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and |
8766 | // svpmullt_pair intrinsics, with the exception that their results are bitcast |
8767 | // to a wider type. |
8768 | Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags, |
8769 | SmallVectorImpl<Value *> &Ops, |
8770 | unsigned BuiltinID) { |
8771 | // Splat scalar operand to vector (intrinsics with _n infix) |
8772 | if (TypeFlags.hasSplatOperand()) { |
8773 | unsigned OpNo = TypeFlags.getSplatOperand(); |
8774 | Ops[OpNo] = EmitSVEDupX(Ops[OpNo]); |
8775 | } |
8776 | |
8777 | // The pair-wise function has a narrower overloaded type. |
8778 | Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType()); |
8779 | Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]}); |
8780 | |
8781 | // Now bitcast to the wider result type. |
8782 | llvm::ScalableVectorType *Ty = getSVEType(TypeFlags); |
8783 | return EmitSVEReinterpret(Call, Ty); |
8784 | } |
8785 | |
8786 | Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags, |
8787 | ArrayRef<Value *> Ops, unsigned BuiltinID) { |
8788 | llvm::Type *OverloadedTy = getSVEType(TypeFlags); |
8789 | Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy); |
8790 | return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)}); |
8791 | } |
8792 | |
8793 | Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags, |
8794 | SmallVectorImpl<Value *> &Ops, |
8795 | unsigned BuiltinID) { |
8796 | auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
8797 | auto *VectorTy = getSVEVectorForElementType(MemEltTy); |
8798 | auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
8799 | |
8800 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
8801 | Value *BasePtr = Ops[1]; |
8802 | |
8803 | // Apply the index operand if it is not omitted.
8804 | if (Ops.size() > 3) { |
8805 | BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo()); |
8806 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]); |
8807 | } |
8808 | |
8809 | // Prefetch intrinsics always expect an i8*.
8810 | BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty)); |
8811 | Value *PrfOp = Ops.back(); |
8812 | |
8813 | Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType()); |
8814 | return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp}); |
8815 | } |
8816 | |
8817 | Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E, |
8818 | llvm::Type *ReturnTy, |
8819 | SmallVectorImpl<Value *> &Ops, |
8820 | unsigned BuiltinID, |
8821 | bool IsZExtReturn) { |
8822 | QualType LangPTy = E->getArg(1)->getType(); |
8823 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
8824 | LangPTy->castAs<PointerType>()->getPointeeType()); |
8825 | |
8826 | // The vector type that is returned may be different from the |
8827 | // eventual type loaded from memory. |
8828 | auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy); |
8829 | auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
8830 | |
8831 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
8832 | Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo()); |
8833 | Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); |
8834 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset); |
8835 | |
8836 | BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo()); |
8837 | Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy); |
8838 | Value *Load = Builder.CreateCall(F, {Predicate, BasePtr}); |
8839 | |
8840 | return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy) |
8841 | : Builder.CreateSExt(Load, VectorTy); |
8842 | } |
8843 | |
8844 | Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E, |
8845 | SmallVectorImpl<Value *> &Ops, |
8846 | unsigned BuiltinID) { |
8847 | QualType LangPTy = E->getArg(1)->getType(); |
8848 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
8849 | LangPTy->castAs<PointerType>()->getPointeeType()); |
8850 | |
8851 | // The vector type that is stored may be different from the |
8852 | // eventual type stored to memory. |
8853 | auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType()); |
8854 | auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
8855 | |
8856 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
8857 | Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo()); |
8858 | Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0); |
8859 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset); |
8860 | |
8861 | // Last value is always the data |
8862 | llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy); |
8863 | |
8864 | BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo()); |
8865 | Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy); |
8866 | return Builder.CreateCall(F, {Val, Predicate, BasePtr}); |
8867 | } |
8868 | |
8869 | // Limit the usage of scalable llvm IR generated by the ACLE by using the |
8870 | // sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat. |
8871 | Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) { |
8872 | auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty); |
8873 | return Builder.CreateCall(F, Scalar); |
8874 | } |
8875 | |
8876 | Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) { |
8877 | return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType())); |
8878 | } |
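     | // For example, calling EmitSVEDupX on an i32 scalar emits
     | // llvm.aarch64.sve.dup.x.nxv4i32, producing a <vscale x 4 x i32> splat.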
8879 | |
8880 | Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) { |
8881 | // FIXME: For big endian this needs an additional REV, or needs a separate |
8882 | // intrinsic that is code-generated as a no-op, because the LLVM bitcast |
8883 | // instruction is defined as 'bitwise' equivalent from memory point of |
8884 | // view (when storing/reloading), whereas the svreinterpret builtin |
8885 | // implements bitwise equivalent cast from register point of view. |
8886 | // LLVM CodeGen for a bitcast must add an explicit REV for big-endian. |
8887 | return Builder.CreateBitCast(Val, Ty); |
8888 | } |
8889 | |
8890 | static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
8891 | SmallVectorImpl<Value *> &Ops) { |
8892 | auto *SplatZero = Constant::getNullValue(Ty); |
8893 | Ops.insert(Ops.begin(), SplatZero); |
8894 | } |
8895 | |
8896 | static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
8897 | SmallVectorImpl<Value *> &Ops) { |
8898 | auto *SplatUndef = UndefValue::get(Ty); |
8899 | Ops.insert(Ops.begin(), SplatUndef); |
8900 | } |
8901 | |
8902 | SmallVector<llvm::Type *, 2> |
8903 | CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags, |
8904 | llvm::Type *ResultType, |
8905 | ArrayRef<Value *> Ops) { |
8906 | if (TypeFlags.isOverloadNone()) |
8907 | return {}; |
8908 | |
8909 | llvm::Type *DefaultType = getSVEType(TypeFlags); |
8910 | |
8911 | if (TypeFlags.isOverloadWhile()) |
8912 | return {DefaultType, Ops[1]->getType()}; |
8913 | |
8914 | if (TypeFlags.isOverloadWhileRW()) |
8915 | return {getSVEPredType(TypeFlags), Ops[0]->getType()}; |
8916 | |
8917 | if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet()) |
8918 | return {Ops[0]->getType(), Ops.back()->getType()}; |
8919 | |
8920 | if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet()) |
8921 | return {ResultType, Ops[0]->getType()}; |
8922 | |
8923 | assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
8924 | return {DefaultType}; |
8925 | } |
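| // The types returned above are mangled into the intrinsic name; for an |
| // overload-while builtin such as svwhilelt_b32 the list is roughly |
| // {<vscale x 4 x i1>, i64}, giving @llvm.aarch64.sve.whilelt.nxv4i1.i64. |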
8926 | |
8927 | Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, |
8928 | const CallExpr *E) { |
8929 | // Find out if any arguments are required to be integer constant expressions. |
8930 | unsigned ICEArguments = 0; |
8931 | ASTContext::GetBuiltinTypeError Error; |
8932 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
8933 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
8934 | |
8935 | llvm::Type *Ty = ConvertType(E->getType()); |
8936 | if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && |
8937 | BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) { |
8938 | Value *Val = EmitScalarExpr(E->getArg(0)); |
8939 | return EmitSVEReinterpret(Val, Ty); |
8940 | } |
8941 | |
8942 | llvm::SmallVector<Value *, 4> Ops; |
8943 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
8944 | if ((ICEArguments & (1 << i)) == 0) |
8945 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
8946 | else { |
8947 | // If this is required to be a constant, constant fold it so that we know |
8948 | // that the generated intrinsic gets a ConstantInt. |
8949 | Optional<llvm::APSInt> Result = |
8950 | E->getArg(i)->getIntegerConstantExpr(getContext()); |
8951 | assert(Result && "Expected argument to be a constant")(static_cast <bool> (Result && "Expected argument to be a constant" ) ? void (0) : __assert_fail ("Result && \"Expected argument to be a constant\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 8951, __extension__ __PRETTY_FUNCTION__)); |
8952 | |
8953 | // Immediates for SVE llvm intrinsics are always 32bit. We can safely |
8954 | // truncate because the immediate has been range checked and no valid |
8955 | // immediate requires more than a handful of bits. |
8956 | *Result = Result->extOrTrunc(32); |
8957 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result)); |
8958 | } |
8959 | } |
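| // For example, the range-checked immediate in svasrd_n_s32_x(pg, x, 2) |
| // reaches the intrinsic as an i32 ConstantInt even if the AST gave the |
| // argument a wider integer type. |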
8960 | |
8961 | auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID, |
8962 | AArch64SVEIntrinsicsProvenSorted); |
8963 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
8964 | if (TypeFlags.isLoad()) |
8965 | return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic, |
8966 | TypeFlags.isZExtReturn()); |
8967 | else if (TypeFlags.isStore()) |
8968 | return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic); |
8969 | else if (TypeFlags.isGatherLoad()) |
8970 | return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8971 | else if (TypeFlags.isScatterStore()) |
8972 | return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8973 | else if (TypeFlags.isPrefetch()) |
8974 | return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8975 | else if (TypeFlags.isGatherPrefetch()) |
8976 | return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8977 | else if (TypeFlags.isStructLoad()) |
8978 | return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8979 | else if (TypeFlags.isStructStore()) |
8980 | return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8981 | else if (TypeFlags.isUndef()) |
8982 | return UndefValue::get(Ty); |
8983 | else if (Builtin->LLVMIntrinsic != 0) { |
8984 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp) |
8985 | InsertExplicitZeroOperand(Builder, Ty, Ops); |
8986 | |
8987 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp) |
8988 | InsertExplicitUndefOperand(Builder, Ty, Ops); |
8989 | |
8990 | // Some ACLE builtins leave out the argument to specify the predicate |
8991 | // pattern, which is expected to be expanded to an SV_ALL pattern. |
8992 | if (TypeFlags.isAppendSVALL()) |
8993 | Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31)); |
8994 | if (TypeFlags.isInsertOp1SVALL()) |
8995 | Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31)); |
8996 | |
8997 | // Predicates must match the main datatype. |
8998 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
8999 | if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType())) |
9000 | if (PredTy->getElementType()->isIntegerTy(1)) |
9001 | Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags)); |
9002 | |
9003 | // Splat scalar operand to vector (intrinsics with _n infix) |
9004 | if (TypeFlags.hasSplatOperand()) { |
9005 | unsigned OpNo = TypeFlags.getSplatOperand(); |
9006 | Ops[OpNo] = EmitSVEDupX(Ops[OpNo]); |
9007 | } |
9008 | |
9009 | if (TypeFlags.isReverseCompare()) |
9010 | std::swap(Ops[1], Ops[2]); |
9011 | |
9012 | if (TypeFlags.isReverseUSDOT()) |
9013 | std::swap(Ops[1], Ops[2]); |
9014 | |
9015 | // Predicated intrinsics with _z suffix need a select w/ zeroinitializer. |
9016 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) { |
9017 | llvm::Type *OpndTy = Ops[1]->getType(); |
9018 | auto *SplatZero = Constant::getNullValue(OpndTy); |
9019 | Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy); |
9020 | Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero}); |
9021 | } |
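| // e.g. svadd_s32_z(pg, a, b): the destructive ADD keeps operand 1 in |
| // inactive lanes, so zeroing is applied to it up front, roughly: |
| //   %a0 = call @llvm.aarch64.sve.sel.nxv4i32(%pg, %a, zeroinitializer) |
| //   %r  = call @llvm.aarch64.sve.add.nxv4i32(%pg, %a0, %b) |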
9022 | |
9023 | Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, |
9024 | getSVEOverloadTypes(TypeFlags, Ty, Ops)); |
9025 | Value *Call = Builder.CreateCall(F, Ops); |
9026 | |
9027 | // Predicate results must be converted to svbool_t. |
9028 | if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType())) |
9029 | if (PredTy->getScalarType()->isIntegerTy(1)) |
9030 | Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty)); |
9031 | |
9032 | return Call; |
9033 | } |
9034 | |
9035 | switch (BuiltinID) { |
9036 | default: |
9037 | return nullptr; |
9038 | |
9039 | case SVE::BI__builtin_sve_svmov_b_z: { |
9040 | // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op) |
9041 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9042 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
9043 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy); |
9044 | return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]}); |
9045 | } |
9046 | |
9047 | case SVE::BI__builtin_sve_svnot_b_z: { |
9048 | // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg) |
9049 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9050 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
9051 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy); |
9052 | return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]}); |
9053 | } |
9054 | |
9055 | case SVE::BI__builtin_sve_svmovlb_u16: |
9056 | case SVE::BI__builtin_sve_svmovlb_u32: |
9057 | case SVE::BI__builtin_sve_svmovlb_u64: |
9058 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb); |
9059 | |
9060 | case SVE::BI__builtin_sve_svmovlb_s16: |
9061 | case SVE::BI__builtin_sve_svmovlb_s32: |
9062 | case SVE::BI__builtin_sve_svmovlb_s64: |
9063 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb); |
9064 | |
9065 | case SVE::BI__builtin_sve_svmovlt_u16: |
9066 | case SVE::BI__builtin_sve_svmovlt_u32: |
9067 | case SVE::BI__builtin_sve_svmovlt_u64: |
9068 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt); |
9069 | |
9070 | case SVE::BI__builtin_sve_svmovlt_s16: |
9071 | case SVE::BI__builtin_sve_svmovlt_s32: |
9072 | case SVE::BI__builtin_sve_svmovlt_s64: |
9073 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt); |
9074 | |
9075 | case SVE::BI__builtin_sve_svpmullt_u16: |
9076 | case SVE::BI__builtin_sve_svpmullt_u64: |
9077 | case SVE::BI__builtin_sve_svpmullt_n_u16: |
9078 | case SVE::BI__builtin_sve_svpmullt_n_u64: |
9079 | return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair); |
9080 | |
9081 | case SVE::BI__builtin_sve_svpmullb_u16: |
9082 | case SVE::BI__builtin_sve_svpmullb_u64: |
9083 | case SVE::BI__builtin_sve_svpmullb_n_u16: |
9084 | case SVE::BI__builtin_sve_svpmullb_n_u64: |
9085 | return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair); |
9086 | |
9087 | case SVE::BI__builtin_sve_svdup_n_b8: |
9088 | case SVE::BI__builtin_sve_svdup_n_b16: |
9089 | case SVE::BI__builtin_sve_svdup_n_b32: |
9090 | case SVE::BI__builtin_sve_svdup_n_b64: { |
9091 | Value *CmpNE = |
9092 | Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType())); |
9093 | llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags); |
9094 | Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy); |
9095 | return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty)); |
9096 | } |
9097 | |
9098 | case SVE::BI__builtin_sve_svdupq_n_b8: |
9099 | case SVE::BI__builtin_sve_svdupq_n_b16: |
9100 | case SVE::BI__builtin_sve_svdupq_n_b32: |
9101 | case SVE::BI__builtin_sve_svdupq_n_b64: |
9102 | case SVE::BI__builtin_sve_svdupq_n_u8: |
9103 | case SVE::BI__builtin_sve_svdupq_n_s8: |
9104 | case SVE::BI__builtin_sve_svdupq_n_u64: |
9105 | case SVE::BI__builtin_sve_svdupq_n_f64: |
9106 | case SVE::BI__builtin_sve_svdupq_n_s64: |
9107 | case SVE::BI__builtin_sve_svdupq_n_u16: |
9108 | case SVE::BI__builtin_sve_svdupq_n_f16: |
9109 | case SVE::BI__builtin_sve_svdupq_n_bf16: |
9110 | case SVE::BI__builtin_sve_svdupq_n_s16: |
9111 | case SVE::BI__builtin_sve_svdupq_n_u32: |
9112 | case SVE::BI__builtin_sve_svdupq_n_f32: |
9113 | case SVE::BI__builtin_sve_svdupq_n_s32: { |
9114 | // These builtins are implemented by building a fixed-length vector from the |
9115 | // scalar operands and replicating its first 128 bits with dupq.lane. |
9116 | unsigned NumOpnds = Ops.size(); |
9117 | |
9118 | bool IsBoolTy = |
9119 | cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1); |
9120 | |
9121 | // For svdupq_n_b* the element type is an integer of width 128/numelts, |
9122 | // so that the compare can use the width that is natural for the expected |
9123 | // number of predicate lanes. |
9124 | llvm::Type *EltTy = Ops[0]->getType(); |
9125 | if (IsBoolTy) |
9126 | EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds); |
9127 | |
9128 | SmallVector<llvm::Value *, 16> VecOps; |
9129 | for (unsigned I = 0; I < NumOpnds; ++I) |
9130 | VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy)); |
9131 | Value *Vec = BuildVector(VecOps); |
9132 | |
9133 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9134 | Value *Pred = EmitSVEAllTruePred(TypeFlags); |
9135 | |
9136 | llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy); |
9137 | Value *InsertSubVec = Builder.CreateInsertVector( |
9138 | OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0)); |
9139 | |
9140 | Function *F = |
9141 | CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy); |
9142 | Value *DupQLane = |
9143 | Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)}); |
9144 | |
9145 | if (!IsBoolTy) |
9146 | return DupQLane; |
9147 | |
9148 | // For svdupq_n_b* we need to add an additional 'cmpne' with '0'. |
9149 | F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne |
9150 | : Intrinsic::aarch64_sve_cmpne_wide, |
9151 | OverloadedTy); |
9152 | Value *Call = Builder.CreateCall( |
9153 | F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))}); |
9154 | return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty)); |
9155 | } |
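| // e.g. svdupq_n_s32(a, b, c, d): the four scalars form a fixed <4 x i32>, |
| // that vector lands in element 0 of a scalable undef, and dupq.lane then |
| // replicates the 128-bit block across the whole register. |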
9156 | |
9157 | case SVE::BI__builtin_sve_svpfalse_b: |
9158 | return ConstantInt::getFalse(Ty); |
9159 | |
9160 | case SVE::BI__builtin_sve_svlen_bf16: |
9161 | case SVE::BI__builtin_sve_svlen_f16: |
9162 | case SVE::BI__builtin_sve_svlen_f32: |
9163 | case SVE::BI__builtin_sve_svlen_f64: |
9164 | case SVE::BI__builtin_sve_svlen_s8: |
9165 | case SVE::BI__builtin_sve_svlen_s16: |
9166 | case SVE::BI__builtin_sve_svlen_s32: |
9167 | case SVE::BI__builtin_sve_svlen_s64: |
9168 | case SVE::BI__builtin_sve_svlen_u8: |
9169 | case SVE::BI__builtin_sve_svlen_u16: |
9170 | case SVE::BI__builtin_sve_svlen_u32: |
9171 | case SVE::BI__builtin_sve_svlen_u64: { |
9172 | SVETypeFlags TF(Builtin->TypeModifier); |
9173 | auto VTy = cast<llvm::VectorType>(getSVEType(TF)); |
9174 | auto *NumEls = |
9175 | llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue()); |
9176 | |
9177 | Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty); |
9178 | return Builder.CreateMul(NumEls, Builder.CreateCall(F)); |
9179 | } |
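| // Worked example: svlen_s32 operates on <vscale x 4 x i32>, so this |
| // returns 4 * vscale; on a 256-bit implementation (vscale == 2) that is 8. |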
9180 | |
9181 | case SVE::BI__builtin_sve_svtbl2_u8: |
9182 | case SVE::BI__builtin_sve_svtbl2_s8: |
9183 | case SVE::BI__builtin_sve_svtbl2_u16: |
9184 | case SVE::BI__builtin_sve_svtbl2_s16: |
9185 | case SVE::BI__builtin_sve_svtbl2_u32: |
9186 | case SVE::BI__builtin_sve_svtbl2_s32: |
9187 | case SVE::BI__builtin_sve_svtbl2_u64: |
9188 | case SVE::BI__builtin_sve_svtbl2_s64: |
9189 | case SVE::BI__builtin_sve_svtbl2_f16: |
9190 | case SVE::BI__builtin_sve_svtbl2_bf16: |
9191 | case SVE::BI__builtin_sve_svtbl2_f32: |
9192 | case SVE::BI__builtin_sve_svtbl2_f64: { |
9193 | SVETypeFlags TF(Builtin->TypeModifier); |
9194 | auto VTy = cast<llvm::VectorType>(getSVEType(TF)); |
9195 | auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy); |
9196 | Function *FExtr = |
9197 | CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy}); |
9198 | Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)}); |
9199 | Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)}); |
9200 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy); |
9201 | return Builder.CreateCall(F, {V0, V1, Ops[1]}); |
9202 | } |
9203 | } |
9204 | |
9205 | // Should not happen; every case above returns. |
9206 | return nullptr; |
9207 | } |
9208 | |
9209 | Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, |
9210 | const CallExpr *E, |
9211 | llvm::Triple::ArchType Arch) { |
9212 | if (BuiltinID >= AArch64::FirstSVEBuiltin && |
9213 | BuiltinID <= AArch64::LastSVEBuiltin) |
9214 | return EmitAArch64SVEBuiltinExpr(BuiltinID, E); |
9215 | |
9216 | unsigned HintID = static_cast<unsigned>(-1); |
9217 | switch (BuiltinID) { |
9218 | default: break; |
9219 | case AArch64::BI__builtin_arm_nop: |
9220 | HintID = 0; |
9221 | break; |
9222 | case AArch64::BI__builtin_arm_yield: |
9223 | case AArch64::BI__yield: |
9224 | HintID = 1; |
9225 | break; |
9226 | case AArch64::BI__builtin_arm_wfe: |
9227 | case AArch64::BI__wfe: |
9228 | HintID = 2; |
9229 | break; |
9230 | case AArch64::BI__builtin_arm_wfi: |
9231 | case AArch64::BI__wfi: |
9232 | HintID = 3; |
9233 | break; |
9234 | case AArch64::BI__builtin_arm_sev: |
9235 | case AArch64::BI__sev: |
9236 | HintID = 4; |
9237 | break; |
9238 | case AArch64::BI__builtin_arm_sevl: |
9239 | case AArch64::BI__sevl: |
9240 | HintID = 5; |
9241 | break; |
9242 | } |
9243 | |
9244 | if (HintID != static_cast<unsigned>(-1)) { |
9245 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint); |
9246 | return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID)); |
9247 | } |
9248 | |
9249 | if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { |
9250 | Value *Address = EmitScalarExpr(E->getArg(0)); |
9251 | Value *RW = EmitScalarExpr(E->getArg(1)); |
9252 | Value *CacheLevel = EmitScalarExpr(E->getArg(2)); |
9253 | Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); |
9254 | Value *IsData = EmitScalarExpr(E->getArg(4)); |
9255 | |
9256 | Value *Locality = nullptr; |
9257 | if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) { |
9258 | // Temporal fetch, needs to convert cache level to locality. |
9259 | Locality = llvm::ConstantInt::get(Int32Ty, |
9260 | -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3); |
9261 | } else { |
9262 | // Streaming fetch. |
9263 | Locality = llvm::ConstantInt::get(Int32Ty, 0); |
9264 | } |
9265 | |
9266 | // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify |
9267 | // PLDL3STRM or PLDL2STRM. |
9268 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
9269 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
9270 | } |
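| // e.g. __builtin_arm_prefetch(p, /*rw*/0, /*level*/1, /*policy*/0, 1): a |
| // temporal fetch targeting L2 maps to llvm.prefetch locality 3 - 1 == 2. |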
9271 | |
9272 | if (BuiltinID == AArch64::BI__builtin_arm_rbit) { |
9273 | assert((getContext().getTypeSize(E->getType()) == 32) && |
9274 | "rbit of unusual size!"); |
9275 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9276 | return Builder.CreateCall( |
9277 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
9278 | } |
9279 | if (BuiltinID == AArch64::BI__builtin_arm_rbit64) { |
9280 | assert((getContext().getTypeSize(E->getType()) == 64) && |
9281 | "rbit of unusual size!"); |
9282 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9283 | return Builder.CreateCall( |
9284 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
9285 | } |
9286 | |
9287 | if (BuiltinID == AArch64::BI__builtin_arm_cls) { |
9288 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9289 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg, |
9290 | "cls"); |
9291 | } |
9292 | if (BuiltinID == AArch64::BI__builtin_arm_cls64) { |
9293 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9294 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg, |
9295 | "cls"); |
9296 | } |
9297 | |
9298 | if (BuiltinID == AArch64::BI__builtin_arm_frint32zf || |
9299 | BuiltinID == AArch64::BI__builtin_arm_frint32z) { |
9300 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9301 | llvm::Type *Ty = Arg->getType(); |
9302 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty), |
9303 | Arg, "frint32z"); |
9304 | } |
9305 | |
9306 | if (BuiltinID == AArch64::BI__builtin_arm_frint64zf || |
9307 | BuiltinID == AArch64::BI__builtin_arm_frint64z) { |
9308 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9309 | llvm::Type *Ty = Arg->getType(); |
9310 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty), |
9311 | Arg, "frint64z"); |
9312 | } |
9313 | |
9314 | if (BuiltinID == AArch64::BI__builtin_arm_frint32xf || |
9315 | BuiltinID == AArch64::BI__builtin_arm_frint32x) { |
9316 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9317 | llvm::Type *Ty = Arg->getType(); |
9318 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty), |
9319 | Arg, "frint32x"); |
9320 | } |
9321 | |
9322 | if (BuiltinID == AArch64::BI__builtin_arm_frint64xf || |
9323 | BuiltinID == AArch64::BI__builtin_arm_frint64x) { |
9324 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9325 | llvm::Type *Ty = Arg->getType(); |
9326 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty), |
9327 | Arg, "frint64x"); |
9328 | } |
9329 | |
9330 | if (BuiltinID == AArch64::BI__builtin_arm_jcvt) { |
9331 | assert((getContext().getTypeSize(E->getType()) == 32) && |
9332 | "__jcvt of unusual size!"); |
9333 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9334 | return Builder.CreateCall( |
9335 | CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg); |
9336 | } |
9337 | |
9338 | if (BuiltinID == AArch64::BI__builtin_arm_ld64b || |
9339 | BuiltinID == AArch64::BI__builtin_arm_st64b || |
9340 | BuiltinID == AArch64::BI__builtin_arm_st64bv || |
9341 | BuiltinID == AArch64::BI__builtin_arm_st64bv0) { |
9342 | llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0)); |
9343 | llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1)); |
9344 | |
9345 | if (BuiltinID == AArch64::BI__builtin_arm_ld64b) { |
9346 | // Load from the address via an LLVM intrinsic, receiving a |
9347 | // tuple of 8 i64 words, and store each one to ValPtr. |
9348 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b); |
9349 | llvm::Value *Val = Builder.CreateCall(F, MemAddr); |
9350 | llvm::Value *ToRet; |
9351 | for (size_t i = 0; i < 8; i++) { |
9352 | llvm::Value *ValOffsetPtr = |
9353 | Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i)); |
9354 | Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8)); |
9355 | ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr); |
9356 | } |
9357 | return ToRet; |
9358 | } else { |
9359 | // Load 8 i64 words from ValPtr, and store them to the address |
9360 | // via an LLVM intrinsic. |
9361 | SmallVector<llvm::Value *, 9> Args; |
9362 | Args.push_back(MemAddr); |
9363 | for (size_t i = 0; i < 8; i++) { |
9364 | llvm::Value *ValOffsetPtr = |
9365 | Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i)); |
9366 | Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8)); |
9367 | Args.push_back(Builder.CreateLoad(Addr)); |
9368 | } |
9369 | |
9370 | auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b |
9371 | ? Intrinsic::aarch64_st64b |
9372 | : BuiltinID == AArch64::BI__builtin_arm_st64bv |
9373 | ? Intrinsic::aarch64_st64bv |
9374 | : Intrinsic::aarch64_st64bv0); |
9375 | Function *F = CGM.getIntrinsic(Intr); |
9376 | return Builder.CreateCall(F, Args); |
9377 | } |
9378 | } |
9379 | |
9380 | if (BuiltinID == AArch64::BI__builtin_arm_rndr || |
9381 | BuiltinID == AArch64::BI__builtin_arm_rndrrs) { |
9382 | |
9383 | auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr |
9384 | ? Intrinsic::aarch64_rndr |
9385 | : Intrinsic::aarch64_rndrrs); |
9386 | Function *F = CGM.getIntrinsic(Intr); |
9387 | llvm::Value *Val = Builder.CreateCall(F); |
9388 | Value *RandomValue = Builder.CreateExtractValue(Val, 0); |
9389 | Value *Status = Builder.CreateExtractValue(Val, 1); |
9390 | |
9391 | Address MemAddress = EmitPointerWithAlignment(E->getArg(0)); |
9392 | Builder.CreateStore(RandomValue, MemAddress); |
9393 | Status = Builder.CreateZExt(Status, Int32Ty); |
9394 | return Status; |
9395 | } |
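| // e.g. uint64_t v; int ok = __builtin_arm_rndr(&v); the intrinsic's |
| // {i64, i1} result is split: the i64 is stored through the pointer and |
| // the i1 status is zero-extended into the returned int. |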
9396 | |
9397 | if (BuiltinID == AArch64::BI__clear_cache) { |
9398 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); |
9399 | const FunctionDecl *FD = E->getDirectCallee(); |
9400 | Value *Ops[2]; |
9401 | for (unsigned i = 0; i < 2; i++) |
9402 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
9403 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
9404 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
9405 | StringRef Name = FD->getName(); |
9406 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
9407 | } |
9408 | |
9409 | if ((BuiltinID == AArch64::BI__builtin_arm_ldrex || |
9410 | BuiltinID == AArch64::BI__builtin_arm_ldaex) && |
9411 | getContext().getTypeSize(E->getType()) == 128) { |
9412 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
9413 | ? Intrinsic::aarch64_ldaxp |
9414 | : Intrinsic::aarch64_ldxp); |
9415 | |
9416 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
9417 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
9418 | "ldxp"); |
9419 | |
9420 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
9421 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
9422 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
9423 | Val0 = Builder.CreateZExt(Val0, Int128Ty); |
9424 | Val1 = Builder.CreateZExt(Val1, Int128Ty); |
9425 | |
9426 | Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64); |
9427 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); |
9428 | Val = Builder.CreateOr(Val, Val1); |
9429 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
9430 | } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex || |
9431 | BuiltinID == AArch64::BI__builtin_arm_ldaex) { |
9432 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
9433 | |
9434 | QualType Ty = E->getType(); |
9435 | llvm::Type *RealResTy = ConvertType(Ty); |
9436 | llvm::Type *PtrTy = llvm::IntegerType::get( |
9437 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); |
9438 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
9439 | |
9440 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
9441 | ? Intrinsic::aarch64_ldaxr |
9442 | : Intrinsic::aarch64_ldxr, |
9443 | PtrTy); |
9444 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr"); |
9445 | |
9446 | if (RealResTy->isPointerTy()) |
9447 | return Builder.CreateIntToPtr(Val, RealResTy); |
9448 | |
9449 | llvm::Type *IntResTy = llvm::IntegerType::get( |
9450 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
9451 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); |
9452 | return Builder.CreateBitCast(Val, RealResTy); |
9453 | } |
9454 | |
9455 | if ((BuiltinID == AArch64::BI__builtin_arm_strex || |
9456 | BuiltinID == AArch64::BI__builtin_arm_stlex) && |
9457 | getContext().getTypeSize(E->getArg(0)->getType()) == 128) { |
9458 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
9459 | ? Intrinsic::aarch64_stlxp |
9460 | : Intrinsic::aarch64_stxp); |
9461 | llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty); |
9462 | |
9463 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
9464 | EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true); |
9465 | |
9466 | Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy)); |
9467 | llvm::Value *Val = Builder.CreateLoad(Tmp); |
9468 | |
9469 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
9470 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
9471 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), |
9472 | Int8PtrTy); |
9473 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); |
9474 | } |
9475 | |
9476 | if (BuiltinID == AArch64::BI__builtin_arm_strex || |
9477 | BuiltinID == AArch64::BI__builtin_arm_stlex) { |
9478 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
9479 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
9480 | |
9481 | QualType Ty = E->getArg(0)->getType(); |
9482 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
9483 | getContext().getTypeSize(Ty)); |
9484 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
9485 | |
9486 | if (StoreVal->getType()->isPointerTy()) |
9487 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty); |
9488 | else { |
9489 | llvm::Type *IntTy = llvm::IntegerType::get( |
9490 | getLLVMContext(), |
9491 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
9492 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
9493 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty); |
9494 | } |
9495 | |
9496 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
9497 | ? Intrinsic::aarch64_stlxr |
9498 | : Intrinsic::aarch64_stxr, |
9499 | StoreAddr->getType()); |
9500 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr"); |
9501 | } |
9502 | |
9503 | if (BuiltinID == AArch64::BI__getReg) { |
9504 | Expr::EvalResult Result; |
9505 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
9506 | llvm_unreachable("Sema will ensure that the parameter is constant")::llvm::llvm_unreachable_internal("Sema will ensure that the parameter is constant" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 9506); |
9507 | |
9508 | llvm::APSInt Value = Result.Val.getInt(); |
9509 | LLVMContext &Context = CGM.getLLVMContext(); |
9510 | std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10); |
9511 | |
9512 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)}; |
9513 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
9514 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
9515 | |
9516 | llvm::Function *F = |
9517 | CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty}); |
9518 | return Builder.CreateCall(F, Metadata); |
9519 | } |
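| // e.g. __getReg(29) reads the AArch64 frame pointer via |
| //   call i64 @llvm.read_register.i64(metadata !{!"x29"}) |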
9520 | |
9521 | if (BuiltinID == AArch64::BI__builtin_arm_clrex) { |
9522 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); |
9523 | return Builder.CreateCall(F); |
9524 | } |
9525 | |
9526 | if (BuiltinID == AArch64::BI_ReadWriteBarrier) |
9527 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
9528 | llvm::SyncScope::SingleThread); |
9529 | |
9530 | // CRC32 |
9531 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
9532 | switch (BuiltinID) { |
9533 | case AArch64::BI__builtin_arm_crc32b: |
9534 | CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; |
9535 | case AArch64::BI__builtin_arm_crc32cb: |
9536 | CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; |
9537 | case AArch64::BI__builtin_arm_crc32h: |
9538 | CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; |
9539 | case AArch64::BI__builtin_arm_crc32ch: |
9540 | CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; |
9541 | case AArch64::BI__builtin_arm_crc32w: |
9542 | CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; |
9543 | case AArch64::BI__builtin_arm_crc32cw: |
9544 | CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; |
9545 | case AArch64::BI__builtin_arm_crc32d: |
9546 | CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; |
9547 | case AArch64::BI__builtin_arm_crc32cd: |
9548 | CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; |
9549 | } |
9550 | |
9551 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
9552 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
9553 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
9554 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
9555 | |
9556 | llvm::Type *DataTy = F->getFunctionType()->getParamType(1); |
9557 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy); |
9558 | |
9559 | return Builder.CreateCall(F, {Arg0, Arg1}); |
9560 | } |
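| // e.g. __builtin_arm_crc32b(acc, data): the byte operand is widened with |
| // zext to match the i32 data parameter of @llvm.aarch64.crc32b. |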
9561 | |
9562 | // Memory Tagging Extensions (MTE) Intrinsics |
9563 | Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic; |
9564 | switch (BuiltinID) { |
9565 | case AArch64::BI__builtin_arm_irg: |
9566 | MTEIntrinsicID = Intrinsic::aarch64_irg; break; |
9567 | case AArch64::BI__builtin_arm_addg: |
9568 | MTEIntrinsicID = Intrinsic::aarch64_addg; break; |
9569 | case AArch64::BI__builtin_arm_gmi: |
9570 | MTEIntrinsicID = Intrinsic::aarch64_gmi; break; |
9571 | case AArch64::BI__builtin_arm_ldg: |
9572 | MTEIntrinsicID = Intrinsic::aarch64_ldg; break; |
9573 | case AArch64::BI__builtin_arm_stg: |
9574 | MTEIntrinsicID = Intrinsic::aarch64_stg; break; |
9575 | case AArch64::BI__builtin_arm_subp: |
9576 | MTEIntrinsicID = Intrinsic::aarch64_subp; break; |
9577 | } |
9578 | |
9579 | if (MTEIntrinsicID != Intrinsic::not_intrinsic) { |
9580 | llvm::Type *T = ConvertType(E->getType()); |
9581 | |
9582 | if (MTEIntrinsicID == Intrinsic::aarch64_irg) { |
9583 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9584 | Value *Mask = EmitScalarExpr(E->getArg(1)); |
9585 | |
9586 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9587 | Mask = Builder.CreateZExt(Mask, Int64Ty); |
9588 | Value *RV = Builder.CreateCall( |
9589 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask}); |
9590 | return Builder.CreatePointerCast(RV, T); |
9591 | } |
9592 | if (MTEIntrinsicID == Intrinsic::aarch64_addg) { |
9593 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9594 | Value *TagOffset = EmitScalarExpr(E->getArg(1)); |
9595 | |
9596 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9597 | TagOffset = Builder.CreateZExt(TagOffset, Int64Ty); |
9598 | Value *RV = Builder.CreateCall( |
9599 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset}); |
9600 | return Builder.CreatePointerCast(RV, T); |
9601 | } |
9602 | if (MTEIntrinsicID == Intrinsic::aarch64_gmi) { |
9603 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9604 | Value *ExcludedMask = EmitScalarExpr(E->getArg(1)); |
9605 | |
9606 | ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty); |
9607 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9608 | return Builder.CreateCall( |
9609 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask}); |
9610 | } |
9611 | // Although it is possible to supply a different return |
9612 | // address (first arg) to this intrinsic, for now we set |
9613 | // the return address to the same value as the input address. |
9614 | if (MTEIntrinsicID == Intrinsic::aarch64_ldg) { |
9615 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
9616 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
9617 | Value *RV = Builder.CreateCall( |
9618 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
9619 | return Builder.CreatePointerCast(RV, T); |
9620 | } |
9621 | // Although it is possible to supply a different tag (to set) |
9622 | // to this intrinsic (as first arg), for now we supply |
9623 | // the tag that is in the input address argument (the common use case). |
9624 | if (MTEIntrinsicID == Intrinsic::aarch64_stg) { |
9625 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
9626 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
9627 | return Builder.CreateCall( |
9628 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
9629 | } |
9630 | if (MTEIntrinsicID == Intrinsic::aarch64_subp) { |
9631 | Value *PointerA = EmitScalarExpr(E->getArg(0)); |
9632 | Value *PointerB = EmitScalarExpr(E->getArg(1)); |
9633 | PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy); |
9634 | PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy); |
9635 | return Builder.CreateCall( |
9636 | CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB}); |
9637 | } |
9638 | } |
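| // e.g. __builtin_arm_irg(p, 0): p is cast to i8*, the mask widened to |
| // i64, and @llvm.aarch64.irg returns a randomly re-tagged copy of p. |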
9639 | |
9640 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
9641 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
9642 | BuiltinID == AArch64::BI__builtin_arm_rsrp || |
9643 | BuiltinID == AArch64::BI__builtin_arm_wsr || |
9644 | BuiltinID == AArch64::BI__builtin_arm_wsr64 || |
9645 | BuiltinID == AArch64::BI__builtin_arm_wsrp) { |
9646 | |
9647 | SpecialRegisterAccessKind AccessKind = Write; |
9648 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
9649 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
9650 | BuiltinID == AArch64::BI__builtin_arm_rsrp) |
9651 | AccessKind = VolatileRead; |
9652 | |
9653 | bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp || |
9654 | BuiltinID == AArch64::BI__builtin_arm_wsrp; |
9655 | |
9656 | bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr && |
9657 | BuiltinID != AArch64::BI__builtin_arm_wsr; |
9658 | |
9659 | llvm::Type *ValueType; |
9660 | llvm::Type *RegisterType = Int64Ty; |
9661 | if (IsPointerBuiltin) { |
9662 | ValueType = VoidPtrTy; |
9663 | } else if (Is64Bit) { |
9664 | ValueType = Int64Ty; |
9665 | } else { |
9666 | ValueType = Int32Ty; |
9667 | } |
9668 | |
9669 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, |
9670 | AccessKind); |
9671 | } |
9672 | |
9673 | if (BuiltinID == AArch64::BI_ReadStatusReg || |
9674 | BuiltinID == AArch64::BI_WriteStatusReg) { |
9675 | LLVMContext &Context = CGM.getLLVMContext(); |
9676 | |
9677 | unsigned SysReg = |
9678 | E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
9679 | |
9680 | std::string SysRegStr; |
9681 | llvm::raw_string_ostream(SysRegStr) << |
9682 | ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << |
9683 | ((SysReg >> 11) & 7) << ":" << |
9684 | ((SysReg >> 7) & 15) << ":" << |
9685 | ((SysReg >> 3) & 15) << ":" << |
9686 | ( SysReg & 7); |
9687 | |
9688 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) }; |
9689 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
9690 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
9691 | |
9692 | llvm::Type *RegisterType = Int64Ty; |
9693 | llvm::Type *Types[] = { RegisterType }; |
9694 | |
9695 | if (BuiltinID == AArch64::BI_ReadStatusReg) { |
9696 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
9697 | |
9698 | return Builder.CreateCall(F, Metadata); |
9699 | } |
9700 | |
9701 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
9702 | llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
9703 | |
9704 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
9705 | } |
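| // Worked example (assuming MSVC's ARM64_SYSREG encoding): TPIDR_EL0 is |
| // op0=3, op1=3, CRn=13, CRm=0, op2=2, i.e. 0x5e82, which decodes above |
| // into the register string "3:3:13:0:2". |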
9706 | |
9707 | if (BuiltinID == AArch64::BI_AddressOfReturnAddress) { |
9708 | llvm::Function *F = |
9709 | CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy); |
9710 | return Builder.CreateCall(F); |
9711 | } |
9712 | |
9713 | if (BuiltinID == AArch64::BI__builtin_sponentry) { |
9714 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy); |
9715 | return Builder.CreateCall(F); |
9716 | } |
9717 | |
9718 | if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) { |
9719 | llvm::Type *ResType = ConvertType(E->getType()); |
9720 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
9721 | |
9722 | bool IsSigned = BuiltinID == AArch64::BI__mulh; |
9723 | Value *LHS = |
9724 | Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned); |
9725 | Value *RHS = |
9726 | Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned); |
9727 | |
9728 | Value *MulResult, *HigherBits; |
9729 | if (IsSigned) { |
9730 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
9731 | HigherBits = Builder.CreateAShr(MulResult, 64); |
9732 | } else { |
9733 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
9734 | HigherBits = Builder.CreateLShr(MulResult, 64); |
9735 | } |
9736 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
9737 | |
9738 | return HigherBits; |
9739 | } |
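| // Worked example: __umulh(0xFFFFFFFFFFFFFFFF, 2) widens both operands to |
| // i128, multiplies to 0x1FFFFFFFFFFFFFFFE, and the lshr by 64 yields 1. |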
9740 | |
9741 | // Handle MSVC intrinsics before argument evaluation to prevent double |
9742 | // evaluation. |
9743 | if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID)) |
9744 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
9745 | |
9746 | // Find out if any arguments are required to be integer constant |
9747 | // expressions. |
9748 | unsigned ICEArguments = 0; |
9749 | ASTContext::GetBuiltinTypeError Error; |
9750 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
9751 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
9752 | |
9753 | llvm::SmallVector<Value*, 4> Ops; |
9754 | Address PtrOp0 = Address::invalid(); |
9755 | for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { |
9756 | if (i == 0) { |
9757 | switch (BuiltinID) { |
9758 | case NEON::BI__builtin_neon_vld1_v: |
9759 | case NEON::BI__builtin_neon_vld1q_v: |
9760 | case NEON::BI__builtin_neon_vld1_dup_v: |
9761 | case NEON::BI__builtin_neon_vld1q_dup_v: |
9762 | case NEON::BI__builtin_neon_vld1_lane_v: |
9763 | case NEON::BI__builtin_neon_vld1q_lane_v: |
9764 | case NEON::BI__builtin_neon_vst1_v: |
9765 | case NEON::BI__builtin_neon_vst1q_v: |
9766 | case NEON::BI__builtin_neon_vst1_lane_v: |
9767 | case NEON::BI__builtin_neon_vst1q_lane_v: |
9768 | // Get the alignment for the argument in addition to the value; |
9769 | // we'll use it later. |
9770 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
9771 | Ops.push_back(PtrOp0.getPointer()); |
9772 | continue; |
9773 | } |
9774 | } |
9775 | if ((ICEArguments & (1 << i)) == 0) { |
9776 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
9777 | } else { |
9778 | // If this is required to be a constant, constant fold it so that we know |
9779 | // that the generated intrinsic gets a ConstantInt. |
9780 | Ops.push_back(llvm::ConstantInt::get( |
9781 | getLLVMContext(), |
9782 | *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
9783 | } |
9784 | } |
9785 | |
9786 | auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap); |
9787 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
9788 | SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); |
9789 | |
9790 | if (Builtin) { |
9791 | Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); |
9792 | Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E); |
9793 | assert(Result && "SISD intrinsic should have been handled")(static_cast <bool> (Result && "SISD intrinsic should have been handled" ) ? void (0) : __assert_fail ("Result && \"SISD intrinsic should have been handled\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 9793, __extension__ __PRETTY_FUNCTION__)); |
9794 | return Result; |
9795 | } |
9796 | |
9797 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
9798 | NeonTypeFlags Type(0); |
9799 | if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext())) |
9800 | // Determine the type of this overloaded NEON intrinsic. |
9801 | Type = NeonTypeFlags(Result->getZExtValue()); |
9802 | |
9803 | bool usgn = Type.isUnsigned(); |
9804 | bool quad = Type.isQuad(); |
9805 | |
9806 | // Handle non-overloaded intrinsics first. |
9807 | switch (BuiltinID) { |
9808 | default: break; |
9809 | case NEON::BI__builtin_neon_vabsh_f16: |
9810 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9811 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs"); |
9812 | case NEON::BI__builtin_neon_vaddq_p128: { |
9813 | llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128); |
9814 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9815 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
9816 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
9817 | Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); |
9818 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
9819 | return Builder.CreateBitCast(Ops[0], Int128Ty); |
9820 | } |
9821 | case NEON::BI__builtin_neon_vldrq_p128: { |
9822 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
9823 | llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0); |
9824 | Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy); |
9825 | return Builder.CreateAlignedLoad(Int128Ty, Ptr, |
9826 | CharUnits::fromQuantity(16)); |
9827 | } |
9828 | case NEON::BI__builtin_neon_vstrq_p128: { |
9829 | llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128); |
9830 | Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy); |
9831 | return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); |
9832 | } |
9833 | case NEON::BI__builtin_neon_vcvts_f32_u32: |
9834 | case NEON::BI__builtin_neon_vcvtd_f64_u64: |
9835 | usgn = true; |
9836 | LLVM_FALLTHROUGH; |
9837 | case NEON::BI__builtin_neon_vcvts_f32_s32: |
9838 | case NEON::BI__builtin_neon_vcvtd_f64_s64: { |
9839 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9840 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; |
9841 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; |
9842 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; |
9843 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
9844 | if (usgn) |
9845 | return Builder.CreateUIToFP(Ops[0], FTy); |
9846 | return Builder.CreateSIToFP(Ops[0], FTy); |
9847 | } |
9848 | case NEON::BI__builtin_neon_vcvth_f16_u16: |
9849 | case NEON::BI__builtin_neon_vcvth_f16_u32: |
9850 | case NEON::BI__builtin_neon_vcvth_f16_u64: |
9851 | usgn = true; |
9852 | LLVM_FALLTHROUGH; |
9853 | case NEON::BI__builtin_neon_vcvth_f16_s16: |
9854 | case NEON::BI__builtin_neon_vcvth_f16_s32: |
9855 | case NEON::BI__builtin_neon_vcvth_f16_s64: { |
9856 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9857 | llvm::Type *FTy = HalfTy; |
9858 | llvm::Type *InTy; |
9859 | if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) |
9860 | InTy = Int64Ty; |
9861 | else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) |
9862 | InTy = Int32Ty; |
9863 | else |
9864 | InTy = Int16Ty; |
9865 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
9866 | if (usgn) |
9867 | return Builder.CreateUIToFP(Ops[0], FTy); |
9868 | return Builder.CreateSIToFP(Ops[0], FTy); |
9869 | } |
9870 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
9871 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
9872 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
9873 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
9874 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
9875 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
9876 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
9877 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
9878 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
9879 | case NEON::BI__builtin_neon_vcvth_s16_f16: { |
9880 | unsigned Int; |
9881 | llvm::Type* InTy = Int32Ty; |
9882 | llvm::Type* FTy = HalfTy; |
9883 | llvm::Type *Tys[2] = {InTy, FTy}; |
9884 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9885 | switch (BuiltinID) { |
9886 | default: llvm_unreachable("missing builtin ID in switch!"); |
9887 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
9888 | Int = Intrinsic::aarch64_neon_fcvtau; break; |
9889 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
9890 | Int = Intrinsic::aarch64_neon_fcvtmu; break; |
9891 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
9892 | Int = Intrinsic::aarch64_neon_fcvtnu; break; |
9893 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
9894 | Int = Intrinsic::aarch64_neon_fcvtpu; break; |
9895 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
9896 | Int = Intrinsic::aarch64_neon_fcvtzu; break; |
9897 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
9898 | Int = Intrinsic::aarch64_neon_fcvtas; break; |
9899 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
9900 | Int = Intrinsic::aarch64_neon_fcvtms; break; |
9901 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
9902 | Int = Intrinsic::aarch64_neon_fcvtns; break; |
9903 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
9904 | Int = Intrinsic::aarch64_neon_fcvtps; break; |
9905 | case NEON::BI__builtin_neon_vcvth_s16_f16: |
9906 | Int = Intrinsic::aarch64_neon_fcvtzs; break; |
9907 | } |
9908 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt"); |
9909 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
9910 | } |
9911 | case NEON::BI__builtin_neon_vcaleh_f16: |
9912 | case NEON::BI__builtin_neon_vcalth_f16: |
9913 | case NEON::BI__builtin_neon_vcageh_f16: |
9914 | case NEON::BI__builtin_neon_vcagth_f16: { |
9915 | unsigned Int; |
9916 | llvm::Type* InTy = Int32Ty; |
9917 | llvm::Type* FTy = HalfTy; |
9918 | llvm::Type *Tys[2] = {InTy, FTy}; |
9919 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9920 | switch (BuiltinID) { |
9921 | default: llvm_unreachable("missing builtin ID in switch!"); |
9922 | case NEON::BI__builtin_neon_vcageh_f16: |
9923 | Int = Intrinsic::aarch64_neon_facge; break; |
9924 | case NEON::BI__builtin_neon_vcagth_f16: |
9925 | Int = Intrinsic::aarch64_neon_facgt; break; |
9926 | case NEON::BI__builtin_neon_vcaleh_f16: |
9927 | Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break; |
9928 | case NEON::BI__builtin_neon_vcalth_f16: |
9929 | Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break; |
9930 | } |
9931 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg"); |
9932 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
9933 | } |
9934 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
9935 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: { |
9936 | unsigned Int; |
9937 | llvm::Type* InTy = Int32Ty; |
9938 | llvm::Type* FTy = HalfTy; |
9939 | llvm::Type *Tys[2] = {InTy, FTy}; |
9940 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9941 | switch (BuiltinID) { |
9942 | default: llvm_unreachable("missing builtin ID in switch!"); |
9943 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
9944 | Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; |
9945 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: |
9946 | Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; |
9947 | } |
9948 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
9949 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
9950 | } |
9951 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
9952 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: { |
9953 | unsigned Int; |
9954 | llvm::Type* FTy = HalfTy; |
9955 | llvm::Type* InTy = Int32Ty; |
9956 | llvm::Type *Tys[2] = {FTy, InTy}; |
9957 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9958 | switch (BuiltinID) { |
9959 | default: llvm_unreachable("missing builtin ID in switch!"); |
9960 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
9961 | Int = Intrinsic::aarch64_neon_vcvtfxs2fp; |
9962 | Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext"); |
9963 | break; |
9964 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: |
9965 | Int = Intrinsic::aarch64_neon_vcvtfxu2fp; |
9966 | Ops[0] = Builder.CreateZExt(Ops[0], InTy); |
9967 | break; |
9968 | } |
9969 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
9970 | } |
9971 | case NEON::BI__builtin_neon_vpaddd_s64: { |
9972 | auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2); |
9973 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
9974 | // The vector is v2i64, so make sure it's bitcast to that. |
9975 | Vec = Builder.CreateBitCast(Vec, Ty, "v2i64"); |
9976 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
9977 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
9978 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
9979 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
9980 | // Pairwise addition of a v2i64 into a scalar i64. |
9981 | return Builder.CreateAdd(Op0, Op1, "vpaddd"); |
9982 | } |
9983 | case NEON::BI__builtin_neon_vpaddd_f64: { |
9984 | auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2); |
9985 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
9986 | // The vector is v2f64, so make sure it's bitcast to that. |
9987 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f64"); |
9988 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
9989 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
9990 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
9991 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
9992 | // Pairwise addition of a v2f64 into a scalar f64. |
9993 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
9994 | } |
9995 | case NEON::BI__builtin_neon_vpadds_f32: { |
9996 | auto *Ty = llvm::FixedVectorType::get(FloatTy, 2); |
9997 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
9998 | // The vector is v2f32, so make sure it's bitcast to that. |
9999 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f32"); |
10000 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
10001 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
10002 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
10003 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
10004 | // Pairwise addition of a v2f32 into a scalar f32. |
10005 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
10006 | } |
10007 | case NEON::BI__builtin_neon_vceqzd_s64: |
10008 | case NEON::BI__builtin_neon_vceqzd_f64: |
10009 | case NEON::BI__builtin_neon_vceqzs_f32: |
10010 | case NEON::BI__builtin_neon_vceqzh_f16: |
10011 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10012 | return EmitAArch64CompareBuiltinExpr( |
10013 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10014 | ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz"); |
10015 | case NEON::BI__builtin_neon_vcgezd_s64: |
10016 | case NEON::BI__builtin_neon_vcgezd_f64: |
10017 | case NEON::BI__builtin_neon_vcgezs_f32: |
10018 | case NEON::BI__builtin_neon_vcgezh_f16: |
10019 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10020 | return EmitAArch64CompareBuiltinExpr( |
10021 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10022 | ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez"); |
10023 | case NEON::BI__builtin_neon_vclezd_s64: |
10024 | case NEON::BI__builtin_neon_vclezd_f64: |
10025 | case NEON::BI__builtin_neon_vclezs_f32: |
10026 | case NEON::BI__builtin_neon_vclezh_f16: |
10027 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10028 | return EmitAArch64CompareBuiltinExpr( |
10029 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10030 | ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez"); |
10031 | case NEON::BI__builtin_neon_vcgtzd_s64: |
10032 | case NEON::BI__builtin_neon_vcgtzd_f64: |
10033 | case NEON::BI__builtin_neon_vcgtzs_f32: |
10034 | case NEON::BI__builtin_neon_vcgtzh_f16: |
10035 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10036 | return EmitAArch64CompareBuiltinExpr( |
10037 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10038 | ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz"); |
10039 | case NEON::BI__builtin_neon_vcltzd_s64: |
10040 | case NEON::BI__builtin_neon_vcltzd_f64: |
10041 | case NEON::BI__builtin_neon_vcltzs_f32: |
10042 | case NEON::BI__builtin_neon_vcltzh_f16: |
10043 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10044 | return EmitAArch64CompareBuiltinExpr( |
10045 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10046 | ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz"); |
10047 | |
10048 | case NEON::BI__builtin_neon_vceqzd_u64: { |
10049 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10050 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10051 | Ops[0] = |
10052 | Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty)); |
10053 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd"); |
10054 | } |
10055 | case NEON::BI__builtin_neon_vceqd_f64: |
10056 | case NEON::BI__builtin_neon_vcled_f64: |
10057 | case NEON::BI__builtin_neon_vcltd_f64: |
10058 | case NEON::BI__builtin_neon_vcged_f64: |
10059 | case NEON::BI__builtin_neon_vcgtd_f64: { |
10060 | llvm::CmpInst::Predicate P; |
10061 | switch (BuiltinID) { |
10062 | default: llvm_unreachable("missing builtin ID in switch!"); |
10063 | case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; |
10064 | case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; |
10065 | case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; |
10066 | case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; |
10067 | case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; |
10068 | } |
10069 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10070 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10071 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
10072 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10073 | return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd"); |
10074 | } |
10075 | case NEON::BI__builtin_neon_vceqs_f32: |
10076 | case NEON::BI__builtin_neon_vcles_f32: |
10077 | case NEON::BI__builtin_neon_vclts_f32: |
10078 | case NEON::BI__builtin_neon_vcges_f32: |
10079 | case NEON::BI__builtin_neon_vcgts_f32: { |
10080 | llvm::CmpInst::Predicate P; |
10081 | switch (BuiltinID) { |
10082 | default: llvm_unreachable("missing builtin ID in switch!");
10083 | case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; |
10084 | case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; |
10085 | case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; |
10086 | case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; |
10087 | case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; |
10088 | } |
10089 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10090 | Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); |
10091 | Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy); |
10092 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10093 | return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd"); |
10094 | } |
10095 | case NEON::BI__builtin_neon_vceqh_f16: |
10096 | case NEON::BI__builtin_neon_vcleh_f16: |
10097 | case NEON::BI__builtin_neon_vclth_f16: |
10098 | case NEON::BI__builtin_neon_vcgeh_f16: |
10099 | case NEON::BI__builtin_neon_vcgth_f16: { |
10100 | llvm::CmpInst::Predicate P; |
10101 | switch (BuiltinID) { |
10102 | default: llvm_unreachable("missing builtin ID in switch!");
10103 | case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; |
10104 | case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; |
10105 | case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; |
10106 | case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; |
10107 | case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; |
10108 | } |
10109 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10110 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
10111 | Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy); |
10112 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10113 | return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd"); |
10114 | } |
10115 | case NEON::BI__builtin_neon_vceqd_s64: |
10116 | case NEON::BI__builtin_neon_vceqd_u64: |
10117 | case NEON::BI__builtin_neon_vcgtd_s64: |
10118 | case NEON::BI__builtin_neon_vcgtd_u64: |
10119 | case NEON::BI__builtin_neon_vcltd_s64: |
10120 | case NEON::BI__builtin_neon_vcltd_u64: |
10121 | case NEON::BI__builtin_neon_vcged_u64: |
10122 | case NEON::BI__builtin_neon_vcged_s64: |
10123 | case NEON::BI__builtin_neon_vcled_u64: |
10124 | case NEON::BI__builtin_neon_vcled_s64: { |
10125 | llvm::CmpInst::Predicate P; |
10126 | switch (BuiltinID) { |
10127 | default: llvm_unreachable("missing builtin ID in switch!");
10128 | case NEON::BI__builtin_neon_vceqd_s64: |
10129 | case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
10130 | case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
10131 | case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
10132 | case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
10133 | case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
10134 | case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
10135 | case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
10136 | case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
10137 | case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
10138 | } |
10139 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10140 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10141 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10142 | Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]); |
10143 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd"); |
10144 | } |
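// Added note (not in the original source): the scalar comparisons above all
// follow one shape -- emit an i1 compare, then sign-extend it -- so the result
// is the all-ones / all-zeros lane mask the ACLE requires, e.g.:
//   %cmp = icmp eq i64 %a, %b
//   %res = sext i1 %cmp to i64   ; 0xFFFFFFFFFFFFFFFF when true, 0 when false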
10145 | case NEON::BI__builtin_neon_vtstd_s64: |
10146 | case NEON::BI__builtin_neon_vtstd_u64: { |
10147 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10148 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10149 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10150 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
10151 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
10152 | llvm::Constant::getNullValue(Int64Ty)); |
10153 | return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd"); |
10154 | } |
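// Added note: vtstd reduces to a bit test, (a & b) != 0, again widened to a
// mask via sext. A rough C equivalent, for illustration only:
//   uint64_t vtstd_u64(uint64_t a, uint64_t b) { return (a & b) ? ~0ULL : 0; }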
10155 | case NEON::BI__builtin_neon_vset_lane_i8: |
10156 | case NEON::BI__builtin_neon_vset_lane_i16: |
10157 | case NEON::BI__builtin_neon_vset_lane_i32: |
10158 | case NEON::BI__builtin_neon_vset_lane_i64: |
10159 | case NEON::BI__builtin_neon_vset_lane_bf16: |
10160 | case NEON::BI__builtin_neon_vset_lane_f32: |
10161 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
10162 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
10163 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
10164 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
10165 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
10166 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
10167 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10168 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10169 | case NEON::BI__builtin_neon_vset_lane_f64: |
10170 | // The vector type needs a cast for the v1f64 variant. |
10171 | Ops[1] = |
10172 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1)); |
10173 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10174 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10175 | case NEON::BI__builtin_neon_vsetq_lane_f64: |
10176 | // The vector type needs a cast for the v2f64 variant. |
10177 | Ops[1] = |
10178 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2)); |
10179 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10180 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10181 | |
10182 | case NEON::BI__builtin_neon_vget_lane_i8: |
10183 | case NEON::BI__builtin_neon_vdupb_lane_i8: |
10184 | Ops[0] = |
10185 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8)); |
10186 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10187 | "vget_lane"); |
10188 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
10189 | case NEON::BI__builtin_neon_vdupb_laneq_i8: |
10190 | Ops[0] = |
10191 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16)); |
10192 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10193 | "vgetq_lane"); |
10194 | case NEON::BI__builtin_neon_vget_lane_i16: |
10195 | case NEON::BI__builtin_neon_vduph_lane_i16: |
10196 | Ops[0] = |
10197 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4)); |
10198 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10199 | "vget_lane"); |
10200 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
10201 | case NEON::BI__builtin_neon_vduph_laneq_i16: |
10202 | Ops[0] = |
10203 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8)); |
10204 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10205 | "vgetq_lane"); |
10206 | case NEON::BI__builtin_neon_vget_lane_i32: |
10207 | case NEON::BI__builtin_neon_vdups_lane_i32: |
10208 | Ops[0] = |
10209 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2)); |
10210 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10211 | "vget_lane"); |
10212 | case NEON::BI__builtin_neon_vdups_lane_f32: |
10213 | Ops[0] = |
10214 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2)); |
10215 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10216 | "vdups_lane"); |
10217 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
10218 | case NEON::BI__builtin_neon_vdups_laneq_i32: |
10219 | Ops[0] = |
10220 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
10221 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10222 | "vgetq_lane"); |
10223 | case NEON::BI__builtin_neon_vget_lane_i64: |
10224 | case NEON::BI__builtin_neon_vdupd_lane_i64: |
10225 | Ops[0] = |
10226 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1)); |
10227 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10228 | "vget_lane"); |
10229 | case NEON::BI__builtin_neon_vdupd_lane_f64: |
10230 | Ops[0] = |
10231 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1)); |
10232 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10233 | "vdupd_lane"); |
10234 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
10235 | case NEON::BI__builtin_neon_vdupd_laneq_i64: |
10236 | Ops[0] = |
10237 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
10238 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10239 | "vgetq_lane"); |
10240 | case NEON::BI__builtin_neon_vget_lane_f32: |
10241 | Ops[0] = |
10242 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2)); |
10243 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10244 | "vget_lane"); |
10245 | case NEON::BI__builtin_neon_vget_lane_f64: |
10246 | Ops[0] = |
10247 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1)); |
10248 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10249 | "vget_lane"); |
10250 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
10251 | case NEON::BI__builtin_neon_vdups_laneq_f32: |
10252 | Ops[0] = |
10253 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4)); |
10254 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10255 | "vgetq_lane"); |
10256 | case NEON::BI__builtin_neon_vgetq_lane_f64: |
10257 | case NEON::BI__builtin_neon_vdupd_laneq_f64: |
10258 | Ops[0] = |
10259 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2)); |
10260 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10261 | "vgetq_lane"); |
10262 | case NEON::BI__builtin_neon_vaddh_f16: |
10263 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10264 | return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh"); |
10265 | case NEON::BI__builtin_neon_vsubh_f16: |
10266 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10267 | return Builder.CreateFSub(Ops[0], Ops[1], "vsubh"); |
10268 | case NEON::BI__builtin_neon_vmulh_f16: |
10269 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10270 | return Builder.CreateFMul(Ops[0], Ops[1], "vmulh"); |
10271 | case NEON::BI__builtin_neon_vdivh_f16: |
10272 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10273 | return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh"); |
10274 | case NEON::BI__builtin_neon_vfmah_f16: |
10275 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
10276 | return emitCallMaybeConstrainedFPBuiltin( |
10277 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy, |
10278 | {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]}); |
10279 | case NEON::BI__builtin_neon_vfmsh_f16: { |
10280 | // FIXME: This should be an fneg instruction: |
10281 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy); |
10282 | Value *Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
10283 | |
10284 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
10285 | return emitCallMaybeConstrainedFPBuiltin( |
10286 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy, |
10287 | {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]}); |
10288 | } |
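// Added sketch (not in the original source): the fneg form the FIXME above
// asks for could plausibly look like the following, keeping the same operand
// ordering as the fsub version; untested here:
//   Value *Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vnegh");
//   return emitCallMaybeConstrainedFPBuiltin(
//       *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
//       HalfTy, {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});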
10289 | case NEON::BI__builtin_neon_vaddd_s64: |
10290 | case NEON::BI__builtin_neon_vaddd_u64: |
10291 | return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); |
10292 | case NEON::BI__builtin_neon_vsubd_s64: |
10293 | case NEON::BI__builtin_neon_vsubd_u64: |
10294 | return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); |
10295 | case NEON::BI__builtin_neon_vqdmlalh_s16: |
10296 | case NEON::BI__builtin_neon_vqdmlslh_s16: { |
10297 | SmallVector<Value *, 2> ProductOps; |
10298 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
10299 | ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); |
10300 | auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); |
10301 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
10302 | ProductOps, "vqdmlXl"); |
10303 | Constant *CI = ConstantInt::get(SizeTy, 0); |
10304 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
10305 | |
10306 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 |
10307 | ? Intrinsic::aarch64_neon_sqadd |
10308 | : Intrinsic::aarch64_neon_sqsub; |
10309 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); |
10310 | } |
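// Added note: there is no single scalar intrinsic for vqdmlXl_s16, so it is
// assembled from pieces -- wrap each i16 operand into a v4i16, run a full
// sqdmull to v4i32, pull lane 0 back out, then apply the saturating
// accumulate (sqadd or sqsub) on the i32 scalar.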
10311 | case NEON::BI__builtin_neon_vqshlud_n_s64: { |
10312 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10313 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
10314 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), |
10315 | Ops, "vqshlu_n"); |
10316 | } |
10317 | case NEON::BI__builtin_neon_vqshld_n_u64: |
10318 | case NEON::BI__builtin_neon_vqshld_n_s64: { |
10319 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 |
10320 | ? Intrinsic::aarch64_neon_uqshl |
10321 | : Intrinsic::aarch64_neon_sqshl; |
10322 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10323 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
10324 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); |
10325 | } |
10326 | case NEON::BI__builtin_neon_vrshrd_n_u64: |
10327 | case NEON::BI__builtin_neon_vrshrd_n_s64: { |
10328 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 |
10329 | ? Intrinsic::aarch64_neon_urshl |
10330 | : Intrinsic::aarch64_neon_srshl; |
10331 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10332 | int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); |
10333 | Ops[1] = ConstantInt::get(Int64Ty, -SV); |
10334 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n"); |
10335 | } |
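// Added note: no rounding-right-shift intrinsic is used here; the immediate
// is negated and fed to the rounding left shift instead, relying on u/srshl
// shifting right (with rounding) for negative amounts, i.e.
// vrshrd_n(x, n) == urshl(x, -n).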
10336 | case NEON::BI__builtin_neon_vrsrad_n_u64: |
10337 | case NEON::BI__builtin_neon_vrsrad_n_s64: { |
10338 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 |
10339 | ? Intrinsic::aarch64_neon_urshl |
10340 | : Intrinsic::aarch64_neon_srshl; |
10341 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10342 | Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); |
10343 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), |
10344 | {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)}); |
10345 | return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty)); |
10346 | } |
10347 | case NEON::BI__builtin_neon_vshld_n_s64: |
10348 | case NEON::BI__builtin_neon_vshld_n_u64: { |
10349 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10350 | return Builder.CreateShl( |
10351 | Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); |
10352 | } |
10353 | case NEON::BI__builtin_neon_vshrd_n_s64: { |
10354 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10355 | return Builder.CreateAShr( |
10356 | Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
10357 | Amt->getZExtValue())), |
10358 | "shrd_n"); |
10359 | } |
10360 | case NEON::BI__builtin_neon_vshrd_n_u64: { |
10361 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10362 | uint64_t ShiftAmt = Amt->getZExtValue(); |
10363 | // Right-shifting an unsigned value by its size yields 0. |
10364 | if (ShiftAmt == 64) |
10365 | return ConstantInt::get(Int64Ty, 0); |
10366 | return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt), |
10367 | "shrd_n"); |
10368 | } |
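// Added note: an LLVM IR shift by >= the bit width is poison, which is why
// the signed form clamps the immediate to 63 while the unsigned form folds a
// shift by 64 to the constant 0 instead of emitting the lshr directly.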
10369 | case NEON::BI__builtin_neon_vsrad_n_s64: { |
10370 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
10371 | Ops[1] = Builder.CreateAShr( |
10372 | Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
10373 | Amt->getZExtValue())), |
10374 | "shrd_n"); |
10375 | return Builder.CreateAdd(Ops[0], Ops[1]); |
10376 | } |
10377 | case NEON::BI__builtin_neon_vsrad_n_u64: { |
10378 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
10379 | uint64_t ShiftAmt = Amt->getZExtValue(); |
10380 | // Right-shifting an unsigned value by its size yields 0. |
10381 | // As Op + 0 = Op, return Ops[0] directly. |
10382 | if (ShiftAmt == 64) |
10383 | return Ops[0]; |
10384 | Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt), |
10385 | "shrd_n"); |
10386 | return Builder.CreateAdd(Ops[0], Ops[1]); |
10387 | } |
10388 | case NEON::BI__builtin_neon_vqdmlalh_lane_s16: |
10389 | case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: |
10390 | case NEON::BI__builtin_neon_vqdmlslh_lane_s16: |
10391 | case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { |
10392 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
10393 | "lane"); |
10394 | SmallVector<Value *, 2> ProductOps; |
10395 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
10396 | ProductOps.push_back(vectorWrapScalar16(Ops[2])); |
10397 | auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); |
10398 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
10399 | ProductOps, "vqdmlXl"); |
10400 | Constant *CI = ConstantInt::get(SizeTy, 0); |
10401 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
10402 | Ops.pop_back(); |
10403 | |
10404 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || |
10405 | BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) |
10406 | ? Intrinsic::aarch64_neon_sqadd |
10407 | : Intrinsic::aarch64_neon_sqsub; |
10408 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl"); |
10409 | } |
10410 | case NEON::BI__builtin_neon_vqdmlals_s32: |
10411 | case NEON::BI__builtin_neon_vqdmlsls_s32: { |
10412 | SmallVector<Value *, 2> ProductOps; |
10413 | ProductOps.push_back(Ops[1]); |
10414 | ProductOps.push_back(EmitScalarExpr(E->getArg(2))); |
10415 | Ops[1] = |
10416 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
10417 | ProductOps, "vqdmlXl"); |
10418 | |
10419 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 |
10420 | ? Intrinsic::aarch64_neon_sqadd |
10421 | : Intrinsic::aarch64_neon_sqsub; |
10422 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); |
10423 | } |
10424 | case NEON::BI__builtin_neon_vqdmlals_lane_s32: |
10425 | case NEON::BI__builtin_neon_vqdmlals_laneq_s32: |
10426 | case NEON::BI__builtin_neon_vqdmlsls_lane_s32: |
10427 | case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { |
10428 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
10429 | "lane"); |
10430 | SmallVector<Value *, 2> ProductOps; |
10431 | ProductOps.push_back(Ops[1]); |
10432 | ProductOps.push_back(Ops[2]); |
10433 | Ops[1] = |
10434 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
10435 | ProductOps, "vqdmlXl"); |
10436 | Ops.pop_back(); |
10437 | |
10438 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || |
10439 | BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) |
10440 | ? Intrinsic::aarch64_neon_sqadd |
10441 | : Intrinsic::aarch64_neon_sqsub; |
10442 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl"); |
10443 | } |
10444 | case NEON::BI__builtin_neon_vget_lane_bf16: |
10445 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
10446 | case NEON::BI__builtin_neon_vduph_lane_f16: { |
10447 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10448 | "vget_lane"); |
10449 | } |
10450 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
10451 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
10452 | case NEON::BI__builtin_neon_vduph_laneq_f16: { |
10453 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10454 | "vgetq_lane"); |
10455 | } |
10456 | |
10457 | case AArch64::BI_InterlockedAdd: { |
10458 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
10459 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
10460 | AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
10461 | AtomicRMWInst::Add, Arg0, Arg1, |
10462 | llvm::AtomicOrdering::SequentiallyConsistent); |
10463 | return Builder.CreateAdd(RMWI, Arg1); |
10464 | } |
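// Added note: atomicrmw add returns the value memory held *before* the
// update, so the trailing CreateAdd reconstructs the post-increment value
// that _InterlockedAdd is documented to return.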
10465 | } |
10466 | |
10467 | llvm::FixedVectorType *VTy = GetNeonType(this, Type); |
10468 | llvm::Type *Ty = VTy; |
10469 | if (!Ty) |
10470 | return nullptr; |
10471 | |
10472 | // Not all intrinsics handled by the common case work for AArch64 yet, so only |
10473 | // defer to common code if it's been added to our special map. |
10474 | Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, |
10475 | AArch64SIMDIntrinsicsProvenSorted); |
10476 | |
10477 | if (Builtin) |
10478 | return EmitCommonNeonBuiltinExpr( |
10479 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
10480 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, |
10481 | /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); |
10482 | |
10483 | if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) |
10484 | return V; |
10485 | |
10486 | unsigned Int; |
10487 | switch (BuiltinID) { |
10488 | default: return nullptr; |
10489 | case NEON::BI__builtin_neon_vbsl_v: |
10490 | case NEON::BI__builtin_neon_vbslq_v: { |
10491 | llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); |
10492 | Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl"); |
10493 | Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl"); |
10494 | Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl"); |
10495 | |
10496 | Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl"); |
10497 | Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl"); |
10498 | Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl"); |
10499 | return Builder.CreateBitCast(Ops[0], Ty); |
10500 | } |
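// Added note: vbsl is open-coded as the classic bitwise select,
//   result = (mask & a) | (~mask & b),
// computed on the integer view of the lanes and bitcast back at the end.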
10501 | case NEON::BI__builtin_neon_vfma_lane_v: |
10502 | case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types |
10503 | // The ARM builtins (and instructions) have the addend as the first |
10504 | // operand, but the 'fma' intrinsics have it last. Swap it around here. |
10505 | Value *Addend = Ops[0]; |
10506 | Value *Multiplicand = Ops[1]; |
10507 | Value *LaneSource = Ops[2]; |
10508 | Ops[0] = Multiplicand; |
10509 | Ops[1] = LaneSource; |
10510 | Ops[2] = Addend; |
10511 | |
10512 | // Now adjust things to handle the lane access. |
10513 | auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v |
10514 | ? llvm::FixedVectorType::get(VTy->getElementType(), |
10515 | VTy->getNumElements() / 2) |
10516 | : VTy; |
10517 | llvm::Constant *cst = cast<Constant>(Ops[3]); |
10518 | Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst); |
10519 | Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy); |
10520 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane"); |
10521 | |
10522 | Ops.pop_back(); |
10523 | Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma |
10524 | : Intrinsic::fma; |
10525 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla"); |
10526 | } |
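// Added note: the lane access above is lowered as a shufflevector whose mask
// is a constant splat of the lane index, so every element of the result reads
// the selected lane; the fma then consumes the splatted vector directly.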
10527 | case NEON::BI__builtin_neon_vfma_laneq_v: { |
10528 | auto *VTy = cast<llvm::FixedVectorType>(Ty); |
10529 | // v1f64 fma should be mapped to Neon scalar f64 fma |
10530 | if (VTy && VTy->getElementType() == DoubleTy) { |
10531 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10532 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
10533 | llvm::FixedVectorType *VTy = |
10534 | GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true)); |
10535 | Ops[2] = Builder.CreateBitCast(Ops[2], VTy); |
10536 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
10537 | Value *Result; |
10538 | Result = emitCallMaybeConstrainedFPBuiltin( |
10539 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, |
10540 | DoubleTy, {Ops[1], Ops[2], Ops[0]}); |
10541 | return Builder.CreateBitCast(Result, Ty); |
10542 | } |
10543 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10544 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10545 | |
10546 | auto *STy = llvm::FixedVectorType::get(VTy->getElementType(), |
10547 | VTy->getNumElements() * 2); |
10548 | Ops[2] = Builder.CreateBitCast(Ops[2], STy); |
10549 | Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), |
10550 | cast<ConstantInt>(Ops[3])); |
10551 | Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane"); |
10552 | |
10553 | return emitCallMaybeConstrainedFPBuiltin( |
10554 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10555 | {Ops[2], Ops[1], Ops[0]}); |
10556 | } |
10557 | case NEON::BI__builtin_neon_vfmaq_laneq_v: { |
10558 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10559 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10560 | |
10561 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
10562 | Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3])); |
10563 | return emitCallMaybeConstrainedFPBuiltin( |
10564 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10565 | {Ops[2], Ops[1], Ops[0]}); |
10566 | } |
10567 | case NEON::BI__builtin_neon_vfmah_lane_f16: |
10568 | case NEON::BI__builtin_neon_vfmas_lane_f32: |
10569 | case NEON::BI__builtin_neon_vfmah_laneq_f16: |
10570 | case NEON::BI__builtin_neon_vfmas_laneq_f32: |
10571 | case NEON::BI__builtin_neon_vfmad_lane_f64: |
10572 | case NEON::BI__builtin_neon_vfmad_laneq_f64: { |
10573 | Ops.push_back(EmitScalarExpr(E->getArg(3))); |
10574 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
10575 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
10576 | return emitCallMaybeConstrainedFPBuiltin( |
10577 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10578 | {Ops[1], Ops[2], Ops[0]}); |
10579 | } |
10580 | case NEON::BI__builtin_neon_vmull_v: |
10581 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10582 | Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; |
10583 | if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; |
10584 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
10585 | case NEON::BI__builtin_neon_vmax_v: |
10586 | case NEON::BI__builtin_neon_vmaxq_v: |
10587 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10588 | Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; |
10589 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; |
10590 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); |
10591 | case NEON::BI__builtin_neon_vmaxh_f16: { |
10592 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10593 | Int = Intrinsic::aarch64_neon_fmax; |
10594 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax"); |
10595 | } |
10596 | case NEON::BI__builtin_neon_vmin_v: |
10597 | case NEON::BI__builtin_neon_vminq_v: |
10598 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10599 | Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; |
10600 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; |
10601 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); |
10602 | case NEON::BI__builtin_neon_vminh_f16: { |
10603 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10604 | Int = Intrinsic::aarch64_neon_fmin; |
10605 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin"); |
10606 | } |
10607 | case NEON::BI__builtin_neon_vabd_v: |
10608 | case NEON::BI__builtin_neon_vabdq_v: |
10609 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10610 | Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; |
10611 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; |
10612 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); |
10613 | case NEON::BI__builtin_neon_vpadal_v: |
10614 | case NEON::BI__builtin_neon_vpadalq_v: { |
10615 | unsigned ArgElts = VTy->getNumElements(); |
10616 | llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); |
10617 | unsigned BitWidth = EltTy->getBitWidth(); |
10618 | auto *ArgTy = llvm::FixedVectorType::get( |
10619 | llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts); |
10620 | llvm::Type* Tys[2] = { VTy, ArgTy }; |
10621 | Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; |
10622 | SmallVector<llvm::Value*, 1> TmpOps; |
10623 | TmpOps.push_back(Ops[1]); |
10624 | Function *F = CGM.getIntrinsic(Int, Tys); |
10625 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal"); |
10626 | llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); |
10627 | return Builder.CreateAdd(tmp, addend); |
10628 | } |
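// Added note: vpadal (pairwise add and accumulate long) has no accumulating
// intrinsic here, so the code emits u/saddlp for the pairwise widening add
// and folds the accumulator in with an ordinary vector add.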
10629 | case NEON::BI__builtin_neon_vpmin_v: |
10630 | case NEON::BI__builtin_neon_vpminq_v: |
10631 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10632 | Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; |
10633 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; |
10634 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); |
10635 | case NEON::BI__builtin_neon_vpmax_v: |
10636 | case NEON::BI__builtin_neon_vpmaxq_v: |
10637 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
10638 | Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; |
10639 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; |
10640 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); |
10641 | case NEON::BI__builtin_neon_vminnm_v: |
10642 | case NEON::BI__builtin_neon_vminnmq_v: |
10643 | Int = Intrinsic::aarch64_neon_fminnm; |
10644 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); |
10645 | case NEON::BI__builtin_neon_vminnmh_f16: |
10646 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10647 | Int = Intrinsic::aarch64_neon_fminnm; |
10648 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm"); |
10649 | case NEON::BI__builtin_neon_vmaxnm_v: |
10650 | case NEON::BI__builtin_neon_vmaxnmq_v: |
10651 | Int = Intrinsic::aarch64_neon_fmaxnm; |
10652 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); |
10653 | case NEON::BI__builtin_neon_vmaxnmh_f16: |
10654 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10655 | Int = Intrinsic::aarch64_neon_fmaxnm; |
10656 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm"); |
10657 | case NEON::BI__builtin_neon_vrecpss_f32: { |
10658 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10659 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), |
10660 | Ops, "vrecps"); |
10661 | } |
10662 | case NEON::BI__builtin_neon_vrecpsd_f64: |
10663 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10664 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), |
10665 | Ops, "vrecps"); |
10666 | case NEON::BI__builtin_neon_vrecpsh_f16: |
10667 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10668 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), |
10669 | Ops, "vrecps"); |
10670 | case NEON::BI__builtin_neon_vqshrun_n_v: |
10671 | Int = Intrinsic::aarch64_neon_sqshrun; |
10672 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); |
10673 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
10674 | Int = Intrinsic::aarch64_neon_sqrshrun; |
10675 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); |
10676 | case NEON::BI__builtin_neon_vqshrn_n_v: |
10677 | Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; |
10678 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); |
10679 | case NEON::BI__builtin_neon_vrshrn_n_v: |
10680 | Int = Intrinsic::aarch64_neon_rshrn; |
10681 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); |
10682 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
10683 | Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; |
10684 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); |
10685 | case NEON::BI__builtin_neon_vrndah_f16: { |
10686 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10687 | Int = Builder.getIsFPConstrained() |
10688 | ? Intrinsic::experimental_constrained_round |
10689 | : Intrinsic::round; |
10690 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda"); |
10691 | } |
10692 | case NEON::BI__builtin_neon_vrnda_v: |
10693 | case NEON::BI__builtin_neon_vrndaq_v: { |
10694 | Int = Builder.getIsFPConstrained() |
10695 | ? Intrinsic::experimental_constrained_round |
10696 | : Intrinsic::round; |
10697 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda"); |
10698 | } |
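// Added note: each vrndX flavor maps onto a generic LLVM rounding intrinsic
// (round, nearbyint, floor, roundeven, ceil, rint, trunc), switching to the
// experimental_constrained_* twin under strict FP, so the cases below repeat
// the same pattern with only the intrinsic ID changing.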
10699 | case NEON::BI__builtin_neon_vrndih_f16: { |
10700 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10701 | Int = Builder.getIsFPConstrained() |
10702 | ? Intrinsic::experimental_constrained_nearbyint |
10703 | : Intrinsic::nearbyint; |
10704 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi"); |
10705 | } |
10706 | case NEON::BI__builtin_neon_vrndmh_f16: { |
10707 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10708 | Int = Builder.getIsFPConstrained() |
10709 | ? Intrinsic::experimental_constrained_floor |
10710 | : Intrinsic::floor; |
10711 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm"); |
10712 | } |
10713 | case NEON::BI__builtin_neon_vrndm_v: |
10714 | case NEON::BI__builtin_neon_vrndmq_v: { |
10715 | Int = Builder.getIsFPConstrained() |
10716 | ? Intrinsic::experimental_constrained_floor |
10717 | : Intrinsic::floor; |
10718 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm"); |
10719 | } |
10720 | case NEON::BI__builtin_neon_vrndnh_f16: { |
10721 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10722 | Int = Builder.getIsFPConstrained() |
10723 | ? Intrinsic::experimental_constrained_roundeven |
10724 | : Intrinsic::roundeven; |
10725 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn"); |
10726 | } |
10727 | case NEON::BI__builtin_neon_vrndn_v: |
10728 | case NEON::BI__builtin_neon_vrndnq_v: { |
10729 | Int = Builder.getIsFPConstrained() |
10730 | ? Intrinsic::experimental_constrained_roundeven |
10731 | : Intrinsic::roundeven; |
10732 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn"); |
10733 | } |
10734 | case NEON::BI__builtin_neon_vrndns_f32: { |
10735 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10736 | Int = Builder.getIsFPConstrained() |
10737 | ? Intrinsic::experimental_constrained_roundeven |
10738 | : Intrinsic::roundeven; |
10739 | return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn"); |
10740 | } |
10741 | case NEON::BI__builtin_neon_vrndph_f16: { |
10742 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10743 | Int = Builder.getIsFPConstrained() |
10744 | ? Intrinsic::experimental_constrained_ceil |
10745 | : Intrinsic::ceil; |
10746 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp"); |
10747 | } |
10748 | case NEON::BI__builtin_neon_vrndp_v: |
10749 | case NEON::BI__builtin_neon_vrndpq_v: { |
10750 | Int = Builder.getIsFPConstrained() |
10751 | ? Intrinsic::experimental_constrained_ceil |
10752 | : Intrinsic::ceil; |
10753 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp"); |
10754 | } |
10755 | case NEON::BI__builtin_neon_vrndxh_f16: { |
10756 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10757 | Int = Builder.getIsFPConstrained() |
10758 | ? Intrinsic::experimental_constrained_rint |
10759 | : Intrinsic::rint; |
10760 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx"); |
10761 | } |
10762 | case NEON::BI__builtin_neon_vrndx_v: |
10763 | case NEON::BI__builtin_neon_vrndxq_v: { |
10764 | Int = Builder.getIsFPConstrained() |
10765 | ? Intrinsic::experimental_constrained_rint |
10766 | : Intrinsic::rint; |
10767 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx"); |
10768 | } |
10769 | case NEON::BI__builtin_neon_vrndh_f16: { |
10770 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10771 | Int = Builder.getIsFPConstrained() |
10772 | ? Intrinsic::experimental_constrained_trunc |
10773 | : Intrinsic::trunc; |
10774 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz"); |
10775 | } |
10776 | case NEON::BI__builtin_neon_vrnd32x_v: |
10777 | case NEON::BI__builtin_neon_vrnd32xq_v: { |
10778 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10779 | Int = Intrinsic::aarch64_neon_frint32x; |
10780 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x"); |
10781 | } |
10782 | case NEON::BI__builtin_neon_vrnd32z_v: |
10783 | case NEON::BI__builtin_neon_vrnd32zq_v: { |
10784 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10785 | Int = Intrinsic::aarch64_neon_frint32z; |
10786 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z"); |
10787 | } |
10788 | case NEON::BI__builtin_neon_vrnd64x_v: |
10789 | case NEON::BI__builtin_neon_vrnd64xq_v: { |
10790 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10791 | Int = Intrinsic::aarch64_neon_frint64x; |
10792 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x"); |
10793 | } |
10794 | case NEON::BI__builtin_neon_vrnd64z_v: |
10795 | case NEON::BI__builtin_neon_vrnd64zq_v: { |
10796 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10797 | Int = Intrinsic::aarch64_neon_frint64z; |
10798 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z"); |
10799 | } |
10800 | case NEON::BI__builtin_neon_vrnd_v: |
10801 | case NEON::BI__builtin_neon_vrndq_v: { |
10802 | Int = Builder.getIsFPConstrained() |
10803 | ? Intrinsic::experimental_constrained_trunc |
10804 | : Intrinsic::trunc; |
10805 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz"); |
10806 | } |
10807 | case NEON::BI__builtin_neon_vcvt_f64_v: |
10808 | case NEON::BI__builtin_neon_vcvtq_f64_v: |
10809 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10810 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad)); |
10811 | return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
10812 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
10813 | case NEON::BI__builtin_neon_vcvt_f64_f32: { |
10814 | assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10815 | "unexpected vcvt_f64_f32 builtin");
10816 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false); |
10817 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
10818 | |
10819 | return Builder.CreateFPExt(Ops[0], Ty, "vcvt"); |
10820 | } |
10821 | case NEON::BI__builtin_neon_vcvt_f32_f64: { |
10822 | assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10823 | "unexpected vcvt_f32_f64 builtin");
10824 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true); |
10825 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
10826 | |
10827 | return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt"); |
10828 | } |
10829 | case NEON::BI__builtin_neon_vcvt_s32_v: |
10830 | case NEON::BI__builtin_neon_vcvt_u32_v: |
10831 | case NEON::BI__builtin_neon_vcvt_s64_v: |
10832 | case NEON::BI__builtin_neon_vcvt_u64_v: |
10833 | case NEON::BI__builtin_neon_vcvt_s16_v: |
10834 | case NEON::BI__builtin_neon_vcvt_u16_v: |
10835 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
10836 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
10837 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
10838 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
10839 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
10840 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
10841 | Int = |
10842 | usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs; |
10843 | llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; |
10844 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz"); |
10845 | } |
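// Added note: the plain vcvt*_{s,u}NN conversions use fcvtzs/fcvtzu (round
// toward zero); the vcvta/vcvtm/vcvtn/vcvtp families below differ only in
// rounding mode (ties-away, toward -inf, ties-to-even, toward +inf).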
10846 | case NEON::BI__builtin_neon_vcvta_s16_v: |
10847 | case NEON::BI__builtin_neon_vcvta_u16_v: |
10848 | case NEON::BI__builtin_neon_vcvta_s32_v: |
10849 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
10850 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
10851 | case NEON::BI__builtin_neon_vcvta_u32_v: |
10852 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
10853 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
10854 | case NEON::BI__builtin_neon_vcvta_s64_v: |
10855 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
10856 | case NEON::BI__builtin_neon_vcvta_u64_v: |
10857 | case NEON::BI__builtin_neon_vcvtaq_u64_v: { |
10858 | Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; |
10859 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10860 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta"); |
10861 | } |
10862 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
10863 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
10864 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
10865 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
10866 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
10867 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
10868 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
10869 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
10870 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
10871 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
10872 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
10873 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
10874 | Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; |
10875 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10876 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm"); |
10877 | } |
10878 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
10879 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
10880 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
10881 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
10882 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
10883 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
10884 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
10885 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
10886 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
10887 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
10888 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
10889 | case NEON::BI__builtin_neon_vcvtnq_u64_v: { |
10890 | Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; |
10891 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10892 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn"); |
10893 | } |
10894 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
10895 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
10896 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
10897 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
10898 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
10899 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
10900 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
10901 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
10902 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
10903 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
10904 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
10905 | case NEON::BI__builtin_neon_vcvtpq_u64_v: { |
10906 | Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; |
10907 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10908 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp"); |
10909 | } |
10910 | case NEON::BI__builtin_neon_vmulx_v: |
10911 | case NEON::BI__builtin_neon_vmulxq_v: { |
10912 | Int = Intrinsic::aarch64_neon_fmulx; |
10913 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); |
10914 | } |
10915 | case NEON::BI__builtin_neon_vmulxh_lane_f16: |
10916 | case NEON::BI__builtin_neon_vmulxh_laneq_f16: { |
10917 | // vmulx_lane should be mapped to Neon scalar mulx after |
10918 | // extracting the scalar element |
10919 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10920 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
10921 | Ops.pop_back(); |
10922 | Int = Intrinsic::aarch64_neon_fmulx; |
10923 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx"); |
10924 | } |
10925 | case NEON::BI__builtin_neon_vmul_lane_v: |
10926 | case NEON::BI__builtin_neon_vmul_laneq_v: { |
10927 | // v1f64 vmul_lane should be mapped to Neon scalar mul lane |
10928 | bool Quad = false; |
10929 | if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v) |
10930 | Quad = true; |
10931 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10932 | llvm::FixedVectorType *VTy = |
10933 | GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad)); |
10934 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
10935 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
10936 | Value *Result = Builder.CreateFMul(Ops[0], Ops[1]); |
10937 | return Builder.CreateBitCast(Result, Ty); |
10938 | } |
10939 | case NEON::BI__builtin_neon_vnegd_s64: |
10940 | return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); |
10941 | case NEON::BI__builtin_neon_vnegh_f16: |
10942 | return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh"); |
10943 | case NEON::BI__builtin_neon_vpmaxnm_v: |
10944 | case NEON::BI__builtin_neon_vpmaxnmq_v: { |
10945 | Int = Intrinsic::aarch64_neon_fmaxnmp; |
10946 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); |
10947 | } |
10948 | case NEON::BI__builtin_neon_vpminnm_v: |
10949 | case NEON::BI__builtin_neon_vpminnmq_v: { |
10950 | Int = Intrinsic::aarch64_neon_fminnmp; |
10951 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); |
10952 | } |
10953 | case NEON::BI__builtin_neon_vsqrth_f16: { |
10954 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10955 | Int = Builder.getIsFPConstrained() |
10956 | ? Intrinsic::experimental_constrained_sqrt |
10957 | : Intrinsic::sqrt; |
10958 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt"); |
10959 | } |
10960 | case NEON::BI__builtin_neon_vsqrt_v: |
10961 | case NEON::BI__builtin_neon_vsqrtq_v: { |
10962 | Int = Builder.getIsFPConstrained() |
10963 | ? Intrinsic::experimental_constrained_sqrt |
10964 | : Intrinsic::sqrt; |
10965 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10966 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt"); |
10967 | } |
10968 | case NEON::BI__builtin_neon_vrbit_v: |
10969 | case NEON::BI__builtin_neon_vrbitq_v: { |
10970 | Int = Intrinsic::bitreverse; |
10971 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit"); |
10972 | } |
10973 | case NEON::BI__builtin_neon_vaddv_u8: |
10974 | // FIXME: These are handled by the AArch64 scalar code. |
10975 | usgn = true; |
10976 | LLVM_FALLTHROUGH;
10977 | case NEON::BI__builtin_neon_vaddv_s8: { |
10978 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
10979 | Ty = Int32Ty; |
10980 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
10981 | llvm::Type *Tys[2] = { Ty, VTy }; |
10982 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10983 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
10984 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
10985 | } |
10986 | case NEON::BI__builtin_neon_vaddv_u16: |
10987 | usgn = true; |
10988 | LLVM_FALLTHROUGH;
10989 | case NEON::BI__builtin_neon_vaddv_s16: { |
10990 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
10991 | Ty = Int32Ty; |
10992 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
10993 | llvm::Type *Tys[2] = { Ty, VTy }; |
10994 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10995 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
10996 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
10997 | } |
10998 | case NEON::BI__builtin_neon_vaddvq_u8: |
10999 | usgn = true; |
11000 | LLVM_FALLTHROUGH;
11001 | case NEON::BI__builtin_neon_vaddvq_s8: { |
11002 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11003 | Ty = Int32Ty; |
11004 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11005 | llvm::Type *Tys[2] = { Ty, VTy }; |
11006 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11007 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11008 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11009 | } |
11010 | case NEON::BI__builtin_neon_vaddvq_u16: |
11011 | usgn = true; |
11012 | LLVM_FALLTHROUGH;
11013 | case NEON::BI__builtin_neon_vaddvq_s16: { |
11014 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11015 | Ty = Int32Ty; |
11016 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11017 | llvm::Type *Tys[2] = { Ty, VTy }; |
11018 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11019 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11020 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11021 | } |
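// Added note: the across-lanes reductions (vaddv, vmaxv, vminv, ...) below
// all follow one template -- the aarch64_neon_*v intrinsic yields a widened
// result (i32 for the integer forms), which is then truncated back to the
// element width the builtin returns (a no-op for the f16 cases, where the
// types already match).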
11022 | case NEON::BI__builtin_neon_vmaxv_u8: { |
11023 | Int = Intrinsic::aarch64_neon_umaxv; |
11024 | Ty = Int32Ty; |
11025 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11026 | llvm::Type *Tys[2] = { Ty, VTy }; |
11027 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11028 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11029 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11030 | } |
11031 | case NEON::BI__builtin_neon_vmaxv_u16: { |
11032 | Int = Intrinsic::aarch64_neon_umaxv; |
11033 | Ty = Int32Ty; |
11034 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11035 | llvm::Type *Tys[2] = { Ty, VTy }; |
11036 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11037 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11038 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11039 | } |
11040 | case NEON::BI__builtin_neon_vmaxvq_u8: { |
11041 | Int = Intrinsic::aarch64_neon_umaxv; |
11042 | Ty = Int32Ty; |
11043 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11044 | llvm::Type *Tys[2] = { Ty, VTy }; |
11045 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11046 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11047 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11048 | } |
11049 | case NEON::BI__builtin_neon_vmaxvq_u16: { |
11050 | Int = Intrinsic::aarch64_neon_umaxv; |
11051 | Ty = Int32Ty; |
11052 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11053 | llvm::Type *Tys[2] = { Ty, VTy }; |
11054 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11055 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11056 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11057 | } |
11058 | case NEON::BI__builtin_neon_vmaxv_s8: { |
11059 | Int = Intrinsic::aarch64_neon_smaxv; |
11060 | Ty = Int32Ty; |
11061 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11062 | llvm::Type *Tys[2] = { Ty, VTy }; |
11063 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11064 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11065 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11066 | } |
11067 | case NEON::BI__builtin_neon_vmaxv_s16: { |
11068 | Int = Intrinsic::aarch64_neon_smaxv; |
11069 | Ty = Int32Ty; |
11070 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11071 | llvm::Type *Tys[2] = { Ty, VTy }; |
11072 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11073 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11074 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11075 | } |
11076 | case NEON::BI__builtin_neon_vmaxvq_s8: { |
11077 | Int = Intrinsic::aarch64_neon_smaxv; |
11078 | Ty = Int32Ty; |
11079 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11080 | llvm::Type *Tys[2] = { Ty, VTy }; |
11081 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11082 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11083 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11084 | } |
11085 | case NEON::BI__builtin_neon_vmaxvq_s16: { |
11086 | Int = Intrinsic::aarch64_neon_smaxv; |
11087 | Ty = Int32Ty; |
11088 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11089 | llvm::Type *Tys[2] = { Ty, VTy }; |
11090 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11091 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11092 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11093 | } |
11094 | case NEON::BI__builtin_neon_vmaxv_f16: { |
11095 | Int = Intrinsic::aarch64_neon_fmaxv; |
11096 | Ty = HalfTy; |
11097 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11098 | llvm::Type *Tys[2] = { Ty, VTy }; |
11099 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11100 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11101 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11102 | } |
11103 | case NEON::BI__builtin_neon_vmaxvq_f16: { |
11104 | Int = Intrinsic::aarch64_neon_fmaxv; |
11105 | Ty = HalfTy; |
11106 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11107 | llvm::Type *Tys[2] = { Ty, VTy }; |
11108 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11109 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11110 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11111 | } |
11112 | case NEON::BI__builtin_neon_vminv_u8: { |
11113 | Int = Intrinsic::aarch64_neon_uminv; |
11114 | Ty = Int32Ty; |
11115 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11116 | llvm::Type *Tys[2] = { Ty, VTy }; |
11117 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11118 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11119 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11120 | } |
11121 | case NEON::BI__builtin_neon_vminv_u16: { |
11122 | Int = Intrinsic::aarch64_neon_uminv; |
11123 | Ty = Int32Ty; |
11124 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11125 | llvm::Type *Tys[2] = { Ty, VTy }; |
11126 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11127 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11128 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11129 | } |
11130 | case NEON::BI__builtin_neon_vminvq_u8: { |
11131 | Int = Intrinsic::aarch64_neon_uminv; |
11132 | Ty = Int32Ty; |
11133 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11134 | llvm::Type *Tys[2] = { Ty, VTy }; |
11135 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11136 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11137 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11138 | } |
11139 | case NEON::BI__builtin_neon_vminvq_u16: { |
11140 | Int = Intrinsic::aarch64_neon_uminv; |
11141 | Ty = Int32Ty; |
11142 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11143 | llvm::Type *Tys[2] = { Ty, VTy }; |
11144 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11145 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11146 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11147 | } |
11148 | case NEON::BI__builtin_neon_vminv_s8: { |
11149 | Int = Intrinsic::aarch64_neon_sminv; |
11150 | Ty = Int32Ty; |
11151 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11152 | llvm::Type *Tys[2] = { Ty, VTy }; |
11153 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11154 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11155 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11156 | } |
11157 | case NEON::BI__builtin_neon_vminv_s16: { |
11158 | Int = Intrinsic::aarch64_neon_sminv; |
11159 | Ty = Int32Ty; |
11160 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11161 | llvm::Type *Tys[2] = { Ty, VTy }; |
11162 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11163 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11164 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11165 | } |
11166 | case NEON::BI__builtin_neon_vminvq_s8: { |
11167 | Int = Intrinsic::aarch64_neon_sminv; |
11168 | Ty = Int32Ty; |
11169 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11170 | llvm::Type *Tys[2] = { Ty, VTy }; |
11171 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11172 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11173 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11174 | } |
11175 | case NEON::BI__builtin_neon_vminvq_s16: { |
11176 | Int = Intrinsic::aarch64_neon_sminv; |
11177 | Ty = Int32Ty; |
11178 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11179 | llvm::Type *Tys[2] = { Ty, VTy }; |
11180 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11181 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11182 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11183 | } |
11184 | case NEON::BI__builtin_neon_vminv_f16: { |
11185 | Int = Intrinsic::aarch64_neon_fminv; |
11186 | Ty = HalfTy; |
11187 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11188 | llvm::Type *Tys[2] = { Ty, VTy }; |
11189 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11190 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11191 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11192 | } |
11193 | case NEON::BI__builtin_neon_vminvq_f16: { |
11194 | Int = Intrinsic::aarch64_neon_fminv; |
11195 | Ty = HalfTy; |
11196 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11197 | llvm::Type *Tys[2] = { Ty, VTy }; |
11198 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11199 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11200 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11201 | } |
11202 | case NEON::BI__builtin_neon_vmaxnmv_f16: { |
11203 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
11204 | Ty = HalfTy; |
11205 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11206 | llvm::Type *Tys[2] = { Ty, VTy }; |
11207 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11208 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
11209 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11210 | } |
11211 | case NEON::BI__builtin_neon_vmaxnmvq_f16: { |
11212 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
11213 | Ty = HalfTy; |
11214 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11215 | llvm::Type *Tys[2] = { Ty, VTy }; |
11216 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11217 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
11218 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11219 | } |
11220 | case NEON::BI__builtin_neon_vminnmv_f16: { |
11221 | Int = Intrinsic::aarch64_neon_fminnmv; |
11222 | Ty = HalfTy; |
11223 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11224 | llvm::Type *Tys[2] = { Ty, VTy }; |
11225 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11226 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
11227 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11228 | } |
11229 | case NEON::BI__builtin_neon_vminnmvq_f16: { |
11230 | Int = Intrinsic::aarch64_neon_fminnmv; |
11231 | Ty = HalfTy; |
11232 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11233 | llvm::Type *Tys[2] = { Ty, VTy }; |
11234 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11235 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
11236 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11237 | } |
11238 | case NEON::BI__builtin_neon_vmul_n_f64: { |
11239 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
11240 | Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); |
11241 | return Builder.CreateFMul(Ops[0], RHS); |
11242 | } |
11243 | case NEON::BI__builtin_neon_vaddlv_u8: { |
11244 | Int = Intrinsic::aarch64_neon_uaddlv; |
11245 | Ty = Int32Ty; |
11246 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11247 | llvm::Type *Tys[2] = { Ty, VTy }; |
11248 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11249 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11250 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11251 | } |
11252 | case NEON::BI__builtin_neon_vaddlv_u16: { |
11253 | Int = Intrinsic::aarch64_neon_uaddlv; |
11254 | Ty = Int32Ty; |
11255 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11256 | llvm::Type *Tys[2] = { Ty, VTy }; |
11257 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11258 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11259 | } |
11260 | case NEON::BI__builtin_neon_vaddlvq_u8: { |
11261 | Int = Intrinsic::aarch64_neon_uaddlv; |
11262 | Ty = Int32Ty; |
11263 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11264 | llvm::Type *Tys[2] = { Ty, VTy }; |
11265 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11266 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11267 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11268 | } |
11269 | case NEON::BI__builtin_neon_vaddlvq_u16: { |
11270 | Int = Intrinsic::aarch64_neon_uaddlv; |
11271 | Ty = Int32Ty; |
11272 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11273 | llvm::Type *Tys[2] = { Ty, VTy }; |
11274 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11275 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11276 | } |
11277 | case NEON::BI__builtin_neon_vaddlv_s8: { |
11278 | Int = Intrinsic::aarch64_neon_saddlv; |
11279 | Ty = Int32Ty; |
11280 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11281 | llvm::Type *Tys[2] = { Ty, VTy }; |
11282 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11283 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11284 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11285 | } |
11286 | case NEON::BI__builtin_neon_vaddlv_s16: { |
11287 | Int = Intrinsic::aarch64_neon_saddlv; |
11288 | Ty = Int32Ty; |
11289 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11290 | llvm::Type *Tys[2] = { Ty, VTy }; |
11291 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11292 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11293 | } |
11294 | case NEON::BI__builtin_neon_vaddlvq_s8: { |
11295 | Int = Intrinsic::aarch64_neon_saddlv; |
11296 | Ty = Int32Ty; |
11297 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11298 | llvm::Type *Tys[2] = { Ty, VTy }; |
11299 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11300 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11301 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11302 | } |
11303 | case NEON::BI__builtin_neon_vaddlvq_s16: { |
11304 | Int = Intrinsic::aarch64_neon_saddlv; |
11305 | Ty = Int32Ty; |
11306 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11307 | llvm::Type *Tys[2] = { Ty, VTy }; |
11308 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11309 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11310 | } |
11311 | case NEON::BI__builtin_neon_vsri_n_v: |
11312 | case NEON::BI__builtin_neon_vsriq_n_v: { |
11313 | Int = Intrinsic::aarch64_neon_vsri; |
11314 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
11315 | return EmitNeonCall(Intrin, Ops, "vsri_n"); |
11316 | } |
11317 | case NEON::BI__builtin_neon_vsli_n_v: |
11318 | case NEON::BI__builtin_neon_vsliq_n_v: { |
11319 | Int = Intrinsic::aarch64_neon_vsli; |
11320 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
11321 | return EmitNeonCall(Intrin, Ops, "vsli_n"); |
11322 | } |
11323 | case NEON::BI__builtin_neon_vsra_n_v: |
11324 | case NEON::BI__builtin_neon_vsraq_n_v: |
11325 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11326 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
11327 | return Builder.CreateAdd(Ops[0], Ops[1]); |
11328 | case NEON::BI__builtin_neon_vrsra_n_v: |
11329 | case NEON::BI__builtin_neon_vrsraq_n_v: { |
11330 | Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; |
11331 | SmallVector<llvm::Value*,2> TmpOps; |
11332 | TmpOps.push_back(Ops[1]); |
11333 | TmpOps.push_back(Ops[2]); |
11334 | Function* F = CGM.getIntrinsic(Int, Ty); |
11335 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true); |
11336 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
11337 | return Builder.CreateAdd(Ops[0], tmp); |
11338 | } |
11339 | case NEON::BI__builtin_neon_vld1_v: |
11340 | case NEON::BI__builtin_neon_vld1q_v: { |
11341 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
11342 | return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment()); |
11343 | } |
11344 | case NEON::BI__builtin_neon_vst1_v: |
11345 | case NEON::BI__builtin_neon_vst1q_v: |
11346 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
11347 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
11348 | return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment()); |
11349 | case NEON::BI__builtin_neon_vld1_lane_v: |
11350 | case NEON::BI__builtin_neon_vld1q_lane_v: { |
11351 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11352 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
11353 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11354 | Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], |
11355 | PtrOp0.getAlignment()); |
11356 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane"); |
11357 | } |
11358 | case NEON::BI__builtin_neon_vld1_dup_v: |
11359 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
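      | // Load a single element and replicate it: insert the loaded scalar into |
      | // lane 0 of an undef vector, then splat lane 0 across all lanes. |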
11360 | Value *V = UndefValue::get(Ty); |
11361 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
11362 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11363 | Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], |
11364 | PtrOp0.getAlignment()); |
11365 | llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); |
11366 | Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); |
11367 | return EmitNeonSplat(Ops[0], CI); |
11368 | } |
11369 | case NEON::BI__builtin_neon_vst1_lane_v: |
11370 | case NEON::BI__builtin_neon_vst1q_lane_v: |
11371 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11372 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
11373 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11374 | return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty), |
11375 | PtrOp0.getAlignment()); |
11376 | case NEON::BI__builtin_neon_vld2_v: |
11377 | case NEON::BI__builtin_neon_vld2q_v: { |
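      | // The ld2 intrinsic returns a {vector, vector} aggregate; store it |
      | // through the result pointer passed in Ops[0]. |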
11378 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11379 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11380 | llvm::Type *Tys[2] = { VTy, PTy }; |
11381 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); |
11382 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
11383 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11384 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11385 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11386 | } |
11387 | case NEON::BI__builtin_neon_vld3_v: |
11388 | case NEON::BI__builtin_neon_vld3q_v: { |
11389 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11390 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11391 | llvm::Type *Tys[2] = { VTy, PTy }; |
11392 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); |
11393 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
11394 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11395 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11396 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11397 | } |
11398 | case NEON::BI__builtin_neon_vld4_v: |
11399 | case NEON::BI__builtin_neon_vld4q_v: { |
11400 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11401 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11402 | llvm::Type *Tys[2] = { VTy, PTy }; |
11403 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); |
11404 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
11405 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11406 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11407 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11408 | } |
11409 | case NEON::BI__builtin_neon_vld2_dup_v: |
11410 | case NEON::BI__builtin_neon_vld2q_dup_v: { |
11411 | llvm::Type *PTy = |
11412 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11413 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11414 | llvm::Type *Tys[2] = { VTy, PTy }; |
11415 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); |
11416 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
11417 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11418 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11419 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11420 | } |
11421 | case NEON::BI__builtin_neon_vld3_dup_v: |
11422 | case NEON::BI__builtin_neon_vld3q_dup_v: { |
11423 | llvm::Type *PTy = |
11424 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11425 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11426 | llvm::Type *Tys[2] = { VTy, PTy }; |
11427 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); |
11428 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
11429 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11430 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11431 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11432 | } |
11433 | case NEON::BI__builtin_neon_vld4_dup_v: |
11434 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
11435 | llvm::Type *PTy = |
11436 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11437 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11438 | llvm::Type *Tys[2] = { VTy, PTy }; |
11439 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); |
11440 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
11441 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11442 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11443 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11444 | } |
11445 | case NEON::BI__builtin_neon_vld2_lane_v: |
11446 | case NEON::BI__builtin_neon_vld2q_lane_v: { |
11447 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11448 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); |
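      | // Rotate the source pointer (Ops[1]) to the end so that, once the result |
      | // pointer in Ops[0] is dropped, the operands line up with the ld2lane |
      | // signature: (vector, vector, lane index, pointer). |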
11449 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11450 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11451 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11452 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
11453 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane"); |
11454 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11455 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11456 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11457 | } |
11458 | case NEON::BI__builtin_neon_vld3_lane_v: |
11459 | case NEON::BI__builtin_neon_vld3q_lane_v: { |
11460 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11461 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); |
11462 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11463 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11464 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11465 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
11466 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
11467 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane"); |
11468 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11469 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11470 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11471 | } |
11472 | case NEON::BI__builtin_neon_vld4_lane_v: |
11473 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
11474 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11475 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); |
11476 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11477 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11478 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11479 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
11480 | Ops[4] = Builder.CreateBitCast(Ops[4], Ty); |
11481 | Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty); |
11482 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane"); |
11483 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11484 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11485 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11486 | } |
11487 | case NEON::BI__builtin_neon_vst2_v: |
11488 | case NEON::BI__builtin_neon_vst2q_v: { |
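      | // Move the pointer operand from the front to the back: st2 takes |
      | // (vector, vector, pointer). |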
11489 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11490 | llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; |
11491 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), |
11492 | Ops, ""); |
11493 | } |
11494 | case NEON::BI__builtin_neon_vst2_lane_v: |
11495 | case NEON::BI__builtin_neon_vst2q_lane_v: { |
11496 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11497 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
11498 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
11499 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), |
11500 | Ops, ""); |
11501 | } |
11502 | case NEON::BI__builtin_neon_vst3_v: |
11503 | case NEON::BI__builtin_neon_vst3q_v: { |
11504 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11505 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
11506 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), |
11507 | Ops, ""); |
11508 | } |
11509 | case NEON::BI__builtin_neon_vst3_lane_v: |
11510 | case NEON::BI__builtin_neon_vst3q_lane_v: { |
11511 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11512 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
11513 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
11514 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), |
11515 | Ops, ""); |
11516 | } |
11517 | case NEON::BI__builtin_neon_vst4_v: |
11518 | case NEON::BI__builtin_neon_vst4q_v: { |
11519 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11520 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
11521 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), |
11522 | Ops, ""); |
11523 | } |
11524 | case NEON::BI__builtin_neon_vst4_lane_v: |
11525 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
11526 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11527 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
11528 | llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; |
11529 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), |
11530 | Ops, ""); |
11531 | } |
11532 | case NEON::BI__builtin_neon_vtrn_v: |
11533 | case NEON::BI__builtin_neon_vtrnq_v: { |
11534 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11535 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11536 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11537 | Value *SV = nullptr; |
11538 | |
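      | // vi selects the even (0) or odd (1) transposed half; e.g. for four |
      | // elements the shuffle masks are {0,4,2,6} and {1,5,3,7}, interleaving |
      | // matching lanes of the two inputs. |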
11539 | for (unsigned vi = 0; vi != 2; ++vi) { |
11540 | SmallVector<int, 16> Indices; |
11541 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
11542 | Indices.push_back(i+vi); |
11543 | Indices.push_back(i+e+vi); |
11544 | } |
11545 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11546 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
11547 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11548 | } |
11549 | return SV; |
11550 | } |
11551 | case NEON::BI__builtin_neon_vuzp_v: |
11552 | case NEON::BI__builtin_neon_vuzpq_v: { |
11553 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11554 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11555 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11556 | Value *SV = nullptr; |
11557 | |
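      | // vi selects the even- or odd-numbered lanes of the concatenated inputs; |
      | // e.g. for four elements the masks are {0,2,4,6} and {1,3,5,7}. |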
11558 | for (unsigned vi = 0; vi != 2; ++vi) { |
11559 | SmallVector<int, 16> Indices; |
11560 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
11561 | Indices.push_back(2*i+vi); |
11562 | |
11563 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11564 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
11565 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11566 | } |
11567 | return SV; |
11568 | } |
11569 | case NEON::BI__builtin_neon_vzip_v: |
11570 | case NEON::BI__builtin_neon_vzipq_v: { |
11571 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11572 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11573 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11574 | Value *SV = nullptr; |
11575 | |
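      | // Each iteration zips one half of the input pair; e.g. for four elements |
      | // the masks are {0,4,1,5} and {2,6,3,7}. |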
11576 | for (unsigned vi = 0; vi != 2; ++vi) { |
11577 | SmallVector<int, 16> Indices; |
11578 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
11579 | Indices.push_back((i + vi*e) >> 1); |
11580 | Indices.push_back(((i + vi*e) >> 1)+e); |
11581 | } |
11582 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11583 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
11584 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11585 | } |
11586 | return SV; |
11587 | } |
11588 | case NEON::BI__builtin_neon_vqtbl1q_v: { |
11589 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), |
11590 | Ops, "vtbl1"); |
11591 | } |
11592 | case NEON::BI__builtin_neon_vqtbl2q_v: { |
11593 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), |
11594 | Ops, "vtbl2"); |
11595 | } |
11596 | case NEON::BI__builtin_neon_vqtbl3q_v: { |
11597 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), |
11598 | Ops, "vtbl3"); |
11599 | } |
11600 | case NEON::BI__builtin_neon_vqtbl4q_v: { |
11601 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), |
11602 | Ops, "vtbl4"); |
11603 | } |
11604 | case NEON::BI__builtin_neon_vqtbx1q_v: { |
11605 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), |
11606 | Ops, "vtbx1"); |
11607 | } |
11608 | case NEON::BI__builtin_neon_vqtbx2q_v: { |
11609 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), |
11610 | Ops, "vtbx2"); |
11611 | } |
11612 | case NEON::BI__builtin_neon_vqtbx3q_v: { |
11613 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), |
11614 | Ops, "vtbx3"); |
11615 | } |
11616 | case NEON::BI__builtin_neon_vqtbx4q_v: { |
11617 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), |
11618 | Ops, "vtbx4"); |
11619 | } |
11620 | case NEON::BI__builtin_neon_vsqadd_v: |
11621 | case NEON::BI__builtin_neon_vsqaddq_v: { |
11622 | Int = Intrinsic::aarch64_neon_usqadd; |
11623 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd"); |
11624 | } |
11625 | case NEON::BI__builtin_neon_vuqadd_v: |
11626 | case NEON::BI__builtin_neon_vuqaddq_v: { |
11627 | Int = Intrinsic::aarch64_neon_suqadd; |
11628 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd"); |
11629 | } |
11630 | } |
11631 | } |
11632 | |
11633 | Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID, |
11634 | const CallExpr *E) { |
11635 | assert((BuiltinID == BPF::BI__builtin_preserve_field_info || |
11636 |         BuiltinID == BPF::BI__builtin_btf_type_id || |
11637 |         BuiltinID == BPF::BI__builtin_preserve_type_info || |
11638 |         BuiltinID == BPF::BI__builtin_preserve_enum_value) && |
11639 |        "unexpected BPF builtin"); |
11640 | |
11641 | // A sequence number, injected into IR builtin functions, to |
11642 | // prevent CSE, given that the only difference between the |
11643 | // functions may be just the debuginfo metadata. |
11644 | static uint32_t BuiltinSeqNum; |
11645 | |
11646 | switch (BuiltinID) { |
11647 | default: |
11648 | llvm_unreachable("Unexpected BPF builtin")::llvm::llvm_unreachable_internal("Unexpected BPF builtin", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 11648); |
11649 | case BPF::BI__builtin_preserve_field_info: { |
11650 | const Expr *Arg = E->getArg(0); |
11651 | bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField; |
11652 | |
11653 | if (!getDebugInfo()) { |
11654 | CGM.Error(E->getExprLoc(), |
11655 | "using __builtin_preserve_field_info() without -g"); |
11656 | return IsBitField ? EmitLValue(Arg).getBitFieldPointer() |
11657 | : EmitLValue(Arg).getPointer(*this); |
11658 | } |
11659 | |
11660 | // Enable underlying preserve_*_access_index() generation. |
11661 | bool OldIsInPreservedAIRegion = IsInPreservedAIRegion; |
11662 | IsInPreservedAIRegion = true; |
11663 | Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer() |
11664 | : EmitLValue(Arg).getPointer(*this); |
11665 | IsInPreservedAIRegion = OldIsInPreservedAIRegion; |
11666 | |
11667 | ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11668 | Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue()); |
11669 | |
11670 | // Build the IR for the preserve_field_info intrinsic. |
11671 | llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration( |
11672 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info, |
11673 | {FieldAddr->getType()}); |
11674 | return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind}); |
11675 | } |
11676 | case BPF::BI__builtin_btf_type_id: |
11677 | case BPF::BI__builtin_preserve_type_info: { |
11678 | if (!getDebugInfo()) { |
11679 | CGM.Error(E->getExprLoc(), "using builtin function without -g"); |
11680 | return nullptr; |
11681 | } |
11682 | |
11683 | const Expr *Arg0 = E->getArg(0); |
11684 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
11685 | Arg0->getType(), Arg0->getExprLoc()); |
11686 | |
11687 | ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11688 | Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); |
11689 | Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++); |
11690 | |
11691 | llvm::Function *FnDecl; |
11692 | if (BuiltinID == BPF::BI__builtin_btf_type_id) |
11693 | FnDecl = llvm::Intrinsic::getDeclaration( |
11694 | &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {}); |
11695 | else |
11696 | FnDecl = llvm::Intrinsic::getDeclaration( |
11697 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {}); |
11698 | CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue}); |
11699 | Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); |
11700 | return Fn; |
11701 | } |
11702 | case BPF::BI__builtin_preserve_enum_value: { |
11703 | if (!getDebugInfo()) { |
11704 | CGM.Error(E->getExprLoc(), "using builtin function without -g"); |
11705 | return nullptr; |
11706 | } |
11707 | |
11708 | const Expr *Arg0 = E->getArg(0); |
11709 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
11710 | Arg0->getType(), Arg0->getExprLoc()); |
11711 | |
11712 | // Find the enumerator. |
11713 | const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens()); |
11714 | const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr()); |
11715 | const auto *DR = cast<DeclRefExpr>(CE->getSubExpr()); |
11716 | const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl()); |
11717 | |
11718 | auto &InitVal = Enumerator->getInitVal(); |
11719 | std::string InitValStr; |
11720 | if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX)) |
11721 | InitValStr = std::to_string(InitVal.getSExtValue()); |
11722 | else |
11723 | InitValStr = std::to_string(InitVal.getZExtValue()); |
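      | // Encode the enumerator as "Name:Value" so the backend can recover both |
      | // the name and the value from the intrinsic call (used for BPF CO-RE |
      | // relocations). |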
11724 | std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr; |
11725 | Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr); |
11726 | |
11727 | ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11728 | Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); |
11729 | Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++); |
11730 | |
11731 | llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration( |
11732 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {}); |
11733 | CallInst *Fn = |
11734 | Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue}); |
11735 | Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); |
11736 | return Fn; |
11737 | } |
11738 | } |
11739 | } |
11740 | |
11741 | llvm::Value *CodeGenFunction:: |
11742 | BuildVector(ArrayRef<llvm::Value*> Ops) { |
11743 | assert((Ops.size() & (Ops.size() - 1)) == 0 &&(static_cast <bool> ((Ops.size() & (Ops.size() - 1) ) == 0 && "Not a power-of-two sized vector!") ? void ( 0) : __assert_fail ("(Ops.size() & (Ops.size() - 1)) == 0 && \"Not a power-of-two sized vector!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 11744, __extension__ __PRETTY_FUNCTION__)) |
11744 | "Not a power-of-two sized vector!")(static_cast <bool> ((Ops.size() & (Ops.size() - 1) ) == 0 && "Not a power-of-two sized vector!") ? void ( 0) : __assert_fail ("(Ops.size() & (Ops.size() - 1)) == 0 && \"Not a power-of-two sized vector!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 11744, __extension__ __PRETTY_FUNCTION__)); |
11745 | bool AllConstants = true; |
11746 | for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) |
11747 | AllConstants &= isa<Constant>(Ops[i]); |
11748 | |
11749 | // If this is a constant vector, create a ConstantVector. |
11750 | if (AllConstants) { |
11751 | SmallVector<llvm::Constant*, 16> CstOps; |
11752 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
11753 | CstOps.push_back(cast<Constant>(Ops[i])); |
11754 | return llvm::ConstantVector::get(CstOps); |
11755 | } |
11756 | |
11757 | // Otherwise, insertelement the values to build the vector. |
11758 | Value *Result = llvm::UndefValue::get( |
11759 | llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size())); |
11760 | |
11761 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
11762 | Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i)); |
11763 | |
11764 | return Result; |
11765 | } |
11766 | |
11767 | // Convert the mask from an integer type to a vector of i1. |
11768 | static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask, |
11769 | unsigned NumElts) { |
11770 | |
11771 | auto *MaskTy = llvm::FixedVectorType::get( |
11772 | CGF.Builder.getInt1Ty(), |
11773 | cast<IntegerType>(Mask->getType())->getBitWidth()); |
11774 | Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy); |
11775 | |
11776 | // If we have fewer than 8 elements, then the starting mask was an i8 and |
11777 | // we need to extract down to the right number of elements. |
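      | // Bit i of the incoming integer mask controls vector lane i, so the |
      | // bitcast plus this shuffle keeps exactly the low NumElts mask bits. |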
11778 | if (NumElts < 8) { |
11779 | int Indices[4]; |
11780 | for (unsigned i = 0; i != NumElts; ++i) |
11781 | Indices[i] = i; |
11782 | MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec, |
11783 | makeArrayRef(Indices, NumElts), |
11784 | "extract"); |
11785 | } |
11786 | return MaskVec; |
11787 | } |
11788 | |
11789 | static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
11790 | Align Alignment) { |
11791 | // Cast the pointer to the right type. |
11792 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
11793 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11794 | |
11795 | Value *MaskVec = getMaskVecValue( |
11796 | CGF, Ops[2], |
11797 | cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements()); |
11798 | |
11799 | return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec); |
11800 | } |
11801 | |
11802 | static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
11803 | Align Alignment) { |
11804 | // Cast the pointer to the right type. |
11805 | llvm::Type *Ty = Ops[1]->getType(); |
11806 | Value *Ptr = |
11807 | CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11808 | |
11809 | Value *MaskVec = getMaskVecValue( |
11810 | CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements()); |
11811 | |
11812 | return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]); |
11813 | } |
11814 | |
11815 | static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, |
11816 | ArrayRef<Value *> Ops) { |
11817 | auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType()); |
11818 | llvm::Type *PtrTy = ResultTy->getElementType(); |
11819 | |
11820 | // Cast the pointer to the element type. |
11821 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
11822 | llvm::PointerType::getUnqual(PtrTy)); |
11823 | |
11824 | Value *MaskVec = getMaskVecValue( |
11825 | CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements()); |
11826 | |
11827 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, |
11828 | ResultTy); |
11829 | return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] }); |
11830 | } |
11831 | |
11832 | static Value *EmitX86CompressExpand(CodeGenFunction &CGF, |
11833 | ArrayRef<Value *> Ops, |
11834 | bool IsCompress) { |
11835 | auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); |
11836 | |
11837 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); |
11838 | |
11839 | Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress |
11840 | : Intrinsic::x86_avx512_mask_expand; |
11841 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy); |
11842 | return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec }); |
11843 | } |
11844 | |
11845 | static Value *EmitX86CompressStore(CodeGenFunction &CGF, |
11846 | ArrayRef<Value *> Ops) { |
11847 | auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); |
11848 | llvm::Type *PtrTy = ResultTy->getElementType(); |
11849 | |
11850 | // Cast the pointer to the element type. |
11851 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
11852 | llvm::PointerType::getUnqual(PtrTy)); |
11853 | |
11854 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); |
11855 | |
11856 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, |
11857 | ResultTy); |
11858 | return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec }); |
11859 | } |
11860 | |
11861 | static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, |
11862 | ArrayRef<Value *> Ops, |
11863 | bool InvertLHS = false) { |
11864 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11865 | Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts); |
11866 | Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts); |
11867 | |
11868 | if (InvertLHS) |
11869 | LHS = CGF.Builder.CreateNot(LHS); |
11870 | |
11871 | return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS), |
11872 | Ops[0]->getType()); |
11873 | } |
11874 | |
11875 | static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1, |
11876 | Value *Amt, bool IsRight) { |
11877 | llvm::Type *Ty = Op0->getType(); |
11878 | |
11879 | // The amount may be a scalar immediate, in which case create a splat vector. |
11880 | // Funnel shift amounts are treated modulo the bit width, and the types are |
11881 | // all power-of-2 sized, so we only care about the lowest log2 bits anyway. |
11882 | if (Amt->getType() != Ty) { |
11883 | unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements(); |
11884 | Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false); |
11885 | Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt); |
11886 | } |
11887 | |
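      | // llvm.fshl/llvm.fshr concatenate Op0:Op1 into a double-width value, |
      | // shift it left/right by Amt (modulo the bit width), and return the |
      | // high/low half respectively. |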
11888 | unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl; |
11889 | Function *F = CGF.CGM.getIntrinsic(IID, Ty); |
11890 | return CGF.Builder.CreateCall(F, {Op0, Op1, Amt}); |
11891 | } |
11892 | |
11893 | static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
11894 | bool IsSigned) { |
11895 | Value *Op0 = Ops[0]; |
11896 | Value *Op1 = Ops[1]; |
11897 | llvm::Type *Ty = Op0->getType(); |
11898 | uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
11899 | |
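      | // The low three immediate bits select the predicate: |
      | // 0=LT, 1=LE, 2=GT, 3=GE, 4=EQ, 5=NE, 6=always-false, 7=always-true. |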
11900 | CmpInst::Predicate Pred; |
11901 | switch (Imm) { |
11902 | case 0x0: |
11903 | Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; |
11904 | break; |
11905 | case 0x1: |
11906 | Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
11907 | break; |
11908 | case 0x2: |
11909 | Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; |
11910 | break; |
11911 | case 0x3: |
11912 | Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; |
11913 | break; |
11914 | case 0x4: |
11915 | Pred = ICmpInst::ICMP_EQ; |
11916 | break; |
11917 | case 0x5: |
11918 | Pred = ICmpInst::ICMP_NE; |
11919 | break; |
11920 | case 0x6: |
11921 | return llvm::Constant::getNullValue(Ty); // FALSE |
11922 | case 0x7: |
11923 | return llvm::Constant::getAllOnesValue(Ty); // TRUE |
11924 | default: |
11925 | llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate")::llvm::llvm_unreachable_internal("Unexpected XOP vpcom/vpcomu predicate" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 11925); |
11926 | } |
11927 | |
11928 | Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1); |
11929 | Value *Res = CGF.Builder.CreateSExt(Cmp, Ty); |
11930 | return Res; |
11931 | } |
11932 | |
11933 | static Value *EmitX86Select(CodeGenFunction &CGF, |
11934 | Value *Mask, Value *Op0, Value *Op1) { |
11935 | |
11936 | // If the mask is all ones, just return the first argument. |
11937 | if (const auto *C = dyn_cast<Constant>(Mask)) |
11938 | if (C->isAllOnesValue()) |
11939 | return Op0; |
11940 | |
11941 | Mask = getMaskVecValue( |
11942 | CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements()); |
11943 | |
11944 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
11945 | } |
11946 | |
11947 | static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, |
11948 | Value *Mask, Value *Op0, Value *Op1) { |
11949 | // If the mask is all ones, just return the first argument. |
11950 | if (const auto *C = dyn_cast<Constant>(Mask)) |
11951 | if (C->isAllOnesValue()) |
11952 | return Op0; |
11953 | |
11954 | auto *MaskTy = llvm::FixedVectorType::get( |
11955 | CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth()); |
11956 | Mask = CGF.Builder.CreateBitCast(Mask, MaskTy); |
11957 | Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0); |
11958 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
11959 | } |
11960 | |
11961 | static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp, |
11962 | unsigned NumElts, Value *MaskIn) { |
11963 | if (MaskIn) { |
11964 | const auto *C = dyn_cast<Constant>(MaskIn); |
11965 | if (!C || !C->isAllOnesValue()) |
11966 | Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts)); |
11967 | } |
11968 | |
11969 | if (NumElts < 8) { |
11970 | int Indices[8]; |
11971 | for (unsigned i = 0; i != NumElts; ++i) |
11972 | Indices[i] = i; |
11973 | for (unsigned i = NumElts; i != 8; ++i) |
11974 | Indices[i] = i % NumElts + NumElts; |
11975 | Cmp = CGF.Builder.CreateShuffleVector( |
11976 | Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); |
11977 | } |
11978 | |
11979 | return CGF.Builder.CreateBitCast(Cmp, |
11980 | IntegerType::get(CGF.getLLVMContext(), |
11981 | std::max(NumElts, 8U))); |
11982 | } |
11983 | |
11984 | static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, |
11985 | bool Signed, ArrayRef<Value *> Ops) { |
11986 | assert((Ops.size() == 2 || Ops.size() == 4) && |
11987 |        "Unexpected number of arguments"); |
11988 | unsigned NumElts = |
11989 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
11990 | Value *Cmp; |
11991 | |
11992 | if (CC == 3) { |
11993 | Cmp = Constant::getNullValue( |
11994 | llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
11995 | } else if (CC == 7) { |
11996 | Cmp = Constant::getAllOnesValue( |
11997 | llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
11998 | } else { |
11999 | ICmpInst::Predicate Pred; |
12000 | switch (CC) { |
12001 | default: llvm_unreachable("Unknown condition code"); |
12002 | case 0: Pred = ICmpInst::ICMP_EQ; break; |
12003 | case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; |
12004 | case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; |
12005 | case 4: Pred = ICmpInst::ICMP_NE; break; |
12006 | case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; |
12007 | case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; |
12008 | } |
12009 | Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); |
12010 | } |
12011 | |
12012 | Value *MaskIn = nullptr; |
12013 | if (Ops.size() == 4) |
12014 | MaskIn = Ops[3]; |
12015 | |
12016 | return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn); |
12017 | } |
12018 | |
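      | // Convert a vector to a mask by testing the sign bit of each element, |
      | // i.e. a signed compare against zero (CC 1 == "less than", Signed). |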
12019 | static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { |
12020 | Value *Zero = Constant::getNullValue(In->getType()); |
12021 | return EmitX86MaskedCompare(CGF, 1, true, { In, Zero }); |
12022 | } |
12023 | |
12024 | static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E, |
12025 | ArrayRef<Value *> Ops, bool IsSigned) { |
12026 | unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue(); |
12027 | llvm::Type *Ty = Ops[1]->getType(); |
12028 | |
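      | // Rnd == 4 is _MM_FROUND_CUR_DIRECTION; any other value requires the |
      | // AVX-512 intrinsic that carries an explicit rounding-mode operand. |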
12029 | Value *Res; |
12030 | if (Rnd != 4) { |
12031 | Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round |
12032 | : Intrinsic::x86_avx512_uitofp_round; |
12033 | Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() }); |
12034 | Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] }); |
12035 | } else { |
12036 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12037 | Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty) |
12038 | : CGF.Builder.CreateUIToFP(Ops[0], Ty); |
12039 | } |
12040 | |
12041 | return EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
12042 | } |
12043 | |
12044 | // Lowers X86 FMA intrinsics to IR. |
12045 | static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
12046 | ArrayRef<Value *> Ops, unsigned BuiltinID, |
12047 | bool IsAddSub) { |
12048 | |
12049 | bool Subtract = false; |
12050 | Intrinsic::ID IID = Intrinsic::not_intrinsic; |
12051 | switch (BuiltinID) { |
12052 | default: break; |
12053 | case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: |
12054 | Subtract = true; |
12055 | LLVM_FALLTHROUGH; |
12056 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask: |
12057 | case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: |
12058 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: |
12059 | IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512; |
12060 | break; |
12061 | case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
12062 | Subtract = true; |
12063 | LLVM_FALLTHROUGH; |
12064 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: |
12065 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
12066 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
12067 | IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512; |
12068 | break; |
12069 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
12070 | Subtract = true; |
12071 | LLVM_FALLTHROUGH; |
12072 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
12073 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
12074 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
12075 | IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; |
12076 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12077 | Subtract = true; |
12078 | LLVM_FALLTHROUGH; |
12079 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
12080 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12081 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12082 | IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; |
12083 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12084 | Subtract = true; |
12085 | LLVM_FALLTHROUGH; |
12086 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12087 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12088 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12089 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512; |
12090 | break; |
12091 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12092 | Subtract = true; |
12093 | LLVM_FALLTHROUGH; |
12094 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12095 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12096 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12097 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512; |
12098 | break; |
12099 | } |
12100 | |
12101 | Value *A = Ops[0]; |
12102 | Value *B = Ops[1]; |
12103 | Value *C = Ops[2]; |
12104 | |
12105 | if (Subtract) |
12106 | C = CGF.Builder.CreateFNeg(C); |
12107 | |
12108 | Value *Res; |
12109 | |
12110 | // Plain fma below only handles _MM_FROUND_CUR_DIRECTION/4 (no explicit rounding); other modes, and addsub, use the target intrinsic. |
12111 | if (IID != Intrinsic::not_intrinsic && |
12112 | (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 || |
12113 | IsAddSub)) { |
12114 | Function *Intr = CGF.CGM.getIntrinsic(IID); |
12115 | Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() }); |
12116 | } else { |
12117 | llvm::Type *Ty = A->getType(); |
12118 | Function *FMA; |
12119 | if (CGF.Builder.getIsFPConstrained()) { |
12120 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12121 | FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty); |
12122 | Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C}); |
12123 | } else { |
12124 | FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty); |
12125 | Res = CGF.Builder.CreateCall(FMA, {A, B, C}); |
12126 | } |
12127 | } |
12128 | |
12129 | // Handle any required masking. |
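      | // The _mask forms blend the result with the first source operand, the |
      | // _maskz forms with zero, and the _mask3 forms with the accumulator |
      | // (Ops[2]). |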
12130 | Value *MaskFalseVal = nullptr; |
12131 | switch (BuiltinID) { |
12132 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask: |
12133 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
12134 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
12135 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: |
12136 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12137 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12138 | MaskFalseVal = Ops[0]; |
12139 | break; |
12140 | case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: |
12141 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
12142 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12143 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
12144 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12145 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12146 | MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); |
12147 | break; |
12148 | case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: |
12149 | case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: |
12150 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
12151 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
12152 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12153 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12154 | case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
12155 | case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
12156 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12157 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12158 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12159 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12160 | MaskFalseVal = Ops[2]; |
12161 | break; |
12162 | } |
12163 | |
12164 | if (MaskFalseVal) |
12165 | return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal); |
12166 | |
12167 | return Res; |
12168 | } |
12169 | |
12170 | static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
12171 | MutableArrayRef<Value *> Ops, Value *Upper, |
12172 | bool ZeroMask = false, unsigned PTIdx = 0, |
12173 | bool NegAcc = false) { |
12174 | unsigned Rnd = 4; |
12175 | if (Ops.size() > 4) |
12176 | Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
12177 | |
12178 | if (NegAcc) |
12179 | Ops[2] = CGF.Builder.CreateFNeg(Ops[2]); |
12180 | |
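      | // The scalar forms operate on lane 0 only: extract the three scalars, |
      | // compute, and later reinsert the result into Upper so the remaining |
      | // lanes pass through unchanged. |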
12181 | Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
12182 | Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
12183 | Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
12184 | Value *Res; |
12185 | if (Rnd != 4) { |
12186 | Intrinsic::ID IID; |
12187 | |
12188 | switch (Ops[0]->getType()->getPrimitiveSizeInBits()) { |
12189 | case 16: |
12190 | IID = Intrinsic::x86_avx512fp16_vfmadd_f16; |
12191 | break; |
12192 | case 32: |
12193 | IID = Intrinsic::x86_avx512_vfmadd_f32; |
12194 | break; |
12195 | case 64: |
12196 | IID = Intrinsic::x86_avx512_vfmadd_f64; |
12197 | break; |
12198 | default: |
12199 | llvm_unreachable("Unexpected size")::llvm::llvm_unreachable_internal("Unexpected size", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 12199); |
12200 | } |
12201 | Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
12202 | {Ops[0], Ops[1], Ops[2], Ops[4]}); |
12203 | } else if (CGF.Builder.getIsFPConstrained()) { |
12204 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12205 | Function *FMA = CGF.CGM.getIntrinsic( |
12206 | Intrinsic::experimental_constrained_fma, Ops[0]->getType()); |
12207 | Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3)); |
12208 | } else { |
12209 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType()); |
12210 | Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3)); |
12211 | } |
12212 | // If we have more than 3 arguments, we need to do masking. |
12213 | if (Ops.size() > 3) { |
12214 | Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) |
12215 | : Ops[PTIdx]; |
12216 | |
12217 | // If we negated the accumulator and it is the PassThru value, we need to |
12218 | // bypass the negate. Conveniently, Upper should be the same thing in this |
12219 | // case. |
12220 | if (NegAcc && PTIdx == 2) |
12221 | PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0); |
12222 | |
12223 | Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru); |
12224 | } |
12225 | return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0); |
12226 | } |
12227 | |
12228 | static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned, |
12229 | ArrayRef<Value *> Ops) { |
12230 | llvm::Type *Ty = Ops[0]->getType(); |
12231 | // Arguments have a vXi32 type so cast to vXi64. |
12232 | Ty = llvm::FixedVectorType::get(CGF.Int64Ty, |
12233 | Ty->getPrimitiveSizeInBits() / 64); |
12234 | Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty); |
12235 | Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty); |
12236 | |
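      | // pmuldq/pmuludq multiply only the low 32 bits of each 64-bit lane, so |
      | // sign-extend (shl then ashr by 32) or zero-extend (mask off the high |
      | // bits) each lane in place before the full 64-bit multiply. |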
12237 | if (IsSigned) { |
12238 | // Shift left then arithmetic shift right. |
12239 | Constant *ShiftAmt = ConstantInt::get(Ty, 32); |
12240 | LHS = CGF.Builder.CreateShl(LHS, ShiftAmt); |
12241 | LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt); |
12242 | RHS = CGF.Builder.CreateShl(RHS, ShiftAmt); |
12243 | RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt); |
12244 | } else { |
12245 | // Clear the upper bits. |
12246 | Constant *Mask = ConstantInt::get(Ty, 0xffffffff); |
12247 | LHS = CGF.Builder.CreateAnd(LHS, Mask); |
12248 | RHS = CGF.Builder.CreateAnd(RHS, Mask); |
12249 | } |
12250 | |
12251 | return CGF.Builder.CreateMul(LHS, RHS); |
12252 | } |
12253 | |
12254 | // Emit a masked pternlog intrinsic. This only exists because the header has to |
12255 | // use a macro and we aren't able to pass the input argument to a pternlog |
12256 | // builtin and a select builtin without evaluating it twice. |
12257 | static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask, |
12258 | ArrayRef<Value *> Ops) { |
12259 | llvm::Type *Ty = Ops[0]->getType(); |
12260 | |
12261 | unsigned VecWidth = Ty->getPrimitiveSizeInBits(); |
12262 | unsigned EltWidth = Ty->getScalarSizeInBits(); |
12263 | Intrinsic::ID IID; |
12264 | if (VecWidth == 128 && EltWidth == 32) |
12265 | IID = Intrinsic::x86_avx512_pternlog_d_128; |
12266 | else if (VecWidth == 256 && EltWidth == 32) |
12267 | IID = Intrinsic::x86_avx512_pternlog_d_256; |
12268 | else if (VecWidth == 512 && EltWidth == 32) |
12269 | IID = Intrinsic::x86_avx512_pternlog_d_512; |
12270 | else if (VecWidth == 128 && EltWidth == 64) |
12271 | IID = Intrinsic::x86_avx512_pternlog_q_128; |
12272 | else if (VecWidth == 256 && EltWidth == 64) |
12273 | IID = Intrinsic::x86_avx512_pternlog_q_256; |
12274 | else if (VecWidth == 512 && EltWidth == 64) |
12275 | IID = Intrinsic::x86_avx512_pternlog_q_512; |
12276 | else |
12277 | llvm_unreachable("Unexpected intrinsic");
12278 | |
12279 | Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
12280 | Ops.drop_back()); |
12281 | Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; |
12282 | return EmitX86Select(CGF, Ops[4], Ternlog, PassThru); |
12283 | } |
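| // For reference: the immediate passed to pternlog is an 8-entry truth table
| // indexed by the bit triple (a,b,c) taken from the three sources; e.g.
| // imm8 == 0xCA encodes the bitwise select a ? b : c, and imm8 == 0x96
| // encodes a ^ b ^ c. The mask in Ops[4] then chooses per element between
| // the pternlog result and the pass-through value (Ops[0] or zero).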
12284 | |
12285 | static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, |
12286 | llvm::Type *DstTy) { |
12287 | unsigned NumberOfElements = |
12288 | cast<llvm::FixedVectorType>(DstTy)->getNumElements(); |
12289 | Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); |
12290 | return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); |
12291 | } |
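| // Illustrative example: for a k-register value 0b0101 widened to <4 x i32>,
| // getMaskVecValue yields <i1 1, i1 0, i1 1, i1 0> and the sign-extension
| // produces <i32 -1, i32 0, i32 -1, i32 0>, i.e. an all-ones element for
| // every set mask bit, matching the vpmovm2* semantics.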
12292 | |
12293 | // Emit binary intrinsic with the same type used in result/args. |
12294 | static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF, |
12295 | ArrayRef<Value *> Ops, Intrinsic::ID IID) { |
12296 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType()); |
12297 | return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]}); |
12298 | } |
12299 | |
12300 | Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { |
12301 | const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); |
12302 | StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); |
12303 | return EmitX86CpuIs(CPUStr); |
12304 | } |
12305 | |
12306 | // Convert F16 halves to floats.
12307 | static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF, |
12308 | ArrayRef<Value *> Ops, |
12309 | llvm::Type *DstTy) { |
12310 | assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
12311 |        "Unknown cvtph2ps intrinsic");
12312 | |
12313 | // If the SAE intrinsic doesn't use default rounding then we can't upgrade. |
12314 | if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) { |
12315 | Function *F = |
12316 | CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512); |
12317 | return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]}); |
12318 | } |
12319 | |
12320 | unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements(); |
12321 | Value *Src = Ops[0]; |
12322 | |
12323 | // Extract the subvector. |
12324 | if (NumDstElts != |
12325 | cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) { |
12326 | assert(NumDstElts == 4 && "Unexpected vector size");
12327 | Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3}); |
12328 | } |
12329 | |
12330 | // Bitcast from vXi16 to vXf16. |
12331 | auto *HalfTy = llvm::FixedVectorType::get( |
12332 | llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts); |
12333 | Src = CGF.Builder.CreateBitCast(Src, HalfTy); |
12334 | |
12335 | // Perform the fp-extension. |
12336 | Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps"); |
12337 | |
12338 | if (Ops.size() >= 3) |
12339 | Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
12340 | return Res; |
12341 | } |
12342 | |
12343 | // Convert a BF16 to a float. |
12344 | static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF, |
12345 | const CallExpr *E, |
12346 | ArrayRef<Value *> Ops) { |
12347 | llvm::Type *Int32Ty = CGF.Builder.getInt32Ty(); |
12348 | Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty); |
12349 | Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16); |
12350 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
12351 | Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType); |
12352 | return BitCast; |
12353 | } |
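| // Worked example: a __bf16 value is the high 16 bits of an IEEE binary32
| // float, so zero-extending and shifting left by 16 reconstructs the float
| // exactly; e.g. the bf16 bit pattern 0x3F80 becomes 0x3F800000 == 1.0f.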
12354 | |
12355 | Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { |
12356 | |
12357 | llvm::Type *Int32Ty = Builder.getInt32Ty(); |
12358 | |
12359 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
12360 | // filled in: |
12361 | // unsigned int __cpu_vendor; |
12362 | // unsigned int __cpu_type; |
12363 | // unsigned int __cpu_subtype; |
12364 | // unsigned int __cpu_features[1]; |
12365 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
12366 | llvm::ArrayType::get(Int32Ty, 1)); |
12367 | |
12368 | // Grab the global __cpu_model. |
12369 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
12370 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
12371 | |
12372 | // Calculate the index needed to access the correct field based on the |
12373 | // range. Also adjust the expected value. |
12374 | unsigned Index; |
12375 | unsigned Value; |
12376 | std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) |
12377 | #define X86_VENDOR(ENUM, STRING) \ |
12378 | .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12379 | #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \ |
12380 | .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12381 | #define X86_CPU_TYPE(ENUM, STR) \ |
12382 | .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12383 | #define X86_CPU_SUBTYPE(ENUM, STR) \ |
12384 | .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12385 | #include "llvm/Support/X86TargetParser.def" |
12386 | .Default({0, 0}); |
12387 | assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
12388 | |
12389 | // Grab the appropriate field from __cpu_model. |
12390 | llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), |
12391 | ConstantInt::get(Int32Ty, Index)}; |
12392 | llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs); |
12393 | CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue, |
12394 | CharUnits::fromQuantity(4)); |
12395 | |
12396 | // Check the value of the field against the requested value. |
12397 | return Builder.CreateICmpEQ(CpuValue, |
12398 | llvm::ConstantInt::get(Int32Ty, Value)); |
12399 | } |
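| // Illustrative sketch of the result (assuming __builtin_cpu_is("amd"),
| // which matches on the __cpu_vendor field, so Index == 0): the emitted IR
| // is roughly
| //   %fld = load i32 from &__cpu_model.__cpu_vendor, align 4
| //   %cmp = icmp eq i32 %fld, <llvm::X86::VENDOR_AMD>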
12400 | |
12401 | Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { |
12402 | const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); |
12403 | StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); |
12404 | return EmitX86CpuSupports(FeatureStr); |
12405 | } |
12406 | |
12407 | uint64_t |
12408 | CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) { |
12409 | // Processor features and mapping to processor feature value. |
12410 | uint64_t FeaturesMask = 0; |
12411 | for (const StringRef &FeatureStr : FeatureStrs) { |
12412 | unsigned Feature = |
12413 | StringSwitch<unsigned>(FeatureStr) |
12414 | #define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \ |
12415 | .Case(STR, llvm::X86::FEATURE_##ENUM) |
12416 | #include "llvm/Support/X86TargetParser.def" |
12417 | ; |
12418 | FeaturesMask |= (1ULL << Feature); |
12419 | } |
12420 | return FeaturesMask; |
12421 | } |
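| // Example: for FeatureStrs == {"avx2", "bmi"} the StringSwitch maps each
| // name to its llvm::X86::FEATURE_* enum value N, and the returned mask is
| // the OR of the corresponding (1ULL << N) bits, ready for the bit tests in
| // EmitX86CpuSupports below.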
12422 | |
12423 | Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { |
12424 | return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs)); |
12425 | } |
12426 | |
12427 | llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) { |
12428 | uint32_t Features1 = Lo_32(FeaturesMask); |
12429 | uint32_t Features2 = Hi_32(FeaturesMask); |
12430 | |
12431 | Value *Result = Builder.getTrue(); |
12432 | |
12433 | if (Features1 != 0) { |
12434 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
12435 | // filled in: |
12436 | // unsigned int __cpu_vendor; |
12437 | // unsigned int __cpu_type; |
12438 | // unsigned int __cpu_subtype; |
12439 | // unsigned int __cpu_features[1]; |
12440 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
12441 | llvm::ArrayType::get(Int32Ty, 1)); |
12442 | |
12443 | // Grab the global __cpu_model. |
12444 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
12445 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
12446 | |
12447 | // Grab the first (0th) element from the field __cpu_features off of the |
12448 | // global in the struct STy. |
12449 | Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3), |
12450 | Builder.getInt32(0)}; |
12451 | Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs); |
12452 | Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures, |
12453 | CharUnits::fromQuantity(4)); |
12454 | |
12455 | // Check the value of the bit corresponding to the feature requested. |
12456 | Value *Mask = Builder.getInt32(Features1); |
12457 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
12458 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
12459 | Result = Builder.CreateAnd(Result, Cmp); |
12460 | } |
12461 | |
12462 | if (Features2 != 0) { |
12463 | llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty, |
12464 | "__cpu_features2"); |
12465 | cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true); |
12466 | |
12467 | Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2, |
12468 | CharUnits::fromQuantity(4)); |
12469 | |
12470 | // Check the value of the bit corresponding to the feature requested. |
12471 | Value *Mask = Builder.getInt32(Features2); |
12472 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
12473 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
12474 | Result = Builder.CreateAnd(Result, Cmp); |
12475 | } |
12476 | |
12477 | return Result; |
12478 | } |
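| // Illustrative sketch of one bit test (low 32 feature bits): the builtin
| // loads __cpu_model.__cpu_features[0], ANDs it with the requested mask, and
| // compares for equality, so it is true only when *every* requested feature
| // bit is set. Roughly:
| //   %and = and i32 %features, %mask
| //   %cmp = icmp eq i32 %and, %mask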
12479 | |
12480 | Value *CodeGenFunction::EmitX86CpuInit() { |
12481 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, |
12482 | /*Variadic*/ false); |
12483 | llvm::FunctionCallee Func = |
12484 | CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init"); |
12485 | cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); |
12486 | cast<llvm::GlobalValue>(Func.getCallee()) |
12487 | ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
12488 | return Builder.CreateCall(Func); |
12489 | } |
12490 | |
12491 | Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, |
12492 | const CallExpr *E) { |
12493 | if (BuiltinID == X86::BI__builtin_cpu_is) |
12494 | return EmitX86CpuIs(E); |
12495 | if (BuiltinID == X86::BI__builtin_cpu_supports) |
12496 | return EmitX86CpuSupports(E); |
12497 | if (BuiltinID == X86::BI__builtin_cpu_init) |
12498 | return EmitX86CpuInit(); |
12499 | |
12500 | // Handle MSVC intrinsics before argument evaluation to prevent double |
12501 | // evaluation. |
12502 | if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID)) |
12503 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
12504 | |
12505 | SmallVector<Value*, 4> Ops; |
12506 | bool IsMaskFCmp = false; |
12507 | |
12508 | // Find out if any arguments are required to be integer constant expressions. |
12509 | unsigned ICEArguments = 0; |
12510 | ASTContext::GetBuiltinTypeError Error; |
12511 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
12512 | assert(Error == ASTContext::GE_None && "Should not codegen an error");
12513 | |
12514 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
12515 | // If this is a normal argument, just emit it as a scalar. |
12516 | if ((ICEArguments & (1 << i)) == 0) { |
12517 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
12518 | continue; |
12519 | } |
12520 | |
12521 | // If this is required to be a constant, constant fold it so that we know |
12522 | // that the generated intrinsic gets a ConstantInt. |
12523 | Ops.push_back(llvm::ConstantInt::get( |
12524 | getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
12525 | } |
12526 | |
12527 | // These exist so that the builtin that takes an immediate can be bounds |
12528 | // checked by clang to avoid passing bad immediates to the backend. Since |
12529 | // AVX has a larger immediate than SSE we would need separate builtins to |
12530 | // do the different bounds checking. Rather than create a clang-specific
12531 | // SSE-only builtin, this implements eight separate builtins to match the
12532 | // gcc implementation.
12533 | auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { |
12534 | Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm)); |
12535 | llvm::Function *F = CGM.getIntrinsic(ID); |
12536 | return Builder.CreateCall(F, Ops); |
12537 | }; |
12538 | |
12539 | // For the vector forms of FP comparisons, translate the builtins directly to |
12540 | // IR. |
12541 | // TODO: The builtins could be removed if the SSE header files used vector |
12542 | // extension comparisons directly (vector ordered/unordered may need |
12543 | // additional support via __builtin_isnan()). |
12544 | auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred, |
12545 | bool IsSignaling) { |
12546 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
12547 | Value *Cmp; |
12548 | if (IsSignaling) |
12549 | Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); |
12550 | else |
12551 | Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
12552 | llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); |
12553 | llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy); |
12554 | Value *Sext = Builder.CreateSExt(Cmp, IntVecTy); |
12555 | return Builder.CreateBitCast(Sext, FPVecTy); |
12556 | }; |
12557 | |
12558 | switch (BuiltinID) { |
12559 | default: return nullptr; |
12560 | case X86::BI_mm_prefetch: { |
12561 | Value *Address = Ops[0]; |
12562 | ConstantInt *C = cast<ConstantInt>(Ops[1]); |
12563 | Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1); |
12564 | Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3); |
12565 | Value *Data = ConstantInt::get(Int32Ty, 1); |
12566 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
12567 | return Builder.CreateCall(F, {Address, RW, Locality, Data}); |
12568 | } |
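| // Worked example: _mm_prefetch(p, _MM_HINT_T0) passes the constant 3, so
| // RW = (3 >> 2) & 1 == 0 (read) and Locality = 3 & 3 == 3 (keep in all
| // cache levels); _MM_HINT_ET1 (6) would give RW = 1, Locality = 2.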
12569 | case X86::BI_mm_clflush: { |
12570 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush), |
12571 | Ops[0]); |
12572 | } |
12573 | case X86::BI_mm_lfence: { |
12574 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence)); |
12575 | } |
12576 | case X86::BI_mm_mfence: { |
12577 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence)); |
12578 | } |
12579 | case X86::BI_mm_sfence: { |
12580 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence)); |
12581 | } |
12582 | case X86::BI_mm_pause: { |
12583 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause)); |
12584 | } |
12585 | case X86::BI__rdtsc: { |
12586 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc)); |
12587 | } |
12588 | case X86::BI__builtin_ia32_rdtscp: { |
12589 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp)); |
12590 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
12591 | Ops[0]); |
12592 | return Builder.CreateExtractValue(Call, 0); |
12593 | } |
12594 | case X86::BI__builtin_ia32_lzcnt_u16: |
12595 | case X86::BI__builtin_ia32_lzcnt_u32: |
12596 | case X86::BI__builtin_ia32_lzcnt_u64: { |
12597 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
12598 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
12599 | } |
12600 | case X86::BI__builtin_ia32_tzcnt_u16: |
12601 | case X86::BI__builtin_ia32_tzcnt_u32: |
12602 | case X86::BI__builtin_ia32_tzcnt_u64: { |
12603 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType()); |
12604 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
12605 | } |
12606 | case X86::BI__builtin_ia32_undef128: |
12607 | case X86::BI__builtin_ia32_undef256: |
12608 | case X86::BI__builtin_ia32_undef512: |
12609 | // The x86 definition of "undef" is not the same as the LLVM definition |
12610 | // (PR32176). We leave optimizing away an unnecessary zero constant to the |
12611 | // IR optimizer and backend. |
12612 | // TODO: If we had a "freeze" IR instruction to generate a fixed undef |
12613 | // value, we should use that here instead of a zero. |
12614 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
12615 | case X86::BI__builtin_ia32_vec_init_v8qi: |
12616 | case X86::BI__builtin_ia32_vec_init_v4hi: |
12617 | case X86::BI__builtin_ia32_vec_init_v2si: |
12618 | return Builder.CreateBitCast(BuildVector(Ops), |
12619 | llvm::Type::getX86_MMXTy(getLLVMContext())); |
12620 | case X86::BI__builtin_ia32_vec_ext_v2si: |
12621 | case X86::BI__builtin_ia32_vec_ext_v16qi: |
12622 | case X86::BI__builtin_ia32_vec_ext_v8hi: |
12623 | case X86::BI__builtin_ia32_vec_ext_v4si: |
12624 | case X86::BI__builtin_ia32_vec_ext_v4sf: |
12625 | case X86::BI__builtin_ia32_vec_ext_v2di: |
12626 | case X86::BI__builtin_ia32_vec_ext_v32qi: |
12627 | case X86::BI__builtin_ia32_vec_ext_v16hi: |
12628 | case X86::BI__builtin_ia32_vec_ext_v8si: |
12629 | case X86::BI__builtin_ia32_vec_ext_v4di: { |
12630 | unsigned NumElts = |
12631 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12632 | uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
12633 | Index &= NumElts - 1; |
12634 | // These builtins exist so we can ensure the index is an ICE and in range. |
12635 | // Otherwise we could just do this in the header file. |
12636 | return Builder.CreateExtractElement(Ops[0], Index); |
12637 | } |
12638 | case X86::BI__builtin_ia32_vec_set_v16qi: |
12639 | case X86::BI__builtin_ia32_vec_set_v8hi: |
12640 | case X86::BI__builtin_ia32_vec_set_v4si: |
12641 | case X86::BI__builtin_ia32_vec_set_v2di: |
12642 | case X86::BI__builtin_ia32_vec_set_v32qi: |
12643 | case X86::BI__builtin_ia32_vec_set_v16hi: |
12644 | case X86::BI__builtin_ia32_vec_set_v8si: |
12645 | case X86::BI__builtin_ia32_vec_set_v4di: { |
12646 | unsigned NumElts = |
12647 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12648 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
12649 | Index &= NumElts - 1; |
12650 | // These builtins exist so we can ensure the index is an ICE and in range. |
12651 | // Otherwise we could just do this in the header file. |
12652 | return Builder.CreateInsertElement(Ops[0], Ops[1], Index); |
12653 | } |
12654 | case X86::BI_mm_setcsr: |
12655 | case X86::BI__builtin_ia32_ldmxcsr: { |
12656 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
12657 | Builder.CreateStore(Ops[0], Tmp); |
12658 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), |
12659 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
12660 | } |
12661 | case X86::BI_mm_getcsr: |
12662 | case X86::BI__builtin_ia32_stmxcsr: { |
12663 | Address Tmp = CreateMemTemp(E->getType()); |
12664 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), |
12665 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
12666 | return Builder.CreateLoad(Tmp, "stmxcsr"); |
12667 | } |
12668 | case X86::BI__builtin_ia32_xsave: |
12669 | case X86::BI__builtin_ia32_xsave64: |
12670 | case X86::BI__builtin_ia32_xrstor: |
12671 | case X86::BI__builtin_ia32_xrstor64: |
12672 | case X86::BI__builtin_ia32_xsaveopt: |
12673 | case X86::BI__builtin_ia32_xsaveopt64: |
12674 | case X86::BI__builtin_ia32_xrstors: |
12675 | case X86::BI__builtin_ia32_xrstors64: |
12676 | case X86::BI__builtin_ia32_xsavec: |
12677 | case X86::BI__builtin_ia32_xsavec64: |
12678 | case X86::BI__builtin_ia32_xsaves: |
12679 | case X86::BI__builtin_ia32_xsaves64: |
12680 | case X86::BI__builtin_ia32_xsetbv: |
12681 | case X86::BI_xsetbv: { |
12682 | Intrinsic::ID ID; |
12683 | #define INTRINSIC_X86_XSAVE_ID(NAME) \ |
12684 | case X86::BI__builtin_ia32_##NAME: \ |
12685 | ID = Intrinsic::x86_##NAME; \ |
12686 | break |
12687 | switch (BuiltinID) { |
12688 | default: llvm_unreachable("Unsupported intrinsic!");
12689 | INTRINSIC_X86_XSAVE_ID(xsave); |
12690 | INTRINSIC_X86_XSAVE_ID(xsave64); |
12691 | INTRINSIC_X86_XSAVE_ID(xrstor); |
12692 | INTRINSIC_X86_XSAVE_ID(xrstor64); |
12693 | INTRINSIC_X86_XSAVE_ID(xsaveopt); |
12694 | INTRINSIC_X86_XSAVE_ID(xsaveopt64); |
12695 | INTRINSIC_X86_XSAVE_ID(xrstors); |
12696 | INTRINSIC_X86_XSAVE_ID(xrstors64); |
12697 | INTRINSIC_X86_XSAVE_ID(xsavec); |
12698 | INTRINSIC_X86_XSAVE_ID(xsavec64); |
12699 | INTRINSIC_X86_XSAVE_ID(xsaves); |
12700 | INTRINSIC_X86_XSAVE_ID(xsaves64); |
12701 | INTRINSIC_X86_XSAVE_ID(xsetbv); |
12702 | case X86::BI_xsetbv: |
12703 | ID = Intrinsic::x86_xsetbv; |
12704 | break; |
12705 | } |
12706 | #undef INTRINSIC_X86_XSAVE_ID |
12707 | Value *Mhi = Builder.CreateTrunc( |
12708 | Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); |
12709 | Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty); |
12710 | Ops[1] = Mhi; |
12711 | Ops.push_back(Mlo); |
12712 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
12713 | } |
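| // Worked example of the mask split above: for a requested feature mask of
| // 0x200000007ULL, Mhi becomes 2 and Mlo becomes 7, matching the EDX:EAX
| // operand pair the XSAVE-family instructions expect.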
12714 | case X86::BI__builtin_ia32_xgetbv: |
12715 | case X86::BI_xgetbv: |
12716 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops); |
12717 | case X86::BI__builtin_ia32_storedqudi128_mask: |
12718 | case X86::BI__builtin_ia32_storedqusi128_mask: |
12719 | case X86::BI__builtin_ia32_storedquhi128_mask: |
12720 | case X86::BI__builtin_ia32_storedquqi128_mask: |
12721 | case X86::BI__builtin_ia32_storeupd128_mask: |
12722 | case X86::BI__builtin_ia32_storeups128_mask: |
12723 | case X86::BI__builtin_ia32_storedqudi256_mask: |
12724 | case X86::BI__builtin_ia32_storedqusi256_mask: |
12725 | case X86::BI__builtin_ia32_storedquhi256_mask: |
12726 | case X86::BI__builtin_ia32_storedquqi256_mask: |
12727 | case X86::BI__builtin_ia32_storeupd256_mask: |
12728 | case X86::BI__builtin_ia32_storeups256_mask: |
12729 | case X86::BI__builtin_ia32_storedqudi512_mask: |
12730 | case X86::BI__builtin_ia32_storedqusi512_mask: |
12731 | case X86::BI__builtin_ia32_storedquhi512_mask: |
12732 | case X86::BI__builtin_ia32_storedquqi512_mask: |
12733 | case X86::BI__builtin_ia32_storeupd512_mask: |
12734 | case X86::BI__builtin_ia32_storeups512_mask: |
12735 | return EmitX86MaskedStore(*this, Ops, Align(1)); |
12736 | |
12737 | case X86::BI__builtin_ia32_storesh128_mask: |
12738 | case X86::BI__builtin_ia32_storess128_mask: |
12739 | case X86::BI__builtin_ia32_storesd128_mask: |
12740 | return EmitX86MaskedStore(*this, Ops, Align(1)); |
12741 | |
12742 | case X86::BI__builtin_ia32_vpopcntb_128: |
12743 | case X86::BI__builtin_ia32_vpopcntd_128: |
12744 | case X86::BI__builtin_ia32_vpopcntq_128: |
12745 | case X86::BI__builtin_ia32_vpopcntw_128: |
12746 | case X86::BI__builtin_ia32_vpopcntb_256: |
12747 | case X86::BI__builtin_ia32_vpopcntd_256: |
12748 | case X86::BI__builtin_ia32_vpopcntq_256: |
12749 | case X86::BI__builtin_ia32_vpopcntw_256: |
12750 | case X86::BI__builtin_ia32_vpopcntb_512: |
12751 | case X86::BI__builtin_ia32_vpopcntd_512: |
12752 | case X86::BI__builtin_ia32_vpopcntq_512: |
12753 | case X86::BI__builtin_ia32_vpopcntw_512: { |
12754 | llvm::Type *ResultType = ConvertType(E->getType()); |
12755 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
12756 | return Builder.CreateCall(F, Ops); |
12757 | } |
12758 | case X86::BI__builtin_ia32_cvtmask2b128: |
12759 | case X86::BI__builtin_ia32_cvtmask2b256: |
12760 | case X86::BI__builtin_ia32_cvtmask2b512: |
12761 | case X86::BI__builtin_ia32_cvtmask2w128: |
12762 | case X86::BI__builtin_ia32_cvtmask2w256: |
12763 | case X86::BI__builtin_ia32_cvtmask2w512: |
12764 | case X86::BI__builtin_ia32_cvtmask2d128: |
12765 | case X86::BI__builtin_ia32_cvtmask2d256: |
12766 | case X86::BI__builtin_ia32_cvtmask2d512: |
12767 | case X86::BI__builtin_ia32_cvtmask2q128: |
12768 | case X86::BI__builtin_ia32_cvtmask2q256: |
12769 | case X86::BI__builtin_ia32_cvtmask2q512: |
12770 | return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); |
12771 | |
12772 | case X86::BI__builtin_ia32_cvtb2mask128: |
12773 | case X86::BI__builtin_ia32_cvtb2mask256: |
12774 | case X86::BI__builtin_ia32_cvtb2mask512: |
12775 | case X86::BI__builtin_ia32_cvtw2mask128: |
12776 | case X86::BI__builtin_ia32_cvtw2mask256: |
12777 | case X86::BI__builtin_ia32_cvtw2mask512: |
12778 | case X86::BI__builtin_ia32_cvtd2mask128: |
12779 | case X86::BI__builtin_ia32_cvtd2mask256: |
12780 | case X86::BI__builtin_ia32_cvtd2mask512: |
12781 | case X86::BI__builtin_ia32_cvtq2mask128: |
12782 | case X86::BI__builtin_ia32_cvtq2mask256: |
12783 | case X86::BI__builtin_ia32_cvtq2mask512: |
12784 | return EmitX86ConvertToMask(*this, Ops[0]); |
12785 | |
12786 | case X86::BI__builtin_ia32_cvtdq2ps512_mask: |
12787 | case X86::BI__builtin_ia32_cvtqq2ps512_mask: |
12788 | case X86::BI__builtin_ia32_cvtqq2pd512_mask: |
12789 | case X86::BI__builtin_ia32_vcvtw2ph512_mask: |
12790 | case X86::BI__builtin_ia32_vcvtdq2ph512_mask: |
12791 | case X86::BI__builtin_ia32_vcvtqq2ph512_mask: |
12792 | return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true); |
12793 | case X86::BI__builtin_ia32_cvtudq2ps512_mask: |
12794 | case X86::BI__builtin_ia32_cvtuqq2ps512_mask: |
12795 | case X86::BI__builtin_ia32_cvtuqq2pd512_mask: |
12796 | case X86::BI__builtin_ia32_vcvtuw2ph512_mask: |
12797 | case X86::BI__builtin_ia32_vcvtudq2ph512_mask: |
12798 | case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: |
12799 | return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false); |
12800 | |
12801 | case X86::BI__builtin_ia32_vfmaddss3: |
12802 | case X86::BI__builtin_ia32_vfmaddsd3: |
12803 | case X86::BI__builtin_ia32_vfmaddsh3_mask: |
12804 | case X86::BI__builtin_ia32_vfmaddss3_mask: |
12805 | case X86::BI__builtin_ia32_vfmaddsd3_mask: |
12806 | return EmitScalarFMAExpr(*this, E, Ops, Ops[0]); |
12807 | case X86::BI__builtin_ia32_vfmaddss: |
12808 | case X86::BI__builtin_ia32_vfmaddsd: |
12809 | return EmitScalarFMAExpr(*this, E, Ops, |
12810 | Constant::getNullValue(Ops[0]->getType())); |
12811 | case X86::BI__builtin_ia32_vfmaddsh3_maskz: |
12812 | case X86::BI__builtin_ia32_vfmaddss3_maskz: |
12813 | case X86::BI__builtin_ia32_vfmaddsd3_maskz: |
12814 | return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true); |
12815 | case X86::BI__builtin_ia32_vfmaddsh3_mask3: |
12816 | case X86::BI__builtin_ia32_vfmaddss3_mask3: |
12817 | case X86::BI__builtin_ia32_vfmaddsd3_mask3: |
12818 | return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2); |
12819 | case X86::BI__builtin_ia32_vfmsubsh3_mask3: |
12820 | case X86::BI__builtin_ia32_vfmsubss3_mask3: |
12821 | case X86::BI__builtin_ia32_vfmsubsd3_mask3: |
12822 | return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2, |
12823 | /*NegAcc*/ true); |
12824 | case X86::BI__builtin_ia32_vfmaddph: |
12825 | case X86::BI__builtin_ia32_vfmaddps: |
12826 | case X86::BI__builtin_ia32_vfmaddpd: |
12827 | case X86::BI__builtin_ia32_vfmaddph256: |
12828 | case X86::BI__builtin_ia32_vfmaddps256: |
12829 | case X86::BI__builtin_ia32_vfmaddpd256: |
12830 | case X86::BI__builtin_ia32_vfmaddph512_mask: |
12831 | case X86::BI__builtin_ia32_vfmaddph512_maskz: |
12832 | case X86::BI__builtin_ia32_vfmaddph512_mask3: |
12833 | case X86::BI__builtin_ia32_vfmaddps512_mask: |
12834 | case X86::BI__builtin_ia32_vfmaddps512_maskz: |
12835 | case X86::BI__builtin_ia32_vfmaddps512_mask3: |
12836 | case X86::BI__builtin_ia32_vfmsubps512_mask3: |
12837 | case X86::BI__builtin_ia32_vfmaddpd512_mask: |
12838 | case X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12839 | case X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12840 | case X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12841 | case X86::BI__builtin_ia32_vfmsubph512_mask3: |
12842 | return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false); |
12843 | case X86::BI__builtin_ia32_vfmaddsubph512_mask: |
12844 | case X86::BI__builtin_ia32_vfmaddsubph512_maskz: |
12845 | case X86::BI__builtin_ia32_vfmaddsubph512_mask3: |
12846 | case X86::BI__builtin_ia32_vfmsubaddph512_mask3: |
12847 | case X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12848 | case X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12849 | case X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12850 | case X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12851 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12852 | case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12853 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12854 | case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12855 | return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true); |
12856 | |
12857 | case X86::BI__builtin_ia32_movdqa32store128_mask: |
12858 | case X86::BI__builtin_ia32_movdqa64store128_mask: |
12859 | case X86::BI__builtin_ia32_storeaps128_mask: |
12860 | case X86::BI__builtin_ia32_storeapd128_mask: |
12861 | case X86::BI__builtin_ia32_movdqa32store256_mask: |
12862 | case X86::BI__builtin_ia32_movdqa64store256_mask: |
12863 | case X86::BI__builtin_ia32_storeaps256_mask: |
12864 | case X86::BI__builtin_ia32_storeapd256_mask: |
12865 | case X86::BI__builtin_ia32_movdqa32store512_mask: |
12866 | case X86::BI__builtin_ia32_movdqa64store512_mask: |
12867 | case X86::BI__builtin_ia32_storeaps512_mask: |
12868 | case X86::BI__builtin_ia32_storeapd512_mask: |
12869 | return EmitX86MaskedStore( |
12870 | *this, Ops, |
12871 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); |
12872 | |
12873 | case X86::BI__builtin_ia32_loadups128_mask: |
12874 | case X86::BI__builtin_ia32_loadups256_mask: |
12875 | case X86::BI__builtin_ia32_loadups512_mask: |
12876 | case X86::BI__builtin_ia32_loadupd128_mask: |
12877 | case X86::BI__builtin_ia32_loadupd256_mask: |
12878 | case X86::BI__builtin_ia32_loadupd512_mask: |
12879 | case X86::BI__builtin_ia32_loaddquqi128_mask: |
12880 | case X86::BI__builtin_ia32_loaddquqi256_mask: |
12881 | case X86::BI__builtin_ia32_loaddquqi512_mask: |
12882 | case X86::BI__builtin_ia32_loaddquhi128_mask: |
12883 | case X86::BI__builtin_ia32_loaddquhi256_mask: |
12884 | case X86::BI__builtin_ia32_loaddquhi512_mask: |
12885 | case X86::BI__builtin_ia32_loaddqusi128_mask: |
12886 | case X86::BI__builtin_ia32_loaddqusi256_mask: |
12887 | case X86::BI__builtin_ia32_loaddqusi512_mask: |
12888 | case X86::BI__builtin_ia32_loaddqudi128_mask: |
12889 | case X86::BI__builtin_ia32_loaddqudi256_mask: |
12890 | case X86::BI__builtin_ia32_loaddqudi512_mask: |
12891 | return EmitX86MaskedLoad(*this, Ops, Align(1)); |
12892 | |
12893 | case X86::BI__builtin_ia32_loadsh128_mask: |
12894 | case X86::BI__builtin_ia32_loadss128_mask: |
12895 | case X86::BI__builtin_ia32_loadsd128_mask: |
12896 | return EmitX86MaskedLoad(*this, Ops, Align(1)); |
12897 | |
12898 | case X86::BI__builtin_ia32_loadaps128_mask: |
12899 | case X86::BI__builtin_ia32_loadaps256_mask: |
12900 | case X86::BI__builtin_ia32_loadaps512_mask: |
12901 | case X86::BI__builtin_ia32_loadapd128_mask: |
12902 | case X86::BI__builtin_ia32_loadapd256_mask: |
12903 | case X86::BI__builtin_ia32_loadapd512_mask: |
12904 | case X86::BI__builtin_ia32_movdqa32load128_mask: |
12905 | case X86::BI__builtin_ia32_movdqa32load256_mask: |
12906 | case X86::BI__builtin_ia32_movdqa32load512_mask: |
12907 | case X86::BI__builtin_ia32_movdqa64load128_mask: |
12908 | case X86::BI__builtin_ia32_movdqa64load256_mask: |
12909 | case X86::BI__builtin_ia32_movdqa64load512_mask: |
12910 | return EmitX86MaskedLoad( |
12911 | *this, Ops, |
12912 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); |
12913 | |
12914 | case X86::BI__builtin_ia32_expandloaddf128_mask: |
12915 | case X86::BI__builtin_ia32_expandloaddf256_mask: |
12916 | case X86::BI__builtin_ia32_expandloaddf512_mask: |
12917 | case X86::BI__builtin_ia32_expandloadsf128_mask: |
12918 | case X86::BI__builtin_ia32_expandloadsf256_mask: |
12919 | case X86::BI__builtin_ia32_expandloadsf512_mask: |
12920 | case X86::BI__builtin_ia32_expandloaddi128_mask: |
12921 | case X86::BI__builtin_ia32_expandloaddi256_mask: |
12922 | case X86::BI__builtin_ia32_expandloaddi512_mask: |
12923 | case X86::BI__builtin_ia32_expandloadsi128_mask: |
12924 | case X86::BI__builtin_ia32_expandloadsi256_mask: |
12925 | case X86::BI__builtin_ia32_expandloadsi512_mask: |
12926 | case X86::BI__builtin_ia32_expandloadhi128_mask: |
12927 | case X86::BI__builtin_ia32_expandloadhi256_mask: |
12928 | case X86::BI__builtin_ia32_expandloadhi512_mask: |
12929 | case X86::BI__builtin_ia32_expandloadqi128_mask: |
12930 | case X86::BI__builtin_ia32_expandloadqi256_mask: |
12931 | case X86::BI__builtin_ia32_expandloadqi512_mask: |
12932 | return EmitX86ExpandLoad(*this, Ops); |
12933 | |
12934 | case X86::BI__builtin_ia32_compressstoredf128_mask: |
12935 | case X86::BI__builtin_ia32_compressstoredf256_mask: |
12936 | case X86::BI__builtin_ia32_compressstoredf512_mask: |
12937 | case X86::BI__builtin_ia32_compressstoresf128_mask: |
12938 | case X86::BI__builtin_ia32_compressstoresf256_mask: |
12939 | case X86::BI__builtin_ia32_compressstoresf512_mask: |
12940 | case X86::BI__builtin_ia32_compressstoredi128_mask: |
12941 | case X86::BI__builtin_ia32_compressstoredi256_mask: |
12942 | case X86::BI__builtin_ia32_compressstoredi512_mask: |
12943 | case X86::BI__builtin_ia32_compressstoresi128_mask: |
12944 | case X86::BI__builtin_ia32_compressstoresi256_mask: |
12945 | case X86::BI__builtin_ia32_compressstoresi512_mask: |
12946 | case X86::BI__builtin_ia32_compressstorehi128_mask: |
12947 | case X86::BI__builtin_ia32_compressstorehi256_mask: |
12948 | case X86::BI__builtin_ia32_compressstorehi512_mask: |
12949 | case X86::BI__builtin_ia32_compressstoreqi128_mask: |
12950 | case X86::BI__builtin_ia32_compressstoreqi256_mask: |
12951 | case X86::BI__builtin_ia32_compressstoreqi512_mask: |
12952 | return EmitX86CompressStore(*this, Ops); |
12953 | |
12954 | case X86::BI__builtin_ia32_expanddf128_mask: |
12955 | case X86::BI__builtin_ia32_expanddf256_mask: |
12956 | case X86::BI__builtin_ia32_expanddf512_mask: |
12957 | case X86::BI__builtin_ia32_expandsf128_mask: |
12958 | case X86::BI__builtin_ia32_expandsf256_mask: |
12959 | case X86::BI__builtin_ia32_expandsf512_mask: |
12960 | case X86::BI__builtin_ia32_expanddi128_mask: |
12961 | case X86::BI__builtin_ia32_expanddi256_mask: |
12962 | case X86::BI__builtin_ia32_expanddi512_mask: |
12963 | case X86::BI__builtin_ia32_expandsi128_mask: |
12964 | case X86::BI__builtin_ia32_expandsi256_mask: |
12965 | case X86::BI__builtin_ia32_expandsi512_mask: |
12966 | case X86::BI__builtin_ia32_expandhi128_mask: |
12967 | case X86::BI__builtin_ia32_expandhi256_mask: |
12968 | case X86::BI__builtin_ia32_expandhi512_mask: |
12969 | case X86::BI__builtin_ia32_expandqi128_mask: |
12970 | case X86::BI__builtin_ia32_expandqi256_mask: |
12971 | case X86::BI__builtin_ia32_expandqi512_mask: |
12972 | return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false); |
12973 | |
12974 | case X86::BI__builtin_ia32_compressdf128_mask: |
12975 | case X86::BI__builtin_ia32_compressdf256_mask: |
12976 | case X86::BI__builtin_ia32_compressdf512_mask: |
12977 | case X86::BI__builtin_ia32_compresssf128_mask: |
12978 | case X86::BI__builtin_ia32_compresssf256_mask: |
12979 | case X86::BI__builtin_ia32_compresssf512_mask: |
12980 | case X86::BI__builtin_ia32_compressdi128_mask: |
12981 | case X86::BI__builtin_ia32_compressdi256_mask: |
12982 | case X86::BI__builtin_ia32_compressdi512_mask: |
12983 | case X86::BI__builtin_ia32_compresssi128_mask: |
12984 | case X86::BI__builtin_ia32_compresssi256_mask: |
12985 | case X86::BI__builtin_ia32_compresssi512_mask: |
12986 | case X86::BI__builtin_ia32_compresshi128_mask: |
12987 | case X86::BI__builtin_ia32_compresshi256_mask: |
12988 | case X86::BI__builtin_ia32_compresshi512_mask: |
12989 | case X86::BI__builtin_ia32_compressqi128_mask: |
12990 | case X86::BI__builtin_ia32_compressqi256_mask: |
12991 | case X86::BI__builtin_ia32_compressqi512_mask: |
12992 | return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true); |
12993 | |
12994 | case X86::BI__builtin_ia32_gather3div2df: |
12995 | case X86::BI__builtin_ia32_gather3div2di: |
12996 | case X86::BI__builtin_ia32_gather3div4df: |
12997 | case X86::BI__builtin_ia32_gather3div4di: |
12998 | case X86::BI__builtin_ia32_gather3div4sf: |
12999 | case X86::BI__builtin_ia32_gather3div4si: |
13000 | case X86::BI__builtin_ia32_gather3div8sf: |
13001 | case X86::BI__builtin_ia32_gather3div8si: |
13002 | case X86::BI__builtin_ia32_gather3siv2df: |
13003 | case X86::BI__builtin_ia32_gather3siv2di: |
13004 | case X86::BI__builtin_ia32_gather3siv4df: |
13005 | case X86::BI__builtin_ia32_gather3siv4di: |
13006 | case X86::BI__builtin_ia32_gather3siv4sf: |
13007 | case X86::BI__builtin_ia32_gather3siv4si: |
13008 | case X86::BI__builtin_ia32_gather3siv8sf: |
13009 | case X86::BI__builtin_ia32_gather3siv8si: |
13010 | case X86::BI__builtin_ia32_gathersiv8df: |
13011 | case X86::BI__builtin_ia32_gathersiv16sf: |
13012 | case X86::BI__builtin_ia32_gatherdiv8df: |
13013 | case X86::BI__builtin_ia32_gatherdiv16sf: |
13014 | case X86::BI__builtin_ia32_gathersiv8di: |
13015 | case X86::BI__builtin_ia32_gathersiv16si: |
13016 | case X86::BI__builtin_ia32_gatherdiv8di: |
13017 | case X86::BI__builtin_ia32_gatherdiv16si: { |
13018 | Intrinsic::ID IID; |
13019 | switch (BuiltinID) { |
13020 | default: llvm_unreachable("Unexpected builtin");
13021 | case X86::BI__builtin_ia32_gather3div2df: |
13022 | IID = Intrinsic::x86_avx512_mask_gather3div2_df; |
13023 | break; |
13024 | case X86::BI__builtin_ia32_gather3div2di: |
13025 | IID = Intrinsic::x86_avx512_mask_gather3div2_di; |
13026 | break; |
13027 | case X86::BI__builtin_ia32_gather3div4df: |
13028 | IID = Intrinsic::x86_avx512_mask_gather3div4_df; |
13029 | break; |
13030 | case X86::BI__builtin_ia32_gather3div4di: |
13031 | IID = Intrinsic::x86_avx512_mask_gather3div4_di; |
13032 | break; |
13033 | case X86::BI__builtin_ia32_gather3div4sf: |
13034 | IID = Intrinsic::x86_avx512_mask_gather3div4_sf; |
13035 | break; |
13036 | case X86::BI__builtin_ia32_gather3div4si: |
13037 | IID = Intrinsic::x86_avx512_mask_gather3div4_si; |
13038 | break; |
13039 | case X86::BI__builtin_ia32_gather3div8sf: |
13040 | IID = Intrinsic::x86_avx512_mask_gather3div8_sf; |
13041 | break; |
13042 | case X86::BI__builtin_ia32_gather3div8si: |
13043 | IID = Intrinsic::x86_avx512_mask_gather3div8_si; |
13044 | break; |
13045 | case X86::BI__builtin_ia32_gather3siv2df: |
13046 | IID = Intrinsic::x86_avx512_mask_gather3siv2_df; |
13047 | break; |
13048 | case X86::BI__builtin_ia32_gather3siv2di: |
13049 | IID = Intrinsic::x86_avx512_mask_gather3siv2_di; |
13050 | break; |
13051 | case X86::BI__builtin_ia32_gather3siv4df: |
13052 | IID = Intrinsic::x86_avx512_mask_gather3siv4_df; |
13053 | break; |
13054 | case X86::BI__builtin_ia32_gather3siv4di: |
13055 | IID = Intrinsic::x86_avx512_mask_gather3siv4_di; |
13056 | break; |
13057 | case X86::BI__builtin_ia32_gather3siv4sf: |
13058 | IID = Intrinsic::x86_avx512_mask_gather3siv4_sf; |
13059 | break; |
13060 | case X86::BI__builtin_ia32_gather3siv4si: |
13061 | IID = Intrinsic::x86_avx512_mask_gather3siv4_si; |
13062 | break; |
13063 | case X86::BI__builtin_ia32_gather3siv8sf: |
13064 | IID = Intrinsic::x86_avx512_mask_gather3siv8_sf; |
13065 | break; |
13066 | case X86::BI__builtin_ia32_gather3siv8si: |
13067 | IID = Intrinsic::x86_avx512_mask_gather3siv8_si; |
13068 | break; |
13069 | case X86::BI__builtin_ia32_gathersiv8df: |
13070 | IID = Intrinsic::x86_avx512_mask_gather_dpd_512; |
13071 | break; |
13072 | case X86::BI__builtin_ia32_gathersiv16sf: |
13073 | IID = Intrinsic::x86_avx512_mask_gather_dps_512; |
13074 | break; |
13075 | case X86::BI__builtin_ia32_gatherdiv8df: |
13076 | IID = Intrinsic::x86_avx512_mask_gather_qpd_512; |
13077 | break; |
13078 | case X86::BI__builtin_ia32_gatherdiv16sf: |
13079 | IID = Intrinsic::x86_avx512_mask_gather_qps_512; |
13080 | break; |
13081 | case X86::BI__builtin_ia32_gathersiv8di: |
13082 | IID = Intrinsic::x86_avx512_mask_gather_dpq_512; |
13083 | break; |
13084 | case X86::BI__builtin_ia32_gathersiv16si: |
13085 | IID = Intrinsic::x86_avx512_mask_gather_dpi_512; |
13086 | break; |
13087 | case X86::BI__builtin_ia32_gatherdiv8di: |
13088 | IID = Intrinsic::x86_avx512_mask_gather_qpq_512; |
13089 | break; |
13090 | case X86::BI__builtin_ia32_gatherdiv16si: |
13091 | IID = Intrinsic::x86_avx512_mask_gather_qpi_512; |
13092 | break; |
13093 | } |
13094 | |
13095 | unsigned MinElts = std::min( |
13096 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(), |
13097 | cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements()); |
13098 | Ops[3] = getMaskVecValue(*this, Ops[3], MinElts); |
13099 | Function *Intr = CGM.getIntrinsic(IID); |
13100 | return Builder.CreateCall(Intr, Ops); |
13101 | } |
13102 | |
13103 | case X86::BI__builtin_ia32_scattersiv8df: |
13104 | case X86::BI__builtin_ia32_scattersiv16sf: |
13105 | case X86::BI__builtin_ia32_scatterdiv8df: |
13106 | case X86::BI__builtin_ia32_scatterdiv16sf: |
13107 | case X86::BI__builtin_ia32_scattersiv8di: |
13108 | case X86::BI__builtin_ia32_scattersiv16si: |
13109 | case X86::BI__builtin_ia32_scatterdiv8di: |
13110 | case X86::BI__builtin_ia32_scatterdiv16si: |
13111 | case X86::BI__builtin_ia32_scatterdiv2df: |
13112 | case X86::BI__builtin_ia32_scatterdiv2di: |
13113 | case X86::BI__builtin_ia32_scatterdiv4df: |
13114 | case X86::BI__builtin_ia32_scatterdiv4di: |
13115 | case X86::BI__builtin_ia32_scatterdiv4sf: |
13116 | case X86::BI__builtin_ia32_scatterdiv4si: |
13117 | case X86::BI__builtin_ia32_scatterdiv8sf: |
13118 | case X86::BI__builtin_ia32_scatterdiv8si: |
13119 | case X86::BI__builtin_ia32_scattersiv2df: |
13120 | case X86::BI__builtin_ia32_scattersiv2di: |
13121 | case X86::BI__builtin_ia32_scattersiv4df: |
13122 | case X86::BI__builtin_ia32_scattersiv4di: |
13123 | case X86::BI__builtin_ia32_scattersiv4sf: |
13124 | case X86::BI__builtin_ia32_scattersiv4si: |
13125 | case X86::BI__builtin_ia32_scattersiv8sf: |
13126 | case X86::BI__builtin_ia32_scattersiv8si: { |
13127 | Intrinsic::ID IID; |
13128 | switch (BuiltinID) { |
13129 | default: llvm_unreachable("Unexpected builtin");
13130 | case X86::BI__builtin_ia32_scattersiv8df: |
13131 | IID = Intrinsic::x86_avx512_mask_scatter_dpd_512; |
13132 | break; |
13133 | case X86::BI__builtin_ia32_scattersiv16sf: |
13134 | IID = Intrinsic::x86_avx512_mask_scatter_dps_512; |
13135 | break; |
13136 | case X86::BI__builtin_ia32_scatterdiv8df: |
13137 | IID = Intrinsic::x86_avx512_mask_scatter_qpd_512; |
13138 | break; |
13139 | case X86::BI__builtin_ia32_scatterdiv16sf: |
13140 | IID = Intrinsic::x86_avx512_mask_scatter_qps_512; |
13141 | break; |
13142 | case X86::BI__builtin_ia32_scattersiv8di: |
13143 | IID = Intrinsic::x86_avx512_mask_scatter_dpq_512; |
13144 | break; |
13145 | case X86::BI__builtin_ia32_scattersiv16si: |
13146 | IID = Intrinsic::x86_avx512_mask_scatter_dpi_512; |
13147 | break; |
13148 | case X86::BI__builtin_ia32_scatterdiv8di: |
13149 | IID = Intrinsic::x86_avx512_mask_scatter_qpq_512; |
13150 | break; |
13151 | case X86::BI__builtin_ia32_scatterdiv16si: |
13152 | IID = Intrinsic::x86_avx512_mask_scatter_qpi_512; |
13153 | break; |
13154 | case X86::BI__builtin_ia32_scatterdiv2df: |
13155 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_df; |
13156 | break; |
13157 | case X86::BI__builtin_ia32_scatterdiv2di: |
13158 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_di; |
13159 | break; |
13160 | case X86::BI__builtin_ia32_scatterdiv4df: |
13161 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_df; |
13162 | break; |
13163 | case X86::BI__builtin_ia32_scatterdiv4di: |
13164 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_di; |
13165 | break; |
13166 | case X86::BI__builtin_ia32_scatterdiv4sf: |
13167 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf; |
13168 | break; |
13169 | case X86::BI__builtin_ia32_scatterdiv4si: |
13170 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_si; |
13171 | break; |
13172 | case X86::BI__builtin_ia32_scatterdiv8sf: |
13173 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf; |
13174 | break; |
13175 | case X86::BI__builtin_ia32_scatterdiv8si: |
13176 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_si; |
13177 | break; |
13178 | case X86::BI__builtin_ia32_scattersiv2df: |
13179 | IID = Intrinsic::x86_avx512_mask_scattersiv2_df; |
13180 | break; |
13181 | case X86::BI__builtin_ia32_scattersiv2di: |
13182 | IID = Intrinsic::x86_avx512_mask_scattersiv2_di; |
13183 | break; |
13184 | case X86::BI__builtin_ia32_scattersiv4df: |
13185 | IID = Intrinsic::x86_avx512_mask_scattersiv4_df; |
13186 | break; |
13187 | case X86::BI__builtin_ia32_scattersiv4di: |
13188 | IID = Intrinsic::x86_avx512_mask_scattersiv4_di; |
13189 | break; |
13190 | case X86::BI__builtin_ia32_scattersiv4sf: |
13191 | IID = Intrinsic::x86_avx512_mask_scattersiv4_sf; |
13192 | break; |
13193 | case X86::BI__builtin_ia32_scattersiv4si: |
13194 | IID = Intrinsic::x86_avx512_mask_scattersiv4_si; |
13195 | break; |
13196 | case X86::BI__builtin_ia32_scattersiv8sf: |
13197 | IID = Intrinsic::x86_avx512_mask_scattersiv8_sf; |
13198 | break; |
13199 | case X86::BI__builtin_ia32_scattersiv8si: |
13200 | IID = Intrinsic::x86_avx512_mask_scattersiv8_si; |
13201 | break; |
13202 | } |
13203 | |
13204 | unsigned MinElts = std::min( |
13205 | cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(), |
13206 | cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements()); |
13207 | Ops[1] = getMaskVecValue(*this, Ops[1], MinElts); |
13208 | Function *Intr = CGM.getIntrinsic(IID); |
13209 | return Builder.CreateCall(Intr, Ops); |
13210 | } |
13211 | |
13212 | case X86::BI__builtin_ia32_vextractf128_pd256: |
13213 | case X86::BI__builtin_ia32_vextractf128_ps256: |
13214 | case X86::BI__builtin_ia32_vextractf128_si256: |
13215 | case X86::BI__builtin_ia32_extract128i256: |
13216 | case X86::BI__builtin_ia32_extractf64x4_mask: |
13217 | case X86::BI__builtin_ia32_extractf32x4_mask: |
13218 | case X86::BI__builtin_ia32_extracti64x4_mask: |
13219 | case X86::BI__builtin_ia32_extracti32x4_mask: |
13220 | case X86::BI__builtin_ia32_extractf32x8_mask: |
13221 | case X86::BI__builtin_ia32_extracti32x8_mask: |
13222 | case X86::BI__builtin_ia32_extractf32x4_256_mask: |
13223 | case X86::BI__builtin_ia32_extracti32x4_256_mask: |
13224 | case X86::BI__builtin_ia32_extractf64x2_256_mask: |
13225 | case X86::BI__builtin_ia32_extracti64x2_256_mask: |
13226 | case X86::BI__builtin_ia32_extractf64x2_512_mask: |
13227 | case X86::BI__builtin_ia32_extracti64x2_512_mask: { |
13228 | auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType())); |
13229 | unsigned NumElts = DstTy->getNumElements(); |
13230 | unsigned SrcNumElts = |
13231 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13232 | unsigned SubVectors = SrcNumElts / NumElts; |
13233 | unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
13234 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13235 | Index &= SubVectors - 1; // Remove any extra bits. |
13236 | Index *= NumElts; |
13237 | |
13238 | int Indices[16]; |
13239 | for (unsigned i = 0; i != NumElts; ++i) |
13240 | Indices[i] = i + Index; |
13241 | |
13242 | Value *Res = Builder.CreateShuffleVector(Ops[0], |
13243 | makeArrayRef(Indices, NumElts), |
13244 | "extract"); |
13245 | |
13246 | if (Ops.size() == 4) |
13247 | Res = EmitX86Select(*this, Ops[3], Res, Ops[2]); |
13248 | |
13249 | return Res; |
13250 | } |
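| // Worked example: extracting the third 128-bit lane (Index == 2) of a
| // <16 x float> source as a <4 x float> result gives SubVectors == 4; the
| // index is scaled to 8 and the shuffle mask becomes <8, 9, 10, 11>.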
13251 | case X86::BI__builtin_ia32_vinsertf128_pd256: |
13252 | case X86::BI__builtin_ia32_vinsertf128_ps256: |
13253 | case X86::BI__builtin_ia32_vinsertf128_si256: |
13254 | case X86::BI__builtin_ia32_insert128i256: |
13255 | case X86::BI__builtin_ia32_insertf64x4: |
13256 | case X86::BI__builtin_ia32_insertf32x4: |
13257 | case X86::BI__builtin_ia32_inserti64x4: |
13258 | case X86::BI__builtin_ia32_inserti32x4: |
13259 | case X86::BI__builtin_ia32_insertf32x8: |
13260 | case X86::BI__builtin_ia32_inserti32x8: |
13261 | case X86::BI__builtin_ia32_insertf32x4_256: |
13262 | case X86::BI__builtin_ia32_inserti32x4_256: |
13263 | case X86::BI__builtin_ia32_insertf64x2_256: |
13264 | case X86::BI__builtin_ia32_inserti64x2_256: |
13265 | case X86::BI__builtin_ia32_insertf64x2_512: |
13266 | case X86::BI__builtin_ia32_inserti64x2_512: { |
13267 | unsigned DstNumElts = |
13268 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13269 | unsigned SrcNumElts = |
13270 | cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements(); |
13271 | unsigned SubVectors = DstNumElts / SrcNumElts; |
13272 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
13273 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13274 | Index &= SubVectors - 1; // Remove any extra bits. |
13275 | Index *= SrcNumElts; |
13276 | |
13277 | int Indices[16]; |
13278 | for (unsigned i = 0; i != DstNumElts; ++i) |
13279 | Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; |
13280 | |
13281 | Value *Op1 = Builder.CreateShuffleVector(Ops[1], |
13282 | makeArrayRef(Indices, DstNumElts), |
13283 | "widen"); |
13284 | |
13285 | for (unsigned i = 0; i != DstNumElts; ++i) { |
13286 | if (i >= Index && i < (Index + SrcNumElts)) |
13287 | Indices[i] = (i - Index) + DstNumElts; |
13288 | else |
13289 | Indices[i] = i; |
13290 | } |
13291 | |
13292 | return Builder.CreateShuffleVector(Ops[0], Op1, |
13293 | makeArrayRef(Indices, DstNumElts), |
13294 | "insert"); |
13295 | } |
13296 | case X86::BI__builtin_ia32_pmovqd512_mask: |
13297 | case X86::BI__builtin_ia32_pmovwb512_mask: { |
13298 | Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
13299 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
13300 | } |
13301 | case X86::BI__builtin_ia32_pmovdb512_mask: |
13302 | case X86::BI__builtin_ia32_pmovdw512_mask: |
13303 | case X86::BI__builtin_ia32_pmovqw512_mask: { |
13304 | if (const auto *C = dyn_cast<Constant>(Ops[2])) |
13305 | if (C->isAllOnesValue()) |
13306 | return Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
13307 | |
13308 | Intrinsic::ID IID; |
13309 | switch (BuiltinID) { |
13310 | default: llvm_unreachable("Unsupported intrinsic!");
13311 | case X86::BI__builtin_ia32_pmovdb512_mask: |
13312 | IID = Intrinsic::x86_avx512_mask_pmov_db_512; |
13313 | break; |
13314 | case X86::BI__builtin_ia32_pmovdw512_mask: |
13315 | IID = Intrinsic::x86_avx512_mask_pmov_dw_512; |
13316 | break; |
13317 | case X86::BI__builtin_ia32_pmovqw512_mask: |
13318 | IID = Intrinsic::x86_avx512_mask_pmov_qw_512; |
13319 | break; |
13320 | } |
13321 | |
13322 | Function *Intr = CGM.getIntrinsic(IID); |
13323 | return Builder.CreateCall(Intr, Ops); |
13324 | } |
13325 | case X86::BI__builtin_ia32_pblendw128: |
13326 | case X86::BI__builtin_ia32_blendpd: |
13327 | case X86::BI__builtin_ia32_blendps: |
13328 | case X86::BI__builtin_ia32_blendpd256: |
13329 | case X86::BI__builtin_ia32_blendps256: |
13330 | case X86::BI__builtin_ia32_pblendw256: |
13331 | case X86::BI__builtin_ia32_pblendd128: |
13332 | case X86::BI__builtin_ia32_pblendd256: { |
13333 | unsigned NumElts = |
13334 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13335 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13336 | |
13337 | int Indices[16]; |
13338 | // If there are more than 8 elements, the immediate is used twice so make |
13339 | // sure we handle that. |
13340 | for (unsigned i = 0; i != NumElts; ++i) |
13341 | Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i; |
13342 | |
13343 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13344 | makeArrayRef(Indices, NumElts), |
13345 | "blend"); |
13346 | } |
13347 | case X86::BI__builtin_ia32_pshuflw: |
13348 | case X86::BI__builtin_ia32_pshuflw256: |
13349 | case X86::BI__builtin_ia32_pshuflw512: { |
13350 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13351 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13352 | unsigned NumElts = Ty->getNumElements(); |
13353 | |
13354 | // Splat the 8-bits of immediate 4 times to help the loop wrap around. |
13355 | Imm = (Imm & 0xff) * 0x01010101; |
13356 | |
13357 | int Indices[32]; |
13358 | for (unsigned l = 0; l != NumElts; l += 8) { |
13359 | for (unsigned i = 0; i != 4; ++i) { |
13360 | Indices[l + i] = l + (Imm & 3); |
13361 | Imm >>= 2; |
13362 | } |
13363 | for (unsigned i = 4; i != 8; ++i) |
13364 | Indices[l + i] = l + i; |
13365 | } |
13366 | |
13367 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13368 | "pshuflw"); |
13369 | } |
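| // Worked example of the immediate splat: for pshuflw with Imm == 0x1B the
| // multiply by 0x01010101 yields 0x1B1B1B1B, so each 8-element lane consumes
| // a fresh copy of the byte as `Imm >>= 2` walks through it. 0x1B
| // (0b00011011) reorders the low four words to 3, 2, 1, 0 and leaves words
| // 4-7 in place.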
13370 | case X86::BI__builtin_ia32_pshufhw: |
13371 | case X86::BI__builtin_ia32_pshufhw256: |
13372 | case X86::BI__builtin_ia32_pshufhw512: { |
13373 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13374 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13375 | unsigned NumElts = Ty->getNumElements(); |
13376 | |
13377 | // Splat the 8-bits of immediate 4 times to help the loop wrap around. |
13378 | Imm = (Imm & 0xff) * 0x01010101; |
13379 | |
13380 | int Indices[32]; |
13381 | for (unsigned l = 0; l != NumElts; l += 8) { |
13382 | for (unsigned i = 0; i != 4; ++i) |
13383 | Indices[l + i] = l + i; |
13384 | for (unsigned i = 4; i != 8; ++i) { |
13385 | Indices[l + i] = l + 4 + (Imm & 3); |
13386 | Imm >>= 2; |
13387 | } |
13388 | } |
13389 | |
13390 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13391 | "pshufhw"); |
13392 | } |
13393 | case X86::BI__builtin_ia32_pshufd: |
13394 | case X86::BI__builtin_ia32_pshufd256: |
13395 | case X86::BI__builtin_ia32_pshufd512: |
13396 | case X86::BI__builtin_ia32_vpermilpd: |
13397 | case X86::BI__builtin_ia32_vpermilps: |
13398 | case X86::BI__builtin_ia32_vpermilpd256: |
13399 | case X86::BI__builtin_ia32_vpermilps256: |
13400 | case X86::BI__builtin_ia32_vpermilpd512: |
13401 | case X86::BI__builtin_ia32_vpermilps512: { |
13402 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13403 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13404 | unsigned NumElts = Ty->getNumElements(); |
13405 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
13406 | unsigned NumLaneElts = NumElts / NumLanes; |
13407 | |
13408 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13409 | Imm = (Imm & 0xff) * 0x01010101; |
13410 | |
13411 | int Indices[16]; |
13412 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13413 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13414 | Indices[i + l] = (Imm % NumLaneElts) + l; |
13415 | Imm /= NumLaneElts; |
13416 | } |
13417 | } |
13418 | |
13419 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13420 | "permil"); |
13421 | } |
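// Worked example (illustrative): pshufd on v4i32 with Imm = 0x1B consumes
// two bits per element via Imm % 4 and Imm /= 4, producing Indices =
// {3, 2, 1, 0}, a full reversal of the single 128-bit lane; wider types
// repeat the pattern per lane, and vpermilpd uses one bit per element since
// its lanes hold only two 64-bit elements.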
13422 | case X86::BI__builtin_ia32_shufpd: |
13423 | case X86::BI__builtin_ia32_shufpd256: |
13424 | case X86::BI__builtin_ia32_shufpd512: |
13425 | case X86::BI__builtin_ia32_shufps: |
13426 | case X86::BI__builtin_ia32_shufps256: |
13427 | case X86::BI__builtin_ia32_shufps512: { |
13428 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13429 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13430 | unsigned NumElts = Ty->getNumElements(); |
13431 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
13432 | unsigned NumLaneElts = NumElts / NumLanes; |
13433 | |
13434 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13435 | Imm = (Imm & 0xff) * 0x01010101; |
13436 | |
13437 | int Indices[16]; |
13438 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13439 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13440 | unsigned Index = Imm % NumLaneElts; |
13441 | Imm /= NumLaneElts; |
13442 | if (i >= (NumLaneElts / 2)) |
13443 | Index += NumElts; |
13444 | Indices[l + i] = l + Index; |
13445 | } |
13446 | } |
13447 | |
13448 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13449 | makeArrayRef(Indices, NumElts), |
13450 | "shufp"); |
13451 | } |
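// Worked example (illustrative): shufps on v4f32 with Imm = 0x4E yields
// Indices = {2, 3, 4, 5}: the low half of the result comes from Ops[0]
// (elements 2, 3) and the high half from Ops[1] (elements 0, 1), matching
// the documented SHUFPS behaviour.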
13452 | case X86::BI__builtin_ia32_permdi256: |
13453 | case X86::BI__builtin_ia32_permdf256: |
13454 | case X86::BI__builtin_ia32_permdi512: |
13455 | case X86::BI__builtin_ia32_permdf512: { |
13456 | unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13457 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13458 | unsigned NumElts = Ty->getNumElements(); |
13459 | |
13460 | // These intrinsics operate on 256-bit lanes of four 64-bit elements. |
13461 | int Indices[8]; |
13462 | for (unsigned l = 0; l != NumElts; l += 4) |
13463 | for (unsigned i = 0; i != 4; ++i) |
13464 | Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3); |
13465 | |
13466 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13467 | "perm"); |
13468 | } |
13469 | case X86::BI__builtin_ia32_palignr128: |
13470 | case X86::BI__builtin_ia32_palignr256: |
13471 | case X86::BI__builtin_ia32_palignr512: { |
13472 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
13473 | |
13474 | unsigned NumElts = |
13475 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13476 | assert(NumElts % 16 == 0);
13477 | |
13478 | // If palignr is shifting the pair of vectors more than the size of two |
13479 | // lanes, emit zero. |
13480 | if (ShiftVal >= 32) |
13481 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
13482 | |
13483 | // If palignr is shifting the pair of input vectors more than one lane, |
13484 | // but less than two lanes, convert to shifting in zeroes. |
13485 | if (ShiftVal > 16) { |
13486 | ShiftVal -= 16; |
13487 | Ops[1] = Ops[0]; |
13488 | Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); |
13489 | } |
13490 | |
13491 | int Indices[64]; |
13492 | // 256/512-bit palignr operates on 128-bit lanes, so we need to handle that.
13493 | for (unsigned l = 0; l != NumElts; l += 16) { |
13494 | for (unsigned i = 0; i != 16; ++i) { |
13495 | unsigned Idx = ShiftVal + i; |
13496 | if (Idx >= 16) |
13497 | Idx += NumElts - 16; // End of lane, switch operand. |
13498 | Indices[l + i] = Idx + l; |
13499 | } |
13500 | } |
13501 | |
13502 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
13503 | makeArrayRef(Indices, NumElts), |
13504 | "palignr"); |
13505 | } |
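// Worked example (illustrative): for a 128-bit palignr with ShiftVal = 4,
// Indices = {4..15, 16..19}; since the shuffle operands are (Ops[1], Ops[0]),
// the result is bytes {b4..b15, a0..a3}, i.e. the concatenation a:b shifted
// right by four bytes.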
13506 | case X86::BI__builtin_ia32_alignd128: |
13507 | case X86::BI__builtin_ia32_alignd256: |
13508 | case X86::BI__builtin_ia32_alignd512: |
13509 | case X86::BI__builtin_ia32_alignq128: |
13510 | case X86::BI__builtin_ia32_alignq256: |
13511 | case X86::BI__builtin_ia32_alignq512: { |
13512 | unsigned NumElts = |
13513 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13514 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
13515 | |
13516 | // Mask the shift amount to the width of the vector.
13517 | ShiftVal &= NumElts - 1; |
13518 | |
13519 | int Indices[16]; |
13520 | for (unsigned i = 0; i != NumElts; ++i) |
13521 | Indices[i] = i + ShiftVal; |
13522 | |
13523 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
13524 | makeArrayRef(Indices, NumElts), |
13525 | "valign"); |
13526 | } |
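// Worked example (illustrative): valignd on v4i32 with ShiftVal = 1 gives
// Indices = {1, 2, 3, 4} over the pair (Ops[1], Ops[0]), so the result is
// {b1, b2, b3, a0}: the concatenation a:b shifted right by one element.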
13527 | case X86::BI__builtin_ia32_shuf_f32x4_256: |
13528 | case X86::BI__builtin_ia32_shuf_f64x2_256: |
13529 | case X86::BI__builtin_ia32_shuf_i32x4_256: |
13530 | case X86::BI__builtin_ia32_shuf_i64x2_256: |
13531 | case X86::BI__builtin_ia32_shuf_f32x4: |
13532 | case X86::BI__builtin_ia32_shuf_f64x2: |
13533 | case X86::BI__builtin_ia32_shuf_i32x4: |
13534 | case X86::BI__builtin_ia32_shuf_i64x2: { |
13535 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13536 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13537 | unsigned NumElts = Ty->getNumElements(); |
13538 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; |
13539 | unsigned NumLaneElts = NumElts / NumLanes; |
13540 | |
13541 | int Indices[16]; |
13542 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13543 | unsigned Index = (Imm % NumLanes) * NumLaneElts; |
13544 | Imm /= NumLanes; // Discard the bits we just used. |
13545 | if (l >= (NumElts / 2)) |
13546 | Index += NumElts; // Switch to other source. |
13547 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13548 | Indices[l + i] = Index + i; |
13549 | } |
13550 | } |
13551 | |
13552 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13553 | makeArrayRef(Indices, NumElts), |
13554 | "shuf"); |
13555 | } |
13556 | |
13557 | case X86::BI__builtin_ia32_vperm2f128_pd256: |
13558 | case X86::BI__builtin_ia32_vperm2f128_ps256: |
13559 | case X86::BI__builtin_ia32_vperm2f128_si256: |
13560 | case X86::BI__builtin_ia32_permti256: { |
13561 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13562 | unsigned NumElts = |
13563 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13564 | |
13565 | // This takes a very simple approach: since there are two lanes and a
13566 | // shuffle can have two inputs, we reserve the first input for the first
13567 | // lane and the second input for the second lane. This may result in
13568 | // duplicate sources, but the backend can deal with that.
13569 | |
13570 | Value *OutOps[2]; |
13571 | int Indices[8]; |
13572 | for (unsigned l = 0; l != 2; ++l) { |
13573 | // Determine the source for this lane. |
13574 | if (Imm & (1 << ((l * 4) + 3))) |
13575 | OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType()); |
13576 | else if (Imm & (1 << ((l * 4) + 1))) |
13577 | OutOps[l] = Ops[1]; |
13578 | else |
13579 | OutOps[l] = Ops[0]; |
13580 | |
13581 | for (unsigned i = 0; i != NumElts/2; ++i) { |
13582 | // Start with ith element of the source for this lane. |
13583 | unsigned Idx = (l * NumElts) + i; |
13584 | // If bit 0 of the immediate half is set, switch to the high half of |
13585 | // the source. |
13586 | if (Imm & (1 << (l * 4))) |
13587 | Idx += NumElts/2; |
13588 | Indices[(l * (NumElts/2)) + i] = Idx; |
13589 | } |
13590 | } |
13591 | |
13592 | return Builder.CreateShuffleVector(OutOps[0], OutOps[1], |
13593 | makeArrayRef(Indices, NumElts), |
13594 | "vperm"); |
13595 | } |
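// Worked example (illustrative): _mm256_permute2f128_si256(a, b, 0x31) has
// bit 0 and bit 4 set (take the high half of each selected source), with the
// source-select bits choosing a for lane 0 and b for lane 1, so the result
// is {a.hi, b.hi}; a set bit 3 or bit 7 would zero that lane instead.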
13596 | |
13597 | case X86::BI__builtin_ia32_pslldqi128_byteshift: |
13598 | case X86::BI__builtin_ia32_pslldqi256_byteshift: |
13599 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { |
13600 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13601 | auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13602 | // The builtin type is vXi64, so multiply the element count by 8 to get bytes.
13603 | unsigned NumElts = ResultType->getNumElements() * 8; |
13604 | |
13605 | // If pslldq is shifting the vector more than 15 bytes, emit zero. |
13606 | if (ShiftVal >= 16) |
13607 | return llvm::Constant::getNullValue(ResultType); |
13608 | |
13609 | int Indices[64]; |
13610 | // 256/512-bit pslldq operates on 128-bit lanes, so we need to handle that.
13611 | for (unsigned l = 0; l != NumElts; l += 16) { |
13612 | for (unsigned i = 0; i != 16; ++i) { |
13613 | unsigned Idx = NumElts + i - ShiftVal; |
13614 | if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. |
13615 | Indices[l + i] = Idx + l; |
13616 | } |
13617 | } |
13618 | |
13619 | auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); |
13620 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
13621 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
13622 | Value *SV = Builder.CreateShuffleVector(Zero, Cast, |
13623 | makeArrayRef(Indices, NumElts), |
13624 | "pslldq"); |
13625 | return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); |
13626 | } |
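// Worked example (illustrative): a 128-bit pslldq with ShiftVal = 2 shuffles
// (Zero, Cast) so that result byte i is 0 for i < 2 and src[i - 2] otherwise:
// a left shift by two bytes with zeroes shifted in, applied per 128-bit lane
// for the wider types.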
13627 | case X86::BI__builtin_ia32_psrldqi128_byteshift: |
13628 | case X86::BI__builtin_ia32_psrldqi256_byteshift: |
13629 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { |
13630 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13631 | auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13632 | // The builtin type is vXi64, so multiply the element count by 8 to get bytes.
13633 | unsigned NumElts = ResultType->getNumElements() * 8; |
13634 | |
13635 | // If psrldq is shifting the vector more than 15 bytes, emit zero. |
13636 | if (ShiftVal >= 16) |
13637 | return llvm::Constant::getNullValue(ResultType); |
13638 | |
13639 | int Indices[64]; |
13640 | // 256/512-bit psrldq operates on 128-bit lanes, so we need to handle that.
13641 | for (unsigned l = 0; l != NumElts; l += 16) { |
13642 | for (unsigned i = 0; i != 16; ++i) { |
13643 | unsigned Idx = i + ShiftVal; |
13644 | if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. |
13645 | Indices[l + i] = Idx + l; |
13646 | } |
13647 | } |
13648 | |
13649 | auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); |
13650 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
13651 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
13652 | Value *SV = Builder.CreateShuffleVector(Cast, Zero, |
13653 | makeArrayRef(Indices, NumElts), |
13654 | "psrldq"); |
13655 | return Builder.CreateBitCast(SV, ResultType, "cast"); |
13656 | } |
13657 | case X86::BI__builtin_ia32_kshiftliqi: |
13658 | case X86::BI__builtin_ia32_kshiftlihi: |
13659 | case X86::BI__builtin_ia32_kshiftlisi: |
13660 | case X86::BI__builtin_ia32_kshiftlidi: { |
13661 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13662 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13663 | |
13664 | if (ShiftVal >= NumElts) |
13665 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
13666 | |
13667 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
13668 | |
13669 | int Indices[64]; |
13670 | for (unsigned i = 0; i != NumElts; ++i) |
13671 | Indices[i] = NumElts + i - ShiftVal; |
13672 | |
13673 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
13674 | Value *SV = Builder.CreateShuffleVector(Zero, In, |
13675 | makeArrayRef(Indices, NumElts), |
13676 | "kshiftl"); |
13677 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
13678 | } |
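// Worked example (illustrative): kshiftliqi with ShiftVal = 2 on an 8-bit
// mask builds Indices = {6, 7, 8, 9, 10, 11, 12, 13} over (Zero, In), so
// result bit i is 0 for i < 2 and In[i - 2] otherwise, i.e. the mask
// shifted left by two.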
13679 | case X86::BI__builtin_ia32_kshiftriqi: |
13680 | case X86::BI__builtin_ia32_kshiftrihi: |
13681 | case X86::BI__builtin_ia32_kshiftrisi: |
13682 | case X86::BI__builtin_ia32_kshiftridi: { |
13683 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13684 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13685 | |
13686 | if (ShiftVal >= NumElts) |
13687 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
13688 | |
13689 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
13690 | |
13691 | int Indices[64]; |
13692 | for (unsigned i = 0; i != NumElts; ++i) |
13693 | Indices[i] = i + ShiftVal; |
13694 | |
13695 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
13696 | Value *SV = Builder.CreateShuffleVector(In, Zero, |
13697 | makeArrayRef(Indices, NumElts), |
13698 | "kshiftr"); |
13699 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
13700 | } |
13701 | case X86::BI__builtin_ia32_movnti: |
13702 | case X86::BI__builtin_ia32_movnti64: |
13703 | case X86::BI__builtin_ia32_movntsd: |
13704 | case X86::BI__builtin_ia32_movntss: { |
13705 | llvm::MDNode *Node = llvm::MDNode::get( |
13706 | getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
13707 | |
13708 | Value *Ptr = Ops[0]; |
13709 | Value *Src = Ops[1]; |
13710 | |
13711 | // Extract the 0th element of the source vector.
13712 | if (BuiltinID == X86::BI__builtin_ia32_movntsd || |
13713 | BuiltinID == X86::BI__builtin_ia32_movntss) |
13714 | Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract"); |
13715 | |
13716 | // Convert the type of the pointer to a pointer to the stored type. |
13717 | Value *BC = Builder.CreateBitCast( |
13718 | Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast"); |
13719 | |
13720 | // Unaligned nontemporal store of the scalar value. |
13721 | StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC); |
13722 | SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); |
13723 | SI->setAlignment(llvm::Align(1)); |
13724 | return SI; |
13725 | } |
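// Illustrative IR (assuming an i32 movnti): the code above emits
//   store i32 %src, i32* %ptr, align 1, !nontemporal !0
// where !0 = !{i32 1}, i.e. an unaligned store tagged so the backend can
// select a non-temporal (cache-bypassing) instruction.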
13726 | // Rotate is a special case of funnel shift: the first two args are the same.
13727 | case X86::BI__builtin_ia32_vprotb: |
13728 | case X86::BI__builtin_ia32_vprotw: |
13729 | case X86::BI__builtin_ia32_vprotd: |
13730 | case X86::BI__builtin_ia32_vprotq: |
13731 | case X86::BI__builtin_ia32_vprotbi: |
13732 | case X86::BI__builtin_ia32_vprotwi: |
13733 | case X86::BI__builtin_ia32_vprotdi: |
13734 | case X86::BI__builtin_ia32_vprotqi: |
13735 | case X86::BI__builtin_ia32_prold128: |
13736 | case X86::BI__builtin_ia32_prold256: |
13737 | case X86::BI__builtin_ia32_prold512: |
13738 | case X86::BI__builtin_ia32_prolq128: |
13739 | case X86::BI__builtin_ia32_prolq256: |
13740 | case X86::BI__builtin_ia32_prolq512: |
13741 | case X86::BI__builtin_ia32_prolvd128: |
13742 | case X86::BI__builtin_ia32_prolvd256: |
13743 | case X86::BI__builtin_ia32_prolvd512: |
13744 | case X86::BI__builtin_ia32_prolvq128: |
13745 | case X86::BI__builtin_ia32_prolvq256: |
13746 | case X86::BI__builtin_ia32_prolvq512: |
13747 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false); |
13748 | case X86::BI__builtin_ia32_prord128: |
13749 | case X86::BI__builtin_ia32_prord256: |
13750 | case X86::BI__builtin_ia32_prord512: |
13751 | case X86::BI__builtin_ia32_prorq128: |
13752 | case X86::BI__builtin_ia32_prorq256: |
13753 | case X86::BI__builtin_ia32_prorq512: |
13754 | case X86::BI__builtin_ia32_prorvd128: |
13755 | case X86::BI__builtin_ia32_prorvd256: |
13756 | case X86::BI__builtin_ia32_prorvd512: |
13757 | case X86::BI__builtin_ia32_prorvq128: |
13758 | case X86::BI__builtin_ia32_prorvq256: |
13759 | case X86::BI__builtin_ia32_prorvq512: |
13760 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true); |
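// A rotate left by n is fshl(x, x, n): the bits shifted out of the top of
// the first copy re-enter from the second, identical copy. For example
// (illustrative), prold128(x, 5) becomes a call to @llvm.fshl.v4i32 with
// both value operands equal to x and a splatted shift amount of 5.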
13761 | case X86::BI__builtin_ia32_selectb_128: |
13762 | case X86::BI__builtin_ia32_selectb_256: |
13763 | case X86::BI__builtin_ia32_selectb_512: |
13764 | case X86::BI__builtin_ia32_selectw_128: |
13765 | case X86::BI__builtin_ia32_selectw_256: |
13766 | case X86::BI__builtin_ia32_selectw_512: |
13767 | case X86::BI__builtin_ia32_selectd_128: |
13768 | case X86::BI__builtin_ia32_selectd_256: |
13769 | case X86::BI__builtin_ia32_selectd_512: |
13770 | case X86::BI__builtin_ia32_selectq_128: |
13771 | case X86::BI__builtin_ia32_selectq_256: |
13772 | case X86::BI__builtin_ia32_selectq_512: |
13773 | case X86::BI__builtin_ia32_selectph_128: |
13774 | case X86::BI__builtin_ia32_selectph_256: |
13775 | case X86::BI__builtin_ia32_selectph_512: |
13776 | case X86::BI__builtin_ia32_selectps_128: |
13777 | case X86::BI__builtin_ia32_selectps_256: |
13778 | case X86::BI__builtin_ia32_selectps_512: |
13779 | case X86::BI__builtin_ia32_selectpd_128: |
13780 | case X86::BI__builtin_ia32_selectpd_256: |
13781 | case X86::BI__builtin_ia32_selectpd_512: |
13782 | return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]); |
13783 | case X86::BI__builtin_ia32_selectsh_128: |
13784 | case X86::BI__builtin_ia32_selectss_128: |
13785 | case X86::BI__builtin_ia32_selectsd_128: { |
13786 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
13787 | Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
13788 | A = EmitX86ScalarSelect(*this, Ops[0], A, B); |
13789 | return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0); |
13790 | } |
13791 | case X86::BI__builtin_ia32_cmpb128_mask: |
13792 | case X86::BI__builtin_ia32_cmpb256_mask: |
13793 | case X86::BI__builtin_ia32_cmpb512_mask: |
13794 | case X86::BI__builtin_ia32_cmpw128_mask: |
13795 | case X86::BI__builtin_ia32_cmpw256_mask: |
13796 | case X86::BI__builtin_ia32_cmpw512_mask: |
13797 | case X86::BI__builtin_ia32_cmpd128_mask: |
13798 | case X86::BI__builtin_ia32_cmpd256_mask: |
13799 | case X86::BI__builtin_ia32_cmpd512_mask: |
13800 | case X86::BI__builtin_ia32_cmpq128_mask: |
13801 | case X86::BI__builtin_ia32_cmpq256_mask: |
13802 | case X86::BI__builtin_ia32_cmpq512_mask: { |
13803 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
13804 | return EmitX86MaskedCompare(*this, CC, true, Ops); |
13805 | } |
13806 | case X86::BI__builtin_ia32_ucmpb128_mask: |
13807 | case X86::BI__builtin_ia32_ucmpb256_mask: |
13808 | case X86::BI__builtin_ia32_ucmpb512_mask: |
13809 | case X86::BI__builtin_ia32_ucmpw128_mask: |
13810 | case X86::BI__builtin_ia32_ucmpw256_mask: |
13811 | case X86::BI__builtin_ia32_ucmpw512_mask: |
13812 | case X86::BI__builtin_ia32_ucmpd128_mask: |
13813 | case X86::BI__builtin_ia32_ucmpd256_mask: |
13814 | case X86::BI__builtin_ia32_ucmpd512_mask: |
13815 | case X86::BI__builtin_ia32_ucmpq128_mask: |
13816 | case X86::BI__builtin_ia32_ucmpq256_mask: |
13817 | case X86::BI__builtin_ia32_ucmpq512_mask: { |
13818 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
13819 | return EmitX86MaskedCompare(*this, CC, false, Ops); |
13820 | } |
13821 | case X86::BI__builtin_ia32_vpcomb: |
13822 | case X86::BI__builtin_ia32_vpcomw: |
13823 | case X86::BI__builtin_ia32_vpcomd: |
13824 | case X86::BI__builtin_ia32_vpcomq: |
13825 | return EmitX86vpcom(*this, Ops, true); |
13826 | case X86::BI__builtin_ia32_vpcomub: |
13827 | case X86::BI__builtin_ia32_vpcomuw: |
13828 | case X86::BI__builtin_ia32_vpcomud: |
13829 | case X86::BI__builtin_ia32_vpcomuq: |
13830 | return EmitX86vpcom(*this, Ops, false); |
13831 | |
13832 | case X86::BI__builtin_ia32_kortestcqi: |
13833 | case X86::BI__builtin_ia32_kortestchi: |
13834 | case X86::BI__builtin_ia32_kortestcsi: |
13835 | case X86::BI__builtin_ia32_kortestcdi: { |
13836 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
13837 | Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType()); |
13838 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
13839 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
13840 | } |
13841 | case X86::BI__builtin_ia32_kortestzqi: |
13842 | case X86::BI__builtin_ia32_kortestzhi: |
13843 | case X86::BI__builtin_ia32_kortestzsi: |
13844 | case X86::BI__builtin_ia32_kortestzdi: { |
13845 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
13846 | Value *C = llvm::Constant::getNullValue(Ops[0]->getType()); |
13847 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
13848 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
13849 | } |
13850 | |
13851 | case X86::BI__builtin_ia32_ktestcqi: |
13852 | case X86::BI__builtin_ia32_ktestzqi: |
13853 | case X86::BI__builtin_ia32_ktestchi: |
13854 | case X86::BI__builtin_ia32_ktestzhi: |
13855 | case X86::BI__builtin_ia32_ktestcsi: |
13856 | case X86::BI__builtin_ia32_ktestzsi: |
13857 | case X86::BI__builtin_ia32_ktestcdi: |
13858 | case X86::BI__builtin_ia32_ktestzdi: { |
13859 | Intrinsic::ID IID; |
13860 | switch (BuiltinID) { |
13861 | default: llvm_unreachable("Unsupported intrinsic!");
13862 | case X86::BI__builtin_ia32_ktestcqi: |
13863 | IID = Intrinsic::x86_avx512_ktestc_b; |
13864 | break; |
13865 | case X86::BI__builtin_ia32_ktestzqi: |
13866 | IID = Intrinsic::x86_avx512_ktestz_b; |
13867 | break; |
13868 | case X86::BI__builtin_ia32_ktestchi: |
13869 | IID = Intrinsic::x86_avx512_ktestc_w; |
13870 | break; |
13871 | case X86::BI__builtin_ia32_ktestzhi: |
13872 | IID = Intrinsic::x86_avx512_ktestz_w; |
13873 | break; |
13874 | case X86::BI__builtin_ia32_ktestcsi: |
13875 | IID = Intrinsic::x86_avx512_ktestc_d; |
13876 | break; |
13877 | case X86::BI__builtin_ia32_ktestzsi: |
13878 | IID = Intrinsic::x86_avx512_ktestz_d; |
13879 | break; |
13880 | case X86::BI__builtin_ia32_ktestcdi: |
13881 | IID = Intrinsic::x86_avx512_ktestc_q; |
13882 | break; |
13883 | case X86::BI__builtin_ia32_ktestzdi: |
13884 | IID = Intrinsic::x86_avx512_ktestz_q; |
13885 | break; |
13886 | } |
13887 | |
13888 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13889 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
13890 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
13891 | Function *Intr = CGM.getIntrinsic(IID); |
13892 | return Builder.CreateCall(Intr, {LHS, RHS}); |
13893 | } |
13894 | |
13895 | case X86::BI__builtin_ia32_kaddqi: |
13896 | case X86::BI__builtin_ia32_kaddhi: |
13897 | case X86::BI__builtin_ia32_kaddsi: |
13898 | case X86::BI__builtin_ia32_kadddi: { |
13899 | Intrinsic::ID IID; |
13900 | switch (BuiltinID) { |
13901 | default: llvm_unreachable("Unsupported intrinsic!");
13902 | case X86::BI__builtin_ia32_kaddqi: |
13903 | IID = Intrinsic::x86_avx512_kadd_b; |
13904 | break; |
13905 | case X86::BI__builtin_ia32_kaddhi: |
13906 | IID = Intrinsic::x86_avx512_kadd_w; |
13907 | break; |
13908 | case X86::BI__builtin_ia32_kaddsi: |
13909 | IID = Intrinsic::x86_avx512_kadd_d; |
13910 | break; |
13911 | case X86::BI__builtin_ia32_kadddi: |
13912 | IID = Intrinsic::x86_avx512_kadd_q; |
13913 | break; |
13914 | } |
13915 | |
13916 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13917 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
13918 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
13919 | Function *Intr = CGM.getIntrinsic(IID); |
13920 | Value *Res = Builder.CreateCall(Intr, {LHS, RHS}); |
13921 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
13922 | } |
13923 | case X86::BI__builtin_ia32_kandqi: |
13924 | case X86::BI__builtin_ia32_kandhi: |
13925 | case X86::BI__builtin_ia32_kandsi: |
13926 | case X86::BI__builtin_ia32_kanddi: |
13927 | return EmitX86MaskLogic(*this, Instruction::And, Ops); |
13928 | case X86::BI__builtin_ia32_kandnqi: |
13929 | case X86::BI__builtin_ia32_kandnhi: |
13930 | case X86::BI__builtin_ia32_kandnsi: |
13931 | case X86::BI__builtin_ia32_kandndi: |
13932 | return EmitX86MaskLogic(*this, Instruction::And, Ops, true); |
13933 | case X86::BI__builtin_ia32_korqi: |
13934 | case X86::BI__builtin_ia32_korhi: |
13935 | case X86::BI__builtin_ia32_korsi: |
13936 | case X86::BI__builtin_ia32_kordi: |
13937 | return EmitX86MaskLogic(*this, Instruction::Or, Ops); |
13938 | case X86::BI__builtin_ia32_kxnorqi: |
13939 | case X86::BI__builtin_ia32_kxnorhi: |
13940 | case X86::BI__builtin_ia32_kxnorsi: |
13941 | case X86::BI__builtin_ia32_kxnordi: |
13942 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true); |
13943 | case X86::BI__builtin_ia32_kxorqi: |
13944 | case X86::BI__builtin_ia32_kxorhi: |
13945 | case X86::BI__builtin_ia32_kxorsi: |
13946 | case X86::BI__builtin_ia32_kxordi: |
13947 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops); |
13948 | case X86::BI__builtin_ia32_knotqi: |
13949 | case X86::BI__builtin_ia32_knothi: |
13950 | case X86::BI__builtin_ia32_knotsi: |
13951 | case X86::BI__builtin_ia32_knotdi: { |
13952 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13953 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
13954 | return Builder.CreateBitCast(Builder.CreateNot(Res), |
13955 | Ops[0]->getType()); |
13956 | } |
13957 | case X86::BI__builtin_ia32_kmovb: |
13958 | case X86::BI__builtin_ia32_kmovw: |
13959 | case X86::BI__builtin_ia32_kmovd: |
13960 | case X86::BI__builtin_ia32_kmovq: { |
13961 | // Bitcast to vXi1 type and then back to integer. This gets the mask |
13962 | // register type into the IR, but might be optimized out depending on |
13963 | // what's around it. |
13964 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13965 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
13966 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
13967 | } |
13968 | |
13969 | case X86::BI__builtin_ia32_kunpckdi: |
13970 | case X86::BI__builtin_ia32_kunpcksi: |
13971 | case X86::BI__builtin_ia32_kunpckhi: { |
13972 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13973 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
13974 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
13975 | int Indices[64]; |
13976 | for (unsigned i = 0; i != NumElts; ++i) |
13977 | Indices[i] = i; |
13978 | |
13979 | // First extract the low half of each vector. This gives better codegen
13980 | // than doing it in a single shuffle.
13981 | LHS = Builder.CreateShuffleVector(LHS, LHS, |
13982 | makeArrayRef(Indices, NumElts / 2)); |
13983 | RHS = Builder.CreateShuffleVector(RHS, RHS, |
13984 | makeArrayRef(Indices, NumElts / 2)); |
13985 | // Concat the vectors. |
13986 | // NOTE: Operands are swapped to match the intrinsic definition. |
13987 | Value *Res = Builder.CreateShuffleVector(RHS, LHS, |
13988 | makeArrayRef(Indices, NumElts)); |
13989 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
13990 | } |
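// Worked example (illustrative, per the Intel docs for KUNPCKBW):
// __builtin_ia32_kunpckhi(a, b) produces ((a & 0xff) << 8) | (b & 0xff);
// the swapped shuffle operands put Ops[1]'s low half in the low mask bits
// and Ops[0]'s low half in the high mask bits.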
13991 | |
13992 | case X86::BI__builtin_ia32_vplzcntd_128: |
13993 | case X86::BI__builtin_ia32_vplzcntd_256: |
13994 | case X86::BI__builtin_ia32_vplzcntd_512: |
13995 | case X86::BI__builtin_ia32_vplzcntq_128: |
13996 | case X86::BI__builtin_ia32_vplzcntq_256: |
13997 | case X86::BI__builtin_ia32_vplzcntq_512: { |
13998 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
13999 | return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}); |
14000 | } |
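// The i1 false argument requests defined behaviour on a zero input: ctlz
// then returns the element bit width, matching VPLZCNT. For example
// (illustrative), vplzcntd on <4 x i32> with elements 0 and 1 yields 32
// and 31 respectively.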
14001 | case X86::BI__builtin_ia32_sqrtss: |
14002 | case X86::BI__builtin_ia32_sqrtsd: { |
14003 | Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
14004 | Function *F; |
14005 | if (Builder.getIsFPConstrained()) { |
14006 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14007 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14008 | A->getType()); |
14009 | A = Builder.CreateConstrainedFPCall(F, {A}); |
14010 | } else { |
14011 | F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
14012 | A = Builder.CreateCall(F, {A}); |
14013 | } |
14014 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
14015 | } |
14016 | case X86::BI__builtin_ia32_sqrtsh_round_mask: |
14017 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
14018 | case X86::BI__builtin_ia32_sqrtss_round_mask: { |
14019 | unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
14020 | // Lower to a plain sqrt only if the rounding mode is 4 (i.e. CUR_DIRECTION);
14021 | // otherwise keep the target-specific intrinsic.
14022 | if (CC != 4) { |
14023 | Intrinsic::ID IID; |
14024 | |
14025 | switch (BuiltinID) { |
14026 | default: |
14027 | llvm_unreachable("Unsupported intrinsic!");
14028 | case X86::BI__builtin_ia32_sqrtsh_round_mask: |
14029 | IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh; |
14030 | break; |
14031 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
14032 | IID = Intrinsic::x86_avx512_mask_sqrt_sd; |
14033 | break; |
14034 | case X86::BI__builtin_ia32_sqrtss_round_mask: |
14035 | IID = Intrinsic::x86_avx512_mask_sqrt_ss; |
14036 | break; |
14037 | } |
14038 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14039 | } |
14040 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
14041 | Function *F; |
14042 | if (Builder.getIsFPConstrained()) { |
14043 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14044 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14045 | A->getType()); |
14046 | A = Builder.CreateConstrainedFPCall(F, A); |
14047 | } else { |
14048 | F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
14049 | A = Builder.CreateCall(F, A); |
14050 | } |
14051 | Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
14052 | A = EmitX86ScalarSelect(*this, Ops[3], A, Src); |
14053 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
14054 | } |
14055 | case X86::BI__builtin_ia32_sqrtpd256: |
14056 | case X86::BI__builtin_ia32_sqrtpd: |
14057 | case X86::BI__builtin_ia32_sqrtps256: |
14058 | case X86::BI__builtin_ia32_sqrtps: |
14059 | case X86::BI__builtin_ia32_sqrtph256: |
14060 | case X86::BI__builtin_ia32_sqrtph: |
14061 | case X86::BI__builtin_ia32_sqrtph512: |
14062 | case X86::BI__builtin_ia32_sqrtps512: |
14063 | case X86::BI__builtin_ia32_sqrtpd512: { |
14064 | if (Ops.size() == 2) { |
14065 | unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
14066 | // Lower to a plain sqrt only if the rounding mode is 4 (i.e. CUR_DIRECTION);
14067 | // otherwise keep the target-specific intrinsic.
14068 | if (CC != 4) { |
14069 | Intrinsic::ID IID; |
14070 | |
14071 | switch (BuiltinID) { |
14072 | default: |
14073 | llvm_unreachable("Unsupported intrinsic!");
14074 | case X86::BI__builtin_ia32_sqrtph512: |
14075 | IID = Intrinsic::x86_avx512fp16_sqrt_ph_512; |
14076 | break; |
14077 | case X86::BI__builtin_ia32_sqrtps512: |
14078 | IID = Intrinsic::x86_avx512_sqrt_ps_512; |
14079 | break; |
14080 | case X86::BI__builtin_ia32_sqrtpd512: |
14081 | IID = Intrinsic::x86_avx512_sqrt_pd_512; |
14082 | break; |
14083 | } |
14084 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14085 | } |
14086 | } |
14087 | if (Builder.getIsFPConstrained()) { |
14088 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14089 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14090 | Ops[0]->getType()); |
14091 | return Builder.CreateConstrainedFPCall(F, Ops[0]); |
14092 | } else { |
14093 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); |
14094 | return Builder.CreateCall(F, Ops[0]); |
14095 | } |
14096 | } |
14097 | case X86::BI__builtin_ia32_pabsb128: |
14098 | case X86::BI__builtin_ia32_pabsw128: |
14099 | case X86::BI__builtin_ia32_pabsd128: |
14100 | case X86::BI__builtin_ia32_pabsb256: |
14101 | case X86::BI__builtin_ia32_pabsw256: |
14102 | case X86::BI__builtin_ia32_pabsd256: |
14103 | case X86::BI__builtin_ia32_pabsq128: |
14104 | case X86::BI__builtin_ia32_pabsq256: |
14105 | case X86::BI__builtin_ia32_pabsb512: |
14106 | case X86::BI__builtin_ia32_pabsw512: |
14107 | case X86::BI__builtin_ia32_pabsd512: |
14108 | case X86::BI__builtin_ia32_pabsq512: { |
14109 | Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType()); |
14110 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
14111 | } |
14112 | case X86::BI__builtin_ia32_pmaxsb128: |
14113 | case X86::BI__builtin_ia32_pmaxsw128: |
14114 | case X86::BI__builtin_ia32_pmaxsd128: |
14115 | case X86::BI__builtin_ia32_pmaxsq128: |
14116 | case X86::BI__builtin_ia32_pmaxsb256: |
14117 | case X86::BI__builtin_ia32_pmaxsw256: |
14118 | case X86::BI__builtin_ia32_pmaxsd256: |
14119 | case X86::BI__builtin_ia32_pmaxsq256: |
14120 | case X86::BI__builtin_ia32_pmaxsb512: |
14121 | case X86::BI__builtin_ia32_pmaxsw512: |
14122 | case X86::BI__builtin_ia32_pmaxsd512: |
14123 | case X86::BI__builtin_ia32_pmaxsq512: |
14124 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax); |
14125 | case X86::BI__builtin_ia32_pmaxub128: |
14126 | case X86::BI__builtin_ia32_pmaxuw128: |
14127 | case X86::BI__builtin_ia32_pmaxud128: |
14128 | case X86::BI__builtin_ia32_pmaxuq128: |
14129 | case X86::BI__builtin_ia32_pmaxub256: |
14130 | case X86::BI__builtin_ia32_pmaxuw256: |
14131 | case X86::BI__builtin_ia32_pmaxud256: |
14132 | case X86::BI__builtin_ia32_pmaxuq256: |
14133 | case X86::BI__builtin_ia32_pmaxub512: |
14134 | case X86::BI__builtin_ia32_pmaxuw512: |
14135 | case X86::BI__builtin_ia32_pmaxud512: |
14136 | case X86::BI__builtin_ia32_pmaxuq512: |
14137 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax); |
14138 | case X86::BI__builtin_ia32_pminsb128: |
14139 | case X86::BI__builtin_ia32_pminsw128: |
14140 | case X86::BI__builtin_ia32_pminsd128: |
14141 | case X86::BI__builtin_ia32_pminsq128: |
14142 | case X86::BI__builtin_ia32_pminsb256: |
14143 | case X86::BI__builtin_ia32_pminsw256: |
14144 | case X86::BI__builtin_ia32_pminsd256: |
14145 | case X86::BI__builtin_ia32_pminsq256: |
14146 | case X86::BI__builtin_ia32_pminsb512: |
14147 | case X86::BI__builtin_ia32_pminsw512: |
14148 | case X86::BI__builtin_ia32_pminsd512: |
14149 | case X86::BI__builtin_ia32_pminsq512: |
14150 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin); |
14151 | case X86::BI__builtin_ia32_pminub128: |
14152 | case X86::BI__builtin_ia32_pminuw128: |
14153 | case X86::BI__builtin_ia32_pminud128: |
14154 | case X86::BI__builtin_ia32_pminuq128: |
14155 | case X86::BI__builtin_ia32_pminub256: |
14156 | case X86::BI__builtin_ia32_pminuw256: |
14157 | case X86::BI__builtin_ia32_pminud256: |
14158 | case X86::BI__builtin_ia32_pminuq256: |
14159 | case X86::BI__builtin_ia32_pminub512: |
14160 | case X86::BI__builtin_ia32_pminuw512: |
14161 | case X86::BI__builtin_ia32_pminud512: |
14162 | case X86::BI__builtin_ia32_pminuq512: |
14163 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin); |
14164 | |
14165 | case X86::BI__builtin_ia32_pmuludq128: |
14166 | case X86::BI__builtin_ia32_pmuludq256: |
14167 | case X86::BI__builtin_ia32_pmuludq512: |
14168 | return EmitX86Muldq(*this, /*IsSigned*/false, Ops); |
14169 | |
14170 | case X86::BI__builtin_ia32_pmuldq128: |
14171 | case X86::BI__builtin_ia32_pmuldq256: |
14172 | case X86::BI__builtin_ia32_pmuldq512: |
14173 | return EmitX86Muldq(*this, /*IsSigned*/true, Ops); |
14174 | |
14175 | case X86::BI__builtin_ia32_pternlogd512_mask: |
14176 | case X86::BI__builtin_ia32_pternlogq512_mask: |
14177 | case X86::BI__builtin_ia32_pternlogd128_mask: |
14178 | case X86::BI__builtin_ia32_pternlogd256_mask: |
14179 | case X86::BI__builtin_ia32_pternlogq128_mask: |
14180 | case X86::BI__builtin_ia32_pternlogq256_mask: |
14181 | return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops); |
14182 | |
14183 | case X86::BI__builtin_ia32_pternlogd512_maskz: |
14184 | case X86::BI__builtin_ia32_pternlogq512_maskz: |
14185 | case X86::BI__builtin_ia32_pternlogd128_maskz: |
14186 | case X86::BI__builtin_ia32_pternlogd256_maskz: |
14187 | case X86::BI__builtin_ia32_pternlogq128_maskz: |
14188 | case X86::BI__builtin_ia32_pternlogq256_maskz: |
14189 | return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops); |
14190 | |
14191 | case X86::BI__builtin_ia32_vpshldd128: |
14192 | case X86::BI__builtin_ia32_vpshldd256: |
14193 | case X86::BI__builtin_ia32_vpshldd512: |
14194 | case X86::BI__builtin_ia32_vpshldq128: |
14195 | case X86::BI__builtin_ia32_vpshldq256: |
14196 | case X86::BI__builtin_ia32_vpshldq512: |
14197 | case X86::BI__builtin_ia32_vpshldw128: |
14198 | case X86::BI__builtin_ia32_vpshldw256: |
14199 | case X86::BI__builtin_ia32_vpshldw512: |
14200 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
14201 | |
14202 | case X86::BI__builtin_ia32_vpshrdd128: |
14203 | case X86::BI__builtin_ia32_vpshrdd256: |
14204 | case X86::BI__builtin_ia32_vpshrdd512: |
14205 | case X86::BI__builtin_ia32_vpshrdq128: |
14206 | case X86::BI__builtin_ia32_vpshrdq256: |
14207 | case X86::BI__builtin_ia32_vpshrdq512: |
14208 | case X86::BI__builtin_ia32_vpshrdw128: |
14209 | case X86::BI__builtin_ia32_vpshrdw256: |
14210 | case X86::BI__builtin_ia32_vpshrdw512: |
14211 | // Ops 0 and 1 are swapped. |
14212 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
14213 | |
14214 | case X86::BI__builtin_ia32_vpshldvd128: |
14215 | case X86::BI__builtin_ia32_vpshldvd256: |
14216 | case X86::BI__builtin_ia32_vpshldvd512: |
14217 | case X86::BI__builtin_ia32_vpshldvq128: |
14218 | case X86::BI__builtin_ia32_vpshldvq256: |
14219 | case X86::BI__builtin_ia32_vpshldvq512: |
14220 | case X86::BI__builtin_ia32_vpshldvw128: |
14221 | case X86::BI__builtin_ia32_vpshldvw256: |
14222 | case X86::BI__builtin_ia32_vpshldvw512: |
14223 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
14224 | |
14225 | case X86::BI__builtin_ia32_vpshrdvd128: |
14226 | case X86::BI__builtin_ia32_vpshrdvd256: |
14227 | case X86::BI__builtin_ia32_vpshrdvd512: |
14228 | case X86::BI__builtin_ia32_vpshrdvq128: |
14229 | case X86::BI__builtin_ia32_vpshrdvq256: |
14230 | case X86::BI__builtin_ia32_vpshrdvq512: |
14231 | case X86::BI__builtin_ia32_vpshrdvw128: |
14232 | case X86::BI__builtin_ia32_vpshrdvw256: |
14233 | case X86::BI__builtin_ia32_vpshrdvw512: |
14234 | // Ops 0 and 1 are swapped. |
14235 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
14236 | |
14237 | // Reductions |
14238 | case X86::BI__builtin_ia32_reduce_add_d512: |
14239 | case X86::BI__builtin_ia32_reduce_add_q512: { |
14240 | Function *F = |
14241 | CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType()); |
14242 | return Builder.CreateCall(F, {Ops[0]}); |
14243 | } |
14244 | case X86::BI__builtin_ia32_reduce_and_d512: |
14245 | case X86::BI__builtin_ia32_reduce_and_q512: { |
14246 | Function *F = |
14247 | CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType()); |
14248 | return Builder.CreateCall(F, {Ops[0]}); |
14249 | } |
14250 | case X86::BI__builtin_ia32_reduce_fadd_pd512: |
14251 | case X86::BI__builtin_ia32_reduce_fadd_ps512: |
14252 | case X86::BI__builtin_ia32_reduce_fadd_ph512: |
14253 | case X86::BI__builtin_ia32_reduce_fadd_ph256: |
14254 | case X86::BI__builtin_ia32_reduce_fadd_ph128: { |
14255 | Function *F = |
14256 | CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType()); |
14257 | Builder.getFastMathFlags().setAllowReassoc(); |
14258 | return Builder.CreateCall(F, {Ops[0], Ops[1]}); |
14259 | } |
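// llvm.vector.reduce.fadd is strictly ordered unless reassociation is
// allowed; setting the reassoc flag here permits the tree-wise reduction
// these builtins are expected to perform. Ops[0] is the scalar start value
// and Ops[1] is the vector being reduced.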
14260 | case X86::BI__builtin_ia32_reduce_fmul_pd512: |
14261 | case X86::BI__builtin_ia32_reduce_fmul_ps512: |
14262 | case X86::BI__builtin_ia32_reduce_fmul_ph512: |
14263 | case X86::BI__builtin_ia32_reduce_fmul_ph256: |
14264 | case X86::BI__builtin_ia32_reduce_fmul_ph128: { |
14265 | Function *F = |
14266 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType()); |
14267 | Builder.getFastMathFlags().setAllowReassoc(); |
14268 | return Builder.CreateCall(F, {Ops[0], Ops[1]}); |
14269 | } |
14270 | case X86::BI__builtin_ia32_reduce_fmax_pd512: |
14271 | case X86::BI__builtin_ia32_reduce_fmax_ps512: |
14272 | case X86::BI__builtin_ia32_reduce_fmax_ph512: |
14273 | case X86::BI__builtin_ia32_reduce_fmax_ph256: |
14274 | case X86::BI__builtin_ia32_reduce_fmax_ph128: { |
14275 | Function *F = |
14276 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType()); |
14277 | Builder.getFastMathFlags().setNoNaNs(); |
14278 | return Builder.CreateCall(F, {Ops[0]}); |
14279 | } |
14280 | case X86::BI__builtin_ia32_reduce_fmin_pd512: |
14281 | case X86::BI__builtin_ia32_reduce_fmin_ps512: |
14282 | case X86::BI__builtin_ia32_reduce_fmin_ph512: |
14283 | case X86::BI__builtin_ia32_reduce_fmin_ph256: |
14284 | case X86::BI__builtin_ia32_reduce_fmin_ph128: { |
14285 | Function *F = |
14286 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType()); |
14287 | Builder.getFastMathFlags().setNoNaNs(); |
14288 | return Builder.CreateCall(F, {Ops[0]}); |
14289 | } |
14290 | case X86::BI__builtin_ia32_reduce_mul_d512: |
14291 | case X86::BI__builtin_ia32_reduce_mul_q512: { |
14292 | Function *F = |
14293 | CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType()); |
14294 | return Builder.CreateCall(F, {Ops[0]}); |
14295 | } |
14296 | case X86::BI__builtin_ia32_reduce_or_d512: |
14297 | case X86::BI__builtin_ia32_reduce_or_q512: { |
14298 | Function *F = |
14299 | CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType()); |
14300 | return Builder.CreateCall(F, {Ops[0]}); |
14301 | } |
14302 | case X86::BI__builtin_ia32_reduce_smax_d512: |
14303 | case X86::BI__builtin_ia32_reduce_smax_q512: { |
14304 | Function *F = |
14305 | CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType()); |
14306 | return Builder.CreateCall(F, {Ops[0]}); |
14307 | } |
14308 | case X86::BI__builtin_ia32_reduce_smin_d512: |
14309 | case X86::BI__builtin_ia32_reduce_smin_q512: { |
14310 | Function *F = |
14311 | CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType()); |
14312 | return Builder.CreateCall(F, {Ops[0]}); |
14313 | } |
14314 | case X86::BI__builtin_ia32_reduce_umax_d512: |
14315 | case X86::BI__builtin_ia32_reduce_umax_q512: { |
14316 | Function *F = |
14317 | CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType()); |
14318 | return Builder.CreateCall(F, {Ops[0]}); |
14319 | } |
14320 | case X86::BI__builtin_ia32_reduce_umin_d512: |
14321 | case X86::BI__builtin_ia32_reduce_umin_q512: { |
14322 | Function *F = |
14323 | CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType()); |
14324 | return Builder.CreateCall(F, {Ops[0]}); |
14325 | } |
14326 | |
14327 | // 3DNow! |
14328 | case X86::BI__builtin_ia32_pswapdsf: |
14329 | case X86::BI__builtin_ia32_pswapdsi: { |
14330 | llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext()); |
14331 | Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast"); |
14332 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); |
14333 | return Builder.CreateCall(F, Ops, "pswapd"); |
14334 | } |
14335 | case X86::BI__builtin_ia32_rdrand16_step: |
14336 | case X86::BI__builtin_ia32_rdrand32_step: |
14337 | case X86::BI__builtin_ia32_rdrand64_step: |
14338 | case X86::BI__builtin_ia32_rdseed16_step: |
14339 | case X86::BI__builtin_ia32_rdseed32_step: |
14340 | case X86::BI__builtin_ia32_rdseed64_step: { |
14341 | Intrinsic::ID ID; |
14342 | switch (BuiltinID) { |
14343 | default: llvm_unreachable("Unsupported intrinsic!");
14344 | case X86::BI__builtin_ia32_rdrand16_step: |
14345 | ID = Intrinsic::x86_rdrand_16; |
14346 | break; |
14347 | case X86::BI__builtin_ia32_rdrand32_step: |
14348 | ID = Intrinsic::x86_rdrand_32; |
14349 | break; |
14350 | case X86::BI__builtin_ia32_rdrand64_step: |
14351 | ID = Intrinsic::x86_rdrand_64; |
14352 | break; |
14353 | case X86::BI__builtin_ia32_rdseed16_step: |
14354 | ID = Intrinsic::x86_rdseed_16; |
14355 | break; |
14356 | case X86::BI__builtin_ia32_rdseed32_step: |
14357 | ID = Intrinsic::x86_rdseed_32; |
14358 | break; |
14359 | case X86::BI__builtin_ia32_rdseed64_step: |
14360 | ID = Intrinsic::x86_rdseed_64; |
14361 | break; |
14362 | } |
14363 | |
14364 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID)); |
14365 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0), |
14366 | Ops[0]); |
14367 | return Builder.CreateExtractValue(Call, 1); |
14368 | } |
14369 | case X86::BI__builtin_ia32_addcarryx_u32: |
14370 | case X86::BI__builtin_ia32_addcarryx_u64: |
14371 | case X86::BI__builtin_ia32_subborrow_u32: |
14372 | case X86::BI__builtin_ia32_subborrow_u64: { |
14373 | Intrinsic::ID IID; |
14374 | switch (BuiltinID) { |
14375 | default: llvm_unreachable("Unsupported intrinsic!");
14376 | case X86::BI__builtin_ia32_addcarryx_u32: |
14377 | IID = Intrinsic::x86_addcarry_32; |
14378 | break; |
14379 | case X86::BI__builtin_ia32_addcarryx_u64: |
14380 | IID = Intrinsic::x86_addcarry_64; |
14381 | break; |
14382 | case X86::BI__builtin_ia32_subborrow_u32: |
14383 | IID = Intrinsic::x86_subborrow_32; |
14384 | break; |
14385 | case X86::BI__builtin_ia32_subborrow_u64: |
14386 | IID = Intrinsic::x86_subborrow_64; |
14387 | break; |
14388 | } |
14389 | |
14390 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), |
14391 | { Ops[0], Ops[1], Ops[2] }); |
14392 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
14393 | Ops[3]); |
14394 | return Builder.CreateExtractValue(Call, 0); |
14395 | } |
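// Illustrative use: unsigned out; unsigned char c =
//   __builtin_ia32_addcarryx_u32(carry_in, a, b, &out);
// The intrinsic call returns a {carry-out, sum} pair; the sum is stored
// through Ops[3] and the carry-out byte is the builtin's result.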
14396 | |
14397 | case X86::BI__builtin_ia32_fpclassps128_mask: |
14398 | case X86::BI__builtin_ia32_fpclassps256_mask: |
14399 | case X86::BI__builtin_ia32_fpclassps512_mask: |
14400 | case X86::BI__builtin_ia32_fpclassph128_mask: |
14401 | case X86::BI__builtin_ia32_fpclassph256_mask: |
14402 | case X86::BI__builtin_ia32_fpclassph512_mask: |
14403 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
14404 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
14405 | case X86::BI__builtin_ia32_fpclasspd512_mask: { |
14406 | unsigned NumElts = |
14407 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14408 | Value *MaskIn = Ops[2]; |
14409 | Ops.erase(&Ops[2]); |
14410 | |
14411 | Intrinsic::ID ID; |
14412 | switch (BuiltinID) { |
14413 | default: llvm_unreachable("Unsupported intrinsic!");
14414 | case X86::BI__builtin_ia32_fpclassph128_mask: |
14415 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_128; |
14416 | break; |
14417 | case X86::BI__builtin_ia32_fpclassph256_mask: |
14418 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_256; |
14419 | break; |
14420 | case X86::BI__builtin_ia32_fpclassph512_mask: |
14421 | ID = Intrinsic::x86_avx512fp16_fpclass_ph_512; |
14422 | break; |
14423 | case X86::BI__builtin_ia32_fpclassps128_mask: |
14424 | ID = Intrinsic::x86_avx512_fpclass_ps_128; |
14425 | break; |
14426 | case X86::BI__builtin_ia32_fpclassps256_mask: |
14427 | ID = Intrinsic::x86_avx512_fpclass_ps_256; |
14428 | break; |
14429 | case X86::BI__builtin_ia32_fpclassps512_mask: |
14430 | ID = Intrinsic::x86_avx512_fpclass_ps_512; |
14431 | break; |
14432 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
14433 | ID = Intrinsic::x86_avx512_fpclass_pd_128; |
14434 | break; |
14435 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
14436 | ID = Intrinsic::x86_avx512_fpclass_pd_256; |
14437 | break; |
14438 | case X86::BI__builtin_ia32_fpclasspd512_mask: |
14439 | ID = Intrinsic::x86_avx512_fpclass_pd_512; |
14440 | break; |
14441 | } |
14442 | |
14443 | Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14444 | return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn); |
14445 | } |
14446 | |
14447 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
14448 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
14449 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
14450 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
14451 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
14452 | case X86::BI__builtin_ia32_vp2intersect_d_128: { |
14453 | unsigned NumElts = |
14454 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14455 | Intrinsic::ID ID; |
14456 | |
14457 | switch (BuiltinID) { |
14458 | default: llvm_unreachable("Unsupported intrinsic!");
14459 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
14460 | ID = Intrinsic::x86_avx512_vp2intersect_q_512; |
14461 | break; |
14462 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
14463 | ID = Intrinsic::x86_avx512_vp2intersect_q_256; |
14464 | break; |
14465 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
14466 | ID = Intrinsic::x86_avx512_vp2intersect_q_128; |
14467 | break; |
14468 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
14469 | ID = Intrinsic::x86_avx512_vp2intersect_d_512; |
14470 | break; |
14471 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
14472 | ID = Intrinsic::x86_avx512_vp2intersect_d_256; |
14473 | break; |
14474 | case X86::BI__builtin_ia32_vp2intersect_d_128: |
14475 | ID = Intrinsic::x86_avx512_vp2intersect_d_128; |
14476 | break; |
14477 | } |
14478 | |
14479 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]}); |
14480 | Value *Result = Builder.CreateExtractValue(Call, 0); |
14481 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
14482 | Builder.CreateDefaultAlignedStore(Result, Ops[2]); |
14483 | |
14484 | Result = Builder.CreateExtractValue(Call, 1); |
14485 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
14486 | return Builder.CreateDefaultAlignedStore(Result, Ops[3]); |
14487 | } |
14488 | |
14489 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
14490 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
14491 | case X86::BI__builtin_ia32_vpmultishiftqb512: { |
14492 | Intrinsic::ID ID; |
14493 | switch (BuiltinID) { |
14494 | default: llvm_unreachable("Unsupported intrinsic!");
14495 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
14496 | ID = Intrinsic::x86_avx512_pmultishift_qb_128; |
14497 | break; |
14498 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
14499 | ID = Intrinsic::x86_avx512_pmultishift_qb_256; |
14500 | break; |
14501 | case X86::BI__builtin_ia32_vpmultishiftqb512: |
14502 | ID = Intrinsic::x86_avx512_pmultishift_qb_512; |
14503 | break; |
14504 | } |
14505 | |
14506 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14507 | } |
14508 | |
14509 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
14510 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
14511 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { |
14512 | unsigned NumElts = |
14513 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14514 | Value *MaskIn = Ops[2]; |
14515 | Ops.erase(&Ops[2]); |
14516 | |
14517 | Intrinsic::ID ID; |
14518 | switch (BuiltinID) { |
14519 | default: llvm_unreachable("Unsupported intrinsic!");
14520 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
14521 | ID = Intrinsic::x86_avx512_vpshufbitqmb_128; |
14522 | break; |
14523 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
14524 | ID = Intrinsic::x86_avx512_vpshufbitqmb_256; |
14525 | break; |
14526 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: |
14527 | ID = Intrinsic::x86_avx512_vpshufbitqmb_512; |
14528 | break; |
14529 | } |
14530 | |
14531 | Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14532 | return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn); |
14533 | } |
14534 | |
14535 | // Packed comparison intrinsics.
14536 | case X86::BI__builtin_ia32_cmpeqps: |
14537 | case X86::BI__builtin_ia32_cmpeqpd: |
14538 | return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false); |
14539 | case X86::BI__builtin_ia32_cmpltps: |
14540 | case X86::BI__builtin_ia32_cmpltpd: |
14541 | return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true); |
14542 | case X86::BI__builtin_ia32_cmpleps: |
14543 | case X86::BI__builtin_ia32_cmplepd: |
14544 | return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true); |
14545 | case X86::BI__builtin_ia32_cmpunordps: |
14546 | case X86::BI__builtin_ia32_cmpunordpd: |
14547 | return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false); |
14548 | case X86::BI__builtin_ia32_cmpneqps: |
14549 | case X86::BI__builtin_ia32_cmpneqpd: |
14550 | return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false); |
14551 | case X86::BI__builtin_ia32_cmpnltps: |
14552 | case X86::BI__builtin_ia32_cmpnltpd: |
14553 | return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true); |
14554 | case X86::BI__builtin_ia32_cmpnleps: |
14555 | case X86::BI__builtin_ia32_cmpnlepd: |
14556 | return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true); |
14557 | case X86::BI__builtin_ia32_cmpordps: |
14558 | case X86::BI__builtin_ia32_cmpordpd: |
14559 | return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false); |
14560 | case X86::BI__builtin_ia32_cmpph128_mask: |
14561 | case X86::BI__builtin_ia32_cmpph256_mask: |
14562 | case X86::BI__builtin_ia32_cmpph512_mask: |
14563 | case X86::BI__builtin_ia32_cmpps128_mask: |
14564 | case X86::BI__builtin_ia32_cmpps256_mask: |
14565 | case X86::BI__builtin_ia32_cmpps512_mask: |
14566 | case X86::BI__builtin_ia32_cmppd128_mask: |
14567 | case X86::BI__builtin_ia32_cmppd256_mask: |
14568 | case X86::BI__builtin_ia32_cmppd512_mask: |
14569 | IsMaskFCmp = true; |
14570 | LLVM_FALLTHROUGH;
14571 | case X86::BI__builtin_ia32_cmpps: |
14572 | case X86::BI__builtin_ia32_cmpps256: |
14573 | case X86::BI__builtin_ia32_cmppd: |
14574 | case X86::BI__builtin_ia32_cmppd256: { |
14575 | // Lower vector comparisons to fcmp instructions, while ignoring the
14576 | // requested signaling behaviour and
14577 | // the requested rounding mode.
14578 | // This is only possible if the fp-model is not strict and FENV_ACCESS is off.
14579 |
14580 | // The third argument is the comparison condition, an integer in the
14581 | // range [0, 31].
14582 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f; |
14583 | |
14584 | // Lower to an IR fcmp instruction,
14585 | // ignoring the requested signaling behaviour:
14586 | // e.g. both _CMP_GT_OS and _CMP_GT_OQ are translated to FCMP_OGT.
14587 | FCmpInst::Predicate Pred; |
14588 | bool IsSignaling; |
14589 | // Predicates for 16-31 repeat the 0-15 predicates. Only the signaling
14590 | // behavior is inverted. We'll handle that after the switch.
14591 | switch (CC & 0xf) { |
14592 | case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break; |
14593 | case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break; |
14594 | case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break; |
14595 | case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break; |
14596 | case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break; |
14597 | case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break; |
14598 | case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break; |
14599 | case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break; |
14600 | case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break; |
14601 | case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break; |
14602 | case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break; |
14603 | case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break; |
14604 | case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break; |
14605 | case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break; |
14606 | case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break; |
14607 | case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break; |
14608 | default: llvm_unreachable("Unhandled CC"); |
14609 | } |
14610 | |
14611 | // Invert the signalling behavior for 16-31. |
14612 | if (CC & 0x10) |
14613 | IsSignaling = !IsSignaling; |
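      | // For illustration (not from the source): bits 0-3 of the immediate pick |
      | // the base predicate and bit 4 flips quiet vs. signaling, so e.g. |
      | // _CMP_EQ_OQ (0x00) and _CMP_EQ_OS (0x10) both lower to FCMP_OEQ and |
      | // differ only in IsSignaling. |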
14614 | |
14615 | // If the predicate is true or false and we're using constrained intrinsics, |
14616 | // we don't have a compare intrinsic we can use. Just use the legacy X86 |
14617 | // specific intrinsic. |
14618 | // If the intrinsic is mask enabled and we're using constrained intrinsics, |
14619 | // use the legacy X86 specific intrinsic. |
14620 | if (Builder.getIsFPConstrained() && |
14621 | (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE || |
14622 | IsMaskFCmp)) { |
14623 | |
14624 | Intrinsic::ID IID; |
14625 | switch (BuiltinID) { |
14626 | default: llvm_unreachable("Unexpected builtin"); |
14627 | case X86::BI__builtin_ia32_cmpps: |
14628 | IID = Intrinsic::x86_sse_cmp_ps; |
14629 | break; |
14630 | case X86::BI__builtin_ia32_cmpps256: |
14631 | IID = Intrinsic::x86_avx_cmp_ps_256; |
14632 | break; |
14633 | case X86::BI__builtin_ia32_cmppd: |
14634 | IID = Intrinsic::x86_sse2_cmp_pd; |
14635 | break; |
14636 | case X86::BI__builtin_ia32_cmppd256: |
14637 | IID = Intrinsic::x86_avx_cmp_pd_256; |
14638 | break; |
14639 | case X86::BI__builtin_ia32_cmpps512_mask: |
14640 | IID = Intrinsic::x86_avx512_mask_cmp_ps_512; |
14641 | break; |
14642 | case X86::BI__builtin_ia32_cmppd512_mask: |
14643 | IID = Intrinsic::x86_avx512_mask_cmp_pd_512; |
14644 | break; |
14645 | case X86::BI__builtin_ia32_cmpps128_mask: |
14646 | IID = Intrinsic::x86_avx512_mask_cmp_ps_128; |
14647 | break; |
14648 | case X86::BI__builtin_ia32_cmpps256_mask: |
14649 | IID = Intrinsic::x86_avx512_mask_cmp_ps_256; |
14650 | break; |
14651 | case X86::BI__builtin_ia32_cmppd128_mask: |
14652 | IID = Intrinsic::x86_avx512_mask_cmp_pd_128; |
14653 | break; |
14654 | case X86::BI__builtin_ia32_cmppd256_mask: |
14655 | IID = Intrinsic::x86_avx512_mask_cmp_pd_256; |
14656 | break; |
14657 | } |
14658 | |
14659 | Function *Intr = CGM.getIntrinsic(IID); |
14660 | if (IsMaskFCmp) { |
14661 | unsigned NumElts = |
14662 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14663 | Ops[3] = getMaskVecValue(*this, Ops[3], NumElts); |
14664 | Value *Cmp = Builder.CreateCall(Intr, Ops); |
14665 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr); |
14666 | } |
14667 | |
14668 | return Builder.CreateCall(Intr, Ops); |
14669 | } |
14670 | |
14671 | // Builtins without the _mask suffix return a vector of integers |
14672 | // of the same width as the input vectors |
14673 | if (IsMaskFCmp) { |
14674 | // We ignore SAE if strict FP is disabled. We only keep precise |
14675 | // exception behavior under strict FP. |
14676 | // NOTE: If strict FP does ever go through here a CGFPOptionsRAII |
14677 | // object will be required. |
14678 | unsigned NumElts = |
14679 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14680 | Value *Cmp; |
14681 | if (IsSignaling) |
14682 | Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); |
14683 | else |
14684 | Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
14685 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]); |
14686 | } |
14687 | |
14688 | return getVectorFCmpIR(Pred, IsSignaling); |
14689 | } |
14690 | |
14691 | // SSE scalar comparison intrinsics |
14692 | case X86::BI__builtin_ia32_cmpeqss: |
14693 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); |
14694 | case X86::BI__builtin_ia32_cmpltss: |
14695 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); |
14696 | case X86::BI__builtin_ia32_cmpless: |
14697 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); |
14698 | case X86::BI__builtin_ia32_cmpunordss: |
14699 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); |
14700 | case X86::BI__builtin_ia32_cmpneqss: |
14701 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); |
14702 | case X86::BI__builtin_ia32_cmpnltss: |
14703 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); |
14704 | case X86::BI__builtin_ia32_cmpnless: |
14705 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); |
14706 | case X86::BI__builtin_ia32_cmpordss: |
14707 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); |
14708 | case X86::BI__builtin_ia32_cmpeqsd: |
14709 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); |
14710 | case X86::BI__builtin_ia32_cmpltsd: |
14711 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); |
14712 | case X86::BI__builtin_ia32_cmplesd: |
14713 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); |
14714 | case X86::BI__builtin_ia32_cmpunordsd: |
14715 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); |
14716 | case X86::BI__builtin_ia32_cmpneqsd: |
14717 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); |
14718 | case X86::BI__builtin_ia32_cmpnltsd: |
14719 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); |
14720 | case X86::BI__builtin_ia32_cmpnlesd: |
14721 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); |
14722 | case X86::BI__builtin_ia32_cmpordsd: |
14723 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); |
14724 | |
14725 | // f16c half2float intrinsics |
14726 | case X86::BI__builtin_ia32_vcvtph2ps: |
14727 | case X86::BI__builtin_ia32_vcvtph2ps256: |
14728 | case X86::BI__builtin_ia32_vcvtph2ps_mask: |
14729 | case X86::BI__builtin_ia32_vcvtph2ps256_mask: |
14730 | case X86::BI__builtin_ia32_vcvtph2ps512_mask: { |
14731 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14732 | return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType())); |
14733 | } |
14734 | |
14735 | // AVX512 bf16 intrinsics |
14736 | case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { |
14737 | Ops[2] = getMaskVecValue( |
14738 | *this, Ops[2], |
14739 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements()); |
14740 | Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; |
14741 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14742 | } |
14743 | case X86::BI__builtin_ia32_cvtsbf162ss_32: |
14744 | return EmitX86CvtBF16ToFloatExpr(*this, E, Ops); |
14745 | |
14746 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
14747 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { |
14748 | Intrinsic::ID IID; |
14749 | switch (BuiltinID) { |
14750 | default: llvm_unreachable("Unsupported intrinsic!"); |
14751 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
14752 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256; |
14753 | break; |
14754 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: |
14755 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512; |
14756 | break; |
14757 | } |
14758 | Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]); |
14759 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
14760 | } |
14761 | |
14762 | case X86::BI__emul: |
14763 | case X86::BI__emulu: { |
14764 | llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64); |
14765 | bool isSigned = (BuiltinID == X86::BI__emul); |
14766 | Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned); |
14767 | Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned); |
14768 | return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned); |
14769 | } |
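      | // Worked example (illustrative): __emul performs a full 32x32->64 |
      | // multiply, so __emul(0x10000, 0x10000) yields 0x100000000, a value a |
      | // plain 32-bit multiply would truncate to 0. |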
14770 | case X86::BI__mulh: |
14771 | case X86::BI__umulh: |
14772 | case X86::BI_mul128: |
14773 | case X86::BI_umul128: { |
14774 | llvm::Type *ResType = ConvertType(E->getType()); |
14775 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
14776 | |
14777 | bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128); |
14778 | Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned); |
14779 | Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned); |
14780 | |
14781 | Value *MulResult, *HigherBits; |
14782 | if (IsSigned) { |
14783 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
14784 | HigherBits = Builder.CreateAShr(MulResult, 64); |
14785 | } else { |
14786 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
14787 | HigherBits = Builder.CreateLShr(MulResult, 64); |
14788 | } |
14789 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
14790 | |
14791 | if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh) |
14792 | return HigherBits; |
14793 | |
14794 | Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2)); |
14795 | Builder.CreateStore(HigherBits, HighBitsAddress); |
14796 | return Builder.CreateIntCast(MulResult, ResType, IsSigned); |
14797 | } |
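      | // Worked example (illustrative): _umul128(1ULL << 63, 2, &hi) widens to |
      | // a 128-bit multiply, returns the low half 0, and stores the high half 1 |
      | // through the third pointer argument; __umulh returns just the high half. |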
14798 | |
14799 | case X86::BI__faststorefence: { |
14800 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
14801 | llvm::SyncScope::System); |
14802 | } |
14803 | case X86::BI__shiftleft128: |
14804 | case X86::BI__shiftright128: { |
14805 | llvm::Function *F = CGM.getIntrinsic( |
14806 | BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr, |
14807 | Int64Ty); |
14808 | // Flip low/high ops and zero-extend amount to matching type. |
14809 | // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt) |
14810 | // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt) |
14811 | std::swap(Ops[0], Ops[1]); |
14812 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
14813 | return Builder.CreateCall(F, Ops); |
14814 | } |
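      | // Worked example (illustrative): __shiftleft128 returns the high 64 bits |
      | // of the 128-bit value High:Low shifted left by Amt, which is exactly |
      | // fshl(High, Low, Amt) == (High << Amt) | (Low >> (64 - Amt)) for Amt in |
      | // [1, 63]. |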
14815 | case X86::BI_ReadWriteBarrier: |
14816 | case X86::BI_ReadBarrier: |
14817 | case X86::BI_WriteBarrier: { |
14818 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
14819 | llvm::SyncScope::SingleThread); |
14820 | } |
14821 | |
14822 | case X86::BI_AddressOfReturnAddress: { |
14823 | Function *F = |
14824 | CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy); |
14825 | return Builder.CreateCall(F); |
14826 | } |
14827 | case X86::BI__stosb: { |
14828 | // We treat __stosb as a volatile memset - it may not generate a "rep stosb" |
14829 | // instruction, but it will create a memset that won't be optimized away. |
14830 | return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true); |
14831 | } |
14832 | case X86::BI__ud2: |
14833 | // llvm.trap makes a ud2a instruction on x86. |
14834 | return EmitTrapCall(Intrinsic::trap); |
14835 | case X86::BI__int2c: { |
14836 | // This syscall signals a driver assertion failure in x86 NT kernels. |
14837 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); |
14838 | llvm::InlineAsm *IA = |
14839 | llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true); |
14840 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
14841 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
14842 | llvm::Attribute::NoReturn); |
14843 | llvm::CallInst *CI = Builder.CreateCall(IA); |
14844 | CI->setAttributes(NoReturnAttr); |
14845 | return CI; |
14846 | } |
14847 | case X86::BI__readfsbyte: |
14848 | case X86::BI__readfsword: |
14849 | case X86::BI__readfsdword: |
14850 | case X86::BI__readfsqword: { |
14851 | llvm::Type *IntTy = ConvertType(E->getType()); |
14852 | Value *Ptr = |
14853 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257)); |
14854 | LoadInst *Load = Builder.CreateAlignedLoad( |
14855 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
14856 | Load->setVolatile(true); |
14857 | return Load; |
14858 | } |
14859 | case X86::BI__readgsbyte: |
14860 | case X86::BI__readgsword: |
14861 | case X86::BI__readgsdword: |
14862 | case X86::BI__readgsqword: { |
14863 | llvm::Type *IntTy = ConvertType(E->getType()); |
14864 | Value *Ptr = |
14865 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256)); |
14866 | LoadInst *Load = Builder.CreateAlignedLoad( |
14867 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
14868 | Load->setVolatile(true); |
14869 | return Load; |
14870 | } |
14871 | case X86::BI__builtin_ia32_paddsb512: |
14872 | case X86::BI__builtin_ia32_paddsw512: |
14873 | case X86::BI__builtin_ia32_paddsb256: |
14874 | case X86::BI__builtin_ia32_paddsw256: |
14875 | case X86::BI__builtin_ia32_paddsb128: |
14876 | case X86::BI__builtin_ia32_paddsw128: |
14877 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat); |
14878 | case X86::BI__builtin_ia32_paddusb512: |
14879 | case X86::BI__builtin_ia32_paddusw512: |
14880 | case X86::BI__builtin_ia32_paddusb256: |
14881 | case X86::BI__builtin_ia32_paddusw256: |
14882 | case X86::BI__builtin_ia32_paddusb128: |
14883 | case X86::BI__builtin_ia32_paddusw128: |
14884 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat); |
14885 | case X86::BI__builtin_ia32_psubsb512: |
14886 | case X86::BI__builtin_ia32_psubsw512: |
14887 | case X86::BI__builtin_ia32_psubsb256: |
14888 | case X86::BI__builtin_ia32_psubsw256: |
14889 | case X86::BI__builtin_ia32_psubsb128: |
14890 | case X86::BI__builtin_ia32_psubsw128: |
14891 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat); |
14892 | case X86::BI__builtin_ia32_psubusb512: |
14893 | case X86::BI__builtin_ia32_psubusw512: |
14894 | case X86::BI__builtin_ia32_psubusb256: |
14895 | case X86::BI__builtin_ia32_psubusw256: |
14896 | case X86::BI__builtin_ia32_psubusb128: |
14897 | case X86::BI__builtin_ia32_psubusw128: |
14898 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat); |
14899 | case X86::BI__builtin_ia32_encodekey128_u32: { |
14900 | Intrinsic::ID IID = Intrinsic::x86_encodekey128; |
14901 | |
14902 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]}); |
14903 | |
14904 | for (int i = 0; i < 6; ++i) { |
14905 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
14906 | Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16); |
14907 | Ptr = Builder.CreateBitCast( |
14908 | Ptr, llvm::PointerType::getUnqual(Extract->getType())); |
14909 | Builder.CreateAlignedStore(Extract, Ptr, Align(1)); |
14910 | } |
14911 | |
14912 | return Builder.CreateExtractValue(Call, 0); |
14913 | } |
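      | // Note (descriptive): the x86_encodekey128 intrinsic returns the i32 |
      | // status plus six 128-bit chunks; the loop above scatters those 6 x 16 |
      | // bytes into the caller's 48-byte handle buffer using align-1 stores, |
      | // since the buffer carries no alignment guarantee. |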
14914 | case X86::BI__builtin_ia32_encodekey256_u32: { |
14915 | Intrinsic::ID IID = Intrinsic::x86_encodekey256; |
14916 | |
14917 | Value *Call = |
14918 | Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]}); |
14919 | |
14920 | for (int i = 0; i < 7; ++i) { |
14921 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
14922 | Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16); |
14923 | Ptr = Builder.CreateBitCast( |
14924 | Ptr, llvm::PointerType::getUnqual(Extract->getType())); |
14925 | Builder.CreateAlignedStore(Extract, Ptr, Align(1)); |
14926 | } |
14927 | |
14928 | return Builder.CreateExtractValue(Call, 0); |
14929 | } |
14930 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
14931 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
14932 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
14933 | case X86::BI__builtin_ia32_aesdec256kl_u8: { |
14934 | Intrinsic::ID IID; |
14935 | StringRef BlockName; |
14936 | switch (BuiltinID) { |
14937 | default: |
14938 | llvm_unreachable("Unexpected builtin"); |
14939 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
14940 | IID = Intrinsic::x86_aesenc128kl; |
14941 | BlockName = "aesenc128kl"; |
14942 | break; |
14943 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
14944 | IID = Intrinsic::x86_aesdec128kl; |
14945 | BlockName = "aesdec128kl"; |
14946 | break; |
14947 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
14948 | IID = Intrinsic::x86_aesenc256kl; |
14949 | BlockName = "aesenc256kl"; |
14950 | break; |
14951 | case X86::BI__builtin_ia32_aesdec256kl_u8: |
14952 | IID = Intrinsic::x86_aesdec256kl; |
14953 | BlockName = "aesdec256kl"; |
14954 | break; |
14955 | } |
14956 | |
14957 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]}); |
14958 | |
14959 | BasicBlock *NoError = |
14960 | createBasicBlock(BlockName + "_no_error", this->CurFn); |
14961 | BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); |
14962 | BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); |
14963 | |
14964 | Value *Ret = Builder.CreateExtractValue(Call, 0); |
14965 | Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty()); |
14966 | Value *Out = Builder.CreateExtractValue(Call, 1); |
14967 | Builder.CreateCondBr(Succ, NoError, Error); |
14968 | |
14969 | Builder.SetInsertPoint(NoError); |
14970 | Builder.CreateDefaultAlignedStore(Out, Ops[0]); |
14971 | Builder.CreateBr(End); |
14972 | |
14973 | Builder.SetInsertPoint(Error); |
14974 | Constant *Zero = llvm::Constant::getNullValue(Out->getType()); |
14975 | Builder.CreateDefaultAlignedStore(Zero, Ops[0]); |
14976 | Builder.CreateBr(End); |
14977 | |
14978 | Builder.SetInsertPoint(End); |
14979 | return Builder.CreateExtractValue(Call, 0); |
14980 | } |
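      | // Note (descriptive): the emitted control flow branches on bit 0 of the |
      | // returned status byte: on success the processed block is stored to |
      | // Ops[0]; on failure zero is stored instead. Both paths then fall |
      | // through and return the status byte itself. |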
14981 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
14982 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
14983 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
14984 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: { |
14985 | Intrinsic::ID IID; |
14986 | StringRef BlockName; |
14987 | switch (BuiltinID) { |
14988 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
14989 | IID = Intrinsic::x86_aesencwide128kl; |
14990 | BlockName = "aesencwide128kl"; |
14991 | break; |
14992 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
14993 | IID = Intrinsic::x86_aesdecwide128kl; |
14994 | BlockName = "aesdecwide128kl"; |
14995 | break; |
14996 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
14997 | IID = Intrinsic::x86_aesencwide256kl; |
14998 | BlockName = "aesencwide256kl"; |
14999 | break; |
15000 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: |
15001 | IID = Intrinsic::x86_aesdecwide256kl; |
15002 | BlockName = "aesdecwide256kl"; |
15003 | break; |
15004 | } |
15005 | |
15006 | llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2); |
15007 | Value *InOps[9]; |
15008 | InOps[0] = Ops[2]; |
15009 | for (int i = 0; i != 8; ++i) { |
15010 | Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i); |
15011 | InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16)); |
15012 | } |
15013 | |
15014 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps); |
15015 | |
15016 | BasicBlock *NoError = |
15017 | createBasicBlock(BlockName + "_no_error", this->CurFn); |
15018 | BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); |
15019 | BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); |
15020 | |
15021 | Value *Ret = Builder.CreateExtractValue(Call, 0); |
15022 | Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty()); |
15023 | Builder.CreateCondBr(Succ, NoError, Error); |
15024 | |
15025 | Builder.SetInsertPoint(NoError); |
15026 | for (int i = 0; i != 8; ++i) { |
15027 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
15028 | Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i); |
15029 | Builder.CreateAlignedStore(Extract, Ptr, Align(16)); |
15030 | } |
15031 | Builder.CreateBr(End); |
15032 | |
15033 | Builder.SetInsertPoint(Error); |
15034 | for (int i = 0; i != 8; ++i) { |
15035 | Value *Out = Builder.CreateExtractValue(Call, i + 1); |
15036 | Constant *Zero = llvm::Constant::getNullValue(Out->getType()); |
15037 | Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i); |
15038 | Builder.CreateAlignedStore(Zero, Ptr, Align(16)); |
15039 | } |
15040 | Builder.CreateBr(End); |
15041 | |
15042 | Builder.SetInsertPoint(End); |
15043 | return Builder.CreateExtractValue(Call, 0); |
15044 | } |
15045 | } |
15046 | } |
15047 | |
15048 | Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, |
15049 | const CallExpr *E) { |
15050 | SmallVector<Value*, 4> Ops; |
15051 | |
15052 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
15053 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
15054 | |
15055 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
15056 | |
15057 | switch (BuiltinID) { |
15058 | default: return nullptr; |
15059 | |
15060 | // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we |
15061 | // call __builtin_readcyclecounter. |
15062 | case PPC::BI__builtin_ppc_get_timebase: |
15063 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter)); |
15064 | |
15065 | // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr |
15066 | case PPC::BI__builtin_altivec_lvx: |
15067 | case PPC::BI__builtin_altivec_lvxl: |
15068 | case PPC::BI__builtin_altivec_lvebx: |
15069 | case PPC::BI__builtin_altivec_lvehx: |
15070 | case PPC::BI__builtin_altivec_lvewx: |
15071 | case PPC::BI__builtin_altivec_lvsl: |
15072 | case PPC::BI__builtin_altivec_lvsr: |
15073 | case PPC::BI__builtin_vsx_lxvd2x: |
15074 | case PPC::BI__builtin_vsx_lxvw4x: |
15075 | case PPC::BI__builtin_vsx_lxvd2x_be: |
15076 | case PPC::BI__builtin_vsx_lxvw4x_be: |
15077 | case PPC::BI__builtin_vsx_lxvl: |
15078 | case PPC::BI__builtin_vsx_lxvll: |
15079 | { |
15080 | if (BuiltinID == PPC::BI__builtin_vsx_lxvl || |
15081 | BuiltinID == PPC::BI__builtin_vsx_lxvll) { |
15082 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); |
15083 | } else { |
15084 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
15085 | Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); |
15086 | Ops.pop_back(); |
15087 | } |
15088 | |
15089 | switch (BuiltinID) { |
15090 | default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!"); |
15091 | case PPC::BI__builtin_altivec_lvx: |
15092 | ID = Intrinsic::ppc_altivec_lvx; |
15093 | break; |
15094 | case PPC::BI__builtin_altivec_lvxl: |
15095 | ID = Intrinsic::ppc_altivec_lvxl; |
15096 | break; |
15097 | case PPC::BI__builtin_altivec_lvebx: |
15098 | ID = Intrinsic::ppc_altivec_lvebx; |
15099 | break; |
15100 | case PPC::BI__builtin_altivec_lvehx: |
15101 | ID = Intrinsic::ppc_altivec_lvehx; |
15102 | break; |
15103 | case PPC::BI__builtin_altivec_lvewx: |
15104 | ID = Intrinsic::ppc_altivec_lvewx; |
15105 | break; |
15106 | case PPC::BI__builtin_altivec_lvsl: |
15107 | ID = Intrinsic::ppc_altivec_lvsl; |
15108 | break; |
15109 | case PPC::BI__builtin_altivec_lvsr: |
15110 | ID = Intrinsic::ppc_altivec_lvsr; |
15111 | break; |
15112 | case PPC::BI__builtin_vsx_lxvd2x: |
15113 | ID = Intrinsic::ppc_vsx_lxvd2x; |
15114 | break; |
15115 | case PPC::BI__builtin_vsx_lxvw4x: |
15116 | ID = Intrinsic::ppc_vsx_lxvw4x; |
15117 | break; |
15118 | case PPC::BI__builtin_vsx_lxvd2x_be: |
15119 | ID = Intrinsic::ppc_vsx_lxvd2x_be; |
15120 | break; |
15121 | case PPC::BI__builtin_vsx_lxvw4x_be: |
15122 | ID = Intrinsic::ppc_vsx_lxvw4x_be; |
15123 | break; |
15124 | case PPC::BI__builtin_vsx_lxvl: |
15125 | ID = Intrinsic::ppc_vsx_lxvl; |
15126 | break; |
15127 | case PPC::BI__builtin_vsx_lxvll: |
15128 | ID = Intrinsic::ppc_vsx_lxvll; |
15129 | break; |
15130 | } |
15131 | llvm::Function *F = CGM.getIntrinsic(ID); |
15132 | return Builder.CreateCall(F, Ops, ""); |
15133 | } |
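      | // Note (descriptive): for lxvl/lxvll the pointer is the first operand |
      | // and is used as-is, while the other load builtins follow the AltiVec |
      | // vec_ld(offset, base) form, so the offset in Ops[0] is folded into the |
      | // base pointer with a GEP before the intrinsic call. |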
15134 | |
15135 | // vec_st, vec_xst_be |
15136 | case PPC::BI__builtin_altivec_stvx: |
15137 | case PPC::BI__builtin_altivec_stvxl: |
15138 | case PPC::BI__builtin_altivec_stvebx: |
15139 | case PPC::BI__builtin_altivec_stvehx: |
15140 | case PPC::BI__builtin_altivec_stvewx: |
15141 | case PPC::BI__builtin_vsx_stxvd2x: |
15142 | case PPC::BI__builtin_vsx_stxvw4x: |
15143 | case PPC::BI__builtin_vsx_stxvd2x_be: |
15144 | case PPC::BI__builtin_vsx_stxvw4x_be: |
15145 | case PPC::BI__builtin_vsx_stxvl: |
15146 | case PPC::BI__builtin_vsx_stxvll: |
15147 | { |
15148 | if (BuiltinID == PPC::BI__builtin_vsx_stxvl || |
15149 | BuiltinID == PPC::BI__builtin_vsx_stxvll) { |
15150 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
15151 | } else { |
15152 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
15153 | Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); |
15154 | Ops.pop_back(); |
15155 | } |
15156 | |
15157 | switch (BuiltinID) { |
15158 | default: llvm_unreachable("Unsupported st intrinsic!"); |
15159 | case PPC::BI__builtin_altivec_stvx: |
15160 | ID = Intrinsic::ppc_altivec_stvx; |
15161 | break; |
15162 | case PPC::BI__builtin_altivec_stvxl: |
15163 | ID = Intrinsic::ppc_altivec_stvxl; |
15164 | break; |
15165 | case PPC::BI__builtin_altivec_stvebx: |
15166 | ID = Intrinsic::ppc_altivec_stvebx; |
15167 | break; |
15168 | case PPC::BI__builtin_altivec_stvehx: |
15169 | ID = Intrinsic::ppc_altivec_stvehx; |
15170 | break; |
15171 | case PPC::BI__builtin_altivec_stvewx: |
15172 | ID = Intrinsic::ppc_altivec_stvewx; |
15173 | break; |
15174 | case PPC::BI__builtin_vsx_stxvd2x: |
15175 | ID = Intrinsic::ppc_vsx_stxvd2x; |
15176 | break; |
15177 | case PPC::BI__builtin_vsx_stxvw4x: |
15178 | ID = Intrinsic::ppc_vsx_stxvw4x; |
15179 | break; |
15180 | case PPC::BI__builtin_vsx_stxvd2x_be: |
15181 | ID = Intrinsic::ppc_vsx_stxvd2x_be; |
15182 | break; |
15183 | case PPC::BI__builtin_vsx_stxvw4x_be: |
15184 | ID = Intrinsic::ppc_vsx_stxvw4x_be; |
15185 | break; |
15186 | case PPC::BI__builtin_vsx_stxvl: |
15187 | ID = Intrinsic::ppc_vsx_stxvl; |
15188 | break; |
15189 | case PPC::BI__builtin_vsx_stxvll: |
15190 | ID = Intrinsic::ppc_vsx_stxvll; |
15191 | break; |
15192 | } |
15193 | llvm::Function *F = CGM.getIntrinsic(ID); |
15194 | return Builder.CreateCall(F, Ops, ""); |
15195 | } |
15196 | case PPC::BI__builtin_vsx_ldrmb: { |
15197 | // Essentially boils down to performing an unaligned VMX load sequence so |
15198 | // as to avoid crossing a page boundary and then shuffling the elements |
15199 | // into the right side of the vector register. |
15200 | int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue(); |
15201 | llvm::Type *ResTy = ConvertType(E->getType()); |
15202 | bool IsLE = getTarget().isLittleEndian(); |
15203 | |
15204 | // If the user wants the entire vector, just load the entire vector. |
15205 | if (NumBytes == 16) { |
15206 | Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo()); |
15207 | Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1))); |
15208 | if (!IsLE) |
15209 | return LD; |
15210 | |
15211 | // Reverse the bytes on LE. |
15212 | SmallVector<int, 16> RevMask; |
15213 | for (int Idx = 0; Idx < 16; Idx++) |
15214 | RevMask.push_back(15 - Idx); |
15215 | return Builder.CreateShuffleVector(LD, LD, RevMask); |
15216 | } |
15217 | |
15218 | llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx); |
15219 | llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr |
15220 | : Intrinsic::ppc_altivec_lvsl); |
15221 | llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm); |
15222 | Value *HiMem = Builder.CreateGEP( |
15223 | Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1)); |
15224 | Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo"); |
15225 | Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi"); |
15226 | Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1"); |
15227 | |
15228 | Ops.clear(); |
15229 | Ops.push_back(IsLE ? HiLd : LoLd); |
15230 | Ops.push_back(IsLE ? LoLd : HiLd); |
15231 | Ops.push_back(Mask1); |
15232 | Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1"); |
15233 | Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType()); |
15234 | |
15235 | if (IsLE) { |
15236 | SmallVector<int, 16> Consts; |
15237 | for (int Idx = 0; Idx < 16; Idx++) { |
15238 | int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1) |
15239 | : 16 - (NumBytes - Idx); |
15240 | Consts.push_back(Val); |
15241 | } |
15242 | return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy), |
15243 | Zero, Consts); |
15244 | } |
15245 | SmallVector<Constant *, 16> Consts; |
15246 | for (int Idx = 0; Idx < 16; Idx++) |
15247 | Consts.push_back(Builder.getInt8(NumBytes + Idx)); |
15248 | Value *Mask2 = ConstantVector::get(Consts); |
15249 | return Builder.CreateBitCast( |
15250 | Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy); |
15251 | } |
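      | // Note (descriptive): lvx ignores the low four address bits, so the two |
      | // loads above fetch the aligned blocks covering the first and the last |
      | // requested byte; lvsl/lvsr supplies the permute control that splices |
      | // the NumBytes string out of them without reading past it. |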
15252 | case PPC::BI__builtin_vsx_strmb: { |
15253 | int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue(); |
15254 | bool IsLE = getTarget().isLittleEndian(); |
15255 | auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) { |
15256 | // If we are storing the whole vector, simply store it on BE; on LE, |
15257 | // reverse the bytes first, then store. |
15258 | if (Width == 16) { |
15259 | Value *BC = |
15260 | Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo()); |
15261 | Value *StVec = Ops[2]; |
15262 | if (IsLE) { |
15263 | SmallVector<int, 16> RevMask; |
15264 | for (int Idx = 0; Idx < 16; Idx++) |
15265 | RevMask.push_back(15 - Idx); |
15266 | StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask); |
15267 | } |
15268 | return Builder.CreateStore(StVec, |
15269 | Address(BC, CharUnits::fromQuantity(1))); |
15270 | } |
15271 | auto *ConvTy = Int64Ty; |
15272 | unsigned NumElts = 0; |
15273 | switch (Width) { |
15274 | default: |
15275 | llvm_unreachable("width for stores must be a power of 2")::llvm::llvm_unreachable_internal("width for stores must be a power of 2" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 15275); |
15276 | case 8: |
15277 | ConvTy = Int64Ty; |
15278 | NumElts = 2; |
15279 | break; |
15280 | case 4: |
15281 | ConvTy = Int32Ty; |
15282 | NumElts = 4; |
15283 | break; |
15284 | case 2: |
15285 | ConvTy = Int16Ty; |
15286 | NumElts = 8; |
15287 | break; |
15288 | case 1: |
15289 | ConvTy = Int8Ty; |
15290 | NumElts = 16; |
15291 | break; |
15292 | } |
15293 | Value *Vec = Builder.CreateBitCast( |
15294 | Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts)); |
15295 | Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0], |
15296 | ConstantInt::get(Int64Ty, Offset)); |
15297 | Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo()); |
15298 | Value *Elt = Builder.CreateExtractElement(Vec, EltNo); |
15299 | if (IsLE && Width > 1) { |
15300 | Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy); |
15301 | Elt = Builder.CreateCall(F, Elt); |
15302 | } |
15303 | return Builder.CreateStore(Elt, |
15304 | Address(PtrBC, CharUnits::fromQuantity(1))); |
15305 | }; |
15306 | unsigned Stored = 0; |
15307 | unsigned RemainingBytes = NumBytes; |
15308 | Value *Result; |
15309 | if (NumBytes == 16) |
15310 | return StoreSubVec(16, 0, 0); |
15311 | if (NumBytes >= 8) { |
15312 | Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1); |
15313 | RemainingBytes -= 8; |
15314 | Stored += 8; |
15315 | } |
15316 | if (RemainingBytes >= 4) { |
15317 | Result = StoreSubVec(4, NumBytes - Stored - 4, |
15318 | IsLE ? (Stored >> 2) : 3 - (Stored >> 2)); |
15319 | RemainingBytes -= 4; |
15320 | Stored += 4; |
15321 | } |
15322 | if (RemainingBytes >= 2) { |
15323 | Result = StoreSubVec(2, NumBytes - Stored - 2, |
15324 | IsLE ? (Stored >> 1) : 7 - (Stored >> 1)); |
15325 | RemainingBytes -= 2; |
15326 | Stored += 2; |
15327 | } |
15328 | if (RemainingBytes) |
15329 | Result = |
15330 | StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored); |
15331 | return Result; |
15332 | } |
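      | // Worked example (illustrative): NumBytes == 7 decomposes greedily into |
      | // StoreSubVec(4, 3, ...), StoreSubVec(2, 1, ...) and StoreSubVec(1, 0, ...), |
      | // i.e. a word, a halfword and a byte covering offsets 0..6 exactly. |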
15333 | // Square root |
15334 | case PPC::BI__builtin_vsx_xvsqrtsp: |
15335 | case PPC::BI__builtin_vsx_xvsqrtdp: { |
15336 | llvm::Type *ResultType = ConvertType(E->getType()); |
15337 | Value *X = EmitScalarExpr(E->getArg(0)); |
15338 | if (Builder.getIsFPConstrained()) { |
15339 | llvm::Function *F = CGM.getIntrinsic( |
15340 | Intrinsic::experimental_constrained_sqrt, ResultType); |
15341 | return Builder.CreateConstrainedFPCall(F, X); |
15342 | } else { |
15343 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
15344 | return Builder.CreateCall(F, X); |
15345 | } |
15346 | } |
15347 | // Count leading zeros |
15348 | case PPC::BI__builtin_altivec_vclzb: |
15349 | case PPC::BI__builtin_altivec_vclzh: |
15350 | case PPC::BI__builtin_altivec_vclzw: |
15351 | case PPC::BI__builtin_altivec_vclzd: { |
15352 | llvm::Type *ResultType = ConvertType(E->getType()); |
15353 | Value *X = EmitScalarExpr(E->getArg(0)); |
15354 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
15355 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
15356 | return Builder.CreateCall(F, {X, Undef}); |
15357 | } |
15358 | case PPC::BI__builtin_altivec_vctzb: |
15359 | case PPC::BI__builtin_altivec_vctzh: |
15360 | case PPC::BI__builtin_altivec_vctzw: |
15361 | case PPC::BI__builtin_altivec_vctzd: { |
15362 | llvm::Type *ResultType = ConvertType(E->getType()); |
15363 | Value *X = EmitScalarExpr(E->getArg(0)); |
15364 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
15365 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
15366 | return Builder.CreateCall(F, {X, Undef}); |
15367 | } |
15368 | case PPC::BI__builtin_altivec_vec_replace_elt: |
15369 | case PPC::BI__builtin_altivec_vec_replace_unaligned: { |
15370 | // The third argument of vec_replace_elt and vec_replace_unaligned must |
15371 | // be a compile time constant and will be emitted either to the vinsw |
15372 | // or vinsd instruction. |
15373 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15374 | assert(ArgCI && |
15375 | "Third Arg to vinsw/vinsd intrinsic must be a constant integer!"); |
15376 | llvm::Type *ResultType = ConvertType(E->getType()); |
15377 | llvm::Function *F = nullptr; |
15378 | Value *Call = nullptr; |
15379 | int64_t ConstArg = ArgCI->getSExtValue(); |
15380 | unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits(); |
15381 | bool Is32Bit = false; |
15382 | assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width"); |
15383 | // The input to vec_replace_elt is an element index, not a byte index. |
15384 | if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt) |
15385 | ConstArg *= ArgWidth / 8; |
15386 | if (ArgWidth == 32) { |
15387 | Is32Bit = true; |
15388 | // When the second argument is 32 bits, it can either be an integer or |
15389 | // a float. The vinsw intrinsic is used in this case. |
15390 | F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw); |
15391 | // Fix the constant according to endianness. |
15392 | if (getTarget().isLittleEndian()) |
15393 | ConstArg = 12 - ConstArg; |
15394 | } else { |
15395 | // When the second argument is 64 bits, it can either be a long long or |
15396 | // a double. The vinsd intrinsic is used in this case. |
15397 | F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd); |
15398 | // Fix the constant for little endian. |
15399 | if (getTarget().isLittleEndian()) |
15400 | ConstArg = 8 - ConstArg; |
15401 | } |
15402 | Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg); |
15403 | // Depending on ArgWidth, the input vector could be a float or a double. |
15404 | // If the input vector is a float type, bitcast the inputs to integers. Or, |
15405 | // if the input vector is a double, bitcast the inputs to 64-bit integers. |
15406 | if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) { |
15407 | Ops[0] = Builder.CreateBitCast( |
15408 | Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4) |
15409 | : llvm::FixedVectorType::get(Int64Ty, 2)); |
15410 | Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty); |
15411 | } |
15412 | // Emit the call to vinsw or vinsd. |
15413 | Call = Builder.CreateCall(F, Ops); |
15414 | // Depending on the builtin, bitcast to the appropriate result type. |
15415 | if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt && |
15416 | !Ops[1]->getType()->isIntegerTy()) |
15417 | return Builder.CreateBitCast(Call, ResultType); |
15418 | else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt && |
15419 | Ops[1]->getType()->isIntegerTy()) |
15420 | return Call; |
15421 | else |
15422 | return Builder.CreateBitCast(Call, |
15423 | llvm::FixedVectorType::get(Int8Ty, 16)); |
15424 | } |
15425 | case PPC::BI__builtin_altivec_vpopcntb: |
15426 | case PPC::BI__builtin_altivec_vpopcnth: |
15427 | case PPC::BI__builtin_altivec_vpopcntw: |
15428 | case PPC::BI__builtin_altivec_vpopcntd: { |
15429 | llvm::Type *ResultType = ConvertType(E->getType()); |
15430 | Value *X = EmitScalarExpr(E->getArg(0)); |
15431 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
15432 | return Builder.CreateCall(F, X); |
15433 | } |
15434 | case PPC::BI__builtin_altivec_vadduqm: |
15435 | case PPC::BI__builtin_altivec_vsubuqm: { |
15436 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
15437 | Ops[0] = |
15438 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1)); |
15439 | Ops[1] = |
15440 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1)); |
15441 | if (BuiltinID == PPC::BI__builtin_altivec_vadduqm) |
15442 | return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm"); |
15443 | else |
15444 | return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm"); |
15445 | } |
15446 | // Rotate and insert under mask operation. |
15447 | // __rldimi(rs, is, shift, mask) |
15448 | // (rotl64(rs, shift) & mask) | (is & ~mask) |
15449 | // __rlwimi(rs, is, shift, mask) |
15450 | // (rotl(rs, shift) & mask) | (is & ~mask) |
15451 | case PPC::BI__builtin_ppc_rldimi: |
15452 | case PPC::BI__builtin_ppc_rlwimi: { |
15453 | llvm::Type *Ty = Ops[0]->getType(); |
15454 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15455 | if (BuiltinID == PPC::BI__builtin_ppc_rldimi) |
15456 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
15457 | Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]}); |
15458 | Value *X = Builder.CreateAnd(Shift, Ops[3]); |
15459 | Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3])); |
15460 | return Builder.CreateOr(X, Y); |
15461 | } |
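      | // Note (descriptive): the rotate itself is emitted as fshl(rs, rs, shift), |
      | // since funnel-shifting a value with itself is a rotate-left; the same |
      | // idiom appears in __rlwnm and __rdlam below. |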
15462 | // Rotate and insert under mask operation. |
15463 | // __rlwnm(rs, shift, mask) |
15464 | // rotl(rs, shift) & mask |
15465 | case PPC::BI__builtin_ppc_rlwnm: { |
15466 | llvm::Type *Ty = Ops[0]->getType(); |
15467 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15468 | Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]}); |
15469 | return Builder.CreateAnd(Shift, Ops[2]); |
15470 | } |
15471 | case PPC::BI__builtin_ppc_poppar4: |
15472 | case PPC::BI__builtin_ppc_poppar8: { |
15473 | llvm::Type *ArgType = Ops[0]->getType(); |
15474 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
15475 | Value *Tmp = Builder.CreateCall(F, Ops[0]); |
15476 | |
15477 | llvm::Type *ResultType = ConvertType(E->getType()); |
15478 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
15479 | if (Result->getType() != ResultType) |
15480 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
15481 | "cast"); |
15482 | return Result; |
15483 | } |
15484 | case PPC::BI__builtin_ppc_cmpb: { |
15485 | if (getTarget().getTriple().isPPC64()) { |
15486 | Function *F = |
15487 | CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty}); |
15488 | return Builder.CreateCall(F, Ops, "cmpb"); |
15489 | } |
15490 | // For 32 bit, emit the code as below: |
15491 | // %conv = trunc i64 %a to i32 |
15492 | // %conv1 = trunc i64 %b to i32 |
15493 | // %shr = lshr i64 %a, 32 |
15494 | // %conv2 = trunc i64 %shr to i32 |
15495 | // %shr3 = lshr i64 %b, 32 |
15496 | // %conv4 = trunc i64 %shr3 to i32 |
15497 | // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1) |
15498 | // %conv5 = zext i32 %0 to i64 |
15499 | // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4) |
15500 | // %conv614 = zext i32 %1 to i64 |
15501 | // %shl = shl nuw i64 %conv614, 32 |
15502 | // %or = or i64 %shl, %conv5 |
15503 | // ret i64 %or |
15504 | Function *F = |
15505 | CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty}); |
15506 | Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty); |
15507 | Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty); |
15508 | Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32); |
15509 | Value *ArgOneHi = |
15510 | Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty); |
15511 | Value *ArgTwoHi = |
15512 | Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty); |
15513 | Value *ResLo = Builder.CreateZExt( |
15514 | Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty); |
15515 | Value *ResHiShift = Builder.CreateZExt( |
15516 | Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty); |
15517 | Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt); |
15518 | return Builder.CreateOr(ResLo, ResHi); |
15519 | } |
15520 | // Copy sign |
15521 | case PPC::BI__builtin_vsx_xvcpsgnsp: |
15522 | case PPC::BI__builtin_vsx_xvcpsgndp: { |
15523 | llvm::Type *ResultType = ConvertType(E->getType()); |
15524 | Value *X = EmitScalarExpr(E->getArg(0)); |
15525 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15526 | ID = Intrinsic::copysign; |
15527 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
15528 | return Builder.CreateCall(F, {X, Y}); |
15529 | } |
15530 | // Rounding/truncation |
15531 | case PPC::BI__builtin_vsx_xvrspip: |
15532 | case PPC::BI__builtin_vsx_xvrdpip: |
15533 | case PPC::BI__builtin_vsx_xvrdpim: |
15534 | case PPC::BI__builtin_vsx_xvrspim: |
15535 | case PPC::BI__builtin_vsx_xvrdpi: |
15536 | case PPC::BI__builtin_vsx_xvrspi: |
15537 | case PPC::BI__builtin_vsx_xvrdpic: |
15538 | case PPC::BI__builtin_vsx_xvrspic: |
15539 | case PPC::BI__builtin_vsx_xvrdpiz: |
15540 | case PPC::BI__builtin_vsx_xvrspiz: { |
15541 | llvm::Type *ResultType = ConvertType(E->getType()); |
15542 | Value *X = EmitScalarExpr(E->getArg(0)); |
15543 | if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim || |
15544 | BuiltinID == PPC::BI__builtin_vsx_xvrspim) |
15545 | ID = Builder.getIsFPConstrained() |
15546 | ? Intrinsic::experimental_constrained_floor |
15547 | : Intrinsic::floor; |
15548 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi || |
15549 | BuiltinID == PPC::BI__builtin_vsx_xvrspi) |
15550 | ID = Builder.getIsFPConstrained() |
15551 | ? Intrinsic::experimental_constrained_round |
15552 | : Intrinsic::round; |
15553 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic || |
15554 | BuiltinID == PPC::BI__builtin_vsx_xvrspic) |
15555 | ID = Builder.getIsFPConstrained() |
15556 | ? Intrinsic::experimental_constrained_rint |
15557 | : Intrinsic::rint; |
15558 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip || |
15559 | BuiltinID == PPC::BI__builtin_vsx_xvrspip) |
15560 | ID = Builder.getIsFPConstrained() |
15561 | ? Intrinsic::experimental_constrained_ceil |
15562 | : Intrinsic::ceil; |
15563 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz || |
15564 | BuiltinID == PPC::BI__builtin_vsx_xvrspiz) |
15565 | ID = Builder.getIsFPConstrained() |
15566 | ? Intrinsic::experimental_constrained_trunc |
15567 | : Intrinsic::trunc; |
15568 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
15569 | return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X) |
15570 | : Builder.CreateCall(F, X); |
15571 | } |
15572 | |
15573 | // Absolute value |
15574 | case PPC::BI__builtin_vsx_xvabsdp: |
15575 | case PPC::BI__builtin_vsx_xvabssp: { |
15576 | llvm::Type *ResultType = ConvertType(E->getType()); |
15577 | Value *X = EmitScalarExpr(E->getArg(0)); |
15578 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
15579 | return Builder.CreateCall(F, X); |
15580 | } |
15581 | |
15582 | // Fastmath by default |
15583 | case PPC::BI__builtin_ppc_recipdivf: |
15584 | case PPC::BI__builtin_ppc_recipdivd: |
15585 | case PPC::BI__builtin_ppc_rsqrtf: |
15586 | case PPC::BI__builtin_ppc_rsqrtd: { |
15587 | FastMathFlags FMF = Builder.getFastMathFlags(); |
15588 | Builder.getFastMathFlags().setFast(); |
15589 | llvm::Type *ResultType = ConvertType(E->getType()); |
15590 | Value *X = EmitScalarExpr(E->getArg(0)); |
15591 | |
15592 | if (BuiltinID == PPC::BI__builtin_ppc_recipdivf || |
15593 | BuiltinID == PPC::BI__builtin_ppc_recipdivd) { |
15594 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15595 | Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv"); |
15596 | Builder.getFastMathFlags() &= (FMF); |
15597 | return FDiv; |
15598 | } |
15599 | auto *One = ConstantFP::get(ResultType, 1.0); |
15600 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
15601 | Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt"); |
15602 | Builder.getFastMathFlags() &= (FMF); |
15603 | return FDiv; |
15604 | } |
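      | // Note (descriptive): the saved FastMathFlags are restored above by |
      | // intersection ('&=') rather than assignment; because setFast() enables |
      | // every flag, intersecting with the saved set yields exactly the flags |
      | // that were in effect on entry. |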
15605 | case PPC::BI__builtin_ppc_alignx: { |
15606 | ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]); |
15607 | if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
15608 | AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
15609 | llvm::Value::MaximumAlignment); |
15610 | |
15611 | emitAlignmentAssumption(Ops[1], E->getArg(1), |
15612 | /*The expr loc is sufficient.*/ SourceLocation(), |
15613 | AlignmentCI, nullptr); |
15614 | return Ops[1]; |
15615 | } |
15616 | case PPC::BI__builtin_ppc_rdlam: { |
15617 | llvm::Type *Ty = Ops[0]->getType(); |
15618 | Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false); |
15619 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15620 | Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt}); |
15621 | return Builder.CreateAnd(Rotate, Ops[2]); |
15622 | } |
15623 | // FMA variations |
15624 | case PPC::BI__builtin_vsx_xvmaddadp: |
15625 | case PPC::BI__builtin_vsx_xvmaddasp: |
15626 | case PPC::BI__builtin_vsx_xvnmaddadp: |
15627 | case PPC::BI__builtin_vsx_xvnmaddasp: |
15628 | case PPC::BI__builtin_vsx_xvmsubadp: |
15629 | case PPC::BI__builtin_vsx_xvmsubasp: |
15630 | case PPC::BI__builtin_vsx_xvnmsubadp: |
15631 | case PPC::BI__builtin_vsx_xvnmsubasp: { |
15632 | llvm::Type *ResultType = ConvertType(E->getType()); |
15633 | Value *X = EmitScalarExpr(E->getArg(0)); |
15634 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15635 | Value *Z = EmitScalarExpr(E->getArg(2)); |
15636 | llvm::Function *F; |
15637 | if (Builder.getIsFPConstrained()) |
15638 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
15639 | else |
15640 | F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
15641 | switch (BuiltinID) { |
15642 | case PPC::BI__builtin_vsx_xvmaddadp: |
15643 | case PPC::BI__builtin_vsx_xvmaddasp: |
15644 | if (Builder.getIsFPConstrained()) |
15645 | return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); |
15646 | else |
15647 | return Builder.CreateCall(F, {X, Y, Z}); |
15648 | case PPC::BI__builtin_vsx_xvnmaddadp: |
15649 | case PPC::BI__builtin_vsx_xvnmaddasp: |
15650 | if (Builder.getIsFPConstrained()) |
15651 | return Builder.CreateFNeg( |
15652 | Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); |
15653 | else |
15654 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); |
15655 | case PPC::BI__builtin_vsx_xvmsubadp: |
15656 | case PPC::BI__builtin_vsx_xvmsubasp: |
15657 | if (Builder.getIsFPConstrained()) |
15658 | return Builder.CreateConstrainedFPCall( |
15659 | F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
15660 | else |
15661 | return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
15662 | case PPC::BI__builtin_vsx_xvnmsubadp: |
15663 | case PPC::BI__builtin_vsx_xvnmsubasp: |
15664 | if (Builder.getIsFPConstrained()) |
15665 | return Builder.CreateFNeg( |
15666 | Builder.CreateConstrainedFPCall( |
15667 | F, {X, Y, Builder.CreateFNeg(Z, "neg")}), |
15668 | "neg"); |
15669 | else |
15670 | return Builder.CreateFNeg( |
15671 | Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}), |
15672 | "neg"); |
15673 | } |
15674 | llvm_unreachable("Unknown FMA operation")::llvm::llvm_unreachable_internal("Unknown FMA operation", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 15674); |
15675 | return nullptr; // Suppress no-return warning |
15676 | } |
15677 | |
15678 | case PPC::BI__builtin_vsx_insertword: { |
15679 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw); |
15680 | |
15681 | // Third argument is a compile time constant int. It must be clamped |
15682 | // to the range [0, 12]. |
15683 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15684 | assert(ArgCI && |
15685 | "Third arg to xxinsertw intrinsic must be constant integer"); |
15686 | const int64_t MaxIndex = 12; |
15687 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
15688 | |
15689 | // The builtin semantics don't exactly match the xxinsertw instruction's |
15690 | // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the |
15691 | // word from the first argument and inserts it into the second argument. |
15692 | // The instruction extracts the word from its second input register and |
15693 | // inserts it into its first input register, so swap the arguments. |
15694 | std::swap(Ops[0], Ops[1]); |
15695 | |
15696 | // Need to cast the second argument from a vector of unsigned int to a |
15697 | // vector of long long. |
15698 | Ops[1] = |
15699 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2)); |
15700 | |
15701 | if (getTarget().isLittleEndian()) { |
15702 | // Reverse the double words in the vector we will extract from. |
15703 | Ops[0] = |
15704 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
15705 | Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0}); |
15706 | |
15707 | // Reverse the index. |
15708 | Index = MaxIndex - Index; |
15709 | } |
15710 | |
15711 | // Intrinsic expects the first arg to be a vector of int. |
15712 | Ops[0] = |
15713 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
15714 | Ops[2] = ConstantInt::getSigned(Int32Ty, Index); |
15715 | return Builder.CreateCall(F, Ops); |
15716 | } |
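      | // Worked example (illustrative): on LE, byte index 4 becomes 12 - 4 = 8 |
      | // and the doublewords are pre-swapped, so the same xxinsertw immediate |
      | // still lands on the word the caller asked for in element order. |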
15717 | |
15718 | case PPC::BI__builtin_vsx_extractuword: { |
15719 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw); |
15720 | |
15721 | // Intrinsic expects the first argument to be a vector of doublewords. |
15722 | Ops[0] = |
15723 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
15724 | |
15725 | // The second argument is a compile time constant int that needs to |
15726 | // be clamped to the range [0, 12]. |
15727 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]); |
15728 | assert(ArgCI && |
15729 | "Second Arg to xxextractuw intrinsic must be a constant integer!"); |
15730 | const int64_t MaxIndex = 12; |
15731 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
15732 | |
15733 | if (getTarget().isLittleEndian()) { |
15734 | // Reverse the index. |
15735 | Index = MaxIndex - Index; |
15736 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); |
15737 | |
15738 | // Emit the call, then reverse the double words of the results vector. |
15739 | Value *Call = Builder.CreateCall(F, Ops); |
15740 | |
15741 | Value *ShuffleCall = |
15742 | Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0}); |
15743 | return ShuffleCall; |
15744 | } else { |
15745 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); |
15746 | return Builder.CreateCall(F, Ops); |
15747 | } |
15748 | } |
15749 | |
15750 | case PPC::BI__builtin_vsx_xxpermdi: { |
15751 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15752 | assert(ArgCI && "Third arg must be constant integer!")(static_cast <bool> (ArgCI && "Third arg must be constant integer!" ) ? void (0) : __assert_fail ("ArgCI && \"Third arg must be constant integer!\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 15752, __extension__ __PRETTY_FUNCTION__)); |
15753 | |
15754 | unsigned Index = ArgCI->getZExtValue(); |
15755 | Ops[0] = |
15756 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
15757 | Ops[1] = |
15758 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2)); |
15759 | |
15760 | // Account for endianness by treating this as just a shuffle. So we use the |
15761 | // same indices for both LE and BE in order to produce expected results in |
15762 | // both cases. |
15763 | int ElemIdx0 = (Index & 2) >> 1; |
15764 | int ElemIdx1 = 2 + (Index & 1); |
15765 | |
15766 | int ShuffleElts[2] = {ElemIdx0, ElemIdx1}; |
15767 | Value *ShuffleCall = |
15768 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts); |
15769 | QualType BIRetType = E->getType(); |
15770 | auto RetTy = ConvertType(BIRetType); |
15771 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
15772 | } |
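      | // Worked example (illustrative): Index == 3 gives ElemIdx0 == 1 and |
      | // ElemIdx1 == 3, i.e. the shuffle takes doubleword 1 of Ops[0] and |
      | // doubleword 1 of Ops[1], matching xxpermdi with DM == 3. |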
15773 | |
15774 | case PPC::BI__builtin_vsx_xxsldwi: { |
15775 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15776 | assert(ArgCI && "Third argument must be a compile time constant")(static_cast <bool> (ArgCI && "Third argument must be a compile time constant" ) ? void (0) : __assert_fail ("ArgCI && \"Third argument must be a compile time constant\"" , "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/clang/lib/CodeGen/CGBuiltin.cpp" , 15776, __extension__ __PRETTY_FUNCTION__)); |
15777 | unsigned Index = ArgCI->getZExtValue() & 0x3; |
15778 | Ops[0] = |
15779 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
15780 | Ops[1] = |
15781 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4)); |
15782 | |
15783 | // Create a shuffle mask |
15784 | int ElemIdx0; |
15785 | int ElemIdx1; |
15786 | int ElemIdx2; |
15787 | int ElemIdx3; |
15788 | if (getTarget().isLittleEndian()) { |
15789 | // Little endian element N comes from element 8+N-Index of the |
15790 | // concatenated wide vector (of course, using modulo arithmetic on |
15791 | // the total number of elements). |
15792 | ElemIdx0 = (8 - Index) % 8; |
15793 | ElemIdx1 = (9 - Index) % 8; |
15794 | ElemIdx2 = (10 - Index) % 8; |
15795 | ElemIdx3 = (11 - Index) % 8; |
15796 | } else { |
15797 | // Big endian ElemIdx<N> = Index + N |
15798 | ElemIdx0 = Index; |
15799 | ElemIdx1 = Index + 1; |
15800 | ElemIdx2 = Index + 2; |
15801 | ElemIdx3 = Index + 3; |
15802 | } |
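      | // Sanity check (a sketch): Index == 1 yields the mask {7, 0, 1, 2} on
      | // little endian and {1, 2, 3, 4} on big endian.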
15803 | |
15804 | int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3}; |
15805 | Value *ShuffleCall = |
15806 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts); |
15807 | QualType BIRetType = E->getType(); |
15808 | auto RetTy = ConvertType(BIRetType); |
15809 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
15810 | } |
15811 | |
15812 | case PPC::BI__builtin_pack_vector_int128: { |
15813 | bool isLittleEndian = getTarget().isLittleEndian(); |
15814 | Value *UndefValue = |
15815 | llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2)); |
15816 | Value *Res = Builder.CreateInsertElement( |
15817 | UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0)); |
15818 | Res = Builder.CreateInsertElement(Res, Ops[1], |
15819 | (uint64_t)(isLittleEndian ? 0 : 1)); |
15820 | return Builder.CreateBitCast(Res, ConvertType(E->getType())); |
15821 | } |
15822 | |
15823 | case PPC::BI__builtin_unpack_vector_int128: { |
15824 | ConstantInt *Index = cast<ConstantInt>(Ops[1]); |
15825 | Value *Unpacked = Builder.CreateBitCast( |
15826 | Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2)); |
15827 | |
15828 | if (getTarget().isLittleEndian()) |
15829 | Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue()); |
15830 | |
15831 | return Builder.CreateExtractElement(Unpacked, Index); |
15832 | } |
15833 | |
15834 | case PPC::BI__builtin_ppc_sthcx: { |
15835 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx); |
15836 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); |
15837 | Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty); |
15838 | return Builder.CreateCall(F, Ops); |
15839 | } |
15840 | |
15841 | // The PPC MMA builtins take a pointer to a __vector_quad as an argument. |
15842 | // Some of the MMA instructions accumulate their result into an existing |
15843 | // accumulator whereas the others generate a new accumulator. So we need to |
15844 | // use custom code generation to expand a builtin call with a pointer to a |
15845 | // load (if the corresponding instruction accumulates its result) followed by |
15846 | // the call to the intrinsic and a store of the result. |
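      | // Schematically (an illustrative sketch, not emitted verbatim), an
      | // accumulating builtin such as __builtin_mma_xvf64gerpp(&acc, p, v)
      | // expands to roughly:
      | //   %a = load <512 x i1>, <512 x i1>* %acc
      | //   %r = call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %a, ...)
      | //   store <512 x i1> %r, <512 x i1>* %acc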
15847 | #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \ |
15848 | case PPC::BI__builtin_##Name: |
15849 | #include "clang/Basic/BuiltinsPPC.def" |
15850 | { |
15851 | // The first argument of these builtins is a pointer used to store their
15852 | // result. However, the LLVM intrinsics return their result in multiple
15853 | // return values. So, here we emit code extracting these values from the
15854 | // intrinsic results and storing them using that pointer.
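      | // E.g. (a sketch): __builtin_mma_disassemble_acc(vp, &acc) calls an
      | // intrinsic returning four <16 x i8> values, which the loop below stores
      | // to vp[0]..vp[3].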
15855 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc || |
15856 | BuiltinID == PPC::BI__builtin_vsx_disassemble_pair || |
15857 | BuiltinID == PPC::BI__builtin_mma_disassemble_pair) { |
15858 | unsigned NumVecs = 2; |
15859 | auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair; |
15860 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) { |
15861 | NumVecs = 4; |
15862 | Intrinsic = Intrinsic::ppc_mma_disassemble_acc; |
15863 | } |
15864 | llvm::Function *F = CGM.getIntrinsic(Intrinsic); |
15865 | Address Addr = EmitPointerWithAlignment(E->getArg(1)); |
15866 | Value *Vec = Builder.CreateLoad(Addr); |
15867 | Value *Call = Builder.CreateCall(F, {Vec}); |
15868 | llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
15869 | Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo()); |
15870 | for (unsigned i=0; i<NumVecs; i++) { |
15871 | Value *Vec = Builder.CreateExtractValue(Call, i); |
15872 | llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i); |
15873 | Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index); |
15874 | Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16)); |
15875 | } |
15876 | return Call; |
15877 | } |
15878 | bool Accumulate; |
15879 | switch (BuiltinID) { |
15880 | #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ |
15881 | case PPC::BI__builtin_##Name: \ |
15882 | ID = Intrinsic::ppc_##Intr; \ |
15883 | Accumulate = Acc; \ |
15884 | break; |
15885 | #include "clang/Basic/BuiltinsPPC.def" |
15886 | } |
15887 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
15888 | BuiltinID == PPC::BI__builtin_vsx_stxvp || |
15889 | BuiltinID == PPC::BI__builtin_mma_lxvp || |
15890 | BuiltinID == PPC::BI__builtin_mma_stxvp) { |
15891 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
15892 | BuiltinID == PPC::BI__builtin_mma_lxvp) { |
15893 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
15894 | Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); |
15895 | } else { |
15896 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
15897 | Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); |
15898 | } |
15899 | Ops.pop_back(); |
15900 | llvm::Function *F = CGM.getIntrinsic(ID); |
15901 | return Builder.CreateCall(F, Ops, ""); |
15902 | } |
15903 | SmallVector<Value*, 4> CallOps; |
15904 | if (Accumulate) { |
15905 | Address Addr = EmitPointerWithAlignment(E->getArg(0)); |
15906 | Value *Acc = Builder.CreateLoad(Addr); |
15907 | CallOps.push_back(Acc); |
15908 | } |
15909 | for (unsigned i=1; i<Ops.size(); i++) |
15910 | CallOps.push_back(Ops[i]); |
15911 | llvm::Function *F = CGM.getIntrinsic(ID); |
15912 | Value *Call = Builder.CreateCall(F, CallOps); |
15913 | return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64)); |
15914 | } |
15915 | |
15916 | case PPC::BI__builtin_ppc_compare_and_swap: |
15917 | case PPC::BI__builtin_ppc_compare_and_swaplp: { |
15918 | Address Addr = EmitPointerWithAlignment(E->getArg(0)); |
15919 | Address OldValAddr = EmitPointerWithAlignment(E->getArg(1)); |
15920 | Value *OldVal = Builder.CreateLoad(OldValAddr); |
15921 | QualType AtomicTy = E->getArg(0)->getType()->getPointeeType(); |
15922 | LValue LV = MakeAddrLValue(Addr, AtomicTy); |
15923 | auto Pair = EmitAtomicCompareExchange( |
15924 | LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(), |
15925 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true); |
15926 | // Unlike C11's atomic_compare_exchange, according to
15927 | // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
15928 | // > In either case, the contents of the memory location specified by addr
15929 | // > are copied into the memory location specified by old_val_addr.
15930 | // The documentation does not specify whether the store to OldValAddr is
15931 | // atomic or which ordering to use. Following XL's codegen, treat it as a
15932 | // normal store.
15933 | Value *LoadedVal = Pair.first.getScalarVal(); |
15934 | Builder.CreateStore(LoadedVal, OldValAddr); |
15935 | return Builder.CreateZExt(Pair.second, Builder.getInt32Ty()); |
15936 | } |
15937 | case PPC::BI__builtin_ppc_fetch_and_add: |
15938 | case PPC::BI__builtin_ppc_fetch_and_addlp: { |
15939 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
15940 | llvm::AtomicOrdering::Monotonic); |
15941 | } |
15942 | case PPC::BI__builtin_ppc_fetch_and_and: |
15943 | case PPC::BI__builtin_ppc_fetch_and_andlp: { |
15944 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
15945 | llvm::AtomicOrdering::Monotonic); |
15946 | } |
15947 | |
15948 | case PPC::BI__builtin_ppc_fetch_and_or: |
15949 | case PPC::BI__builtin_ppc_fetch_and_orlp: { |
15950 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
15951 | llvm::AtomicOrdering::Monotonic); |
15952 | } |
15953 | case PPC::BI__builtin_ppc_fetch_and_swap: |
15954 | case PPC::BI__builtin_ppc_fetch_and_swaplp: { |
15955 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
15956 | llvm::AtomicOrdering::Monotonic); |
15957 | } |
15958 | case PPC::BI__builtin_ppc_ldarx: |
15959 | case PPC::BI__builtin_ppc_lwarx: |
15960 | case PPC::BI__builtin_ppc_lharx: |
15961 | case PPC::BI__builtin_ppc_lbarx: |
15962 | return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E); |
15963 | case PPC::BI__builtin_ppc_mfspr: { |
15964 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32 |
15965 | ? Int32Ty |
15966 | : Int64Ty; |
15967 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType); |
15968 | return Builder.CreateCall(F, Ops); |
15969 | } |
15970 | case PPC::BI__builtin_ppc_mtspr: { |
15971 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32 |
15972 | ? Int32Ty |
15973 | : Int64Ty; |
15974 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType); |
15975 | return Builder.CreateCall(F, Ops); |
15976 | } |
15977 | case PPC::BI__builtin_ppc_popcntb: { |
15978 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
15979 | llvm::Type *ArgType = ArgValue->getType(); |
15980 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType}); |
15981 | return Builder.CreateCall(F, Ops, "popcntb"); |
15982 | } |
15983 | case PPC::BI__builtin_ppc_mtfsf: { |
15984 | // The builtin takes a uint32 that needs to be cast to an |
15985 | // f64 to be passed to the intrinsic. |
15986 | Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy); |
15987 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf); |
15988 | return Builder.CreateCall(F, {Ops[0], Cast}, ""); |
15989 | } |
15990 | |
15991 | case PPC::BI__builtin_ppc_swdiv_nochk: |
15992 | case PPC::BI__builtin_ppc_swdivs_nochk: { |
15993 | FastMathFlags FMF = Builder.getFastMathFlags(); |
15994 | Builder.getFastMathFlags().setFast(); |
15995 | Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk"); |
15996 | Builder.getFastMathFlags() &= (FMF); |
15997 | return FDiv; |
15998 | } |
15999 | case PPC::BI__builtin_ppc_fric: |
16000 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16001 | *this, E, Intrinsic::rint, |
16002 | Intrinsic::experimental_constrained_rint)) |
16003 | .getScalarVal(); |
16004 | case PPC::BI__builtin_ppc_frim: |
16005 | case PPC::BI__builtin_ppc_frims: |
16006 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16007 | *this, E, Intrinsic::floor, |
16008 | Intrinsic::experimental_constrained_floor)) |
16009 | .getScalarVal(); |
16010 | case PPC::BI__builtin_ppc_frin: |
16011 | case PPC::BI__builtin_ppc_frins: |
16012 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16013 | *this, E, Intrinsic::round, |
16014 | Intrinsic::experimental_constrained_round)) |
16015 | .getScalarVal(); |
16016 | case PPC::BI__builtin_ppc_frip: |
16017 | case PPC::BI__builtin_ppc_frips: |
16018 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16019 | *this, E, Intrinsic::ceil, |
16020 | Intrinsic::experimental_constrained_ceil)) |
16021 | .getScalarVal(); |
16022 | case PPC::BI__builtin_ppc_friz: |
16023 | case PPC::BI__builtin_ppc_frizs: |
16024 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16025 | *this, E, Intrinsic::trunc, |
16026 | Intrinsic::experimental_constrained_trunc)) |
16027 | .getScalarVal(); |
16028 | case PPC::BI__builtin_ppc_fsqrt: |
16029 | case PPC::BI__builtin_ppc_fsqrts: |
16030 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
16031 | *this, E, Intrinsic::sqrt, |
16032 | Intrinsic::experimental_constrained_sqrt)) |
16033 | .getScalarVal(); |
16034 | } |
16035 | } |
16036 | |
16037 | namespace { |
16038 | // If \p E is not a null pointer, insert an address space cast to match the
16039 | // return type of \p E if necessary.
16040 | Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF, |
16041 | const CallExpr *E = nullptr) { |
16042 | auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr); |
16043 | auto *Call = CGF.Builder.CreateCall(F); |
16044 | Call->addRetAttr( |
16045 | Attribute::getWithDereferenceableBytes(Call->getContext(), 64)); |
16046 | Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4))); |
16047 | if (!E) |
16048 | return Call; |
16049 | QualType BuiltinRetType = E->getType(); |
16050 | auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType)); |
16051 | if (RetTy == Call->getType()) |
16052 | return Call; |
16053 | return CGF.Builder.CreateAddrSpaceCast(Call, RetTy); |
16054 | } |
16055 | |
16056 | // \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
16057 | Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) { |
16058 | const unsigned XOffset = 4; |
16059 | auto *DP = EmitAMDGPUDispatchPtr(CGF); |
16060 | // Indexing the HSA kernel_dispatch_packet struct. |
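      | // Per the HSA ABI (an editor's note), workgroup_size_x/y/z are uint16_t
      | // fields at byte offsets 4, 6 and 8 of the packet, hence
      | // XOffset + Index * 2 and the 2-byte load below.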
16061 | auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2); |
16062 | auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset); |
16063 | auto *DstTy = |
16064 | CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace()); |
16065 | auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy); |
16066 | auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2))); |
16067 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
16068 | llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1), |
16069 | APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1)); |
16070 | LD->setMetadata(llvm::LLVMContext::MD_range, RNode); |
16071 | LD->setMetadata(llvm::LLVMContext::MD_invariant_load, |
16072 | llvm::MDNode::get(CGF.getLLVMContext(), None)); |
16073 | return LD; |
16074 | } |
16075 | |
16076 | // \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
16077 | Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) { |
16078 | const unsigned XOffset = 12; |
16079 | auto *DP = EmitAMDGPUDispatchPtr(CGF); |
16080 | // Indexing the HSA kernel_dispatch_packet struct. |
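      | // Per the HSA ABI (an editor's note), grid_size_x/y/z are uint32_t fields
      | // at byte offsets 12, 16 and 20 of the packet, hence XOffset + Index * 4
      | // and the 4-byte load below.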
16081 | auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4); |
16082 | auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset); |
16083 | auto *DstTy = |
16084 | CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace()); |
16085 | auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy); |
16086 | auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4))); |
16087 | LD->setMetadata(llvm::LLVMContext::MD_invariant_load, |
16088 | llvm::MDNode::get(CGF.getLLVMContext(), None)); |
16089 | return LD; |
16090 | } |
16091 | } // namespace |
16092 | |
16093 | // For processing memory ordering and memory scope arguments of various |
16094 | // amdgcn builtins. |
16095 | // \p Order takes a C++11-compatible memory-ordering specifier and converts
16096 | // it into LLVM's memory ordering specifier using the atomic C ABI, writing
16097 | // the result to \p AO. \p Scope takes a const char * and converts it into
16098 | // an AMDGCN-specific SyncScopeID, writing it to \p SSID.
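      | // Typical use (a sketch): __builtin_amdgcn_fence(__ATOMIC_SEQ_CST,
      | // "workgroup") reaches here with Order == 5 (seq_cst in the C ABI) and
      | // Scope holding the constant string "workgroup".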
16099 | bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope, |
16100 | llvm::AtomicOrdering &AO, |
16101 | llvm::SyncScope::ID &SSID) { |
16102 | if (isa<llvm::ConstantInt>(Order)) { |
16103 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
16104 | |
16105 | // Map C11/C++11 memory ordering to LLVM memory ordering |
16106 | assert(llvm::isValidAtomicOrderingCABI(ord));
16107 | switch (static_cast<llvm::AtomicOrderingCABI>(ord)) { |
16108 | case llvm::AtomicOrderingCABI::acquire: |
16109 | case llvm::AtomicOrderingCABI::consume: |
16110 | AO = llvm::AtomicOrdering::Acquire; |
16111 | break; |
16112 | case llvm::AtomicOrderingCABI::release: |
16113 | AO = llvm::AtomicOrdering::Release; |
16114 | break; |
16115 | case llvm::AtomicOrderingCABI::acq_rel: |
16116 | AO = llvm::AtomicOrdering::AcquireRelease; |
16117 | break; |
16118 | case llvm::AtomicOrderingCABI::seq_cst: |
16119 | AO = llvm::AtomicOrdering::SequentiallyConsistent; |
16120 | break; |
16121 | case llvm::AtomicOrderingCABI::relaxed: |
16122 | AO = llvm::AtomicOrdering::Monotonic; |
16123 | break; |
16124 | } |
16125 | |
16126 | StringRef scp; |
16127 | llvm::getConstantStringInfo(Scope, scp); |
16128 | SSID = getLLVMContext().getOrInsertSyncScopeID(scp); |
16129 | return true; |
16130 | } |
16131 | return false; |
16132 | } |
16133 | |
16134 | Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, |
16135 | const CallExpr *E) { |
16136 | llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent; |
16137 | llvm::SyncScope::ID SSID; |
16138 | switch (BuiltinID) { |
16139 | case AMDGPU::BI__builtin_amdgcn_div_scale: |
16140 | case AMDGPU::BI__builtin_amdgcn_div_scalef: { |
16141 | // Translate from the intrinsic's struct return to the builtin's out
16142 | // argument.
16143 | |
16144 | Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
16145 | |
16146 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
16147 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
16148 | llvm::Value *Z = EmitScalarExpr(E->getArg(2)); |
16149 | |
16150 | llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, |
16151 | X->getType()); |
16152 | |
16153 | llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); |
16154 | |
16155 | llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0); |
16156 | llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1); |
16157 | |
16158 | llvm::Type *RealFlagType |
16159 | = FlagOutPtr.getPointer()->getType()->getPointerElementType(); |
16160 | |
16161 | llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType); |
16162 | Builder.CreateStore(FlagExt, FlagOutPtr); |
16163 | return Result; |
16164 | } |
16165 | case AMDGPU::BI__builtin_amdgcn_div_fmas: |
16166 | case AMDGPU::BI__builtin_amdgcn_div_fmasf: { |
16167 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16168 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16169 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16170 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
16171 | |
16172 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, |
16173 | Src0->getType()); |
16174 | llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); |
16175 | return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); |
16176 | } |
16177 | |
16178 | case AMDGPU::BI__builtin_amdgcn_ds_swizzle: |
16179 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle); |
16180 | case AMDGPU::BI__builtin_amdgcn_mov_dpp8: |
16181 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8); |
16182 | case AMDGPU::BI__builtin_amdgcn_mov_dpp: |
16183 | case AMDGPU::BI__builtin_amdgcn_update_dpp: { |
16184 | llvm::SmallVector<llvm::Value *, 6> Args; |
16185 | for (unsigned I = 0; I != E->getNumArgs(); ++I) |
16186 | Args.push_back(EmitScalarExpr(E->getArg(I))); |
16187 | assert(Args.size() == 5 || Args.size() == 6);
16188 | if (Args.size() == 5) |
16189 | Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType())); |
16190 | Function *F = |
16191 | CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType()); |
16192 | return Builder.CreateCall(F, Args); |
16193 | } |
16194 | case AMDGPU::BI__builtin_amdgcn_div_fixup: |
16195 | case AMDGPU::BI__builtin_amdgcn_div_fixupf: |
16196 | case AMDGPU::BI__builtin_amdgcn_div_fixuph: |
16197 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup); |
16198 | case AMDGPU::BI__builtin_amdgcn_trig_preop: |
16199 | case AMDGPU::BI__builtin_amdgcn_trig_preopf: |
16200 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop); |
16201 | case AMDGPU::BI__builtin_amdgcn_rcp: |
16202 | case AMDGPU::BI__builtin_amdgcn_rcpf: |
16203 | case AMDGPU::BI__builtin_amdgcn_rcph: |
16204 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); |
16205 | case AMDGPU::BI__builtin_amdgcn_sqrt: |
16206 | case AMDGPU::BI__builtin_amdgcn_sqrtf: |
16207 | case AMDGPU::BI__builtin_amdgcn_sqrth: |
16208 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt); |
16209 | case AMDGPU::BI__builtin_amdgcn_rsq: |
16210 | case AMDGPU::BI__builtin_amdgcn_rsqf: |
16211 | case AMDGPU::BI__builtin_amdgcn_rsqh: |
16212 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); |
16213 | case AMDGPU::BI__builtin_amdgcn_rsq_clamp: |
16214 | case AMDGPU::BI__builtin_amdgcn_rsq_clampf: |
16215 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp); |
16216 | case AMDGPU::BI__builtin_amdgcn_sinf: |
16217 | case AMDGPU::BI__builtin_amdgcn_sinh: |
16218 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin); |
16219 | case AMDGPU::BI__builtin_amdgcn_cosf: |
16220 | case AMDGPU::BI__builtin_amdgcn_cosh: |
16221 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos); |
16222 | case AMDGPU::BI__builtin_amdgcn_dispatch_ptr: |
16223 | return EmitAMDGPUDispatchPtr(*this, E); |
16224 | case AMDGPU::BI__builtin_amdgcn_log_clampf: |
16225 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp); |
16226 | case AMDGPU::BI__builtin_amdgcn_ldexp: |
16227 | case AMDGPU::BI__builtin_amdgcn_ldexpf: |
16228 | case AMDGPU::BI__builtin_amdgcn_ldexph: |
16229 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); |
16230 | case AMDGPU::BI__builtin_amdgcn_frexp_mant: |
16231 | case AMDGPU::BI__builtin_amdgcn_frexp_mantf: |
16232 | case AMDGPU::BI__builtin_amdgcn_frexp_manth: |
16233 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant); |
16234 | case AMDGPU::BI__builtin_amdgcn_frexp_exp: |
16235 | case AMDGPU::BI__builtin_amdgcn_frexp_expf: { |
16236 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16237 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
16238 | { Builder.getInt32Ty(), Src0->getType() }); |
16239 | return Builder.CreateCall(F, Src0); |
16240 | } |
16241 | case AMDGPU::BI__builtin_amdgcn_frexp_exph: { |
16242 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16243 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
16244 | { Builder.getInt16Ty(), Src0->getType() }); |
16245 | return Builder.CreateCall(F, Src0); |
16246 | } |
16247 | case AMDGPU::BI__builtin_amdgcn_fract: |
16248 | case AMDGPU::BI__builtin_amdgcn_fractf: |
16249 | case AMDGPU::BI__builtin_amdgcn_fracth: |
16250 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract); |
16251 | case AMDGPU::BI__builtin_amdgcn_lerp: |
16252 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp); |
16253 | case AMDGPU::BI__builtin_amdgcn_ubfe: |
16254 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe); |
16255 | case AMDGPU::BI__builtin_amdgcn_sbfe: |
16256 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe); |
16257 | case AMDGPU::BI__builtin_amdgcn_uicmp: |
16258 | case AMDGPU::BI__builtin_amdgcn_uicmpl: |
16259 | case AMDGPU::BI__builtin_amdgcn_sicmp: |
16260 | case AMDGPU::BI__builtin_amdgcn_sicmpl: { |
16261 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16262 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16263 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16264 | |
16265 | // FIXME-GFX10: How should 32 bit mask be handled? |
16266 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp, |
16267 | { Builder.getInt64Ty(), Src0->getType() }); |
16268 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16269 | } |
16270 | case AMDGPU::BI__builtin_amdgcn_fcmp: |
16271 | case AMDGPU::BI__builtin_amdgcn_fcmpf: { |
16272 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16273 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16274 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16275 | |
16276 | // FIXME-GFX10: How should 32 bit mask be handled? |
16277 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp, |
16278 | { Builder.getInt64Ty(), Src0->getType() }); |
16279 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16280 | } |
16281 | case AMDGPU::BI__builtin_amdgcn_class: |
16282 | case AMDGPU::BI__builtin_amdgcn_classf: |
16283 | case AMDGPU::BI__builtin_amdgcn_classh: |
16284 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class); |
16285 | case AMDGPU::BI__builtin_amdgcn_fmed3f: |
16286 | case AMDGPU::BI__builtin_amdgcn_fmed3h: |
16287 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3); |
16288 | case AMDGPU::BI__builtin_amdgcn_ds_append: |
16289 | case AMDGPU::BI__builtin_amdgcn_ds_consume: { |
16290 | Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ? |
16291 | Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume; |
16292 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16293 | Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() }); |
16294 | return Builder.CreateCall(F, { Src0, Builder.getFalse() }); |
16295 | } |
16296 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
16297 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
16298 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: { |
16299 | Intrinsic::ID Intrin; |
16300 | switch (BuiltinID) { |
16301 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
16302 | Intrin = Intrinsic::amdgcn_ds_fadd; |
16303 | break; |
16304 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
16305 | Intrin = Intrinsic::amdgcn_ds_fmin; |
16306 | break; |
16307 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: |
16308 | Intrin = Intrinsic::amdgcn_ds_fmax; |
16309 | break; |
16310 | } |
16311 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16312 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16313 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16314 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
16315 | llvm::Value *Src4 = EmitScalarExpr(E->getArg(4)); |
16316 | llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() }); |
16317 | llvm::FunctionType *FTy = F->getFunctionType(); |
16318 | llvm::Type *PTy = FTy->getParamType(0); |
16319 | Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy); |
16320 | return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 }); |
16321 | } |
16322 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: |
16323 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: |
16324 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16: |
16325 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64: |
16326 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64: |
16327 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: |
16328 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64: |
16329 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: { |
16330 | Intrinsic::ID IID; |
16331 | llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext()); |
16332 | switch (BuiltinID) { |
16333 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: |
16334 | ArgTy = llvm::Type::getFloatTy(getLLVMContext()); |
16335 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
16336 | break; |
16337 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16: |
16338 | ArgTy = llvm::FixedVectorType::get( |
16339 | llvm::Type::getHalfTy(getLLVMContext()), 2); |
16340 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
16341 | break; |
16342 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: |
16343 | IID = Intrinsic::amdgcn_global_atomic_fadd; |
16344 | break; |
16345 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64: |
16346 | IID = Intrinsic::amdgcn_global_atomic_fmin; |
16347 | break; |
16348 | case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64: |
16349 | IID = Intrinsic::amdgcn_global_atomic_fmax; |
16350 | break; |
16351 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: |
16352 | IID = Intrinsic::amdgcn_flat_atomic_fadd; |
16353 | break; |
16354 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64: |
16355 | IID = Intrinsic::amdgcn_flat_atomic_fmin; |
16356 | break; |
16357 | case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: |
16358 | IID = Intrinsic::amdgcn_flat_atomic_fmax; |
16359 | break; |
16360 | } |
16361 | llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); |
16362 | llvm::Value *Val = EmitScalarExpr(E->getArg(1)); |
16363 | llvm::Function *F = |
16364 | CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()}); |
16365 | return Builder.CreateCall(F, {Addr, Val}); |
16366 | } |
16367 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64: |
16368 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: { |
16369 | Intrinsic::ID IID; |
16370 | llvm::Type *ArgTy; |
16371 | switch (BuiltinID) { |
16372 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: |
16373 | ArgTy = llvm::Type::getFloatTy(getLLVMContext()); |
16374 | IID = Intrinsic::amdgcn_ds_fadd; |
16375 | break; |
16376 | case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64: |
16377 | ArgTy = llvm::Type::getDoubleTy(getLLVMContext()); |
16378 | IID = Intrinsic::amdgcn_ds_fadd; |
16379 | break; |
16380 | } |
16381 | llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); |
16382 | llvm::Value *Val = EmitScalarExpr(E->getArg(1)); |
16383 | llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue( |
16384 | llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true)); |
16385 | llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue( |
16386 | llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0)); |
16387 | llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy}); |
16388 | return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1}); |
16389 | } |
16390 | case AMDGPU::BI__builtin_amdgcn_read_exec: { |
16391 | CallInst *CI = cast<CallInst>( |
16392 | EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec")); |
16393 | CI->setConvergent(); |
16394 | return CI; |
16395 | } |
16396 | case AMDGPU::BI__builtin_amdgcn_read_exec_lo: |
16397 | case AMDGPU::BI__builtin_amdgcn_read_exec_hi: { |
16398 | StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ? |
16399 | "exec_lo" : "exec_hi"; |
16400 | CallInst *CI = cast<CallInst>( |
16401 | EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName)); |
16402 | CI->setConvergent(); |
16403 | return CI; |
16404 | } |
16405 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray: |
16406 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h: |
16407 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l: |
16408 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: { |
16409 | llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0)); |
16410 | llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1)); |
16411 | llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2)); |
16412 | llvm::Value *RayDir = EmitScalarExpr(E->getArg(3)); |
16413 | llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4)); |
16414 | llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5)); |
16415 | |
16416 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray, |
16417 | {NodePtr->getType(), RayDir->getType()}); |
16418 | return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir, |
16419 | RayInverseDir, TextureDescr}); |
16420 | } |
16421 | |
16422 | // amdgcn workitem |
16423 | case AMDGPU::BI__builtin_amdgcn_workitem_id_x: |
16424 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024); |
16425 | case AMDGPU::BI__builtin_amdgcn_workitem_id_y: |
16426 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024); |
16427 | case AMDGPU::BI__builtin_amdgcn_workitem_id_z: |
16428 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024); |
16429 | |
16430 | // amdgcn workgroup size |
16431 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_x: |
16432 | return EmitAMDGPUWorkGroupSize(*this, 0); |
16433 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_y: |
16434 | return EmitAMDGPUWorkGroupSize(*this, 1); |
16435 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_z: |
16436 | return EmitAMDGPUWorkGroupSize(*this, 2); |
16437 | |
16438 | // amdgcn grid size |
16439 | case AMDGPU::BI__builtin_amdgcn_grid_size_x: |
16440 | return EmitAMDGPUGridSize(*this, 0); |
16441 | case AMDGPU::BI__builtin_amdgcn_grid_size_y: |
16442 | return EmitAMDGPUGridSize(*this, 1); |
16443 | case AMDGPU::BI__builtin_amdgcn_grid_size_z: |
16444 | return EmitAMDGPUGridSize(*this, 2); |
16445 | |
16446 | // r600 intrinsics |
16447 | case AMDGPU::BI__builtin_r600_recipsqrt_ieee: |
16448 | case AMDGPU::BI__builtin_r600_recipsqrt_ieeef: |
16449 | return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee); |
16450 | case AMDGPU::BI__builtin_r600_read_tidig_x: |
16451 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024); |
16452 | case AMDGPU::BI__builtin_r600_read_tidig_y: |
16453 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024); |
16454 | case AMDGPU::BI__builtin_r600_read_tidig_z: |
16455 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024); |
16456 | case AMDGPU::BI__builtin_amdgcn_alignbit: { |
16457 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16458 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16459 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16460 | Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType()); |
16461 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16462 | } |
16463 | |
16464 | case AMDGPU::BI__builtin_amdgcn_fence: { |
16465 | if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)), |
16466 | EmitScalarExpr(E->getArg(1)), AO, SSID)) |
16467 | return Builder.CreateFence(AO, SSID); |
16468 | LLVM_FALLTHROUGH;
16469 | } |
16470 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
16471 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
16472 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
16473 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: { |
16474 | unsigned BuiltinAtomicOp; |
16475 | llvm::Type *ResultType = ConvertType(E->getType()); |
16476 | |
16477 | switch (BuiltinID) { |
16478 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
16479 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
16480 | BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc; |
16481 | break; |
16482 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
16483 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: |
16484 | BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec; |
16485 | break; |
16486 | } |
16487 | |
16488 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
16489 | Value *Val = EmitScalarExpr(E->getArg(1)); |
16490 | |
16491 | llvm::Function *F = |
16492 | CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()}); |
16493 | |
16494 | if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)), |
16495 | EmitScalarExpr(E->getArg(3)), AO, SSID)) { |
16496 | |
16497 | // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect the ordering
16498 | // and scope as unsigned values.
16499 | Value *MemOrder = Builder.getInt32(static_cast<int>(AO)); |
16500 | Value *MemScope = Builder.getInt32(static_cast<int>(SSID)); |
16501 | |
16502 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
16503 | bool Volatile = |
16504 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
16505 | Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile)); |
16506 | |
16507 | return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile}); |
16508 | } |
16509 | LLVM_FALLTHROUGH;
16510 | } |
16511 | default: |
16512 | return nullptr; |
16513 | } |
16514 | } |
16515 | |
16516 | /// Handle a SystemZ function in which the final argument is a pointer |
16517 | /// to an int that receives the post-instruction CC value. At the LLVM level |
16518 | /// this is represented as a function that returns a {result, cc} pair. |
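      | // For instance (a sketch), an intrinsic returning { <16 x i8>, i32 } has
      | // element 1 (the CC) stored through the trailing pointer argument while
      | // element 0 becomes the builtin's return value.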
16519 | static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, |
16520 | unsigned IntrinsicID, |
16521 | const CallExpr *E) { |
16522 | unsigned NumArgs = E->getNumArgs() - 1; |
16523 | SmallVector<Value *, 8> Args(NumArgs); |
16524 | for (unsigned I = 0; I < NumArgs; ++I) |
16525 | Args[I] = CGF.EmitScalarExpr(E->getArg(I)); |
16526 | Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); |
16527 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID); |
16528 | Value *Call = CGF.Builder.CreateCall(F, Args); |
16529 | Value *CC = CGF.Builder.CreateExtractValue(Call, 1); |
16530 | CGF.Builder.CreateStore(CC, CCPtr); |
16531 | return CGF.Builder.CreateExtractValue(Call, 0); |
16532 | } |
16533 | |
16534 | Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, |
16535 | const CallExpr *E) { |
16536 | switch (BuiltinID) { |
16537 | case SystemZ::BI__builtin_tbegin: { |
16538 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
16539 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
16540 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); |
16541 | return Builder.CreateCall(F, {TDB, Control}); |
16542 | } |
16543 | case SystemZ::BI__builtin_tbegin_nofloat: { |
16544 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
16545 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
16546 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); |
16547 | return Builder.CreateCall(F, {TDB, Control}); |
16548 | } |
16549 | case SystemZ::BI__builtin_tbeginc: { |
16550 | Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); |
16551 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); |
16552 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); |
16553 | return Builder.CreateCall(F, {TDB, Control}); |
16554 | } |
16555 | case SystemZ::BI__builtin_tabort: { |
16556 | Value *Data = EmitScalarExpr(E->getArg(0)); |
16557 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort); |
16558 | return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort")); |
16559 | } |
16560 | case SystemZ::BI__builtin_non_tx_store: { |
16561 | Value *Address = EmitScalarExpr(E->getArg(0)); |
16562 | Value *Data = EmitScalarExpr(E->getArg(1)); |
16563 | Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); |
16564 | return Builder.CreateCall(F, {Data, Address}); |
16565 | } |
16566 | |
16567 | // Vector builtins. Note that most vector builtins are mapped automatically
16568 | // to target-specific LLVM intrinsics. The ones handled specially here can
16569 | // be represented via standard LLVM IR, which is preferable because it
16570 | // enables common LLVM optimizations.
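      | // For example, __builtin_s390_vpopctb is emitted below as the generic
      | // llvm.ctpop intrinsic on the vector type rather than a target-specific
      | // call, keeping it visible to the generic optimizer.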
16571 | |
16572 | case SystemZ::BI__builtin_s390_vpopctb: |
16573 | case SystemZ::BI__builtin_s390_vpopcth: |
16574 | case SystemZ::BI__builtin_s390_vpopctf: |
16575 | case SystemZ::BI__builtin_s390_vpopctg: { |
16576 | llvm::Type *ResultType = ConvertType(E->getType()); |
16577 | Value *X = EmitScalarExpr(E->getArg(0)); |
16578 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
16579 | return Builder.CreateCall(F, X); |
16580 | } |
16581 | |
16582 | case SystemZ::BI__builtin_s390_vclzb: |
16583 | case SystemZ::BI__builtin_s390_vclzh: |
16584 | case SystemZ::BI__builtin_s390_vclzf: |
16585 | case SystemZ::BI__builtin_s390_vclzg: { |
16586 | llvm::Type *ResultType = ConvertType(E->getType()); |
16587 | Value *X = EmitScalarExpr(E->getArg(0)); |
16588 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
16589 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
16590 | return Builder.CreateCall(F, {X, Undef}); |
16591 | } |
16592 | |
16593 | case SystemZ::BI__builtin_s390_vctzb: |
16594 | case SystemZ::BI__builtin_s390_vctzh: |
16595 | case SystemZ::BI__builtin_s390_vctzf: |
16596 | case SystemZ::BI__builtin_s390_vctzg: { |
16597 | llvm::Type *ResultType = ConvertType(E->getType()); |
16598 | Value *X = EmitScalarExpr(E->getArg(0)); |
16599 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
16600 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
16601 | return Builder.CreateCall(F, {X, Undef}); |
16602 | } |
16603 | |
16604 | case SystemZ::BI__builtin_s390_vfsqsb: |
16605 | case SystemZ::BI__builtin_s390_vfsqdb: { |
16606 | llvm::Type *ResultType = ConvertType(E->getType()); |
16607 | Value *X = EmitScalarExpr(E->getArg(0)); |
16608 | if (Builder.getIsFPConstrained()) { |
16609 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType); |
16610 | return Builder.CreateConstrainedFPCall(F, { X }); |
16611 | } else { |
16612 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
16613 | return Builder.CreateCall(F, X); |
16614 | } |
16615 | } |
16616 | case SystemZ::BI__builtin_s390_vfmasb: |
16617 | case SystemZ::BI__builtin_s390_vfmadb: { |
16618 | llvm::Type *ResultType = ConvertType(E->getType()); |
16619 | Value *X = EmitScalarExpr(E->getArg(0)); |
16620 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16621 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16622 | if (Builder.getIsFPConstrained()) { |
16623 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16624 | return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); |
16625 | } else { |
16626 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16627 | return Builder.CreateCall(F, {X, Y, Z}); |
16628 | } |
16629 | } |
16630 | case SystemZ::BI__builtin_s390_vfmssb: |
16631 | case SystemZ::BI__builtin_s390_vfmsdb: { |
16632 | llvm::Type *ResultType = ConvertType(E->getType()); |
16633 | Value *X = EmitScalarExpr(E->getArg(0)); |
16634 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16635 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16636 | if (Builder.getIsFPConstrained()) { |
16637 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16638 | return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
16639 | } else { |
16640 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16641 | return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
16642 | } |
16643 | } |
16644 | case SystemZ::BI__builtin_s390_vfnmasb: |
16645 | case SystemZ::BI__builtin_s390_vfnmadb: { |
16646 | llvm::Type *ResultType = ConvertType(E->getType()); |
16647 | Value *X = EmitScalarExpr(E->getArg(0)); |
16648 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16649 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16650 | if (Builder.getIsFPConstrained()) { |
16651 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16652 | return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); |
16653 | } else { |
16654 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16655 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); |
16656 | } |
16657 | } |
16658 | case SystemZ::BI__builtin_s390_vfnmssb: |
16659 | case SystemZ::BI__builtin_s390_vfnmsdb: { |
16660 | llvm::Type *ResultType = ConvertType(E->getType()); |
16661 | Value *X = EmitScalarExpr(E->getArg(0)); |
16662 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16663 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16664 | if (Builder.getIsFPConstrained()) { |
16665 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16666 | Value *NegZ = Builder.CreateFNeg(Z, "sub"); |
16667 | return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ})); |
16668 | } else { |
16669 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16670 | Value *NegZ = Builder.CreateFNeg(Z, "neg"); |
16671 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ})); |
16672 | } |
16673 | } |
16674 | case SystemZ::BI__builtin_s390_vflpsb: |
16675 | case SystemZ::BI__builtin_s390_vflpdb: { |
16676 | llvm::Type *ResultType = ConvertType(E->getType()); |
16677 | Value *X = EmitScalarExpr(E->getArg(0)); |
16678 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
16679 | return Builder.CreateCall(F, X); |
16680 | } |
16681 | case SystemZ::BI__builtin_s390_vflnsb: |
16682 | case SystemZ::BI__builtin_s390_vflndb: { |
16683 | llvm::Type *ResultType = ConvertType(E->getType()); |
16684 | Value *X = EmitScalarExpr(E->getArg(0)); |
16685 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
16686 | return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg"); |
16687 | } |
16688 | case SystemZ::BI__builtin_s390_vfisb: |
16689 | case SystemZ::BI__builtin_s390_vfidb: { |
16690 | llvm::Type *ResultType = ConvertType(E->getType()); |
16691 | Value *X = EmitScalarExpr(E->getArg(0)); |
16692 | // Constant-fold the M4 and M5 mask arguments. |
16693 | llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext()); |
16694 | llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
16695 | // Check whether this instance can be represented via an LLVM standard
16696 | // intrinsic. We only support some combinations of M4 and M5.
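      | // E.g. (M4, M5) == (0, 0) maps to llvm.rint and (4, 0) to llvm.nearbyint
      | // in the switch below; unsupported combinations fall back to the
      | // target-specific llvm.s390.vfisb/llvm.s390.vfidb intrinsics.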
16697 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16698 | Intrinsic::ID CI; |
16699 | switch (M4.getZExtValue()) { |
16700 | default: break; |
16701 | case 0: // IEEE-inexact exception allowed |
16702 | switch (M5.getZExtValue()) { |
16703 | default: break; |
16704 | case 0: ID = Intrinsic::rint; |
16705 | CI = Intrinsic::experimental_constrained_rint; break; |
16706 | } |
16707 | break; |
16708 | case 4: // IEEE-inexact exception suppressed |
16709 | switch (M5.getZExtValue()) { |
16710 | default: break; |
16711 | case 0: ID = Intrinsic::nearbyint; |
16712 | CI = Intrinsic::experimental_constrained_nearbyint; break; |
16713 | case 1: ID = Intrinsic::round; |
16714 | CI = Intrinsic::experimental_constrained_round; break; |
16715 | case 5: ID = Intrinsic::trunc; |
16716 | CI = Intrinsic::experimental_constrained_trunc; break; |
16717 | case 6: ID = Intrinsic::ceil; |
16718 | CI = Intrinsic::experimental_constrained_ceil; break; |
16719 | case 7: ID = Intrinsic::floor; |
16720 | CI = Intrinsic::experimental_constrained_floor; break; |
16721 | } |
16722 | break; |
16723 | } |
16724 | if (ID != Intrinsic::not_intrinsic) { |
16725 | if (Builder.getIsFPConstrained()) { |
16726 | Function *F = CGM.getIntrinsic(CI, ResultType); |
16727 | return Builder.CreateConstrainedFPCall(F, X); |
16728 | } else { |
16729 | Function *F = CGM.getIntrinsic(ID, ResultType); |
16730 | return Builder.CreateCall(F, X); |
16731 | } |
16732 | } |
16733 | switch (BuiltinID) { // FIXME: constrained version? |
16734 | case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; |
16735 | case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; |
16736 | default: llvm_unreachable("Unknown BuiltinID");
16737 | } |
16738 | Function *F = CGM.getIntrinsic(ID); |
16739 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
16740 | Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); |
16741 | return Builder.CreateCall(F, {X, M4Value, M5Value}); |
16742 | } |
16743 | case SystemZ::BI__builtin_s390_vfmaxsb: |
16744 | case SystemZ::BI__builtin_s390_vfmaxdb: { |
16745 | llvm::Type *ResultType = ConvertType(E->getType()); |
16746 | Value *X = EmitScalarExpr(E->getArg(0)); |
16747 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16748 | // Constant-fold the M4 mask argument. |
16749 | llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
16750 | // Check whether this instance can be represented via an LLVM standard
16751 | // intrinsic. We only support some values of M4.
16752 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16753 | Intrinsic::ID CI; |
16754 | switch (M4.getZExtValue()) { |
16755 | default: break; |
16756 | case 4: ID = Intrinsic::maxnum; |
16757 | CI = Intrinsic::experimental_constrained_maxnum; break; |
16758 | } |
16759 | if (ID != Intrinsic::not_intrinsic) { |
16760 | if (Builder.getIsFPConstrained()) { |
16761 | Function *F = CGM.getIntrinsic(CI, ResultType); |
16762 | return Builder.CreateConstrainedFPCall(F, {X, Y}); |
16763 | } else { |
16764 | Function *F = CGM.getIntrinsic(ID, ResultType); |
16765 | return Builder.CreateCall(F, {X, Y}); |
16766 | } |
16767 | } |
16768 | switch (BuiltinID) { |
16769 | case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; |
16770 | case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; |
16771 | default: llvm_unreachable("Unknown BuiltinID");
16772 | } |
16773 | Function *F = CGM.getIntrinsic(ID); |
16774 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
16775 | return Builder.CreateCall(F, {X, Y, M4Value}); |
16776 | } |
16777 | case SystemZ::BI__builtin_s390_vfminsb: |
16778 | case SystemZ::BI__builtin_s390_vfmindb: { |
16779 | llvm::Type *ResultType = ConvertType(E->getType()); |
16780 | Value *X = EmitScalarExpr(E->getArg(0)); |
16781 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16782 | // Constant-fold the M4 mask argument. |
16783 | llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
16784 | // Check whether this instance can be represented via an LLVM standard
16785 | // intrinsic. We only support some values of M4.
16786 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16787 | Intrinsic::ID CI; |
16788 | switch (M4.getZExtValue()) { |
16789 | default: break; |
16790 | case 4: ID = Intrinsic::minnum; |
16791 | CI = Intrinsic::experimental_constrained_minnum; break; |
16792 | } |
16793 | if (ID != Intrinsic::not_intrinsic) { |
16794 | if (Builder.getIsFPConstrained()) { |
16795 | Function *F = CGM.getIntrinsic(CI, ResultType); |
16796 | return Builder.CreateConstrainedFPCall(F, {X, Y}); |
16797 | } else { |
16798 | Function *F = CGM.getIntrinsic(ID, ResultType); |
16799 | return Builder.CreateCall(F, {X, Y}); |
16800 | } |
16801 | } |
16802 | switch (BuiltinID) { |
16803 | case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; |
16804 | case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; |
16805 | default: llvm_unreachable("Unknown BuiltinID");
16806 | } |
16807 | Function *F = CGM.getIntrinsic(ID); |
16808 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
16809 | return Builder.CreateCall(F, {X, Y, M4Value}); |
16810 | } |
16811 | |
16812 | case SystemZ::BI__builtin_s390_vlbrh: |
16813 | case SystemZ::BI__builtin_s390_vlbrf: |
16814 | case SystemZ::BI__builtin_s390_vlbrg: { |
16815 | llvm::Type *ResultType = ConvertType(E->getType()); |
16816 | Value *X = EmitScalarExpr(E->getArg(0)); |
16817 | Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType); |
16818 | return Builder.CreateCall(F, X); |
16819 | } |
16820 | |
16821 | // Vector intrinsics that output the post-instruction CC value. |
16822 | |
16823 | #define INTRINSIC_WITH_CC(NAME) \ |
16824 | case SystemZ::BI__builtin_##NAME: \ |
16825 | return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) |
16826 | |
16827 | INTRINSIC_WITH_CC(s390_vpkshs); |
16828 | INTRINSIC_WITH_CC(s390_vpksfs); |
16829 | INTRINSIC_WITH_CC(s390_vpksgs); |
16830 | |
16831 | INTRINSIC_WITH_CC(s390_vpklshs); |
16832 | INTRINSIC_WITH_CC(s390_vpklsfs); |
16833 | INTRINSIC_WITH_CC(s390_vpklsgs); |
16834 | |
16835 | INTRINSIC_WITH_CC(s390_vceqbs); |
16836 | INTRINSIC_WITH_CC(s390_vceqhs); |
16837 | INTRINSIC_WITH_CC(s390_vceqfs); |
16838 | INTRINSIC_WITH_CC(s390_vceqgs); |
16839 | |
16840 | INTRINSIC_WITH_CC(s390_vchbs); |
16841 | INTRINSIC_WITH_CC(s390_vchhs); |
16842 | INTRINSIC_WITH_CC(s390_vchfs); |
16843 | INTRINSIC_WITH_CC(s390_vchgs); |
16844 | |
16845 | INTRINSIC_WITH_CC(s390_vchlbs); |
16846 | INTRINSIC_WITH_CC(s390_vchlhs); |
16847 | INTRINSIC_WITH_CC(s390_vchlfs); |
16848 | INTRINSIC_WITH_CC(s390_vchlgs); |
16849 | |
16850 | INTRINSIC_WITH_CC(s390_vfaebs); |
16851 | INTRINSIC_WITH_CC(s390_vfaehs); |
16852 | INTRINSIC_WITH_CC(s390_vfaefs); |
16853 | |
16854 | INTRINSIC_WITH_CC(s390_vfaezbs); |
16855 | INTRINSIC_WITH_CC(s390_vfaezhs); |
16856 | INTRINSIC_WITH_CC(s390_vfaezfs); |
16857 | |
16858 | INTRINSIC_WITH_CC(s390_vfeebs); |
16859 | INTRINSIC_WITH_CC(s390_vfeehs); |
16860 | INTRINSIC_WITH_CC(s390_vfeefs); |
16861 | |
16862 | INTRINSIC_WITH_CC(s390_vfeezbs); |
16863 | INTRINSIC_WITH_CC(s390_vfeezhs); |
16864 | INTRINSIC_WITH_CC(s390_vfeezfs); |
16865 | |
16866 | INTRINSIC_WITH_CC(s390_vfenebs); |
16867 | INTRINSIC_WITH_CC(s390_vfenehs); |
16868 | INTRINSIC_WITH_CC(s390_vfenefs); |
16869 | |
16870 | INTRINSIC_WITH_CC(s390_vfenezbs); |
16871 | INTRINSIC_WITH_CC(s390_vfenezhs); |
16872 | INTRINSIC_WITH_CC(s390_vfenezfs); |
16873 | |
16874 | INTRINSIC_WITH_CC(s390_vistrbs); |
16875 | INTRINSIC_WITH_CC(s390_vistrhs); |
16876 | INTRINSIC_WITH_CC(s390_vistrfs); |
16877 | |
16878 | INTRINSIC_WITH_CC(s390_vstrcbs); |
16879 | INTRINSIC_WITH_CC(s390_vstrchs); |
16880 | INTRINSIC_WITH_CC(s390_vstrcfs); |
16881 | |
16882 | INTRINSIC_WITH_CC(s390_vstrczbs); |
16883 | INTRINSIC_WITH_CC(s390_vstrczhs); |
16884 | INTRINSIC_WITH_CC(s390_vstrczfs); |
16885 | |
16886 | INTRINSIC_WITH_CC(s390_vfcesbs); |
16887 | INTRINSIC_WITH_CC(s390_vfcedbs); |
16888 | INTRINSIC_WITH_CC(s390_vfchsbs); |
16889 | INTRINSIC_WITH_CC(s390_vfchdbs); |
16890 | INTRINSIC_WITH_CC(s390_vfchesbs); |
16891 | INTRINSIC_WITH_CC(s390_vfchedbs); |
16892 | |
16893 | INTRINSIC_WITH_CC(s390_vftcisb); |
16894 | INTRINSIC_WITH_CC(s390_vftcidb); |
16895 | |
16896 | INTRINSIC_WITH_CC(s390_vstrsb); |
16897 | INTRINSIC_WITH_CC(s390_vstrsh); |
16898 | INTRINSIC_WITH_CC(s390_vstrsf); |
16899 | |
16900 | INTRINSIC_WITH_CC(s390_vstrszb); |
16901 | INTRINSIC_WITH_CC(s390_vstrszh); |
16902 | INTRINSIC_WITH_CC(s390_vstrszf); |
16903 | |
16904 | #undef INTRINSIC_WITH_CC |
16905 | |
16906 | default: |
16907 | return nullptr; |
16908 | } |
16909 | } |
16910 | |
16911 | namespace { |
16912 | // Helper classes for mapping MMA builtins to a particular LLVM intrinsic variant.
16913 | struct NVPTXMmaLdstInfo {
16914 | unsigned NumResults; // Number of elements to load/store
16915 | // Intrinsic IDs for row/col variants; 0 if the particular layout is unsupported.
16916 | unsigned IID_col; |
16917 | unsigned IID_row; |
16918 | }; |
16919 | |
16920 | #define MMA_INTR(geom_op_type, layout) \ |
16921 | Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride |
16922 | #define MMA_LDST(n, geom_op_type) \ |
16923 | { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) } |
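      | // E.g. MMA_LDST(8, m16n16k16_load_a_f16) expands to
      | //   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
      | //       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}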
16924 | |
16925 | static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) { |
16926 | switch (BuiltinID) { |
16927 | // FP MMA loads |
16928 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
16929 | return MMA_LDST(8, m16n16k16_load_a_f16); |
16930 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
16931 | return MMA_LDST(8, m16n16k16_load_b_f16); |
16932 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
16933 | return MMA_LDST(4, m16n16k16_load_c_f16); |
16934 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
16935 | return MMA_LDST(8, m16n16k16_load_c_f32); |
16936 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
16937 | return MMA_LDST(8, m32n8k16_load_a_f16); |
16938 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
16939 | return MMA_LDST(8, m32n8k16_load_b_f16); |
16940 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
16941 | return MMA_LDST(4, m32n8k16_load_c_f16); |
16942 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
16943 | return MMA_LDST(8, m32n8k16_load_c_f32); |
16944 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
16945 | return MMA_LDST(8, m8n32k16_load_a_f16); |
16946 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
16947 | return MMA_LDST(8, m8n32k16_load_b_f16); |
16948 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
16949 | return MMA_LDST(4, m8n32k16_load_c_f16); |
16950 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
16951 | return MMA_LDST(8, m8n32k16_load_c_f32); |
16952 | |
16953 | // Integer MMA loads |
16954 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
16955 | return MMA_LDST(2, m16n16k16_load_a_s8); |
16956 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
16957 | return MMA_LDST(2, m16n16k16_load_a_u8); |
16958 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
16959 | return MMA_LDST(2, m16n16k16_load_b_s8); |
16960 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
16961 | return MMA_LDST(2, m16n16k16_load_b_u8); |
16962 | case NVPTX::BI__imma_m16n16k16_ld_c: |
16963 | return MMA_LDST(8, m16n16k16_load_c_s32); |
16964 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
16965 | return MMA_LDST(4, m32n8k16_load_a_s8); |
16966 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
16967 | return MMA_LDST(4, m32n8k16_load_a_u8); |
16968 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
16969 | return MMA_LDST(1, m32n8k16_load_b_s8); |
16970 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
16971 | return MMA_LDST(1, m32n8k16_load_b_u8); |
16972 | case NVPTX::BI__imma_m32n8k16_ld_c: |
16973 | return MMA_LDST(8, m32n8k16_load_c_s32); |
16974 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
16975 | return MMA_LDST(1, m8n32k16_load_a_s8); |
16976 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
16977 | return MMA_LDST(1, m8n32k16_load_a_u8); |
16978 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
16979 | return MMA_LDST(4, m8n32k16_load_b_s8); |
16980 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
16981 | return MMA_LDST(4, m8n32k16_load_b_u8); |
16982 | case NVPTX::BI__imma_m8n32k16_ld_c: |
16983 | return MMA_LDST(8, m8n32k16_load_c_s32); |
16984 | |
16985 | // Sub-integer MMA loads. |
16986 | // Only row/col layout is supported by A/B fragments. |
16987 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
16988 | return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)}; |
16989 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
16990 | return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)}; |
16991 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
16992 | return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0}; |
16993 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
16994 | return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0}; |
16995 | case NVPTX::BI__imma_m8n8k32_ld_c: |
16996 | return MMA_LDST(2, m8n8k32_load_c_s32); |
16997 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
16998 | return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)}; |
16999 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
17000 | return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0}; |
17001 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
17002 | return MMA_LDST(2, m8n8k128_load_c_s32); |
17003 | |
17004 | // Double MMA loads |
17005 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
17006 | return MMA_LDST(1, m8n8k4_load_a_f64); |
17007 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
17008 | return MMA_LDST(1, m8n8k4_load_b_f64); |
17009 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
17010 | return MMA_LDST(2, m8n8k4_load_c_f64); |
17011 | |
17012 | // Alternate float MMA loads |
17013 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
17014 | return MMA_LDST(4, m16n16k16_load_a_bf16); |
17015 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
17016 | return MMA_LDST(4, m16n16k16_load_b_bf16); |
17017 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
17018 | return MMA_LDST(2, m8n32k16_load_a_bf16); |
17019 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
17020 | return MMA_LDST(8, m8n32k16_load_b_bf16); |
17021 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
17022 | return MMA_LDST(8, m32n8k16_load_a_bf16); |
17023 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
17024 | return MMA_LDST(2, m32n8k16_load_b_bf16); |
17025 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
17026 | return MMA_LDST(4, m16n16k8_load_a_tf32); |
17027 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
17028 | return MMA_LDST(2, m16n16k8_load_b_tf32); |
17029 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: |
17030 | return MMA_LDST(8, m16n16k8_load_c_f32); |
17031 | |
17032 | // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike |
17033 | // PTX and LLVM IR where stores always use fragment D, NVCC builtins always |
17034 | // use fragment C for both loads and stores. |
17035 | // FP MMA stores. |
17036 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
17037 | return MMA_LDST(4, m16n16k16_store_d_f16); |
17038 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
17039 | return MMA_LDST(8, m16n16k16_store_d_f32); |
17040 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
17041 | return MMA_LDST(4, m32n8k16_store_d_f16); |
17042 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
17043 | return MMA_LDST(8, m32n8k16_store_d_f32); |
17044 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
17045 | return MMA_LDST(4, m8n32k16_store_d_f16); |
17046 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
17047 | return MMA_LDST(8, m8n32k16_store_d_f32); |
17048 | |
17049 | // Integer and sub-integer MMA stores. |
17050 | // Another naming quirk. Unlike other MMA builtins that use PTX types in the |
17051 | // name, integer loads/stores use LLVM's i32. |
17052 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
17053 | return MMA_LDST(8, m16n16k16_store_d_s32); |
17054 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
17055 | return MMA_LDST(8, m32n8k16_store_d_s32); |
17056 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
17057 | return MMA_LDST(8, m8n32k16_store_d_s32); |
17058 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
17059 | return MMA_LDST(2, m8n8k32_store_d_s32); |
17060 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
17061 | return MMA_LDST(2, m8n8k128_store_d_s32); |
17062 | |
17063 | // Double MMA store |
17064 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
17065 | return MMA_LDST(2, m8n8k4_store_d_f64); |
17066 | |
17067 | // Alternate float MMA store |
17068 | case NVPTX::BI__mma_m16n16k8_st_c_f32: |
17069 | return MMA_LDST(8, m16n16k8_store_d_f32); |
17070 | |
17071 | default: |
17072 | llvm_unreachable("Unknown MMA builtin"); |
17073 | } |
17074 | } |
17075 | #undef MMA_LDST |
17076 | #undef MMA_INTR |
17077 | |
17078 | |
17079 | struct NVPTXMmaInfo { |
17080 | unsigned NumEltsA; |
17081 | unsigned NumEltsB; |
17082 | unsigned NumEltsC; |
17083 | unsigned NumEltsD; |
17084 | |
17085 | // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority |
17086 | // over 'col' for layout. The index of non-satf variants is expected to match |
17087 | // the undocumented layout constants used by CUDA's mma.hpp. |
17088 | std::array<unsigned, 8> Variants; |
17089 | |
17090 | unsigned getMMAIntrinsic(int Layout, bool Satf) { |
17091 | unsigned Index = Layout + 4 * Satf; |
17092 | if (Index >= Variants.size()) |
17093 | return 0; |
17094 | return Variants[Index]; |
17095 | } |
17096 | }; |
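// For example, getMMAIntrinsic(/*Layout=*/1, /*Satf=*/true) reads index
// 1 + 4 * 1 == 5, i.e. the row/col satfinite variant in the tables built by
// MMA_SATF_VARIANTS below.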
17097 | |
17098 | // Returns an intrinsic that matches Layout and Satf for valid combinations |
17099 | // of the two, and 0 otherwise. |
17100 | static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { |
17101 | // clang-format off |
17102 | #define MMA_VARIANTS(geom, type) \ |
17103 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \ |
17104 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
17105 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \ |
17106 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type |
17107 | #define MMA_SATF_VARIANTS(geom, type) \ |
17108 | MMA_VARIANTS(geom, type), \ |
17109 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \ |
17110 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
17111 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \ |
17112 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite |
17113 | // Sub-integer MMA only supports row.col layout. |
17114 | #define MMA_VARIANTS_I4(geom, type) \ |
17115 | 0, \ |
17116 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
17117 | 0, \ |
17118 | 0, \ |
17119 | 0, \ |
17120 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
17121 | 0, \ |
17122 | 0 |
17123 | // b1 MMA does not support .satfinite. |
17124 | #define MMA_VARIANTS_B1_XOR(geom, type) \ |
17125 | 0, \ |
17126 | Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \ |
17127 | 0, \ |
17128 | 0, \ |
17129 | 0, \ |
17130 | 0, \ |
17131 | 0, \ |
17132 | 0 |
17133 | #define MMA_VARIANTS_B1_AND(geom, type) \ |
17134 | 0, \ |
17135 | Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \ |
17136 | 0, \ |
17137 | 0, \ |
17138 | 0, \ |
17139 | 0, \ |
17140 | 0, \ |
17141 | 0 |
17142 | // clang-format on |
17143 | switch (BuiltinID) { |
17144 | // FP MMA |
17145 | // Note that the 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while |
17146 | // the NumEltsN fields of the return value are ordered as A,B,C,D. |
17147 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
17148 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}}; |
17149 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
17150 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}}; |
17151 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
17152 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}}; |
17153 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
17154 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}}; |
17155 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
17156 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}}; |
17157 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
17158 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}}; |
17159 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
17160 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}}; |
17161 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
17162 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}}; |
17163 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
17164 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}}; |
17165 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
17166 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}}; |
17167 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
17168 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}}; |
17169 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
17170 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}}; |
17171 | |
17172 | // Integer MMA |
17173 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
17174 | return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}}; |
17175 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
17176 | return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}}; |
17177 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
17178 | return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}}; |
17179 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
17180 | return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}}; |
17181 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
17182 | return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}}; |
17183 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
17184 | return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}}; |
17185 | |
17186 | // Sub-integer MMA |
17187 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
17188 | return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}}; |
17189 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
17190 | return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}}; |
17191 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
17192 | return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}}; |
17193 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
17194 | return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}}; |
17195 | |
17196 | // Double MMA |
17197 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
17198 | return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}}; |
17199 | |
17200 | // Alternate FP MMA |
17201 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
17202 | return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}}; |
17203 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
17204 | return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}}; |
17205 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
17206 | return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}}; |
17207 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: |
17208 | return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}}; |
17209 | default: |
17210 | llvm_unreachable("Unexpected builtin ID."); |
17211 | } |
17212 | #undef MMA_VARIANTS |
17213 | #undef MMA_SATF_VARIANTS |
17214 | #undef MMA_VARIANTS_I4 |
17215 | #undef MMA_VARIANTS_B1_AND |
17216 | #undef MMA_VARIANTS_B1_XOR |
17217 | } |
17218 | |
17219 | } // namespace |
17220 | |
17221 | Value * |
17222 | CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { |
17223 | auto MakeLdg = [&](unsigned IntrinsicID) { |
17224 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17225 | clang::CharUnits Align = |
17226 | CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType()); |
17227 | return Builder.CreateCall( |
17228 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), |
17229 | Ptr->getType()}), |
17230 | {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())}); |
17231 | }; |
17232 | auto MakeScopedAtomic = [&](unsigned IntrinsicID) { |
17233 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17234 | return Builder.CreateCall( |
17235 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), |
17236 | Ptr->getType()}), |
17237 | {Ptr, EmitScalarExpr(E->getArg(1))}); |
17238 | }; |
17239 | switch (BuiltinID) { |
17240 | case NVPTX::BI__nvvm_atom_add_gen_i: |
17241 | case NVPTX::BI__nvvm_atom_add_gen_l: |
17242 | case NVPTX::BI__nvvm_atom_add_gen_ll: |
17243 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E); |
17244 | |
17245 | case NVPTX::BI__nvvm_atom_sub_gen_i: |
17246 | case NVPTX::BI__nvvm_atom_sub_gen_l: |
17247 | case NVPTX::BI__nvvm_atom_sub_gen_ll: |
17248 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E); |
17249 | |
17250 | case NVPTX::BI__nvvm_atom_and_gen_i: |
17251 | case NVPTX::BI__nvvm_atom_and_gen_l: |
17252 | case NVPTX::BI__nvvm_atom_and_gen_ll: |
17253 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E); |
17254 | |
17255 | case NVPTX::BI__nvvm_atom_or_gen_i: |
17256 | case NVPTX::BI__nvvm_atom_or_gen_l: |
17257 | case NVPTX::BI__nvvm_atom_or_gen_ll: |
17258 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E); |
17259 | |
17260 | case NVPTX::BI__nvvm_atom_xor_gen_i: |
17261 | case NVPTX::BI__nvvm_atom_xor_gen_l: |
17262 | case NVPTX::BI__nvvm_atom_xor_gen_ll: |
17263 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E); |
17264 | |
17265 | case NVPTX::BI__nvvm_atom_xchg_gen_i: |
17266 | case NVPTX::BI__nvvm_atom_xchg_gen_l: |
17267 | case NVPTX::BI__nvvm_atom_xchg_gen_ll: |
17268 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E); |
17269 | |
17270 | case NVPTX::BI__nvvm_atom_max_gen_i: |
17271 | case NVPTX::BI__nvvm_atom_max_gen_l: |
17272 | case NVPTX::BI__nvvm_atom_max_gen_ll: |
17273 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E); |
17274 | |
17275 | case NVPTX::BI__nvvm_atom_max_gen_ui: |
17276 | case NVPTX::BI__nvvm_atom_max_gen_ul: |
17277 | case NVPTX::BI__nvvm_atom_max_gen_ull: |
17278 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E); |
17279 | |
17280 | case NVPTX::BI__nvvm_atom_min_gen_i: |
17281 | case NVPTX::BI__nvvm_atom_min_gen_l: |
17282 | case NVPTX::BI__nvvm_atom_min_gen_ll: |
17283 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E); |
17284 | |
17285 | case NVPTX::BI__nvvm_atom_min_gen_ui: |
17286 | case NVPTX::BI__nvvm_atom_min_gen_ul: |
17287 | case NVPTX::BI__nvvm_atom_min_gen_ull: |
17288 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E); |
17289 | |
17290 | case NVPTX::BI__nvvm_atom_cas_gen_i: |
17291 | case NVPTX::BI__nvvm_atom_cas_gen_l: |
17292 | case NVPTX::BI__nvvm_atom_cas_gen_ll: |
17293 | // __nvvm_atom_cas_gen_* should return the old value rather than the |
17294 | // success flag. |
17295 | return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false); |
17296 | |
17297 | case NVPTX::BI__nvvm_atom_add_gen_f: |
17298 | case NVPTX::BI__nvvm_atom_add_gen_d: { |
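    // Lower the generic float/double atomic add to an LLVM 'atomicrmw fadd'
    // with sequentially consistent ordering.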
17299 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17300 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17301 | return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val, |
17302 | AtomicOrdering::SequentiallyConsistent); |
17303 | } |
17304 | |
17305 | case NVPTX::BI__nvvm_atom_inc_gen_ui: { |
17306 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17307 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17308 | Function *FnALI32 = |
17309 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); |
17310 | return Builder.CreateCall(FnALI32, {Ptr, Val}); |
17311 | } |
17312 | |
17313 | case NVPTX::BI__nvvm_atom_dec_gen_ui: { |
17314 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17315 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17316 | Function *FnALD32 = |
17317 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); |
17318 | return Builder.CreateCall(FnALD32, {Ptr, Val}); |
17319 | } |
17320 | |
17321 | case NVPTX::BI__nvvm_ldg_c: |
17322 | case NVPTX::BI__nvvm_ldg_c2: |
17323 | case NVPTX::BI__nvvm_ldg_c4: |
17324 | case NVPTX::BI__nvvm_ldg_s: |
17325 | case NVPTX::BI__nvvm_ldg_s2: |
17326 | case NVPTX::BI__nvvm_ldg_s4: |
17327 | case NVPTX::BI__nvvm_ldg_i: |
17328 | case NVPTX::BI__nvvm_ldg_i2: |
17329 | case NVPTX::BI__nvvm_ldg_i4: |
17330 | case NVPTX::BI__nvvm_ldg_l: |
17331 | case NVPTX::BI__nvvm_ldg_ll: |
17332 | case NVPTX::BI__nvvm_ldg_ll2: |
17333 | case NVPTX::BI__nvvm_ldg_uc: |
17334 | case NVPTX::BI__nvvm_ldg_uc2: |
17335 | case NVPTX::BI__nvvm_ldg_uc4: |
17336 | case NVPTX::BI__nvvm_ldg_us: |
17337 | case NVPTX::BI__nvvm_ldg_us2: |
17338 | case NVPTX::BI__nvvm_ldg_us4: |
17339 | case NVPTX::BI__nvvm_ldg_ui: |
17340 | case NVPTX::BI__nvvm_ldg_ui2: |
17341 | case NVPTX::BI__nvvm_ldg_ui4: |
17342 | case NVPTX::BI__nvvm_ldg_ul: |
17343 | case NVPTX::BI__nvvm_ldg_ull: |
17344 | case NVPTX::BI__nvvm_ldg_ull2: |
17345 | // PTX Interoperability section 2.2: "For a vector with an even number of |
17346 | // elements, its alignment is set to number of elements times the alignment |
17347 | // of its member: n*alignof(t)." |
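  // E.g., by that rule a two-element 'long long' vector ("ll2") is aligned to
  // 2 * alignof(long long) == 16 bytes.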
17348 | return MakeLdg(Intrinsic::nvvm_ldg_global_i); |
17349 | case NVPTX::BI__nvvm_ldg_f: |
17350 | case NVPTX::BI__nvvm_ldg_f2: |
17351 | case NVPTX::BI__nvvm_ldg_f4: |
17352 | case NVPTX::BI__nvvm_ldg_d: |
17353 | case NVPTX::BI__nvvm_ldg_d2: |
17354 | return MakeLdg(Intrinsic::nvvm_ldg_global_f); |
17355 | |
17356 | case NVPTX::BI__nvvm_atom_cta_add_gen_i: |
17357 | case NVPTX::BI__nvvm_atom_cta_add_gen_l: |
17358 | case NVPTX::BI__nvvm_atom_cta_add_gen_ll: |
17359 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta); |
17360 | case NVPTX::BI__nvvm_atom_sys_add_gen_i: |
17361 | case NVPTX::BI__nvvm_atom_sys_add_gen_l: |
17362 | case NVPTX::BI__nvvm_atom_sys_add_gen_ll: |
17363 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys); |
17364 | case NVPTX::BI__nvvm_atom_cta_add_gen_f: |
17365 | case NVPTX::BI__nvvm_atom_cta_add_gen_d: |
17366 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta); |
17367 | case NVPTX::BI__nvvm_atom_sys_add_gen_f: |
17368 | case NVPTX::BI__nvvm_atom_sys_add_gen_d: |
17369 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys); |
17370 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_i: |
17371 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_l: |
17372 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll: |
17373 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta); |
17374 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_i: |
17375 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_l: |
17376 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll: |
17377 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys); |
17378 | case NVPTX::BI__nvvm_atom_cta_max_gen_i: |
17379 | case NVPTX::BI__nvvm_atom_cta_max_gen_ui: |
17380 | case NVPTX::BI__nvvm_atom_cta_max_gen_l: |
17381 | case NVPTX::BI__nvvm_atom_cta_max_gen_ul: |
17382 | case NVPTX::BI__nvvm_atom_cta_max_gen_ll: |
17383 | case NVPTX::BI__nvvm_atom_cta_max_gen_ull: |
17384 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta); |
17385 | case NVPTX::BI__nvvm_atom_sys_max_gen_i: |
17386 | case NVPTX::BI__nvvm_atom_sys_max_gen_ui: |
17387 | case NVPTX::BI__nvvm_atom_sys_max_gen_l: |
17388 | case NVPTX::BI__nvvm_atom_sys_max_gen_ul: |
17389 | case NVPTX::BI__nvvm_atom_sys_max_gen_ll: |
17390 | case NVPTX::BI__nvvm_atom_sys_max_gen_ull: |
17391 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys); |
17392 | case NVPTX::BI__nvvm_atom_cta_min_gen_i: |
17393 | case NVPTX::BI__nvvm_atom_cta_min_gen_ui: |
17394 | case NVPTX::BI__nvvm_atom_cta_min_gen_l: |
17395 | case NVPTX::BI__nvvm_atom_cta_min_gen_ul: |
17396 | case NVPTX::BI__nvvm_atom_cta_min_gen_ll: |
17397 | case NVPTX::BI__nvvm_atom_cta_min_gen_ull: |
17398 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta); |
17399 | case NVPTX::BI__nvvm_atom_sys_min_gen_i: |
17400 | case NVPTX::BI__nvvm_atom_sys_min_gen_ui: |
17401 | case NVPTX::BI__nvvm_atom_sys_min_gen_l: |
17402 | case NVPTX::BI__nvvm_atom_sys_min_gen_ul: |
17403 | case NVPTX::BI__nvvm_atom_sys_min_gen_ll: |
17404 | case NVPTX::BI__nvvm_atom_sys_min_gen_ull: |
17405 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys); |
17406 | case NVPTX::BI__nvvm_atom_cta_inc_gen_ui: |
17407 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta); |
17408 | case NVPTX::BI__nvvm_atom_cta_dec_gen_ui: |
17409 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta); |
17410 | case NVPTX::BI__nvvm_atom_sys_inc_gen_ui: |
17411 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys); |
17412 | case NVPTX::BI__nvvm_atom_sys_dec_gen_ui: |
17413 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys); |
17414 | case NVPTX::BI__nvvm_atom_cta_and_gen_i: |
17415 | case NVPTX::BI__nvvm_atom_cta_and_gen_l: |
17416 | case NVPTX::BI__nvvm_atom_cta_and_gen_ll: |
17417 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta); |
17418 | case NVPTX::BI__nvvm_atom_sys_and_gen_i: |
17419 | case NVPTX::BI__nvvm_atom_sys_and_gen_l: |
17420 | case NVPTX::BI__nvvm_atom_sys_and_gen_ll: |
17421 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys); |
17422 | case NVPTX::BI__nvvm_atom_cta_or_gen_i: |
17423 | case NVPTX::BI__nvvm_atom_cta_or_gen_l: |
17424 | case NVPTX::BI__nvvm_atom_cta_or_gen_ll: |
17425 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta); |
17426 | case NVPTX::BI__nvvm_atom_sys_or_gen_i: |
17427 | case NVPTX::BI__nvvm_atom_sys_or_gen_l: |
17428 | case NVPTX::BI__nvvm_atom_sys_or_gen_ll: |
17429 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys); |
17430 | case NVPTX::BI__nvvm_atom_cta_xor_gen_i: |
17431 | case NVPTX::BI__nvvm_atom_cta_xor_gen_l: |
17432 | case NVPTX::BI__nvvm_atom_cta_xor_gen_ll: |
17433 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta); |
17434 | case NVPTX::BI__nvvm_atom_sys_xor_gen_i: |
17435 | case NVPTX::BI__nvvm_atom_sys_xor_gen_l: |
17436 | case NVPTX::BI__nvvm_atom_sys_xor_gen_ll: |
17437 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys); |
17438 | case NVPTX::BI__nvvm_atom_cta_cas_gen_i: |
17439 | case NVPTX::BI__nvvm_atom_cta_cas_gen_l: |
17440 | case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: { |
17441 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17442 | return Builder.CreateCall( |
17443 | CGM.getIntrinsic( |
17444 | Intrinsic::nvvm_atomic_cas_gen_i_cta, |
17445 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), |
17446 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
17447 | } |
17448 | case NVPTX::BI__nvvm_atom_sys_cas_gen_i: |
17449 | case NVPTX::BI__nvvm_atom_sys_cas_gen_l: |
17450 | case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: { |
17451 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17452 | return Builder.CreateCall( |
17453 | CGM.getIntrinsic( |
17454 | Intrinsic::nvvm_atomic_cas_gen_i_sys, |
17455 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), |
17456 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
17457 | } |
17458 | case NVPTX::BI__nvvm_match_all_sync_i32p: |
17459 | case NVPTX::BI__nvvm_match_all_sync_i64p: { |
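    // The match.all.sync intrinsic returns a {value, predicate} pair; store
    // the predicate through the third argument and return the value.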
17460 | Value *Mask = EmitScalarExpr(E->getArg(0)); |
17461 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17462 | Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
17463 | Value *ResultPair = Builder.CreateCall( |
17464 | CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p |
17465 | ? Intrinsic::nvvm_match_all_sync_i32p |
17466 | : Intrinsic::nvvm_match_all_sync_i64p), |
17467 | {Mask, Val}); |
17468 | Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1), |
17469 | PredOutPtr.getElementType()); |
17470 | Builder.CreateStore(Pred, PredOutPtr); |
17471 | return Builder.CreateExtractValue(ResultPair, 0); |
17472 | } |
17473 | |
17474 | // FP MMA loads |
17475 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
17476 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
17477 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
17478 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
17479 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
17480 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
17481 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
17482 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
17483 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
17484 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
17485 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
17486 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
17487 | // Integer MMA loads. |
17488 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
17489 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
17490 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
17491 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
17492 | case NVPTX::BI__imma_m16n16k16_ld_c: |
17493 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
17494 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
17495 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
17496 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
17497 | case NVPTX::BI__imma_m32n8k16_ld_c: |
17498 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
17499 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
17500 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
17501 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
17502 | case NVPTX::BI__imma_m8n32k16_ld_c: |
17503 | // Sub-integer MMA loads. |
17504 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
17505 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
17506 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
17507 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
17508 | case NVPTX::BI__imma_m8n8k32_ld_c: |
17509 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
17510 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
17511 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
17512 | // Double MMA loads. |
17513 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
17514 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
17515 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
17516 | // Alternate float MMA loads. |
17517 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
17518 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
17519 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
17520 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
17521 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
17522 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
17523 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
17524 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
17525 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: { |
17526 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
17527 | Value *Src = EmitScalarExpr(E->getArg(1)); |
17528 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
17529 | Optional<llvm::APSInt> isColMajorArg = |
17530 | E->getArg(3)->getIntegerConstantExpr(getContext()); |
17531 | if (!isColMajorArg) |
17532 | return nullptr; |
17533 | bool isColMajor = isColMajorArg->getSExtValue(); |
17534 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
17535 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
17536 | if (IID == 0) |
17537 | return nullptr; |
17538 | |
17539 | Value *Result = |
17540 | Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm}); |
17541 | |
17542 | // Save returned values. |
17543 | assert(II.NumResults); |
17544 | if (II.NumResults == 1) { |
17545 | Builder.CreateAlignedStore(Result, Dst.getPointer(), |
17546 | CharUnits::fromQuantity(4)); |
17547 | } else { |
17548 | for (unsigned i = 0; i < II.NumResults; ++i) { |
17549 | Builder.CreateAlignedStore( |
17550 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), |
17551 | Dst.getElementType()), |
17552 | Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), |
17553 | llvm::ConstantInt::get(IntTy, i)), |
17554 | CharUnits::fromQuantity(4)); |
17555 | } |
17556 | } |
17557 | return Result; |
17558 | } |
17559 | |
17560 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
17561 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
17562 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
17563 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
17564 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
17565 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
17566 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
17567 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
17568 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
17569 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
17570 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
17571 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
17572 | case NVPTX::BI__mma_m16n16k8_st_c_f32: { |
17573 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
17574 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
17575 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
17576 | Optional<llvm::APSInt> isColMajorArg = |
17577 | E->getArg(3)->getIntegerConstantExpr(getContext()); |
17578 | if (!isColMajorArg) |
17579 | return nullptr; |
17580 | bool isColMajor = isColMajorArg->getSExtValue(); |
17581 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
17582 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
17583 | if (IID == 0) |
17584 | return nullptr; |
17585 | Function *Intrinsic = |
17586 | CGM.getIntrinsic(IID, Dst->getType()); |
17587 | llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); |
17588 | SmallVector<Value *, 10> Values = {Dst}; |
17589 | for (unsigned i = 0; i < II.NumResults; ++i) { |
17590 | Value *V = Builder.CreateAlignedLoad( |
17591 | Src.getElementType(), |
17592 | Builder.CreateGEP(Src.getElementType(), Src.getPointer(), |
17593 | llvm::ConstantInt::get(IntTy, i)), |
17594 | CharUnits::fromQuantity(4)); |
17595 | Values.push_back(Builder.CreateBitCast(V, ParamType)); |
17596 | } |
17597 | Values.push_back(Ldm); |
17598 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
17599 | return Result; |
17600 | } |
17601 | |
17602 | // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) --> |
17603 | // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf> |
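// E.g. __hmma_m16n16k16_mma_f32f16(d, a, b, c, /*layout=*/1, /*satf=*/0) -->
// Intrinsic::nvvm_wmma_m16n16k16_mma_row_col_f32_f16 (see getNVPTXMmaInfo).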
17604 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
17605 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
17606 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
17607 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
17608 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
17609 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
17610 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
17611 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
17612 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
17613 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
17614 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
17615 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
17616 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
17617 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
17618 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
17619 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
17620 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
17621 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
17622 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
17623 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
17624 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
17625 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
17626 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
17627 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
17628 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
17629 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
17630 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: { |
17631 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
17632 | Address SrcA = EmitPointerWithAlignment(E->getArg(1)); |
17633 | Address SrcB = EmitPointerWithAlignment(E->getArg(2)); |
17634 | Address SrcC = EmitPointerWithAlignment(E->getArg(3)); |
17635 | Optional<llvm::APSInt> LayoutArg = |
17636 | E->getArg(4)->getIntegerConstantExpr(getContext()); |
17637 | if (!LayoutArg) |
17638 | return nullptr; |
17639 | int Layout = LayoutArg->getSExtValue(); |
17640 | if (Layout < 0 || Layout > 3) |
17641 | return nullptr; |
17642 | llvm::APSInt SatfArg; |
17643 | if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 || |
17644 | BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1) |
17645 | SatfArg = 0; // .b1 does not have satf argument. |
17646 | else if (Optional<llvm::APSInt> OptSatfArg = |
17647 | E->getArg(5)->getIntegerConstantExpr(getContext())) |
17648 | SatfArg = *OptSatfArg; |
17649 | else |
17650 | return nullptr; |
17651 | bool Satf = SatfArg.getSExtValue(); |
17652 | NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID); |
17653 | unsigned IID = MI.getMMAIntrinsic(Layout, Satf); |
17654 | if (IID == 0) // Unsupported combination of Layout/Satf. |
17655 | return nullptr; |
17656 | |
17657 | SmallVector<Value *, 24> Values; |
17658 | Function *Intrinsic = CGM.getIntrinsic(IID); |
17659 | llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0); |
17660 | // Load A |
17661 | for (unsigned i = 0; i < MI.NumEltsA; ++i) { |
17662 | Value *V = Builder.CreateAlignedLoad( |
17663 | SrcA.getElementType(), |
17664 | Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(), |
17665 | llvm::ConstantInt::get(IntTy, i)), |
17666 | CharUnits::fromQuantity(4)); |
17667 | Values.push_back(Builder.CreateBitCast(V, AType)); |
17668 | } |
17669 | // Load B |
17670 | llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA); |
17671 | for (unsigned i = 0; i < MI.NumEltsB; ++i) { |
17672 | Value *V = Builder.CreateAlignedLoad( |
17673 | SrcB.getElementType(), |
17674 | Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(), |
17675 | llvm::ConstantInt::get(IntTy, i)), |
17676 | CharUnits::fromQuantity(4)); |
17677 | Values.push_back(Builder.CreateBitCast(V, BType)); |
17678 | } |
17679 | // Load C |
17680 | llvm::Type *CType = |
17681 | Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB); |
17682 | for (unsigned i = 0; i < MI.NumEltsC; ++i) { |
17683 | Value *V = Builder.CreateAlignedLoad( |
17684 | SrcC.getElementType(), |
17685 | Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(), |
17686 | llvm::ConstantInt::get(IntTy, i)), |
17687 | CharUnits::fromQuantity(4)); |
17688 | Values.push_back(Builder.CreateBitCast(V, CType)); |
17689 | } |
17690 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
17691 | llvm::Type *DType = Dst.getElementType(); |
17692 | for (unsigned i = 0; i < MI.NumEltsD; ++i) |
17693 | Builder.CreateAlignedStore( |
17694 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType), |
17695 | Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), |
17696 | llvm::ConstantInt::get(IntTy, i)), |
17697 | CharUnits::fromQuantity(4)); |
17698 | return Result; |
17699 | } |
17700 | default: |
17701 | return nullptr; |
17702 | } |
17703 | } |
17704 | |
17705 | namespace { |
17706 | struct BuiltinAlignArgs { |
17707 | llvm::Value *Src = nullptr; |
17708 | llvm::Type *SrcType = nullptr; |
17709 | llvm::Value *Alignment = nullptr; |
17710 | llvm::Value *Mask = nullptr; |
17711 | llvm::IntegerType *IntType = nullptr; |
17712 | |
17713 | BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) { |
17714 | QualType AstType = E->getArg(0)->getType(); |
17715 | if (AstType->isArrayType()) |
17716 | Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer(); |
17717 | else |
17718 | Src = CGF.EmitScalarExpr(E->getArg(0)); |
17719 | SrcType = Src->getType(); |
17720 | if (SrcType->isPointerTy()) { |
17721 | IntType = IntegerType::get( |
17722 | CGF.getLLVMContext(), |
17723 | CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType)); |
17724 | } else { |
17725 | assert(SrcType->isIntegerTy()); |
17726 | IntType = cast<llvm::IntegerType>(SrcType); |
17727 | } |
17728 | Alignment = CGF.EmitScalarExpr(E->getArg(1)); |
17729 | Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment"); |
17730 | auto *One = llvm::ConstantInt::get(IntType, 1); |
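    // For a power-of-two alignment N, N - 1 has exactly the low bits set
    // (e.g. an alignment of 8 yields the mask 0b111).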
17731 | Mask = CGF.Builder.CreateSub(Alignment, One, "mask"); |
17732 | } |
17733 | }; |
17734 | } // namespace |
17735 | |
17736 | /// Generate (x & (y-1)) == 0. |
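/// E.g. __builtin_is_aligned(p, 8) lowers to ((uintptr_t)p & 7) == 0.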
17737 | RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) { |
17738 | BuiltinAlignArgs Args(E, *this); |
17739 | llvm::Value *SrcAddress = Args.Src; |
17740 | if (Args.SrcType->isPointerTy()) |
17741 | SrcAddress = |
17742 | Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr"); |
17743 | return RValue::get(Builder.CreateICmpEQ( |
17744 | Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"), |
17745 | llvm::Constant::getNullValue(Args.IntType), "is_aligned")); |
17746 | } |
17747 | |
17748 | /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up. |
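/// E.g. __builtin_align_up(13, 8) yields (13 + 7) & ~7 == 16, and
/// __builtin_align_down(13, 8) yields 13 & ~7 == 8.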
17749 | /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the |
17750 | /// llvm.ptrmask intrinsic (with a GEP beforehand in the align_up case). |
17751 | /// TODO: actually use ptrmask once most optimization passes know about it. |
17752 | RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) { |
17753 | BuiltinAlignArgs Args(E, *this); |
17754 | llvm::Value *SrcAddr = Args.Src; |
17755 | if (Args.Src->getType()->isPointerTy()) |
17756 | SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr"); |
17757 | llvm::Value *SrcForMask = SrcAddr; |
17758 | if (AlignUp) { |
17759 | // When aligning up we have to first add the mask to ensure we go over the |
17760 | // next alignment value and then align down to the next valid multiple. |
17761 | // By adding the mask, we ensure that align_up on an already aligned |
17762 | // value will not change the value. |
17763 | SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary"); |
17764 | } |
17765 | // Invert the mask to only clear the lower bits. |
17766 | llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask"); |
17767 | llvm::Value *Result = |
17768 | Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result"); |
17769 | if (Args.Src->getType()->isPointerTy()) { |
17770 | /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well. |
17771 | // Result = Builder.CreateIntrinsic( |
17772 | // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType}, |
17773 | // {SrcForMask, NegatedMask}, nullptr, "aligned_result"); |
17774 | Result->setName("aligned_intptr"); |
17775 | llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff"); |
17776 | // The result must point to the same underlying allocation. This means we |
17777 | // can use an inbounds GEP to enable better optimization. |
17778 | Value *Base = EmitCastToVoidPtr(Args.Src); |
17779 | if (getLangOpts().isSignedOverflowDefined()) |
17780 | Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result"); |
17781 | else |
17782 | Result = EmitCheckedInBoundsGEP(Base, Difference, |
17783 | /*SignedIndices=*/true, |
17784 | /*isSubtraction=*/!AlignUp, |
17785 | E->getExprLoc(), "aligned_result"); |
17786 | Result = Builder.CreatePointerCast(Result, Args.SrcType); |
17787 | // Emit an alignment assumption to ensure that the new alignment is |
17788 | // propagated to loads/stores, etc. |
17789 | emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment); |
17790 | } |
17791 | assert(Result->getType() == Args.SrcType); |
17792 | return RValue::get(Result); |
17793 | } |
17794 | |
17795 | Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, |
17796 | const CallExpr *E) { |
17797 | switch (BuiltinID) { |
17798 | case WebAssembly::BI__builtin_wasm_memory_size: { |
17799 | llvm::Type *ResultType = ConvertType(E->getType()); |
17800 | Value *I = EmitScalarExpr(E->getArg(0)); |
17801 | Function *Callee = |
17802 | CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType); |
17803 | return Builder.CreateCall(Callee, I); |
17804 | } |
17805 | case WebAssembly::BI__builtin_wasm_memory_grow: { |
17806 | llvm::Type *ResultType = ConvertType(E->getType()); |
17807 | Value *Args[] = {EmitScalarExpr(E->getArg(0)), |
17808 | EmitScalarExpr(E->getArg(1))}; |
17809 | Function *Callee = |
17810 | CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType); |
17811 | return Builder.CreateCall(Callee, Args); |
17812 | } |
17813 | case WebAssembly::BI__builtin_wasm_tls_size: { |
17814 | llvm::Type *ResultType = ConvertType(E->getType()); |
17815 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType); |
17816 | return Builder.CreateCall(Callee); |
17817 | } |
17818 | case WebAssembly::BI__builtin_wasm_tls_align: { |
17819 | llvm::Type *ResultType = ConvertType(E->getType()); |
17820 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType); |
17821 | return Builder.CreateCall(Callee); |
17822 | } |
17823 | case WebAssembly::BI__builtin_wasm_tls_base: { |
17824 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base); |
17825 | return Builder.CreateCall(Callee); |
17826 | } |
17827 | case WebAssembly::BI__builtin_wasm_throw: { |
17828 | Value *Tag = EmitScalarExpr(E->getArg(0)); |
17829 | Value *Obj = EmitScalarExpr(E->getArg(1)); |
17830 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw); |
17831 | return Builder.CreateCall(Callee, {Tag, Obj}); |
17832 | } |
17833 | case WebAssembly::BI__builtin_wasm_rethrow: { |
17834 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow); |
17835 | return Builder.CreateCall(Callee); |
17836 | } |
17837 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: { |
17838 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
17839 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
17840 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
17841 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32); |
17842 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
17843 | } |
17844 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: { |
17845 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
17846 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
17847 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
17848 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64); |
17849 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
17850 | } |
17851 | case WebAssembly::BI__builtin_wasm_memory_atomic_notify: { |
17852 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
17853 | Value *Count = EmitScalarExpr(E->getArg(1)); |
17854 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify); |
17855 | return Builder.CreateCall(Callee, {Addr, Count}); |
17856 | } |
17857 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32: |
17858 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64: |
17859 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32: |
17860 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: { |
17861 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17862 | llvm::Type *ResT = ConvertType(E->getType()); |
17863 | Function *Callee = |
17864 | CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()}); |
17865 | return Builder.CreateCall(Callee, {Src}); |
17866 | } |
17867 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32: |
17868 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64: |
17869 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32: |
17870 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: { |
17871 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17872 | llvm::Type *ResT = ConvertType(E->getType()); |
17873 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned, |
17874 | {ResT, Src->getType()}); |
17875 | return Builder.CreateCall(Callee, {Src}); |
17876 | } |
17877 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32: |
17878 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64: |
17879 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32: |
17880 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64: |
17881 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: { |
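    // These lower to the target-independent llvm.fptosi.sat intrinsic rather
    // than a wasm-specific one; the unsigned variants below use llvm.fptoui.sat.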
17882 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17883 | llvm::Type *ResT = ConvertType(E->getType()); |
17884 | Function *Callee = |
17885 | CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()}); |
17886 | return Builder.CreateCall(Callee, {Src}); |
17887 | } |
17888 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32: |
17889 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64: |
17890 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32: |
17891 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64: |
17892 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: { |
17893 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17894 | llvm::Type *ResT = ConvertType(E->getType()); |
17895 | Function *Callee = |
17896 | CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()}); |
17897 | return Builder.CreateCall(Callee, {Src}); |
17898 | } |
17899 | case WebAssembly::BI__builtin_wasm_min_f32: |
17900 | case WebAssembly::BI__builtin_wasm_min_f64: |
17901 | case WebAssembly::BI__builtin_wasm_min_f32x4: |
17902 | case WebAssembly::BI__builtin_wasm_min_f64x2: { |
17903 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17904 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17905 | Function *Callee = |
17906 | CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType())); |
17907 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17908 | } |
17909 | case WebAssembly::BI__builtin_wasm_max_f32: |
17910 | case WebAssembly::BI__builtin_wasm_max_f64: |
17911 | case WebAssembly::BI__builtin_wasm_max_f32x4: |
17912 | case WebAssembly::BI__builtin_wasm_max_f64x2: { |
17913 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17914 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17915 | Function *Callee = |
17916 | CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType())); |
17917 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17918 | } |
17919 | case WebAssembly::BI__builtin_wasm_pmin_f32x4: |
17920 | case WebAssembly::BI__builtin_wasm_pmin_f64x2: { |
17921 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17922 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17923 | Function *Callee = |
17924 | CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType())); |
17925 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17926 | } |
17927 | case WebAssembly::BI__builtin_wasm_pmax_f32x4: |
17928 | case WebAssembly::BI__builtin_wasm_pmax_f64x2: { |
17929 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17930 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17931 | Function *Callee = |
17932 | CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType())); |
17933 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17934 | } |
17935 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
17936 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
17937 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
17938 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
17939 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
17940 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
17941 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
17942 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: { |
17943 | unsigned IntNo; |
17944 | switch (BuiltinID) { |
17945 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
17946 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
17947 | IntNo = Intrinsic::ceil; |
17948 | break; |
17949 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
17950 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
17951 | IntNo = Intrinsic::floor; |
17952 | break; |
17953 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
17954 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
17955 | IntNo = Intrinsic::trunc; |
17956 | break; |
17957 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
17958 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: |
17959 | IntNo = Intrinsic::nearbyint; |
17960 | break; |
17961 | default: |
17962 | llvm_unreachable("unexpected builtin ID"); |
17963 | } |
17964 | Value *Value = EmitScalarExpr(E->getArg(0)); |
17965 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
17966 | return Builder.CreateCall(Callee, Value); |
17967 | } |
17968 | case WebAssembly::BI__builtin_wasm_swizzle_i8x16: { |
17969 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17970 | Value *Indices = EmitScalarExpr(E->getArg(1)); |
17971 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle); |
17972 | return Builder.CreateCall(Callee, {Src, Indices}); |
17973 | } |
17974 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
17975 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
17976 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
17977 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
17978 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
17979 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
17980 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
17981 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: { |
17982 | unsigned IntNo; |
17983 | switch (BuiltinID) { |
17984 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
17985 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
17986 | IntNo = Intrinsic::sadd_sat; |
17987 | break; |
17988 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
17989 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
17990 | IntNo = Intrinsic::uadd_sat; |
17991 | break; |
17992 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
17993 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
17994 | IntNo = Intrinsic::wasm_sub_sat_signed; |
17995 | break; |
17996 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
17997 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: |
17998 | IntNo = Intrinsic::wasm_sub_sat_unsigned; |
17999 | break; |
18000 | default: |
18001 | llvm_unreachable("unexpected builtin ID"); |
18002 | } |
18003 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18004 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18005 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
18006 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18007 | } |
18008 | case WebAssembly::BI__builtin_wasm_abs_i8x16: |
18009 | case WebAssembly::BI__builtin_wasm_abs_i16x8: |
18010 | case WebAssembly::BI__builtin_wasm_abs_i32x4: |
18011 | case WebAssembly::BI__builtin_wasm_abs_i64x2: { |
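    // Emit integer absolute value as select(v < 0, -v, v); the backend is
    // expected to pattern-match this to the SIMD abs instruction.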
18012 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18013 | Value *Neg = Builder.CreateNeg(Vec, "neg"); |
18014 | Constant *Zero = llvm::Constant::getNullValue(Vec->getType()); |
18015 | Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond"); |
18016 | return Builder.CreateSelect(ICmp, Neg, Vec, "abs"); |
18017 | } |
18018 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
18019 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
18020 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
18021 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
18022 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
18023 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
18024 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
18025 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
18026 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
18027 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
18028 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
18029 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: { |
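    // Lower vector min/max to a comparison of the requested signedness
    // followed by a select of the corresponding operand.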
18030 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18031 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18032 | Value *ICmp; |
18033 | switch (BuiltinID) { |
18034 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
18035 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
18036 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
18037 | ICmp = Builder.CreateICmpSLT(LHS, RHS); |
18038 | break; |
18039 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
18040 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
18041 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
18042 | ICmp = Builder.CreateICmpULT(LHS, RHS); |
18043 | break; |
18044 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
18045 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
18046 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
18047 | ICmp = Builder.CreateICmpSGT(LHS, RHS); |
18048 | break; |
18049 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
18050 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
18051 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: |
18052 | ICmp = Builder.CreateICmpUGT(LHS, RHS); |
18053 | break; |
18054 | default: |
18055 | llvm_unreachable("unexpected builtin ID"); |
18056 | } |
18057 | return Builder.CreateSelect(ICmp, LHS, RHS); |
18058 | } |
18059 | case WebAssembly::BI__builtin_wasm_avgr_u_i8x16: |
18060 | case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: { |
18061 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18062 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18063 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned, |
18064 | ConvertType(E->getType())); |
18065 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18066 | } |
18067 | case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: { |
18068 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18069 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18070 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed); |
18071 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18072 | } |
18073 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
18074 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
18075 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
18076 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: { |
18077 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18078 | unsigned IntNo; |
18079 | switch (BuiltinID) { |
18080 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
18081 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
18082 | IntNo = Intrinsic::wasm_extadd_pairwise_signed; |
18083 | break; |
18084 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
18085 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: |
18086 | IntNo = Intrinsic::wasm_extadd_pairwise_unsigned; |
18087 | break; |
18088 | default: |
18089 | llvm_unreachable("unexpected builtin ID"); |
18090 | } |
18091 | |
18092 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
18093 | return Builder.CreateCall(Callee, Vec); |
18094 | } |
18095 | case WebAssembly::BI__builtin_wasm_bitselect: { |
18096 | Value *V1 = EmitScalarExpr(E->getArg(0)); |
18097 | Value *V2 = EmitScalarExpr(E->getArg(1)); |
18098 | Value *C = EmitScalarExpr(E->getArg(2)); |
18099 | Function *Callee = |
18100 | CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType())); |
18101 | return Builder.CreateCall(Callee, {V1, V2, C}); |
18102 | } |
18103 | case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: { |
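// dot_s multiplies corresponding i16 lanes and adds adjacent products,
// yielding four i32 lanes (the wasm i32x4.dot_i16x8_s instruction).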
18104 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
18105 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
18106 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot); |
18107 | return Builder.CreateCall(Callee, {LHS, RHS}); |
18108 | } |
18109 | case WebAssembly::BI__builtin_wasm_popcnt_i8x16: { |
18110 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18111 | Function *Callee = |
18112 | CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType())); |
18113 | return Builder.CreateCall(Callee, {Vec}); |
18114 | } |
18115 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
18116 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
18117 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
18118 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
18119 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: { |
18120 | unsigned IntNo; |
18121 | switch (BuiltinID) { |
18122 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
18123 | IntNo = Intrinsic::wasm_anytrue; |
18124 | break; |
18125 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
18126 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
18127 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
18128 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: |
18129 | IntNo = Intrinsic::wasm_alltrue; |
18130 | break; |
18131 | default: |
18132 | llvm_unreachable("unexpected builtin ID");
18133 | } |
18134 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18135 | Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType()); |
18136 | return Builder.CreateCall(Callee, {Vec}); |
18137 | } |
18138 | case WebAssembly::BI__builtin_wasm_bitmask_i8x16: |
18139 | case WebAssembly::BI__builtin_wasm_bitmask_i16x8: |
18140 | case WebAssembly::BI__builtin_wasm_bitmask_i32x4: |
18141 | case WebAssembly::BI__builtin_wasm_bitmask_i64x2: { |
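// bitmask packs the high (sign) bit of each lane into the low bits of an
// i32 result, analogous to x86 movmskb.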
18142 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18143 | Function *Callee = |
18144 | CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType()); |
18145 | return Builder.CreateCall(Callee, {Vec}); |
18146 | } |
18147 | case WebAssembly::BI__builtin_wasm_abs_f32x4: |
18148 | case WebAssembly::BI__builtin_wasm_abs_f64x2: { |
18149 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18150 | Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType()); |
18151 | return Builder.CreateCall(Callee, {Vec}); |
18152 | } |
18153 | case WebAssembly::BI__builtin_wasm_sqrt_f32x4: |
18154 | case WebAssembly::BI__builtin_wasm_sqrt_f64x2: { |
18155 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18156 | Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType()); |
18157 | return Builder.CreateCall(Callee, {Vec}); |
18158 | } |
18159 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
18160 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
18161 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
18162 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: { |
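// narrow saturates each lane of the two inputs to the half-width lane
// type and concatenates them: Low supplies the lower half of the result
// lanes and High the upper half.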
18163 | Value *Low = EmitScalarExpr(E->getArg(0)); |
18164 | Value *High = EmitScalarExpr(E->getArg(1)); |
18165 | unsigned IntNo; |
18166 | switch (BuiltinID) { |
18167 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
18168 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
18169 | IntNo = Intrinsic::wasm_narrow_signed; |
18170 | break; |
18171 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
18172 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: |
18173 | IntNo = Intrinsic::wasm_narrow_unsigned; |
18174 | break; |
18175 | default: |
18176 | llvm_unreachable("unexpected builtin ID");
18177 | } |
18178 | Function *Callee = |
18179 | CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()}); |
18180 | return Builder.CreateCall(Callee, {Low, High}); |
18181 | } |
18182 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4: |
18183 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: { |
18184 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18185 | unsigned IntNo; |
18186 | switch (BuiltinID) { |
18187 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4: |
18188 | IntNo = Intrinsic::fptosi_sat; |
18189 | break; |
18190 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: |
18191 | IntNo = Intrinsic::fptoui_sat; |
18192 | break; |
18193 | default: |
18194 | llvm_unreachable("unexpected builtin ID");
18195 | } |
18196 | llvm::Type *SrcT = Vec->getType(); |
18197 | llvm::Type *TruncT = |
18198 | SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32)); |
18199 | Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT}); |
18200 | Value *Trunc = Builder.CreateCall(Callee, Vec); |
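// The saturating conversion of the two f64 lanes yields <2 x i32>;
// concatenate it with a <2 x i32> zero splat so the upper two lanes of
// the i32x4 result are zero, as trunc_sat_zero requires.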
18201 | Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0)); |
18202 | Value *ConcatMask = |
18203 | llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1), |
18204 | Builder.getInt32(2), Builder.getInt32(3)}); |
18205 | return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask); |
18206 | } |
18207 | case WebAssembly::BI__builtin_wasm_shuffle_i8x16: { |
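// Two vector operands plus 16 lane-index immediates, 18 operands in all.
// Sema is expected to have checked that the lane arguments are integer
// constant expressions, hence the assert below.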
18208 | Value *Ops[18]; |
18209 | size_t OpIdx = 0; |
18210 | Ops[OpIdx++] = EmitScalarExpr(E->getArg(0)); |
18211 | Ops[OpIdx++] = EmitScalarExpr(E->getArg(1)); |
18212 | while (OpIdx < 18) { |
18213 | Optional<llvm::APSInt> LaneConst = |
18214 | E->getArg(OpIdx)->getIntegerConstantExpr(getContext()); |
18215 | assert(LaneConst && "Constant arg isn't actually constant?");
18216 | Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst); |
18217 | } |
18218 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle); |
18219 | return Builder.CreateCall(Callee, Ops); |
18220 | } |
18221 | default: |
18222 | return nullptr; |
18223 | } |
18224 | } |
18225 | |
18226 | static std::pair<Intrinsic::ID, unsigned> |
18227 | getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) { |
18228 | struct Info { |
18229 | unsigned BuiltinID; |
18230 | Intrinsic::ID IntrinsicID; |
18231 | unsigned VecLen; |
18232 | }; |
18233 | Info Infos[] = { |
18234 | #define CUSTOM_BUILTIN_MAPPING(x,s) \ |
18235 | { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s }, |
18236 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) |
18237 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0) |
18238 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0) |
18239 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0) |
18240 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) |
18241 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0) |
18242 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0) |
18243 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) |
18244 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0) |
18245 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0) |
18246 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0) |
18247 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0) |
18248 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0) |
18249 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0) |
18250 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0) |
18251 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0) |
18252 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0) |
18253 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0) |
18254 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0) |
18255 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0) |
18256 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0) |
18257 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0) |
18258 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) |
18259 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64) |
18260 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64) |
18261 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64) |
18262 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128) |
18263 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128) |
18264 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128) |
18265 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128) |
18266 | #include "clang/Basic/BuiltinsHexagonMapCustomDep.def" |
18267 | #undef CUSTOM_BUILTIN_MAPPING |
18268 | }; |
18269 | |
18270 | auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; }; |
18271 | static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true); |
18272 | (void)SortOnce; |
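// With the table sorted once on first use, the lookup below can do a
// binary search via std::lower_bound keyed on BuiltinID.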
18273 | |
18274 | const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos), |
18275 | Info{BuiltinID, 0, 0}, CmpInfo); |
18276 | if (F == std::end(Infos) || F->BuiltinID != BuiltinID) |
18277 | return {Intrinsic::not_intrinsic, 0}; |
18278 | |
18279 | return {F->IntrinsicID, F->VecLen}; |
18280 | } |
18281 | |
18282 | Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, |
18283 | const CallExpr *E) { |
18284 | Intrinsic::ID ID; |
18285 | unsigned VecLen; |
18286 | std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID); |
18287 | |
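// Circular addressing builtins: the _pci forms take an explicit
// increment, while the _pcr forms take it from the modifier register.
// Both update the base pointer in place, which is why it is passed by
// address.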
18288 | auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) { |
18289 | // The base pointer is passed by address, so it needs to be loaded. |
18290 | Address A = EmitPointerWithAlignment(E->getArg(0)); |
18291 | Address BP = Address( |
18292 | Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment()); |
18293 | llvm::Value *Base = Builder.CreateLoad(BP); |
18294 | // The treatment of both loads and stores is the same: the arguments for |
18295 | // the builtin are the same as the arguments for the intrinsic. |
18296 | // Load: |
18297 | // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start) |
18298 | // builtin(Base, Mod, Start) -> intr(Base, Mod, Start) |
18299 | // Store: |
18300 | // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start) |
18301 | // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start) |
18302 | SmallVector<llvm::Value*,5> Ops = { Base }; |
18303 | for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i) |
18304 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18305 | |
18306 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); |
18307 | // The load intrinsics generate two results (Value, NewBase), stores |
18308 | // generate one (NewBase). The new base address needs to be stored. |
18309 | llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1) |
18310 | : Result; |
18311 | llvm::Value *LV = Builder.CreateBitCast( |
18312 | EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo()); |
18313 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
18314 | llvm::Value *RetVal = |
18315 | Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); |
18316 | if (IsLoad) |
18317 | RetVal = Builder.CreateExtractValue(Result, 0); |
18318 | return RetVal; |
18319 | }; |
18320 | |
18321 | // Handle the conversion of bit-reverse load intrinsics to bit code. |
18322 | // The intrinsic call created here only reads from memory; the write to
18323 | // memory is handled by an explicit store instruction.
18324 | auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) { |
18325 | // The intrinsic returns the loaded value and the updated base pointer.
18326 | // The builtin returns the new base; the loaded value is written back
18327 | // through the destination pointer, which is passed by address.
18328 | llvm::Value *BaseAddress = |
18329 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
18330 | |
18331 | // Expressions like &(*pt++) increment the pointer on every evaluation,
18332 | // and EmitPointerWithAlignment / EmitScalarExpr evaluate the expression
18333 | // once per call, so each argument is emitted exactly once.
18334 | Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); |
18335 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy), |
18336 | DestAddr.getAlignment()); |
18337 | llvm::Value *DestAddress = DestAddr.getPointer(); |
18338 | |
18339 | // Operands are Base, Dest, Modifier. |
18340 | // The intrinsic format in LLVM IR is defined as |
18341 | // { ValueType, i8* } (i8*, i32). |
18342 | llvm::Value *Result = Builder.CreateCall( |
18343 | CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))}); |
18344 | |
18345 | // The value needs to be stored as the variable is passed by reference. |
18346 | llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0); |
18347 | |
18348 | // The store needs to be truncated to fit the destination type. |
18349 | // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
18350 | // to be handled with stores of the respective destination type.
18351 | DestVal = Builder.CreateTrunc(DestVal, DestTy); |
18352 | |
18353 | llvm::Value *DestForStore = |
18354 | Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo()); |
18355 | Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment()); |
18356 | // The updated value of the base pointer is returned. |
18357 | return Builder.CreateExtractValue(Result, 1); |
18358 | }; |
18359 | |
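// HVX predicates (Q registers) appear as <N x i1> vectors in LLVM IR.
// V2Q converts a byte vector to a predicate via vandvrt, and Q2V converts
// a predicate back to a byte vector via vandqrt, each with an all-ones
// scalar mask.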
18360 | auto V2Q = [this, VecLen] (llvm::Value *Vec) { |
18361 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B |
18362 | : Intrinsic::hexagon_V6_vandvrt; |
18363 | return Builder.CreateCall(CGM.getIntrinsic(ID), |
18364 | {Vec, Builder.getInt32(-1)}); |
18365 | }; |
18366 | auto Q2V = [this, VecLen] (llvm::Value *Pred) { |
18367 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B |
18368 | : Intrinsic::hexagon_V6_vandqrt; |
18369 | return Builder.CreateCall(CGM.getIntrinsic(ID), |
18370 | {Pred, Builder.getInt32(-1)}); |
18371 | }; |
18372 | |
18373 | switch (BuiltinID) { |
18374 | // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR, |
18375 | // and the corresponding C/C++ builtins use loads/stores to update |
18376 | // the predicate. |
18377 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry: |
18378 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: |
18379 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry: |
18380 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: { |
18381 | // Get the type from the 0-th argument. |
18382 | llvm::Type *VecType = ConvertType(E->getArg(0)->getType()); |
18383 | Address PredAddr = Builder.CreateBitCast( |
18384 | EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0)); |
18385 | llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr)); |
18386 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), |
18387 | {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn}); |
18388 | |
18389 | llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1); |
18390 | Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(), |
18391 | PredAddr.getAlignment()); |
18392 | return Builder.CreateExtractValue(Result, 0); |
18393 | } |
18394 | |
18395 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci: |
18396 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci: |
18397 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci: |
18398 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci: |
18399 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci: |
18400 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci: |
18401 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr: |
18402 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr: |
18403 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr: |
18404 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr: |
18405 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr: |
18406 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr: |
18407 | return MakeCircOp(ID, /*IsLoad=*/true); |
18408 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci: |
18409 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci: |
18410 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci: |
18411 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci: |
18412 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci: |
18413 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr: |
18414 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr: |
18415 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr: |
18416 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr: |
18417 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr: |
18418 | return MakeCircOp(ID, /*IsLoad=*/false); |
18419 | case Hexagon::BI__builtin_brev_ldub: |
18420 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty); |
18421 | case Hexagon::BI__builtin_brev_ldb: |
18422 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty); |
18423 | case Hexagon::BI__builtin_brev_lduh: |
18424 | return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty); |
18425 | case Hexagon::BI__builtin_brev_ldh: |
18426 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty); |
18427 | case Hexagon::BI__builtin_brev_ldw: |
18428 | return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty); |
18429 | case Hexagon::BI__builtin_brev_ldd: |
18430 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty); |
18431 | |
18432 | default: { |
18433 | if (ID == Intrinsic::not_intrinsic) |
18434 | return nullptr; |
18435 | |
18436 | auto IsVectorPredTy = [](llvm::Type *T) { |
18437 | return T->isVectorTy() && |
18438 | cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1); |
18439 | }; |
18440 | |
18441 | llvm::Function *IntrFn = CGM.getIntrinsic(ID); |
18442 | llvm::FunctionType *IntrTy = IntrFn->getFunctionType(); |
18443 | SmallVector<llvm::Value*,4> Ops; |
18444 | for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) { |
18445 | llvm::Type *T = IntrTy->getParamType(i); |
18446 | const Expr *A = E->getArg(i); |
18447 | if (IsVectorPredTy(T)) { |
18448 | // There will be an implicit cast to a boolean vector. Strip it. |
18449 | if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) { |
18450 | if (Cast->getCastKind() == CK_BitCast) |
18451 | A = Cast->getSubExpr(); |
18452 | } |
18453 | Ops.push_back(V2Q(EmitScalarExpr(A))); |
18454 | } else { |
18455 | Ops.push_back(EmitScalarExpr(A)); |
18456 | } |
18457 | } |
18458 | |
18459 | llvm::Value *Call = Builder.CreateCall(IntrFn, Ops); |
18460 | if (IsVectorPredTy(IntrTy->getReturnType())) |
18461 | Call = Q2V(Call); |
18462 | |
18463 | return Call; |
18464 | } // default |
18465 | } // switch |
18466 | |
18467 | return nullptr; |
18468 | } |
18469 | |
18470 | Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, |
18471 | const CallExpr *E, |
18472 | ReturnValueSlot ReturnValue) { |
18473 | SmallVector<Value *, 4> Ops; |
18474 | llvm::Type *ResultType = ConvertType(E->getType()); |
18475 | |
18476 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
18477 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18478 | |
18479 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
18480 | unsigned NF = 1; |
18481 | |
18482 | // Required for overloaded intrinsics. |
18483 | llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes; |
18484 | switch (BuiltinID) { |
18485 | default: llvm_unreachable("unexpected builtin ID");
18486 | case RISCV::BI__builtin_riscv_orc_b_32: |
18487 | case RISCV::BI__builtin_riscv_orc_b_64: |
18488 | case RISCV::BI__builtin_riscv_clmul: |
18489 | case RISCV::BI__builtin_riscv_clmulh: |
18490 | case RISCV::BI__builtin_riscv_clmulr: |
18491 | case RISCV::BI__builtin_riscv_bcompress_32: |
18492 | case RISCV::BI__builtin_riscv_bcompress_64: |
18493 | case RISCV::BI__builtin_riscv_bdecompress_32: |
18494 | case RISCV::BI__builtin_riscv_bdecompress_64: |
18495 | case RISCV::BI__builtin_riscv_grev_32: |
18496 | case RISCV::BI__builtin_riscv_grev_64: |
18497 | case RISCV::BI__builtin_riscv_gorc_32: |
18498 | case RISCV::BI__builtin_riscv_gorc_64: |
18499 | case RISCV::BI__builtin_riscv_shfl_32: |
18500 | case RISCV::BI__builtin_riscv_shfl_64: |
18501 | case RISCV::BI__builtin_riscv_unshfl_32: |
18502 | case RISCV::BI__builtin_riscv_unshfl_64: |
18503 | case RISCV::BI__builtin_riscv_xperm_n: |
18504 | case RISCV::BI__builtin_riscv_xperm_b: |
18505 | case RISCV::BI__builtin_riscv_xperm_h: |
18506 | case RISCV::BI__builtin_riscv_xperm_w: |
18507 | case RISCV::BI__builtin_riscv_crc32_b: |
18508 | case RISCV::BI__builtin_riscv_crc32_h: |
18509 | case RISCV::BI__builtin_riscv_crc32_w: |
18510 | case RISCV::BI__builtin_riscv_crc32_d: |
18511 | case RISCV::BI__builtin_riscv_crc32c_b: |
18512 | case RISCV::BI__builtin_riscv_crc32c_h: |
18513 | case RISCV::BI__builtin_riscv_crc32c_w: |
18514 | case RISCV::BI__builtin_riscv_crc32c_d: { |
18515 | switch (BuiltinID) { |
18516 | default: llvm_unreachable("unexpected builtin ID");
18517 | // Zbb |
18518 | case RISCV::BI__builtin_riscv_orc_b_32: |
18519 | case RISCV::BI__builtin_riscv_orc_b_64: |
18520 | ID = Intrinsic::riscv_orc_b; |
18521 | break; |
18522 | |
18523 | // Zbc |
18524 | case RISCV::BI__builtin_riscv_clmul: |
18525 | ID = Intrinsic::riscv_clmul; |
18526 | break; |
18527 | case RISCV::BI__builtin_riscv_clmulh: |
18528 | ID = Intrinsic::riscv_clmulh; |
18529 | break; |
18530 | case RISCV::BI__builtin_riscv_clmulr: |
18531 | ID = Intrinsic::riscv_clmulr; |
18532 | break; |
18533 | |
18534 | // Zbe |
18535 | case RISCV::BI__builtin_riscv_bcompress_32: |
18536 | case RISCV::BI__builtin_riscv_bcompress_64: |
18537 | ID = Intrinsic::riscv_bcompress; |
18538 | break; |
18539 | case RISCV::BI__builtin_riscv_bdecompress_32: |
18540 | case RISCV::BI__builtin_riscv_bdecompress_64: |
18541 | ID = Intrinsic::riscv_bdecompress; |
18542 | break; |
18543 | |
18544 | // Zbp |
18545 | case RISCV::BI__builtin_riscv_grev_32: |
18546 | case RISCV::BI__builtin_riscv_grev_64: |
18547 | ID = Intrinsic::riscv_grev; |
18548 | break; |
18549 | case RISCV::BI__builtin_riscv_gorc_32: |
18550 | case RISCV::BI__builtin_riscv_gorc_64: |
18551 | ID = Intrinsic::riscv_gorc; |
18552 | break; |
18553 | case RISCV::BI__builtin_riscv_shfl_32: |
18554 | case RISCV::BI__builtin_riscv_shfl_64: |
18555 | ID = Intrinsic::riscv_shfl; |
18556 | break; |
18557 | case RISCV::BI__builtin_riscv_unshfl_32: |
18558 | case RISCV::BI__builtin_riscv_unshfl_64: |
18559 | ID = Intrinsic::riscv_unshfl; |
18560 | break; |
18561 | case RISCV::BI__builtin_riscv_xperm_n: |
18562 | ID = Intrinsic::riscv_xperm_n; |
18563 | break; |
18564 | case RISCV::BI__builtin_riscv_xperm_b: |
18565 | ID = Intrinsic::riscv_xperm_b; |
18566 | break; |
18567 | case RISCV::BI__builtin_riscv_xperm_h: |
18568 | ID = Intrinsic::riscv_xperm_h; |
18569 | break; |
18570 | case RISCV::BI__builtin_riscv_xperm_w: |
18571 | ID = Intrinsic::riscv_xperm_w; |
18572 | break; |
18573 | |
18574 | // Zbr |
18575 | case RISCV::BI__builtin_riscv_crc32_b: |
18576 | ID = Intrinsic::riscv_crc32_b; |
18577 | break; |
18578 | case RISCV::BI__builtin_riscv_crc32_h: |
18579 | ID = Intrinsic::riscv_crc32_h; |
18580 | break; |
18581 | case RISCV::BI__builtin_riscv_crc32_w: |
18582 | ID = Intrinsic::riscv_crc32_w; |
18583 | break; |
18584 | case RISCV::BI__builtin_riscv_crc32_d: |
18585 | ID = Intrinsic::riscv_crc32_d; |
18586 | break; |
18587 | case RISCV::BI__builtin_riscv_crc32c_b: |
18588 | ID = Intrinsic::riscv_crc32c_b; |
18589 | break; |
18590 | case RISCV::BI__builtin_riscv_crc32c_h: |
18591 | ID = Intrinsic::riscv_crc32c_h; |
18592 | break; |
18593 | case RISCV::BI__builtin_riscv_crc32c_w: |
18594 | ID = Intrinsic::riscv_crc32c_w; |
18595 | break; |
18596 | case RISCV::BI__builtin_riscv_crc32c_d: |
18597 | ID = Intrinsic::riscv_crc32c_d; |
18598 | break; |
18599 | } |
18600 | |
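// These scalar bit-manipulation intrinsics are overloaded on XLen, so
// the result type alone selects the concrete variant, e.g.
// @llvm.riscv.orc.b.i32 vs. @llvm.riscv.orc.b.i64.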
18601 | IntrinsicTypes = {ResultType}; |
18602 | break; |
18603 | } |
18604 | // Vector builtins are handled by the generated code included below.
18605 | #include "clang/Basic/riscv_vector_builtin_cg.inc" |
18606 | } |
18607 | |
18608 | assert(ID != Intrinsic::not_intrinsic);
18609 | |
18610 | llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); |
18611 | return Builder.CreateCall(F, Ops, ""); |
18612 | } |