File: | tools/clang/lib/CodeGen/CGBuiltin.cpp |
Warning: | line 11747, column 12 Value stored to 'Store' during its initialization is never read |
1 | //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This contains code to emit Builtin calls as LLVM code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CGCXXABI.h" |
14 | #include "CGObjCRuntime.h" |
15 | #include "CGOpenCLRuntime.h" |
16 | #include "CGRecordLayout.h" |
17 | #include "CodeGenFunction.h" |
18 | #include "CodeGenModule.h" |
19 | #include "ConstantEmitter.h" |
20 | #include "PatternInit.h" |
21 | #include "TargetInfo.h" |
22 | #include "clang/AST/ASTContext.h" |
23 | #include "clang/AST/Decl.h" |
24 | #include "clang/AST/OSLog.h" |
25 | #include "clang/Basic/TargetBuiltins.h" |
26 | #include "clang/Basic/TargetInfo.h" |
27 | #include "clang/CodeGen/CGFunctionInfo.h" |
28 | #include "llvm/ADT/SmallPtrSet.h" |
29 | #include "llvm/ADT/StringExtras.h" |
30 | #include "llvm/IR/DataLayout.h" |
31 | #include "llvm/IR/InlineAsm.h" |
32 | #include "llvm/IR/Intrinsics.h" |
33 | #include "llvm/IR/MDBuilder.h" |
34 | #include "llvm/Support/ConvertUTF.h" |
35 | #include "llvm/Support/ScopedPrinter.h" |
36 | #include "llvm/Support/TargetParser.h" |
37 | #include <sstream> |
38 | |
39 | using namespace clang; |
40 | using namespace CodeGen; |
41 | using namespace llvm; |
42 | |
43 | static |
44 | int64_t clamp(int64_t Value, int64_t Low, int64_t High) { |
45 | return std::min(High, std::max(Low, Value)); |
46 | } |
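A quick worked example of the helper above (illustrative only, assuming the static clamp() is visible at the point of use):

    assert(clamp(42, 0, 31) == 31);  // above the range -> High
    assert(clamp(-3, 0, 31) == 0);   // below the range -> Low
    assert(clamp(7, 0, 31) == 7);    // inside the range -> unchanged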
47 | |
48 | static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, unsigned AlignmentInBytes) { |
49 | ConstantInt *Byte; |
50 | switch (CGF.getLangOpts().getTrivialAutoVarInit()) { |
51 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
52 | // Nothing to initialize. |
53 | return; |
54 | case LangOptions::TrivialAutoVarInitKind::Zero: |
55 | Byte = CGF.Builder.getInt8(0x00); |
56 | break; |
57 | case LangOptions::TrivialAutoVarInitKind::Pattern: { |
58 | llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext()); |
59 | Byte = llvm::dyn_cast<llvm::ConstantInt>( |
60 | initializationPatternFor(CGF.CGM, Int8)); |
61 | break; |
62 | } |
63 | } |
64 | CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes); |
65 | } |
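A hedged sketch of what this means on the C side: when the alloca-style builtins are emitted under -ftrivial-auto-var-init=zero or =pattern, the helper above memsets the fresh allocation with the chosen byte before it is used.

    // Hypothetical caller; with -ftrivial-auto-var-init=zero the buffer is
    // zero-filled by the llvm.memset emitted via initializeAlloca().
    void *buf = __builtin_alloca(n);   // n is a hypothetical runtime size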
66 | |
67 | /// getBuiltinLibFunction - Given a builtin id for a function like |
68 | /// "__builtin_fabsf", return a Function* for "fabsf". |
69 | llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
70 | unsigned BuiltinID) { |
71 | assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
72 | |
73 | // Get the name, skip over the __builtin_ prefix (if necessary). |
74 | StringRef Name; |
75 | GlobalDecl D(FD); |
76 | |
77 | // If the builtin has been declared explicitly with an assembler label, |
78 | // use the mangled name. This differs from the plain label on platforms |
79 | // that prefix labels. |
80 | if (FD->hasAttr<AsmLabelAttr>()) |
81 | Name = getMangledName(D); |
82 | else |
83 | Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
84 | |
85 | llvm::FunctionType *Ty = |
86 | cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
87 | |
88 | return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); |
89 | } |
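A minimal sketch (not part of this file) of the prefix arithmetic above: "__builtin_" is exactly 10 characters, so offsetting the builtin's name by 10 yields the plain library name.

    const char *BuiltinName = "__builtin_fabsf"; // hypothetical example builtin
    StringRef LibName = BuiltinName + 10;        // "fabsf"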
90 | |
91 | /// Emit the conversions required to turn the given value into an |
92 | /// integer of the given size. |
93 | static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
94 | QualType T, llvm::IntegerType *IntType) { |
95 | V = CGF.EmitToMemory(V, T); |
96 | |
97 | if (V->getType()->isPointerTy()) |
98 | return CGF.Builder.CreatePtrToInt(V, IntType); |
99 | |
100 | assert(V->getType() == IntType); |
101 | return V; |
102 | } |
103 | |
104 | static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
105 | QualType T, llvm::Type *ResultType) { |
106 | V = CGF.EmitFromMemory(V, T); |
107 | |
108 | if (ResultType->isPointerTy()) |
109 | return CGF.Builder.CreateIntToPtr(V, ResultType); |
110 | |
111 | assert(V->getType() == ResultType); |
112 | return V; |
113 | } |
114 | |
115 | /// Utility to insert an atomic instruction based on Intrinsic::ID |
116 | /// and the expression node. |
117 | static Value *MakeBinaryAtomicValue( |
118 | CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, |
119 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
120 | QualType T = E->getType(); |
121 | assert(E->getArg(0)->getType()->isPointerType()); |
122 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
123 | E->getArg(0)->getType()->getPointeeType())); |
124 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
125 | |
126 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
127 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
128 | |
129 | llvm::IntegerType *IntType = |
130 | llvm::IntegerType::get(CGF.getLLVMContext(), |
131 | CGF.getContext().getTypeSize(T)); |
132 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
133 | |
134 | llvm::Value *Args[2]; |
135 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
136 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
137 | llvm::Type *ValueType = Args[1]->getType(); |
138 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
139 | |
140 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
141 | Kind, Args[0], Args[1], Ordering); |
142 | return EmitFromInt(CGF, Result, T, ValueType); |
143 | } |
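For orientation, a hedged example of a caller that reaches this helper: the GCC-style __sync fetch-and-op builtins lower to a single seq_cst atomicrmw whose result is the old value.

    // __sync_fetch_and_add(&Counter, 1) becomes, roughly,
    //   %old = atomicrmw add i64* %Counter, i64 1 seq_cst
    long Old = __sync_fetch_and_add(&Counter, 1); // Counter is a hypothetical long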
144 | |
145 | static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
146 | Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
147 | Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
148 | |
149 | // Convert the type of the pointer to a pointer to the stored type. |
150 | Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
151 | Value *BC = CGF.Builder.CreateBitCast( |
152 | Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); |
153 | LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
154 | LV.setNontemporal(true); |
155 | CGF.EmitStoreOfScalar(Val, LV, false); |
156 | return nullptr; |
157 | } |
158 | |
159 | static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
160 | Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
161 | |
162 | LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
163 | LV.setNontemporal(true); |
164 | return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
165 | } |
166 | |
167 | static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
168 | llvm::AtomicRMWInst::BinOp Kind, |
169 | const CallExpr *E) { |
170 | return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
171 | } |
172 | |
173 | /// Utility to insert an atomic instruction based on Intrinsic::ID and |
174 | /// the expression node, where the return value is the result of the |
175 | /// operation. |
176 | static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
177 | llvm::AtomicRMWInst::BinOp Kind, |
178 | const CallExpr *E, |
179 | Instruction::BinaryOps Op, |
180 | bool Invert = false) { |
181 | QualType T = E->getType(); |
182 | assert(E->getArg(0)->getType()->isPointerType()); |
183 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
184 | E->getArg(0)->getType()->getPointeeType())); |
185 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
186 | |
187 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
188 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
189 | |
190 | llvm::IntegerType *IntType = |
191 | llvm::IntegerType::get(CGF.getLLVMContext(), |
192 | CGF.getContext().getTypeSize(T)); |
193 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
194 | |
195 | llvm::Value *Args[2]; |
196 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
197 | llvm::Type *ValueType = Args[1]->getType(); |
198 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
199 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
200 | |
201 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
202 | Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
203 | Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
204 | if (Invert) |
205 | Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
206 | llvm::ConstantInt::get(IntType, -1)); |
207 | Result = EmitFromInt(CGF, Result, T, ValueType); |
208 | return RValue::get(Result); |
209 | } |
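A sketch of the post-op variant, assuming a __sync_*_and_fetch caller: atomicrmw still yields the old value, so the helper reapplies Op to recover the new value (and flips the bits with the Xor when Invert is set, for the nand forms).

    // __sync_add_and_fetch(&Counter, N) is emitted roughly as:
    //   %old = atomicrmw add ptr, N seq_cst
    //   %res = %old + N            // the CreateBinOp(Op, ...) above
    long New = __sync_add_and_fetch(&Counter, N); // Counter and N are hypothetical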
210 | |
211 | /// Utility to insert an atomic cmpxchg instruction. |
212 | /// |
213 | /// @param CGF The current codegen function. |
214 | /// @param E Builtin call expression to convert to cmpxchg. |
215 | /// arg0 - address to operate on |
216 | /// arg1 - value to compare with |
217 | /// arg2 - new value |
218 | /// @param ReturnBool Specifies whether to return success flag of |
219 | /// cmpxchg result or the old value. |
220 | /// |
221 | /// @returns result of cmpxchg, according to ReturnBool |
222 | /// |
223 | /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics, |
224 | /// invoke the function EmitAtomicCmpXchgForMSIntrin. |
225 | static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
226 | bool ReturnBool) { |
227 | QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
228 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
229 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
230 | |
231 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
232 | CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
233 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
234 | |
235 | Value *Args[3]; |
236 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
237 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
238 | llvm::Type *ValueType = Args[1]->getType(); |
239 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
240 | Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
241 | |
242 | Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
243 | Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
244 | llvm::AtomicOrdering::SequentiallyConsistent); |
245 | if (ReturnBool) |
246 | // Extract boolean success flag and zext it to int. |
247 | return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
248 | CGF.ConvertType(E->getType())); |
249 | else |
250 | // Extract old value and emit it using the same type as compare value. |
251 | return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
252 | ValueType); |
253 | } |
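A hedged mapping of the two call shapes this supports: __sync_bool_compare_and_swap corresponds to ReturnBool=true (the zero-extended success flag), and __sync_val_compare_and_swap to ReturnBool=false (the old value).

    bool Swapped = __sync_bool_compare_and_swap(&X, Expected, Desired);
    long Old     = __sync_val_compare_and_swap(&X, Expected, Desired);
    // X, Expected and Desired are hypothetical; both forms map to one seq_cst cmpxchg.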
254 | |
255 | /// This function should be invoked to emit atomic cmpxchg for Microsoft's |
256 | /// _InterlockedCompareExchange* intrinsics which have the following signature: |
257 | /// T _InterlockedCompareExchange(T volatile *Destination, |
258 | /// T Exchange, |
259 | /// T Comparand); |
260 | /// |
261 | /// Whereas the llvm 'cmpxchg' instruction has the following syntax: |
262 | /// cmpxchg *Destination, Comparand, Exchange. |
263 | /// So we need to swap Comparand and Exchange when invoking |
264 | /// CreateAtomicCmpXchg. That is the reason we could not use the above utility |
265 | /// function MakeAtomicCmpXchgValue since it expects the arguments to be |
266 | /// already swapped. |
267 | |
268 | static |
269 | Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, |
270 | AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { |
271 | assert(E->getArg(0)->getType()->isPointerType()); |
272 | assert(CGF.getContext().hasSameUnqualifiedType( |
273 | E->getType(), E->getArg(0)->getType()->getPointeeType())); |
274 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
275 | E->getArg(1)->getType())); |
276 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
277 | E->getArg(2)->getType())); |
278 | |
279 | auto *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
280 | auto *Comparand = CGF.EmitScalarExpr(E->getArg(2)); |
281 | auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); |
282 | |
283 | // For Release ordering, the failure ordering should be Monotonic. |
284 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? |
285 | AtomicOrdering::Monotonic : |
286 | SuccessOrdering; |
287 | |
288 | auto *Result = CGF.Builder.CreateAtomicCmpXchg( |
289 | Destination, Comparand, Exchange, |
290 | SuccessOrdering, FailureOrdering); |
291 | Result->setVolatile(true); |
292 | return CGF.Builder.CreateExtractValue(Result, 0); |
293 | } |
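A worked example of the operand swap described above, written against the MSVC signature (assuming a Windows target where the intrinsic is available):

    // T _InterlockedCompareExchange(T volatile *Destination, T Exchange, T Comparand)
    // maps to: cmpxchg volatile %Destination, %Comparand, %Exchange
    // i.e. arg 2 (Comparand) is the compare operand and arg 1 (Exchange) the new value.
    long Prev = _InterlockedCompareExchange(&Dest, NewVal, OldVal); // hypothetical vars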
294 | |
295 | static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, |
296 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
297 | assert(E->getArg(0)->getType()->isPointerType()); |
298 | |
299 | auto *IntTy = CGF.ConvertType(E->getType()); |
300 | auto *Result = CGF.Builder.CreateAtomicRMW( |
301 | AtomicRMWInst::Add, |
302 | CGF.EmitScalarExpr(E->getArg(0)), |
303 | ConstantInt::get(IntTy, 1), |
304 | Ordering); |
305 | return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1)); |
306 | } |
307 | |
308 | static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, |
309 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
310 | assert(E->getArg(0)->getType()->isPointerType()); |
311 | |
312 | auto *IntTy = CGF.ConvertType(E->getType()); |
313 | auto *Result = CGF.Builder.CreateAtomicRMW( |
314 | AtomicRMWInst::Sub, |
315 | CGF.EmitScalarExpr(E->getArg(0)), |
316 | ConstantInt::get(IntTy, 1), |
317 | Ordering); |
318 | return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1)); |
319 | } |
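Note the trailing CreateAdd/CreateSub: _InterlockedIncrement and _InterlockedDecrement return the updated value, while atomicrmw returns the previous one, so the helpers reapply the operation. A minimal sketch, assuming a Windows target:

    // %old = atomicrmw add ptr, 1 seq_cst;  result = %old + 1
    long After = _InterlockedIncrement(&Counter); // Counter is a hypothetical long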
320 | |
321 | // Build a plain volatile load. |
322 | static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { |
323 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
324 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
325 | CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy); |
326 | llvm::Type *ITy = |
327 | llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8); |
328 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
329 | llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize); |
330 | Load->setVolatile(true); |
331 | return Load; |
332 | } |
333 | |
334 | // Build a plain volatile store. |
335 | static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { |
336 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
337 | Value *Value = CGF.EmitScalarExpr(E->getArg(1)); |
338 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
339 | CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy); |
340 | llvm::Type *ITy = |
341 | llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8); |
342 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
343 | llvm::StoreInst *Store = |
344 | CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize); |
345 | Store->setVolatile(true); |
346 | return Store; |
347 | } |
348 | |
349 | // Emit a simple mangled intrinsic that has 1 argument and a return type |
350 | // matching the argument type. |
351 | static Value *emitUnaryBuiltin(CodeGenFunction &CGF, |
352 | const CallExpr *E, |
353 | unsigned IntrinsicID) { |
354 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
355 | |
356 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
357 | return CGF.Builder.CreateCall(F, Src0); |
358 | } |
359 | |
360 | // Emit an intrinsic that has 2 operands of the same type as its result. |
361 | static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
362 | const CallExpr *E, |
363 | unsigned IntrinsicID) { |
364 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
365 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
366 | |
367 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
368 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
369 | } |
370 | |
371 | // Emit an intrinsic that has 3 operands of the same type as its result. |
372 | static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
373 | const CallExpr *E, |
374 | unsigned IntrinsicID) { |
375 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
376 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
377 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
378 | |
379 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
380 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
381 | } |
382 | |
383 | // Emit an intrinsic that has 1 float or double operand, and 1 integer. |
384 | static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
385 | const CallExpr *E, |
386 | unsigned IntrinsicID) { |
387 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
388 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
389 | |
390 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
391 | return CGF.Builder.CreateCall(F, {Src0, Src1}); |
392 | } |
393 | |
394 | // Emit an intrinsic that has an overloaded integer result and an fp operand. |
395 | static Value *emitFPToIntRoundBuiltin(CodeGenFunction &CGF, |
396 | const CallExpr *E, |
397 | unsigned IntrinsicID) { |
398 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
399 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
400 | |
401 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, |
402 | {ResultType, Src0->getType()}); |
403 | return CGF.Builder.CreateCall(F, Src0); |
404 | } |
405 | |
406 | /// EmitFAbs - Emit a call to @llvm.fabs(). |
407 | static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
408 | Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
409 | llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
410 | Call->setDoesNotAccessMemory(); |
411 | return Call; |
412 | } |
413 | |
414 | /// Emit the computation of the sign bit for a floating point value. Returns |
415 | /// the i1 sign bit value. |
416 | static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
417 | LLVMContext &C = CGF.CGM.getLLVMContext(); |
418 | |
419 | llvm::Type *Ty = V->getType(); |
420 | int Width = Ty->getPrimitiveSizeInBits(); |
421 | llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
422 | V = CGF.Builder.CreateBitCast(V, IntTy); |
423 | if (Ty->isPPC_FP128Ty()) { |
424 | // We want the sign bit of the higher-order double. The bitcast we just |
425 | // did works as if the double-double was stored to memory and then |
426 | // read as an i128. The "store" will put the higher-order double in the |
427 | // lower address in both little- and big-Endian modes, but the "load" |
428 | // will treat those bits as a different part of the i128: the low bits in |
429 | // little-Endian, the high bits in big-Endian. Therefore, on big-Endian |
430 | // we need to shift the high bits down to the low before truncating. |
431 | Width >>= 1; |
432 | if (CGF.getTarget().isBigEndian()) { |
433 | Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
434 | V = CGF.Builder.CreateLShr(V, ShiftCst); |
435 | } |
436 | // We are truncating value in order to extract the higher-order |
437 | // double, which we will be using to extract the sign from. |
438 | IntTy = llvm::IntegerType::get(C, Width); |
439 | V = CGF.Builder.CreateTrunc(V, IntTy); |
440 | } |
441 | Value *Zero = llvm::Constant::getNullValue(IntTy); |
442 | return CGF.Builder.CreateICmpSLT(V, Zero); |
443 | } |
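For the common (non-PPC_FP128) case the emitted sequence is equivalent to testing the top bit of the value's bit pattern; a hedged scalar sketch, assuming <cstring> and <cstdint> are available:

    // bitcast double -> i64, then icmp slt i64 %bits, 0
    uint64_t Bits;
    std::memcpy(&Bits, &D, sizeof Bits);            // D is a hypothetical double
    bool SignBit = static_cast<int64_t>(Bits) < 0;  // same test as icmp slt 0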
444 | |
445 | static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
446 | const CallExpr *E, llvm::Constant *calleeValue) { |
447 | CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD)); |
448 | return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); |
449 | } |
450 | |
451 | /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* |
452 | /// depending on IntrinsicID. |
453 | /// |
454 | /// \arg CGF The current codegen function. |
455 | /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. |
456 | /// \arg X The first argument to the llvm.*.with.overflow.*. |
457 | /// \arg Y The second argument to the llvm.*.with.overflow.*. |
458 | /// \arg Carry The carry returned by the llvm.*.with.overflow.*. |
459 | /// \returns The result (i.e. sum/product) returned by the intrinsic. |
460 | static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
461 | const llvm::Intrinsic::ID IntrinsicID, |
462 | llvm::Value *X, llvm::Value *Y, |
463 | llvm::Value *&Carry) { |
464 | // Make sure we have integers of the same width. |
465 | assert(X->getType() == Y->getType() && |
466 | "Arguments must be the same type. (Did you forget to make sure both " |
467 | "arguments have the same integer width?)"); |
468 | |
469 | Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
470 | llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
471 | Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
472 | return CGF.Builder.CreateExtractValue(Tmp, 0); |
473 | } |
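A hedged usage sketch: the checked-arithmetic builtins funnel through this helper, which produces the {result, overflow} pair of the matching llvm.*.with.overflow intrinsic.

    int Sum;
    bool Overflowed = __builtin_sadd_overflow(A, B, &Sum); // A and B are hypothetical ints
    // lowers roughly to: {i32, i1} @llvm.sadd.with.overflow.i32(i32 %A, i32 %B)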
474 | |
475 | static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
476 | unsigned IntrinsicID, |
477 | int low, int high) { |
478 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
479 | llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
480 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
481 | llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
482 | Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
483 | return Call; |
484 | } |
485 | |
486 | namespace { |
487 | struct WidthAndSignedness { |
488 | unsigned Width; |
489 | bool Signed; |
490 | }; |
491 | } |
492 | |
493 | static WidthAndSignedness |
494 | getIntegerWidthAndSignedness(const clang::ASTContext &context, |
495 | const clang::QualType Type) { |
496 | assert(Type->isIntegerType() && "Given type is not an integer."); |
497 | unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width; |
498 | bool Signed = Type->isSignedIntegerType(); |
499 | return {Width, Signed}; |
500 | } |
501 | |
502 | // Given one or more integer types, this function produces an integer type that |
503 | // encompasses them: any value in one of the given types could be expressed in |
504 | // the encompassing type. |
505 | static struct WidthAndSignedness |
506 | EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
507 | assert(Types.size() > 0 && "Empty list of types."); |
508 | |
509 | // If any of the given types is signed, we must return a signed type. |
510 | bool Signed = false; |
511 | for (const auto &Type : Types) { |
512 | Signed |= Type.Signed; |
513 | } |
514 | |
515 | // The encompassing type must have a width greater than or equal to the width |
516 | // of the specified types. Additionally, if the encompassing type is signed, |
517 | // its width must be strictly greater than the width of any unsigned types |
518 | // given. |
519 | unsigned Width = 0; |
520 | for (const auto &Type : Types) { |
521 | unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
522 | if (Width < MinWidth) { |
523 | Width = MinWidth; |
524 | } |
525 | } |
526 | |
527 | return {Width, Signed}; |
528 | } |
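A worked example of the width rule above: combining a signed 32-bit type with an unsigned 32-bit type forces a signed result, and the unsigned member then needs one extra bit, so the encompassing type is a signed 33-bit integer.

    WidthAndSignedness S32 = {32, /*Signed=*/true};
    WidthAndSignedness U32 = {32, /*Signed=*/false};
    WidthAndSignedness Enc = EncompassingIntegerType({S32, U32}); // yields {33, true}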
529 | |
530 | Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
531 | llvm::Type *DestType = Int8PtrTy; |
532 | if (ArgValue->getType() != DestType) |
533 | ArgValue = |
534 | Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
535 | |
536 | Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
537 | return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
538 | } |
539 | |
540 | /// Checks if using the result of __builtin_object_size(p, @p From) in place of |
541 | /// __builtin_object_size(p, @p To) is correct. |
542 | static bool areBOSTypesCompatible(int From, int To) { |
543 | // Note: Our __builtin_object_size implementation currently treats Type=0 and |
544 | // Type=2 identically. Encoding this implementation detail here may make |
545 | // improving __builtin_object_size difficult in the future, so it's omitted. |
546 | return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
547 | } |
548 | |
549 | static llvm::Value * |
550 | getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
551 | return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); |
552 | } |
553 | |
554 | llvm::Value * |
555 | CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
556 | llvm::IntegerType *ResType, |
557 | llvm::Value *EmittedE, |
558 | bool IsDynamic) { |
559 | uint64_t ObjectSize; |
560 | if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
561 | return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); |
562 | return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); |
563 | } |
564 | |
565 | /// Returns a Value corresponding to the size of the given expression. |
566 | /// This Value may be either of the following: |
567 | /// - A llvm::Argument (if E is a param with the pass_object_size attribute on |
568 | /// it) |
569 | /// - A call to the @llvm.objectsize intrinsic |
570 | /// |
571 | /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null |
572 | /// and we wouldn't otherwise try to reference a pass_object_size parameter, |
573 | /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. |
574 | llvm::Value * |
575 | CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
576 | llvm::IntegerType *ResType, |
577 | llvm::Value *EmittedE, bool IsDynamic) { |
578 | // We need to reference an argument if the pointer is a parameter with the |
579 | // pass_object_size attribute. |
580 | if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
581 | auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
582 | auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
583 | if (Param != nullptr && PS != nullptr && |
584 | areBOSTypesCompatible(PS->getType(), Type)) { |
585 | auto Iter = SizeArguments.find(Param); |
586 | assert(Iter != SizeArguments.end()); |
587 | |
588 | const ImplicitParamDecl *D = Iter->second; |
589 | auto DIter = LocalDeclMap.find(D); |
590 | assert(DIter != LocalDeclMap.end()); |
591 | |
592 | return EmitLoadOfScalar(DIter->second, /*volatile=*/false, |
593 | getContext().getSizeType(), E->getBeginLoc()); |
594 | } |
595 | } |
596 | |
597 | // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't |
598 | // evaluate E for side-effects. In either case, we shouldn't lower to |
599 | // @llvm.objectsize. |
600 | if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) |
601 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
602 | |
603 | Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
604 | assert(Ptr->getType()->isPointerTy() && |
605 | "Non-pointer passed to __builtin_object_size?"); |
606 | |
607 | Function *F = |
608 | CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); |
609 | |
610 | // LLVM only supports 0 and 2; make sure that we pass along that as a boolean. |
611 | Value *Min = Builder.getInt1((Type & 2) != 0); |
612 | // For GCC compatibility, __builtin_object_size treats NULL as unknown size. |
613 | Value *NullIsUnknown = Builder.getTrue(); |
614 | Value *Dynamic = Builder.getInt1(IsDynamic); |
615 | return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}); |
616 | } |
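A hedged example of the call shape this produces: for __builtin_object_size(p, 2), the min flag is true, null-is-unknown is true (GCC compatibility), and dynamic is false, so the emitted intrinsic call is roughly:

    //   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true, i1 false)
    size_t Sz = __builtin_object_size(p, 2); // p is a hypothetical pointer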
617 | |
618 | namespace { |
619 | /// A struct to generically describe a bit test intrinsic. |
620 | struct BitTest { |
621 | enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; |
622 | enum InterlockingKind : uint8_t { |
623 | Unlocked, |
624 | Sequential, |
625 | Acquire, |
626 | Release, |
627 | NoFence |
628 | }; |
629 | |
630 | ActionKind Action; |
631 | InterlockingKind Interlocking; |
632 | bool Is64Bit; |
633 | |
634 | static BitTest decodeBitTestBuiltin(unsigned BuiltinID); |
635 | }; |
636 | } // namespace |
637 | |
638 | BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { |
639 | switch (BuiltinID) { |
640 | // Main portable variants. |
641 | case Builtin::BI_bittest: |
642 | return {TestOnly, Unlocked, false}; |
643 | case Builtin::BI_bittestandcomplement: |
644 | return {Complement, Unlocked, false}; |
645 | case Builtin::BI_bittestandreset: |
646 | return {Reset, Unlocked, false}; |
647 | case Builtin::BI_bittestandset: |
648 | return {Set, Unlocked, false}; |
649 | case Builtin::BI_interlockedbittestandreset: |
650 | return {Reset, Sequential, false}; |
651 | case Builtin::BI_interlockedbittestandset: |
652 | return {Set, Sequential, false}; |
653 | |
654 | // X86-specific 64-bit variants. |
655 | case Builtin::BI_bittest64: |
656 | return {TestOnly, Unlocked, true}; |
657 | case Builtin::BI_bittestandcomplement64: |
658 | return {Complement, Unlocked, true}; |
659 | case Builtin::BI_bittestandreset64: |
660 | return {Reset, Unlocked, true}; |
661 | case Builtin::BI_bittestandset64: |
662 | return {Set, Unlocked, true}; |
663 | case Builtin::BI_interlockedbittestandreset64: |
664 | return {Reset, Sequential, true}; |
665 | case Builtin::BI_interlockedbittestandset64: |
666 | return {Set, Sequential, true}; |
667 | |
668 | // ARM/AArch64-specific ordering variants. |
669 | case Builtin::BI_interlockedbittestandset_acq: |
670 | return {Set, Acquire, false}; |
671 | case Builtin::BI_interlockedbittestandset_rel: |
672 | return {Set, Release, false}; |
673 | case Builtin::BI_interlockedbittestandset_nf: |
674 | return {Set, NoFence, false}; |
675 | case Builtin::BI_interlockedbittestandreset_acq: |
676 | return {Reset, Acquire, false}; |
677 | case Builtin::BI_interlockedbittestandreset_rel: |
678 | return {Reset, Release, false}; |
679 | case Builtin::BI_interlockedbittestandreset_nf: |
680 | return {Reset, NoFence, false}; |
681 | } |
682 | llvm_unreachable("expected only bittest intrinsics")::llvm::llvm_unreachable_internal("expected only bittest intrinsics" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 682); |
683 | } |
684 | |
685 | static char bitActionToX86BTCode(BitTest::ActionKind A) { |
686 | switch (A) { |
687 | case BitTest::TestOnly: return '\0'; |
688 | case BitTest::Complement: return 'c'; |
689 | case BitTest::Reset: return 'r'; |
690 | case BitTest::Set: return 's'; |
691 | } |
692 | llvm_unreachable("invalid action")::llvm::llvm_unreachable_internal("invalid action", "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 692); |
693 | } |
694 | |
695 | static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, |
696 | BitTest BT, |
697 | const CallExpr *E, Value *BitBase, |
698 | Value *BitPos) { |
699 | char Action = bitActionToX86BTCode(BT.Action); |
700 | char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; |
701 | |
702 | // Build the assembly. |
703 | SmallString<64> Asm; |
704 | raw_svector_ostream AsmOS(Asm); |
705 | if (BT.Interlocking != BitTest::Unlocked) |
706 | AsmOS << "lock "; |
707 | AsmOS << "bt"; |
708 | if (Action) |
709 | AsmOS << Action; |
710 | AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}"; |
711 | |
712 | // Build the constraints. FIXME: We should support immediates when possible. |
713 | std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}"; |
714 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
715 | CGF.getLLVMContext(), |
716 | CGF.getContext().getTypeSize(E->getArg(1)->getType())); |
717 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
718 | llvm::FunctionType *FTy = |
719 | llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false); |
720 | |
721 | llvm::InlineAsm *IA = |
722 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true); |
723 | return CGF.Builder.CreateCall(IA, {BitBase, BitPos}); |
724 | } |
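For concreteness, the template assembled above for the 32-bit interlocked set form (_interlockedbittestandset, i.e. Action == 's', SizeSuffix == 'l') comes out as:

    //   "lock btsl $2, ($1)\n\tsetc ${0:b}"
    // with the constraint string "=r,r,r,~{cc},~{flags},~{fpsr}".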
725 | |
726 | static llvm::AtomicOrdering |
727 | getBitTestAtomicOrdering(BitTest::InterlockingKind I) { |
728 | switch (I) { |
729 | case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; |
730 | case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; |
731 | case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; |
732 | case BitTest::Release: return llvm::AtomicOrdering::Release; |
733 | case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; |
734 | } |
735 | llvm_unreachable("invalid interlocking")::llvm::llvm_unreachable_internal("invalid interlocking", "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 735); |
736 | } |
737 | |
738 | /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of |
739 | /// bits and a bit position and read and optionally modify the bit at that |
740 | /// position. The position index can be arbitrarily large, i.e. it can be larger |
741 | /// than 31 or 63, so we need an indexed load in the general case. |
742 | static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, |
743 | unsigned BuiltinID, |
744 | const CallExpr *E) { |
745 | Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); |
746 | Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); |
747 | |
748 | BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); |
749 | |
750 | // X86 has special BT, BTC, BTR, and BTS instructions that handle the array |
751 | // indexing operation internally. Use them if possible. |
752 | llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch(); |
753 | if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) |
754 | return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); |
755 | |
756 | // Otherwise, use generic code to load one byte and test the bit. Use all but |
757 | // the bottom three bits as the array index, and the bottom three bits to form |
758 | // a mask. |
759 | // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0; |
760 | Value *ByteIndex = CGF.Builder.CreateAShr( |
761 | BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); |
762 | Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy); |
763 | Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8, |
764 | ByteIndex, "bittest.byteaddr"), |
765 | CharUnits::One()); |
766 | Value *PosLow = |
767 | CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty), |
768 | llvm::ConstantInt::get(CGF.Int8Ty, 0x7)); |
769 | |
770 | // The updating instructions will need a mask. |
771 | Value *Mask = nullptr; |
772 | if (BT.Action != BitTest::TestOnly) { |
773 | Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow, |
774 | "bittest.mask"); |
775 | } |
776 | |
777 | // Check the action and ordering of the interlocked intrinsics. |
778 | llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking); |
779 | |
780 | Value *OldByte = nullptr; |
781 | if (Ordering != llvm::AtomicOrdering::NotAtomic) { |
782 | // Emit a combined atomicrmw load/store operation for the interlocked |
783 | // intrinsics. |
784 | llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; |
785 | if (BT.Action == BitTest::Reset) { |
786 | Mask = CGF.Builder.CreateNot(Mask); |
787 | RMWOp = llvm::AtomicRMWInst::And; |
788 | } |
789 | OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask, |
790 | Ordering); |
791 | } else { |
792 | // Emit a plain load for the non-interlocked intrinsics. |
793 | OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte"); |
794 | Value *NewByte = nullptr; |
795 | switch (BT.Action) { |
796 | case BitTest::TestOnly: |
797 | // Don't store anything. |
798 | break; |
799 | case BitTest::Complement: |
800 | NewByte = CGF.Builder.CreateXor(OldByte, Mask); |
801 | break; |
802 | case BitTest::Reset: |
803 | NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask)); |
804 | break; |
805 | case BitTest::Set: |
806 | NewByte = CGF.Builder.CreateOr(OldByte, Mask); |
807 | break; |
808 | } |
809 | if (NewByte) |
810 | CGF.Builder.CreateStore(NewByte, ByteAddr); |
811 | } |
812 | |
813 | // However we loaded the old byte, either by plain load or atomicrmw, shift |
814 | // the bit into the low position and mask it to 0 or 1. |
815 | Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr"); |
816 | return CGF.Builder.CreateAnd( |
817 | ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"); |
818 | } |
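Ignoring atomicity, the generic (non-x86) path above is equivalent to this byte-indexed sketch:

    // Bit = (BitBaseI8[BitPos >> 3] >> (BitPos & 0x7)) & 1;
    unsigned char Byte = Base[BitPos >> 3];            // Base and BitPos are hypothetical
    unsigned char Bit  = (Byte >> (BitPos & 0x7)) & 1;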
819 | |
820 | namespace { |
821 | enum class MSVCSetJmpKind { |
822 | _setjmpex, |
823 | _setjmp3, |
824 | _setjmp |
825 | }; |
826 | } |
827 | |
828 | /// MSVC handles setjmp a bit differently on different platforms. On every |
829 | /// architecture except 32-bit x86, the frame address is passed. On x86, extra |
830 | /// parameters can be passed as variadic arguments, but we always pass none. |
831 | static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, |
832 | const CallExpr *E) { |
833 | llvm::Value *Arg1 = nullptr; |
834 | llvm::Type *Arg1Ty = nullptr; |
835 | StringRef Name; |
836 | bool IsVarArg = false; |
837 | if (SJKind == MSVCSetJmpKind::_setjmp3) { |
838 | Name = "_setjmp3"; |
839 | Arg1Ty = CGF.Int32Ty; |
840 | Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0); |
841 | IsVarArg = true; |
842 | } else { |
843 | Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex"; |
844 | Arg1Ty = CGF.Int8PtrTy; |
845 | if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) { |
846 | Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::sponentry)); |
847 | } else |
848 | Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress), |
849 | llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
850 | } |
851 | |
852 | // Mark the call site and declaration with ReturnsTwice. |
853 | llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; |
854 | llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( |
855 | CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, |
856 | llvm::Attribute::ReturnsTwice); |
857 | llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction( |
858 | llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name, |
859 | ReturnsTwiceAttr, /*Local=*/true); |
860 | |
861 | llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( |
862 | CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); |
863 | llvm::Value *Args[] = {Buf, Arg1}; |
864 | llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args); |
865 | CB->setAttributes(ReturnsTwiceAttr); |
866 | return RValue::get(CB); |
867 | } |
868 | |
869 | // Many of the MSVC builtins are available on x64, ARM, and AArch64; to avoid |
870 | // repeating code, we handle them here. |
871 | enum class CodeGenFunction::MSVCIntrin { |
872 | _BitScanForward, |
873 | _BitScanReverse, |
874 | _InterlockedAnd, |
875 | _InterlockedDecrement, |
876 | _InterlockedExchange, |
877 | _InterlockedExchangeAdd, |
878 | _InterlockedExchangeSub, |
879 | _InterlockedIncrement, |
880 | _InterlockedOr, |
881 | _InterlockedXor, |
882 | _InterlockedExchangeAdd_acq, |
883 | _InterlockedExchangeAdd_rel, |
884 | _InterlockedExchangeAdd_nf, |
885 | _InterlockedExchange_acq, |
886 | _InterlockedExchange_rel, |
887 | _InterlockedExchange_nf, |
888 | _InterlockedCompareExchange_acq, |
889 | _InterlockedCompareExchange_rel, |
890 | _InterlockedCompareExchange_nf, |
891 | _InterlockedOr_acq, |
892 | _InterlockedOr_rel, |
893 | _InterlockedOr_nf, |
894 | _InterlockedXor_acq, |
895 | _InterlockedXor_rel, |
896 | _InterlockedXor_nf, |
897 | _InterlockedAnd_acq, |
898 | _InterlockedAnd_rel, |
899 | _InterlockedAnd_nf, |
900 | _InterlockedIncrement_acq, |
901 | _InterlockedIncrement_rel, |
902 | _InterlockedIncrement_nf, |
903 | _InterlockedDecrement_acq, |
904 | _InterlockedDecrement_rel, |
905 | _InterlockedDecrement_nf, |
906 | __fastfail, |
907 | }; |
908 | |
909 | Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
910 | const CallExpr *E) { |
911 | switch (BuiltinID) { |
912 | case MSVCIntrin::_BitScanForward: |
913 | case MSVCIntrin::_BitScanReverse: { |
914 | Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
915 | |
916 | llvm::Type *ArgType = ArgValue->getType(); |
917 | llvm::Type *IndexType = |
918 | EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType(); |
919 | llvm::Type *ResultType = ConvertType(E->getType()); |
920 | |
921 | Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
922 | Value *ResZero = llvm::Constant::getNullValue(ResultType); |
923 | Value *ResOne = llvm::ConstantInt::get(ResultType, 1); |
924 | |
925 | BasicBlock *Begin = Builder.GetInsertBlock(); |
926 | BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); |
927 | Builder.SetInsertPoint(End); |
928 | PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); |
929 | |
930 | Builder.SetInsertPoint(Begin); |
931 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); |
932 | BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); |
933 | Builder.CreateCondBr(IsZero, End, NotZero); |
934 | Result->addIncoming(ResZero, Begin); |
935 | |
936 | Builder.SetInsertPoint(NotZero); |
937 | Address IndexAddress = EmitPointerWithAlignment(E->getArg(0)); |
938 | |
939 | if (BuiltinID == MSVCIntrin::_BitScanForward) { |
940 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
941 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
942 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
943 | Builder.CreateStore(ZeroCount, IndexAddress, false); |
944 | } else { |
945 | unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
946 | Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); |
947 | |
948 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
949 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
950 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
951 | Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); |
952 | Builder.CreateStore(Index, IndexAddress, false); |
953 | } |
954 | Builder.CreateBr(End); |
955 | Result->addIncoming(ResOne, NotZero); |
956 | |
957 | Builder.SetInsertPoint(End); |
958 | return Result; |
959 | } |
960 | case MSVCIntrin::_InterlockedAnd: |
961 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); |
962 | case MSVCIntrin::_InterlockedExchange: |
963 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); |
964 | case MSVCIntrin::_InterlockedExchangeAdd: |
965 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); |
966 | case MSVCIntrin::_InterlockedExchangeSub: |
967 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); |
968 | case MSVCIntrin::_InterlockedOr: |
969 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); |
970 | case MSVCIntrin::_InterlockedXor: |
971 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); |
972 | case MSVCIntrin::_InterlockedExchangeAdd_acq: |
973 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
974 | AtomicOrdering::Acquire); |
975 | case MSVCIntrin::_InterlockedExchangeAdd_rel: |
976 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
977 | AtomicOrdering::Release); |
978 | case MSVCIntrin::_InterlockedExchangeAdd_nf: |
979 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
980 | AtomicOrdering::Monotonic); |
981 | case MSVCIntrin::_InterlockedExchange_acq: |
982 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
983 | AtomicOrdering::Acquire); |
984 | case MSVCIntrin::_InterlockedExchange_rel: |
985 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
986 | AtomicOrdering::Release); |
987 | case MSVCIntrin::_InterlockedExchange_nf: |
988 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
989 | AtomicOrdering::Monotonic); |
990 | case MSVCIntrin::_InterlockedCompareExchange_acq: |
991 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire); |
992 | case MSVCIntrin::_InterlockedCompareExchange_rel: |
993 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release); |
994 | case MSVCIntrin::_InterlockedCompareExchange_nf: |
995 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
996 | case MSVCIntrin::_InterlockedOr_acq: |
997 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
998 | AtomicOrdering::Acquire); |
999 | case MSVCIntrin::_InterlockedOr_rel: |
1000 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1001 | AtomicOrdering::Release); |
1002 | case MSVCIntrin::_InterlockedOr_nf: |
1003 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1004 | AtomicOrdering::Monotonic); |
1005 | case MSVCIntrin::_InterlockedXor_acq: |
1006 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1007 | AtomicOrdering::Acquire); |
1008 | case MSVCIntrin::_InterlockedXor_rel: |
1009 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1010 | AtomicOrdering::Release); |
1011 | case MSVCIntrin::_InterlockedXor_nf: |
1012 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1013 | AtomicOrdering::Monotonic); |
1014 | case MSVCIntrin::_InterlockedAnd_acq: |
1015 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1016 | AtomicOrdering::Acquire); |
1017 | case MSVCIntrin::_InterlockedAnd_rel: |
1018 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1019 | AtomicOrdering::Release); |
1020 | case MSVCIntrin::_InterlockedAnd_nf: |
1021 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1022 | AtomicOrdering::Monotonic); |
1023 | case MSVCIntrin::_InterlockedIncrement_acq: |
1024 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire); |
1025 | case MSVCIntrin::_InterlockedIncrement_rel: |
1026 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release); |
1027 | case MSVCIntrin::_InterlockedIncrement_nf: |
1028 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic); |
1029 | case MSVCIntrin::_InterlockedDecrement_acq: |
1030 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire); |
1031 | case MSVCIntrin::_InterlockedDecrement_rel: |
1032 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release); |
1033 | case MSVCIntrin::_InterlockedDecrement_nf: |
1034 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic); |
1035 | |
1036 | case MSVCIntrin::_InterlockedDecrement: |
1037 | return EmitAtomicDecrementValue(*this, E); |
1038 | case MSVCIntrin::_InterlockedIncrement: |
1039 | return EmitAtomicIncrementValue(*this, E); |
1040 | |
1041 | case MSVCIntrin::__fastfail: { |
1042 | // Request immediate process termination from the kernel. The instruction |
1043 | // sequences to do this are documented on MSDN: |
1044 | // https://msdn.microsoft.com/en-us/library/dn774154.aspx |
1045 | llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
1046 | StringRef Asm, Constraints; |
1047 | switch (ISA) { |
1048 | default: |
1049 | ErrorUnsupported(E, "__fastfail call for this architecture"); |
1050 | break; |
1051 | case llvm::Triple::x86: |
1052 | case llvm::Triple::x86_64: |
1053 | Asm = "int $$0x29"; |
1054 | Constraints = "{cx}"; |
1055 | break; |
1056 | case llvm::Triple::thumb: |
1057 | Asm = "udf #251"; |
1058 | Constraints = "{r0}"; |
1059 | break; |
1060 | case llvm::Triple::aarch64: |
1061 | Asm = "brk #0xF003"; |
1062 | Constraints = "{w0}"; |
1063 | } |
1064 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); |
1065 | llvm::InlineAsm *IA = |
1066 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true); |
1067 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
1068 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1069 | llvm::Attribute::NoReturn); |
1070 | llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); |
1071 | CI->setAttributes(NoReturnAttr); |
1072 | return CI; |
1073 | } |
1074 | } |
1075 | llvm_unreachable("Incorrect MSVC intrinsic!");
1076 | } |
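// Illustration (a sketch, not code from this file): on x86-64 a call such as
//   __fastfail(7);
// is emitted as the side-effecting inline asm "int $$0x29" with the failure
// code pinned to {cx}, roughly:
//   call void asm sideeffect "int $$0x29", "{cx}"(i32 7)   ; marked noreturn
// Unsupported targets take the ErrorUnsupported path above instead.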
1077 | |
1078 | namespace { |
1079 | // ARC cleanup for __builtin_os_log_format |
1080 | struct CallObjCArcUse final : EHScopeStack::Cleanup { |
1081 | CallObjCArcUse(llvm::Value *object) : object(object) {} |
1082 | llvm::Value *object; |
1083 | |
1084 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1085 | CGF.EmitARCIntrinsicUse(object); |
1086 | } |
1087 | }; |
1088 | } |
1089 | |
1090 | Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
1091 | BuiltinCheckKind Kind) { |
1092 | assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
1093 | "Unsupported builtin check kind");
1094 | |
1095 | Value *ArgValue = EmitScalarExpr(E); |
1096 | if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) |
1097 | return ArgValue; |
1098 | |
1099 | SanitizerScope SanScope(this); |
1100 | Value *Cond = Builder.CreateICmpNE( |
1101 | ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); |
1102 | EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), |
1103 | SanitizerHandler::InvalidBuiltin, |
1104 | {EmitCheckSourceLocation(E->getExprLoc()), |
1105 | llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, |
1106 | None); |
1107 | return ArgValue; |
1108 | } |
1109 | |
1110 | /// Get the argument type for arguments to os_log_helper. |
1111 | static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
1112 | QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false); |
1113 | return C.getCanonicalType(UnsignedTy); |
1114 | } |
1115 | |
1116 | llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
1117 | const analyze_os_log::OSLogBufferLayout &Layout, |
1118 | CharUnits BufferAlignment) { |
1119 | ASTContext &Ctx = getContext(); |
1120 | |
1121 | llvm::SmallString<64> Name; |
1122 | { |
1123 | raw_svector_ostream OS(Name); |
1124 | OS << "__os_log_helper"; |
1125 | OS << "_" << BufferAlignment.getQuantity(); |
1126 | OS << "_" << int(Layout.getSummaryByte()); |
1127 | OS << "_" << int(Layout.getNumArgsByte()); |
1128 | for (const auto &Item : Layout.Items) |
1129 | OS << "_" << int(Item.getSizeByte()) << "_" |
1130 | << int(Item.getDescriptorByte()); |
1131 | } |
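// Example of the mangled helper name (hypothetical values): a buffer with
// alignment 16, summary byte 3, and two items of sizes 4 and 8 with
// descriptor bytes 0 and 34 would yield "__os_log_helper_16_3_2_4_0_8_34".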
1132 | |
1133 | if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
1134 | return F; |
1135 | |
1136 | llvm::SmallVector<QualType, 4> ArgTys; |
1137 | FunctionArgList Args; |
1138 | Args.push_back(ImplicitParamDecl::Create( |
1139 | Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy, |
1140 | ImplicitParamDecl::Other)); |
1141 | ArgTys.emplace_back(Ctx.VoidPtrTy); |
1142 | |
1143 | for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
1144 | char Size = Layout.Items[I].getSizeByte(); |
1145 | if (!Size) |
1146 | continue; |
1147 | |
1148 | QualType ArgTy = getOSLogArgType(Ctx, Size); |
1149 | Args.push_back(ImplicitParamDecl::Create( |
1150 | Ctx, nullptr, SourceLocation(), |
1151 | &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy, |
1152 | ImplicitParamDecl::Other)); |
1153 | ArgTys.emplace_back(ArgTy); |
1154 | } |
1155 | |
1156 | QualType ReturnTy = Ctx.VoidTy; |
1157 | QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1158 | |
1159 | // The helper function has linkonce_odr linkage to enable the linker to merge |
1160 | // identical functions. To ensure the merging always happens, 'noinline' is |
1161 | // attached to the function when compiling with -Oz. |
1162 | const CGFunctionInfo &FI = |
1163 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args); |
1164 | llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); |
1165 | llvm::Function *Fn = llvm::Function::Create( |
1166 | FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); |
1167 | Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
1168 | CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn); |
1169 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); |
1170 | Fn->setDoesNotThrow(); |
1171 | |
1172 | // Attach 'noinline' at -Oz. |
1173 | if (CGM.getCodeGenOpts().OptimizeSize == 2) |
1174 | Fn->addFnAttr(llvm::Attribute::NoInline); |
1175 | |
1176 | auto NL = ApplyDebugLocation::CreateEmpty(*this); |
1177 | IdentifierInfo *II = &Ctx.Idents.get(Name); |
1178 | FunctionDecl *FD = FunctionDecl::Create( |
1179 | Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, |
1180 | FunctionTy, nullptr, SC_PrivateExtern, false, false);
1181 | |
1182 | StartFunction(FD, ReturnTy, Fn, FI, Args); |
1183 | |
1184 | // Create a scope with an artificial location for the body of this function. |
1185 | auto AL = ApplyDebugLocation::CreateArtificial(*this); |
1186 | |
1187 | CharUnits Offset; |
1188 | Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), |
1189 | BufferAlignment); |
1190 | Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), |
1191 | Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); |
1192 | Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), |
1193 | Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); |
1194 | |
1195 | unsigned I = 1; |
1196 | for (const auto &Item : Layout.Items) { |
1197 | Builder.CreateStore( |
1198 | Builder.getInt8(Item.getDescriptorByte()), |
1199 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); |
1200 | Builder.CreateStore( |
1201 | Builder.getInt8(Item.getSizeByte()), |
1202 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); |
1203 | |
1204 | CharUnits Size = Item.size(); |
1205 | if (!Size.getQuantity()) |
1206 | continue; |
1207 | |
1208 | Address Arg = GetAddrOfLocalVar(Args[I]); |
1209 | Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); |
1210 | Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(), |
1211 | "argDataCast"); |
1212 | Builder.CreateStore(Builder.CreateLoad(Arg), Addr); |
1213 | Offset += Size; |
1214 | ++I; |
1215 | } |
1216 | |
1217 | FinishFunction(); |
1218 | |
1219 | return Fn; |
1220 | } |
1221 | |
1222 | RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
1223 | assert(E.getNumArgs() >= 2 &&
1224 | "__builtin_os_log_format takes at least 2 arguments");
1225 | ASTContext &Ctx = getContext(); |
1226 | analyze_os_log::OSLogBufferLayout Layout; |
1227 | analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); |
1228 | Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); |
1229 | llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
1230 | |
1231 | // Ignore argument 1, the format string. It is not currently used. |
1232 | CallArgList Args; |
1233 | Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); |
1234 | |
1235 | for (const auto &Item : Layout.Items) { |
1236 | int Size = Item.getSizeByte(); |
1237 | if (!Size) |
1238 | continue; |
1239 | |
1240 | llvm::Value *ArgVal; |
1241 | |
1242 | if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) { |
1243 | uint64_t Val = 0; |
1244 | for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I) |
1245 | Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8; |
1246 | ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val)); |
1247 | } else if (const Expr *TheExpr = Item.getExpr()) { |
1248 | ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false); |
1249 | |
1250 | // Check if this is a retainable type. |
1251 | if (TheExpr->getType()->isObjCRetainableType()) { |
1252 | assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1253 | "Only scalar can be an ObjC retainable type");
1254 | // Check if the object is constant, if not, save it in |
1255 | // RetainableOperands. |
1256 | if (!isa<Constant>(ArgVal)) |
1257 | RetainableOperands.push_back(ArgVal); |
1258 | } |
1259 | } else { |
1260 | ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); |
1261 | } |
1262 | |
1263 | unsigned ArgValSize = |
1264 | CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); |
1265 | llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), |
1266 | ArgValSize); |
1267 | ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); |
1268 | CanQualType ArgTy = getOSLogArgType(Ctx, Size); |
1269 | // If ArgVal has type x86_fp80, zero-extend ArgVal. |
1270 | ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); |
1271 | Args.add(RValue::get(ArgVal), ArgTy); |
1272 | } |
1273 | |
1274 | const CGFunctionInfo &FI = |
1275 | CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); |
1276 | llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
1277 | Layout, BufAddr.getAlignment()); |
1278 | EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); |
1279 | |
1280 | // Push a clang.arc.use cleanup for each object in RetainableOperands. The |
1281 | // cleanup will cause the use to appear after the final log call, keeping |
1282 | // the object valid while it's held in the log buffer. Note that if there's
1283 | // a release cleanup on the object, it will already be active; since |
1284 | // cleanups are emitted in reverse order, the use will occur before the |
1285 | // object is released. |
1286 | if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount && |
1287 | CGM.getCodeGenOpts().OptimizationLevel != 0) |
1288 | for (llvm::Value *Object : RetainableOperands) |
1289 | pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object); |
1290 | |
1291 | return RValue::get(BufAddr.getPointer()); |
1292 | } |
1293 | |
1294 | /// Determine if a binop is a checked mixed-sign multiply we can specialize. |
1295 | static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
1296 | WidthAndSignedness Op1Info, |
1297 | WidthAndSignedness Op2Info, |
1298 | WidthAndSignedness ResultInfo) { |
1299 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1300 | std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width && |
1301 | Op1Info.Signed != Op2Info.Signed; |
1302 | } |
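// For example, int * unsigned written into an int result qualifies (equal
// widths, differing signedness), whereas int * int does not because both
// operands are signed.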
1303 | |
1304 | /// Emit a checked mixed-sign multiply. This is a cheaper specialization of |
1305 | /// the generic checked-binop irgen. |
1306 | static RValue |
1307 | EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
1308 | WidthAndSignedness Op1Info, const clang::Expr *Op2, |
1309 | WidthAndSignedness Op2Info, |
1310 | const clang::Expr *ResultArg, QualType ResultQTy, |
1311 | WidthAndSignedness ResultInfo) { |
1312 | assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1313 | Op2Info, ResultInfo) &&
1314 | "Not a mixed-sign multiplication we can specialize");
1315 | |
1316 | // Emit the signed and unsigned operands. |
1317 | const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
1318 | const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
1319 | llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); |
1320 | llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); |
1321 | unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width; |
1322 | unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width; |
1323 | |
1324 | // One of the operands may be smaller than the other. If so, [s|z]ext it. |
1325 | if (SignedOpWidth < UnsignedOpWidth) |
1326 | Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext"); |
1327 | if (UnsignedOpWidth < SignedOpWidth) |
1328 | Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext"); |
1329 | |
1330 | llvm::Type *OpTy = Signed->getType(); |
1331 | llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); |
1332 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1333 | llvm::Type *ResTy = ResultPtr.getElementType(); |
1334 | unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width); |
1335 | |
1336 | // Take the absolute value of the signed operand. |
1337 | llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); |
1338 | llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); |
1339 | llvm::Value *AbsSigned = |
1340 | CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); |
1341 | |
1342 | // Perform a checked unsigned multiplication. |
1343 | llvm::Value *UnsignedOverflow; |
1344 | llvm::Value *UnsignedResult = |
1345 | EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, |
1346 | Unsigned, UnsignedOverflow); |
1347 | |
1348 | llvm::Value *Overflow, *Result; |
1349 | if (ResultInfo.Signed) { |
1350 | // Signed overflow occurs if the result is greater than INT_MAX or less
1351 | // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1352 | auto IntMax = |
1353 | llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth); |
1354 | llvm::Value *MaxResult = |
1355 | CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), |
1356 | CGF.Builder.CreateZExt(IsNegative, OpTy)); |
1357 | llvm::Value *SignedOverflow = |
1358 | CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); |
1359 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); |
1360 | |
1361 | // Prepare the signed result (possibly by negating it). |
1362 | llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); |
1363 | llvm::Value *SignedResult = |
1364 | CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); |
1365 | Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); |
1366 | } else { |
1367 | // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. |
1368 | llvm::Value *Underflow = CGF.Builder.CreateAnd( |
1369 | IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); |
1370 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); |
1371 | if (ResultInfo.Width < OpWidth) { |
1372 | auto IntMax = |
1373 | llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); |
1374 | llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
1375 | UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); |
1376 | Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); |
1377 | } |
1378 | |
1379 | // Negate the product if it would be negative in infinite precision. |
1380 | Result = CGF.Builder.CreateSelect( |
1381 | IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); |
1382 | |
1383 | Result = CGF.Builder.CreateTrunc(Result, ResTy); |
1384 | } |
1385 | assert(Overflow && Result && "Missing overflow or result");
1386 | |
1387 | bool isVolatile = |
1388 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
1389 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
1390 | isVolatile); |
1391 | return RValue::get(Overflow); |
1392 | } |
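// Worked sketch of the signed-result path: for
//   int res; bool ovf = __builtin_mul_overflow(-3, 5u, &res);
// IsNegative is true, AbsSigned is 3, umul.with.overflow(3, 5) yields
// {15, false}; 15 <= INT_MAX + 1 so there is no signed overflow, the product
// is negated back to -15 and stored, and the builtin returns false.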
1393 | |
1394 | static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType, |
1395 | Value *&RecordPtr, CharUnits Align, |
1396 | llvm::FunctionCallee Func, int Lvl) { |
1397 | const auto *RT = RType->getAs<RecordType>(); |
1398 | ASTContext &Context = CGF.getContext(); |
1399 | RecordDecl *RD = RT->getDecl()->getDefinition(); |
1400 | std::string Pad = std::string(Lvl * 4, ' '); |
1401 | |
1402 | Value *GString = |
1403 | CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n"); |
1404 | Value *Res = CGF.Builder.CreateCall(Func, {GString}); |
1405 | |
1406 | static llvm::DenseMap<QualType, const char *> Types; |
1407 | if (Types.empty()) { |
1408 | Types[Context.CharTy] = "%c"; |
1409 | Types[Context.BoolTy] = "%d"; |
1410 | Types[Context.SignedCharTy] = "%hhd"; |
1411 | Types[Context.UnsignedCharTy] = "%hhu"; |
1412 | Types[Context.IntTy] = "%d"; |
1413 | Types[Context.UnsignedIntTy] = "%u"; |
1414 | Types[Context.LongTy] = "%ld"; |
1415 | Types[Context.UnsignedLongTy] = "%lu"; |
1416 | Types[Context.LongLongTy] = "%lld"; |
1417 | Types[Context.UnsignedLongLongTy] = "%llu"; |
1418 | Types[Context.ShortTy] = "%hd"; |
1419 | Types[Context.UnsignedShortTy] = "%hu"; |
1420 | Types[Context.VoidPtrTy] = "%p"; |
1421 | Types[Context.FloatTy] = "%f"; |
1422 | Types[Context.DoubleTy] = "%f"; |
1423 | Types[Context.LongDoubleTy] = "%Lf"; |
1424 | Types[Context.getPointerType(Context.CharTy)] = "%s"; |
1425 | Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s"; |
1426 | } |
1427 | |
1428 | for (const auto *FD : RD->fields()) { |
1429 | Value *FieldPtr = RecordPtr; |
1430 | if (RD->isUnion()) |
1431 | FieldPtr = CGF.Builder.CreatePointerCast( |
1432 | FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType()))); |
1433 | else |
1434 | FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr, |
1435 | FD->getFieldIndex()); |
1436 | |
1437 | GString = CGF.Builder.CreateGlobalStringPtr( |
1438 | llvm::Twine(Pad) |
1439 | .concat(FD->getType().getAsString()) |
1440 | .concat(llvm::Twine(' ')) |
1441 | .concat(FD->getNameAsString()) |
1442 | .concat(" : ") |
1443 | .str()); |
1444 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
1445 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
1446 | |
1447 | QualType CanonicalType = |
1448 | FD->getType().getUnqualifiedType().getCanonicalType(); |
1449 | |
1450 | // If the field is itself a record type, recurse into it.
1451 | if (CanonicalType->isRecordType()) { |
1452 | Value *TmpRes = |
1453 | dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1); |
1454 | Res = CGF.Builder.CreateAdd(TmpRes, Res); |
1455 | continue; |
1456 | } |
1457 | |
1458 | // Pick the best printf format for the current field, defaulting to "%p".
1459 | llvm::Twine Format = Types.find(CanonicalType) == Types.end() |
1460 | ? Types[Context.VoidPtrTy] |
1461 | : Types[CanonicalType]; |
1462 | |
1463 | Address FieldAddress = Address(FieldPtr, Align); |
1464 | FieldPtr = CGF.Builder.CreateLoad(FieldAddress); |
1465 | |
1466 | // FIXME Need to handle bitfield here |
1467 | GString = CGF.Builder.CreateGlobalStringPtr( |
1468 | Format.concat(llvm::Twine('\n')).str()); |
1469 | TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr}); |
1470 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
1471 | } |
1472 | |
1473 | GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n"); |
1474 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
1475 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
1476 | return Res; |
1477 | } |
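// Illustrative use (hypothetical struct and printer):
//   struct S { int x; float y; } s = {1, 2.0f};
//   __builtin_dump_struct(&s, &printf);
// emits calls that print the record type, one "type name : value" line per
// field using the formats above (falling back to "%p"), and a closing "}".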
1478 | |
1479 | static bool |
1480 | TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, |
1481 | llvm::SmallPtrSetImpl<const Decl *> &Seen) { |
1482 | if (const auto *Arr = Ctx.getAsArrayType(Ty)) |
1483 | Ty = Ctx.getBaseElementType(Arr); |
1484 | |
1485 | const auto *Record = Ty->getAsCXXRecordDecl(); |
1486 | if (!Record) |
1487 | return false; |
1488 | |
1489 | // We've already checked this type, or are in the process of checking it. |
1490 | if (!Seen.insert(Record).second) |
1491 | return false; |
1492 | |
1493 | assert(Record->hasDefinition() &&
1494 | "Incomplete types should already be diagnosed");
1495 | |
1496 | if (Record->isDynamicClass()) |
1497 | return true; |
1498 | |
1499 | for (FieldDecl *F : Record->fields()) { |
1500 | if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen)) |
1501 | return true; |
1502 | } |
1503 | return false; |
1504 | } |
1505 | |
1506 | /// Determine if the specified type requires laundering by checking if it is a |
1507 | /// dynamic class type or contains a subobject which is a dynamic class type. |
1508 | static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { |
1509 | if (!CGM.getCodeGenOpts().StrictVTablePointers) |
1510 | return false; |
1511 | llvm::SmallPtrSet<const Decl *, 16> Seen; |
1512 | return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen); |
1513 | } |
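// For example, under -fstrict-vtable-pointers:
//   struct Poly { virtual ~Poly(); };        // requires __builtin_launder
//   struct Plain { int x; Poly *p; };        // does not: a pointer member is
//                                            // not a dynamic-class subobject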
1514 | |
1515 | RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { |
1516 | llvm::Value *Src = EmitScalarExpr(E->getArg(0)); |
1517 | llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1)); |
1518 | |
1519 | // The builtin's shift arg may have a different type than the source arg and |
1520 | // result, but the LLVM intrinsic uses the same type for all values. |
1521 | llvm::Type *Ty = Src->getType(); |
1522 | ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false); |
1523 | |
1524 | // Rotate is a special case of LLVM funnel shift: the first two args are the same.
1525 | unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; |
1526 | Function *F = CGM.getIntrinsic(IID, Ty); |
1527 | return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt })); |
1528 | } |
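// Sketch of the resulting IR for a 32-bit rotate-left (rotate-right uses
// @llvm.fshr.i32 instead):
//   %rot = call i32 @llvm.fshl.i32(i32 %src, i32 %src, i32 %shiftamt)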
1529 | |
1530 | RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, |
1531 | const CallExpr *E, |
1532 | ReturnValueSlot ReturnValue) { |
1533 | const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
1534 | // See if we can constant fold this builtin. If so, don't emit it at all. |
1535 | Expr::EvalResult Result; |
1536 | if (E->EvaluateAsRValue(Result, CGM.getContext()) && |
1537 | !Result.hasSideEffects()) { |
1538 | if (Result.Val.isInt()) |
1539 | return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
1540 | Result.Val.getInt())); |
1541 | if (Result.Val.isFloat()) |
1542 | return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
1543 | Result.Val.getFloat())); |
1544 | } |
1545 | |
1546 | // There are LLVM math intrinsics/instructions corresponding to math library |
1547 | // functions, except that the LLVM op never sets errno while the math library
1548 | // might. Also, math builtins have the same semantics as their math library |
1549 | // twins. Thus, we can transform math library and builtin calls to their |
1550 | // LLVM counterparts if the call is marked 'const' (known to never set errno). |
1551 | if (FD->hasAttr<ConstAttr>()) { |
1552 | switch (BuiltinID) { |
1553 | case Builtin::BIceil: |
1554 | case Builtin::BIceilf: |
1555 | case Builtin::BIceill: |
1556 | case Builtin::BI__builtin_ceil: |
1557 | case Builtin::BI__builtin_ceilf: |
1558 | case Builtin::BI__builtin_ceill: |
1559 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil)); |
1560 | |
1561 | case Builtin::BIcopysign: |
1562 | case Builtin::BIcopysignf: |
1563 | case Builtin::BIcopysignl: |
1564 | case Builtin::BI__builtin_copysign: |
1565 | case Builtin::BI__builtin_copysignf: |
1566 | case Builtin::BI__builtin_copysignl: |
1567 | case Builtin::BI__builtin_copysignf128: |
1568 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
1569 | |
1570 | case Builtin::BIcos: |
1571 | case Builtin::BIcosf: |
1572 | case Builtin::BIcosl: |
1573 | case Builtin::BI__builtin_cos: |
1574 | case Builtin::BI__builtin_cosf: |
1575 | case Builtin::BI__builtin_cosl: |
1576 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos)); |
1577 | |
1578 | case Builtin::BIexp: |
1579 | case Builtin::BIexpf: |
1580 | case Builtin::BIexpl: |
1581 | case Builtin::BI__builtin_exp: |
1582 | case Builtin::BI__builtin_expf: |
1583 | case Builtin::BI__builtin_expl: |
1584 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp)); |
1585 | |
1586 | case Builtin::BIexp2: |
1587 | case Builtin::BIexp2f: |
1588 | case Builtin::BIexp2l: |
1589 | case Builtin::BI__builtin_exp2: |
1590 | case Builtin::BI__builtin_exp2f: |
1591 | case Builtin::BI__builtin_exp2l: |
1592 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2)); |
1593 | |
1594 | case Builtin::BIfabs: |
1595 | case Builtin::BIfabsf: |
1596 | case Builtin::BIfabsl: |
1597 | case Builtin::BI__builtin_fabs: |
1598 | case Builtin::BI__builtin_fabsf: |
1599 | case Builtin::BI__builtin_fabsl: |
1600 | case Builtin::BI__builtin_fabsf128: |
1601 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
1602 | |
1603 | case Builtin::BIfloor: |
1604 | case Builtin::BIfloorf: |
1605 | case Builtin::BIfloorl: |
1606 | case Builtin::BI__builtin_floor: |
1607 | case Builtin::BI__builtin_floorf: |
1608 | case Builtin::BI__builtin_floorl: |
1609 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor)); |
1610 | |
1611 | case Builtin::BIfma: |
1612 | case Builtin::BIfmaf: |
1613 | case Builtin::BIfmal: |
1614 | case Builtin::BI__builtin_fma: |
1615 | case Builtin::BI__builtin_fmaf: |
1616 | case Builtin::BI__builtin_fmal: |
1617 | return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma)); |
1618 | |
1619 | case Builtin::BIfmax: |
1620 | case Builtin::BIfmaxf: |
1621 | case Builtin::BIfmaxl: |
1622 | case Builtin::BI__builtin_fmax: |
1623 | case Builtin::BI__builtin_fmaxf: |
1624 | case Builtin::BI__builtin_fmaxl: |
1625 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum)); |
1626 | |
1627 | case Builtin::BIfmin: |
1628 | case Builtin::BIfminf: |
1629 | case Builtin::BIfminl: |
1630 | case Builtin::BI__builtin_fmin: |
1631 | case Builtin::BI__builtin_fminf: |
1632 | case Builtin::BI__builtin_fminl: |
1633 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum)); |
1634 | |
1635 | // fmod() is a special-case. It maps to the frem instruction rather than an |
1636 | // LLVM intrinsic. |
1637 | case Builtin::BIfmod: |
1638 | case Builtin::BIfmodf: |
1639 | case Builtin::BIfmodl: |
1640 | case Builtin::BI__builtin_fmod: |
1641 | case Builtin::BI__builtin_fmodf: |
1642 | case Builtin::BI__builtin_fmodl: { |
1643 | Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
1644 | Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
1645 | return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); |
1646 | } |
1647 | |
1648 | case Builtin::BIlog: |
1649 | case Builtin::BIlogf: |
1650 | case Builtin::BIlogl: |
1651 | case Builtin::BI__builtin_log: |
1652 | case Builtin::BI__builtin_logf: |
1653 | case Builtin::BI__builtin_logl: |
1654 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log)); |
1655 | |
1656 | case Builtin::BIlog10: |
1657 | case Builtin::BIlog10f: |
1658 | case Builtin::BIlog10l: |
1659 | case Builtin::BI__builtin_log10: |
1660 | case Builtin::BI__builtin_log10f: |
1661 | case Builtin::BI__builtin_log10l: |
1662 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10)); |
1663 | |
1664 | case Builtin::BIlog2: |
1665 | case Builtin::BIlog2f: |
1666 | case Builtin::BIlog2l: |
1667 | case Builtin::BI__builtin_log2: |
1668 | case Builtin::BI__builtin_log2f: |
1669 | case Builtin::BI__builtin_log2l: |
1670 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2)); |
1671 | |
1672 | case Builtin::BInearbyint: |
1673 | case Builtin::BInearbyintf: |
1674 | case Builtin::BInearbyintl: |
1675 | case Builtin::BI__builtin_nearbyint: |
1676 | case Builtin::BI__builtin_nearbyintf: |
1677 | case Builtin::BI__builtin_nearbyintl: |
1678 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint)); |
1679 | |
1680 | case Builtin::BIpow: |
1681 | case Builtin::BIpowf: |
1682 | case Builtin::BIpowl: |
1683 | case Builtin::BI__builtin_pow: |
1684 | case Builtin::BI__builtin_powf: |
1685 | case Builtin::BI__builtin_powl: |
1686 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow)); |
1687 | |
1688 | case Builtin::BIrint: |
1689 | case Builtin::BIrintf: |
1690 | case Builtin::BIrintl: |
1691 | case Builtin::BI__builtin_rint: |
1692 | case Builtin::BI__builtin_rintf: |
1693 | case Builtin::BI__builtin_rintl: |
1694 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint)); |
1695 | |
1696 | case Builtin::BIround: |
1697 | case Builtin::BIroundf: |
1698 | case Builtin::BIroundl: |
1699 | case Builtin::BI__builtin_round: |
1700 | case Builtin::BI__builtin_roundf: |
1701 | case Builtin::BI__builtin_roundl: |
1702 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round)); |
1703 | |
1704 | case Builtin::BIsin: |
1705 | case Builtin::BIsinf: |
1706 | case Builtin::BIsinl: |
1707 | case Builtin::BI__builtin_sin: |
1708 | case Builtin::BI__builtin_sinf: |
1709 | case Builtin::BI__builtin_sinl: |
1710 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin)); |
1711 | |
1712 | case Builtin::BIsqrt: |
1713 | case Builtin::BIsqrtf: |
1714 | case Builtin::BIsqrtl: |
1715 | case Builtin::BI__builtin_sqrt: |
1716 | case Builtin::BI__builtin_sqrtf: |
1717 | case Builtin::BI__builtin_sqrtl: |
1718 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt)); |
1719 | |
1720 | case Builtin::BItrunc: |
1721 | case Builtin::BItruncf: |
1722 | case Builtin::BItruncl: |
1723 | case Builtin::BI__builtin_trunc: |
1724 | case Builtin::BI__builtin_truncf: |
1725 | case Builtin::BI__builtin_truncl: |
1726 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc)); |
1727 | |
1728 | case Builtin::BIlround: |
1729 | case Builtin::BIlroundf: |
1730 | case Builtin::BIlroundl: |
1731 | case Builtin::BI__builtin_lround: |
1732 | case Builtin::BI__builtin_lroundf: |
1733 | case Builtin::BI__builtin_lroundl: |
1734 | return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lround)); |
1735 | |
1736 | case Builtin::BIllround: |
1737 | case Builtin::BIllroundf: |
1738 | case Builtin::BIllroundl: |
1739 | case Builtin::BI__builtin_llround: |
1740 | case Builtin::BI__builtin_llroundf: |
1741 | case Builtin::BI__builtin_llroundl: |
1742 | return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround)); |
1743 | |
1744 | case Builtin::BIlrint: |
1745 | case Builtin::BIlrintf: |
1746 | case Builtin::BIlrintl: |
1747 | case Builtin::BI__builtin_lrint: |
1748 | case Builtin::BI__builtin_lrintf: |
1749 | case Builtin::BI__builtin_lrintl: |
1750 | return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint)); |
1751 | |
1752 | case Builtin::BIllrint: |
1753 | case Builtin::BIllrintf: |
1754 | case Builtin::BIllrintl: |
1755 | case Builtin::BI__builtin_llrint: |
1756 | case Builtin::BI__builtin_llrintf: |
1757 | case Builtin::BI__builtin_llrintl: |
1758 | return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint)); |
1759 | |
1760 | default: |
1761 | break; |
1762 | } |
1763 | } |
1764 | |
1765 | switch (BuiltinID) { |
1766 | default: break; |
1767 | case Builtin::BI__builtin___CFStringMakeConstantString: |
1768 | case Builtin::BI__builtin___NSStringMakeConstantString: |
1769 | return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); |
1770 | case Builtin::BI__builtin_stdarg_start: |
1771 | case Builtin::BI__builtin_va_start: |
1772 | case Builtin::BI__va_start: |
1773 | case Builtin::BI__builtin_va_end: |
1774 | return RValue::get( |
1775 | EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
1776 | ? EmitScalarExpr(E->getArg(0)) |
1777 | : EmitVAListRef(E->getArg(0)).getPointer(), |
1778 | BuiltinID != Builtin::BI__builtin_va_end)); |
1779 | case Builtin::BI__builtin_va_copy: { |
1780 | Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
1781 | Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
1782 | |
1783 | llvm::Type *Type = Int8PtrTy; |
1784 | |
1785 | DstPtr = Builder.CreateBitCast(DstPtr, Type); |
1786 | SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
1787 | return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
1788 | {DstPtr, SrcPtr})); |
1789 | } |
1790 | case Builtin::BI__builtin_abs: |
1791 | case Builtin::BI__builtin_labs: |
1792 | case Builtin::BI__builtin_llabs: { |
1793 | // X < 0 ? -X : X |
1794 | // The negation has 'nsw' because abs of INT_MIN is undefined. |
1795 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1796 | Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg"); |
1797 | Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType()); |
1798 | Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond"); |
1799 | Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs"); |
1800 | return RValue::get(Result); |
1801 | } |
1802 | case Builtin::BI__builtin_conj: |
1803 | case Builtin::BI__builtin_conjf: |
1804 | case Builtin::BI__builtin_conjl: { |
1805 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
1806 | Value *Real = ComplexVal.first; |
1807 | Value *Imag = ComplexVal.second; |
1808 | Value *Zero = |
1809 | Imag->getType()->isFPOrFPVectorTy() |
1810 | ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType()) |
1811 | : llvm::Constant::getNullValue(Imag->getType()); |
1812 | |
1813 | Imag = Builder.CreateFSub(Zero, Imag, "sub"); |
1814 | return RValue::getComplex(std::make_pair(Real, Imag)); |
1815 | } |
1816 | case Builtin::BI__builtin_creal: |
1817 | case Builtin::BI__builtin_crealf: |
1818 | case Builtin::BI__builtin_creall: |
1819 | case Builtin::BIcreal: |
1820 | case Builtin::BIcrealf: |
1821 | case Builtin::BIcreall: { |
1822 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
1823 | return RValue::get(ComplexVal.first); |
1824 | } |
1825 | |
1826 | case Builtin::BI__builtin_dump_struct: { |
1827 | llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy); |
1828 | llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get( |
1829 | LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true); |
1830 | |
1831 | Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts()); |
1832 | CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment(); |
1833 | |
1834 | const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts(); |
1835 | QualType Arg0Type = Arg0->getType()->getPointeeType(); |
1836 | |
1837 | Value *RecordPtr = EmitScalarExpr(Arg0); |
1838 | Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, |
1839 | {LLVMFuncType, Func}, 0); |
1840 | return RValue::get(Res); |
1841 | } |
1842 | |
1843 | case Builtin::BI__builtin_cimag: |
1844 | case Builtin::BI__builtin_cimagf: |
1845 | case Builtin::BI__builtin_cimagl: |
1846 | case Builtin::BIcimag: |
1847 | case Builtin::BIcimagf: |
1848 | case Builtin::BIcimagl: { |
1849 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
1850 | return RValue::get(ComplexVal.second); |
1851 | } |
1852 | |
1853 | case Builtin::BI__builtin_clrsb: |
1854 | case Builtin::BI__builtin_clrsbl: |
1855 | case Builtin::BI__builtin_clrsbll: { |
1856 | // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
1857 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1858 | |
1859 | llvm::Type *ArgType = ArgValue->getType(); |
1860 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
1861 | |
1862 | llvm::Type *ResultType = ConvertType(E->getType()); |
1863 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
1864 | Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg"); |
1865 | Value *Inverse = Builder.CreateNot(ArgValue, "not"); |
1866 | Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue); |
1867 | Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()}); |
1868 | Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1)); |
1869 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1870 | "cast"); |
1871 | return RValue::get(Result); |
1872 | } |
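// For a 32-bit argument, clrsb(0) and clrsb(-1) both compute ctlz(0) - 1 = 31,
// i.e. the number of redundant sign bits, matching GCC's definition.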
1873 | case Builtin::BI__builtin_ctzs: |
1874 | case Builtin::BI__builtin_ctz: |
1875 | case Builtin::BI__builtin_ctzl: |
1876 | case Builtin::BI__builtin_ctzll: { |
1877 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); |
1878 | |
1879 | llvm::Type *ArgType = ArgValue->getType(); |
1880 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
1881 | |
1882 | llvm::Type *ResultType = ConvertType(E->getType()); |
1883 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
1884 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
1885 | if (Result->getType() != ResultType) |
1886 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1887 | "cast"); |
1888 | return RValue::get(Result); |
1889 | } |
1890 | case Builtin::BI__builtin_clzs: |
1891 | case Builtin::BI__builtin_clz: |
1892 | case Builtin::BI__builtin_clzl: |
1893 | case Builtin::BI__builtin_clzll: { |
1894 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); |
1895 | |
1896 | llvm::Type *ArgType = ArgValue->getType(); |
1897 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
1898 | |
1899 | llvm::Type *ResultType = ConvertType(E->getType()); |
1900 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
1901 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
1902 | if (Result->getType() != ResultType) |
1903 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1904 | "cast"); |
1905 | return RValue::get(Result); |
1906 | } |
1907 | case Builtin::BI__builtin_ffs: |
1908 | case Builtin::BI__builtin_ffsl: |
1909 | case Builtin::BI__builtin_ffsll: { |
1910 | // ffs(x) -> x ? cttz(x) + 1 : 0 |
1911 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1912 | |
1913 | llvm::Type *ArgType = ArgValue->getType(); |
1914 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
1915 | |
1916 | llvm::Type *ResultType = ConvertType(E->getType()); |
1917 | Value *Tmp = |
1918 | Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
1919 | llvm::ConstantInt::get(ArgType, 1)); |
1920 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
1921 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
1922 | Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
1923 | if (Result->getType() != ResultType) |
1924 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1925 | "cast"); |
1926 | return RValue::get(Result); |
1927 | } |
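// e.g. ffs(0x18) = cttz(0x18) + 1 = 3 + 1 = 4; for ffs(0) the IsZero select
// returns 0 even though cttz's result is undefined for a zero input here.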
1928 | case Builtin::BI__builtin_parity: |
1929 | case Builtin::BI__builtin_parityl: |
1930 | case Builtin::BI__builtin_parityll: { |
1931 | // parity(x) -> ctpop(x) & 1 |
1932 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1933 | |
1934 | llvm::Type *ArgType = ArgValue->getType(); |
1935 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
1936 | |
1937 | llvm::Type *ResultType = ConvertType(E->getType()); |
1938 | Value *Tmp = Builder.CreateCall(F, ArgValue); |
1939 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
1940 | if (Result->getType() != ResultType) |
1941 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1942 | "cast"); |
1943 | return RValue::get(Result); |
1944 | } |
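// e.g. parity(0b1011) = ctpop(0b1011) & 1 = 3 & 1 = 1.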
1945 | case Builtin::BI__lzcnt16: |
1946 | case Builtin::BI__lzcnt: |
1947 | case Builtin::BI__lzcnt64: { |
1948 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1949 | |
1950 | llvm::Type *ArgType = ArgValue->getType(); |
1951 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
1952 | |
1953 | llvm::Type *ResultType = ConvertType(E->getType()); |
1954 | Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()}); |
1955 | if (Result->getType() != ResultType) |
1956 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1957 | "cast"); |
1958 | return RValue::get(Result); |
1959 | } |
1960 | case Builtin::BI__popcnt16: |
1961 | case Builtin::BI__popcnt: |
1962 | case Builtin::BI__popcnt64: |
1963 | case Builtin::BI__builtin_popcount: |
1964 | case Builtin::BI__builtin_popcountl: |
1965 | case Builtin::BI__builtin_popcountll: { |
1966 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1967 | |
1968 | llvm::Type *ArgType = ArgValue->getType(); |
1969 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
1970 | |
1971 | llvm::Type *ResultType = ConvertType(E->getType()); |
1972 | Value *Result = Builder.CreateCall(F, ArgValue); |
1973 | if (Result->getType() != ResultType) |
1974 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
1975 | "cast"); |
1976 | return RValue::get(Result); |
1977 | } |
1978 | case Builtin::BI__builtin_unpredictable: { |
1979 | // Always return the argument of __builtin_unpredictable. LLVM does not |
1980 | // handle this builtin. Metadata for this builtin should be added directly |
1981 | // to instructions such as branches or switches that use it. |
1982 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
1983 | } |
1984 | case Builtin::BI__builtin_expect: { |
1985 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
1986 | llvm::Type *ArgType = ArgValue->getType(); |
1987 | |
1988 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
1989 | // Don't generate llvm.expect on -O0 as the backend won't use it for |
1990 | // anything. |
1991 | // Note, we still IRGen ExpectedValue because it could have side-effects. |
1992 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
1993 | return RValue::get(ArgValue); |
1994 | |
1995 | Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
1996 | Value *Result = |
1997 | Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
1998 | return RValue::get(Result); |
1999 | } |
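// Sketch of the emitted call at -O1 and above (the type follows the argument):
//   %expval = call i64 @llvm.expect.i64(i64 %arg, i64 %expected)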
2000 | case Builtin::BI__builtin_assume_aligned: { |
2001 | const Expr *Ptr = E->getArg(0); |
2002 | Value *PtrValue = EmitScalarExpr(Ptr); |
2003 | Value *OffsetValue = |
2004 | (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
2005 | |
2006 | Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
2007 | ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
2008 | unsigned Alignment = (unsigned)AlignmentCI->getZExtValue(); |
2009 | |
2010 | EmitAlignmentAssumption(PtrValue, Ptr, |
2011 | /*The expr loc is sufficient.*/ SourceLocation(), |
2012 | Alignment, OffsetValue); |
2013 | return RValue::get(PtrValue); |
2014 | } |
2015 | case Builtin::BI__assume: |
2016 | case Builtin::BI__builtin_assume: { |
2017 | if (E->getArg(0)->HasSideEffects(getContext())) |
2018 | return RValue::get(nullptr); |
2019 | |
2020 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2021 | Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
2022 | return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
2023 | } |
2024 | case Builtin::BI__builtin_bswap16: |
2025 | case Builtin::BI__builtin_bswap32: |
2026 | case Builtin::BI__builtin_bswap64: { |
2027 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
2028 | } |
2029 | case Builtin::BI__builtin_bitreverse8: |
2030 | case Builtin::BI__builtin_bitreverse16: |
2031 | case Builtin::BI__builtin_bitreverse32: |
2032 | case Builtin::BI__builtin_bitreverse64: { |
2033 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
2034 | } |
2035 | case Builtin::BI__builtin_rotateleft8: |
2036 | case Builtin::BI__builtin_rotateleft16: |
2037 | case Builtin::BI__builtin_rotateleft32: |
2038 | case Builtin::BI__builtin_rotateleft64: |
2039 | case Builtin::BI_rotl8: // Microsoft variants of rotate left |
2040 | case Builtin::BI_rotl16: |
2041 | case Builtin::BI_rotl: |
2042 | case Builtin::BI_lrotl: |
2043 | case Builtin::BI_rotl64: |
2044 | return emitRotate(E, false); |
2045 | |
2046 | case Builtin::BI__builtin_rotateright8: |
2047 | case Builtin::BI__builtin_rotateright16: |
2048 | case Builtin::BI__builtin_rotateright32: |
2049 | case Builtin::BI__builtin_rotateright64: |
2050 | case Builtin::BI_rotr8: // Microsoft variants of rotate right |
2051 | case Builtin::BI_rotr16: |
2052 | case Builtin::BI_rotr: |
2053 | case Builtin::BI_lrotr: |
2054 | case Builtin::BI_rotr64: |
2055 | return emitRotate(E, true); |
2056 | |
2057 | case Builtin::BI__builtin_constant_p: { |
2058 | llvm::Type *ResultType = ConvertType(E->getType()); |
2059 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2060 | // At -O0, we don't perform inlining, so we don't need to delay the |
2061 | // processing. |
2062 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2063 | |
2064 | const Expr *Arg = E->getArg(0); |
2065 | QualType ArgType = Arg->getType(); |
2066 | // FIXME: The allowance for Obj-C pointers and block pointers is historical |
2067 | // and likely a mistake. |
2068 | if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && |
2069 | !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) |
2070 | // Per the GCC documentation, only numeric constants are recognized after |
2071 | // inlining. |
2072 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2073 | |
2074 | if (Arg->HasSideEffects(getContext())) |
2075 | // The argument is unevaluated, so be conservative if it might have |
2076 | // side-effects. |
2077 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2078 | |
2079 | Value *ArgValue = EmitScalarExpr(Arg); |
2080 | if (ArgType->isObjCObjectPointerType()) { |
2081 | // Convert Objective-C objects to id because we cannot distinguish between |
2082 | // LLVM types for Obj-C classes as they are opaque. |
2083 | ArgType = CGM.getContext().getObjCIdType(); |
2084 | ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType)); |
2085 | } |
2086 | Function *F = |
2087 | CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType)); |
2088 | Value *Result = Builder.CreateCall(F, ArgValue); |
2089 | if (Result->getType() != ResultType) |
2090 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false); |
2091 | return RValue::get(Result); |
2092 | } |
2093 | case Builtin::BI__builtin_dynamic_object_size: |
2094 | case Builtin::BI__builtin_object_size: { |
2095 | unsigned Type = |
2096 | E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
2097 | auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
2098 | |
2099 | // We pass this builtin onto the optimizer so that it can figure out the |
2100 | // object size in more complex cases. |
2101 | bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; |
2102 | return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, |
2103 | /*EmittedE=*/nullptr, IsDynamic)); |
2104 | } |
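// A hypothetical lowering when the size cannot be folded (LLVM 9 signature):
//   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 %min, i1 %nullunknown,
//                                            i1 %dynamic)
// where %min reflects (Type & 2) and %dynamic is true only for
// __builtin_dynamic_object_size.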
2105 | case Builtin::BI__builtin_prefetch: { |
2106 | Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
2107 | // FIXME: Technically these constants should be of type 'int', yes?
2108 | RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
2109 | llvm::ConstantInt::get(Int32Ty, 0); |
2110 | Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
2111 | llvm::ConstantInt::get(Int32Ty, 3); |
2112 | Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
2113 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch); |
2114 | return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
2115 | } |
2116 | case Builtin::BI__builtin_readcyclecounter: { |
2117 | Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
2118 | return RValue::get(Builder.CreateCall(F)); |
2119 | } |
2120 | case Builtin::BI__builtin___clear_cache: { |
2121 | Value *Begin = EmitScalarExpr(E->getArg(0)); |
2122 | Value *End = EmitScalarExpr(E->getArg(1)); |
2123 | Function *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
2124 | return RValue::get(Builder.CreateCall(F, {Begin, End})); |
2125 | } |
2126 | case Builtin::BI__builtin_trap: |
2127 | return RValue::get(EmitTrapCall(Intrinsic::trap)); |
2128 | case Builtin::BI__debugbreak: |
2129 | return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
2130 | case Builtin::BI__builtin_unreachable: { |
2131 | EmitUnreachable(E->getExprLoc()); |
2132 | |
2133 | // We do need to preserve an insertion point. |
2134 | EmitBlock(createBasicBlock("unreachable.cont")); |
2135 | |
2136 | return RValue::get(nullptr); |
2137 | } |
2138 | |
2139 | case Builtin::BI__builtin_powi: |
2140 | case Builtin::BI__builtin_powif: |
2141 | case Builtin::BI__builtin_powil: { |
2142 | Value *Base = EmitScalarExpr(E->getArg(0)); |
2143 | Value *Exponent = EmitScalarExpr(E->getArg(1)); |
2144 | llvm::Type *ArgType = Base->getType(); |
2145 | Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType); |
2146 | return RValue::get(Builder.CreateCall(F, {Base, Exponent})); |
2147 | } |
2148 | |
2149 | case Builtin::BI__builtin_isgreater: |
2150 | case Builtin::BI__builtin_isgreaterequal: |
2151 | case Builtin::BI__builtin_isless: |
2152 | case Builtin::BI__builtin_islessequal: |
2153 | case Builtin::BI__builtin_islessgreater: |
2154 | case Builtin::BI__builtin_isunordered: { |
2155 | // Ordered comparisons: we know the arguments to these are matching scalar |
2156 | // floating point values. |
2157 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
2158 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
2159 | |
2160 | switch (BuiltinID) { |
2161 | default: llvm_unreachable("Unknown ordered comparison");
2162 | case Builtin::BI__builtin_isgreater: |
2163 | LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
2164 | break; |
2165 | case Builtin::BI__builtin_isgreaterequal: |
2166 | LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
2167 | break; |
2168 | case Builtin::BI__builtin_isless: |
2169 | LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
2170 | break; |
2171 | case Builtin::BI__builtin_islessequal: |
2172 | LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
2173 | break; |
2174 | case Builtin::BI__builtin_islessgreater: |
2175 | LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
2176 | break; |
2177 | case Builtin::BI__builtin_isunordered: |
2178 | LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
2179 | break; |
2180 | } |
2181 | // ZExt bool to int type. |
2182 | return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
2183 | } |
2184 | case Builtin::BI__builtin_isnan: { |
2185 | Value *V = EmitScalarExpr(E->getArg(0)); |
2186 | V = Builder.CreateFCmpUNO(V, V, "cmp"); |
2187 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
2188 | } |
2189 | |
2190 | case Builtin::BIfinite: |
2191 | case Builtin::BI__finite: |
2192 | case Builtin::BIfinitef: |
2193 | case Builtin::BI__finitef: |
2194 | case Builtin::BIfinitel: |
2195 | case Builtin::BI__finitel: |
2196 | case Builtin::BI__builtin_isinf: |
2197 | case Builtin::BI__builtin_isfinite: { |
2198 | // isinf(x) --> fabs(x) == infinity |
2199 | // isfinite(x) --> fabs(x) != infinity |
2200 | // x != NaN via the ordered compare in either case. |
2201 | Value *V = EmitScalarExpr(E->getArg(0)); |
2202 | Value *Fabs = EmitFAbs(*this, V); |
2203 | Constant *Infinity = ConstantFP::getInfinity(V->getType()); |
2204 | CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) |
2205 | ? CmpInst::FCMP_OEQ |
2206 | : CmpInst::FCMP_ONE; |
2207 | Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); |
2208 | return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); |
2209 | } |
2210 | |
2211 | case Builtin::BI__builtin_isinf_sign: { |
2212 | // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 |
2213 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
2214 | Value *AbsArg = EmitFAbs(*this, Arg); |
2215 | Value *IsInf = Builder.CreateFCmpOEQ( |
2216 | AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); |
2217 | Value *IsNeg = EmitSignBit(*this, Arg); |
2218 | |
2219 | llvm::Type *IntTy = ConvertType(E->getType()); |
2220 | Value *Zero = Constant::getNullValue(IntTy); |
2221 | Value *One = ConstantInt::get(IntTy, 1); |
2222 | Value *NegativeOne = ConstantInt::get(IntTy, -1); |
2223 | Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); |
2224 | Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); |
2225 | return RValue::get(Result); |
2226 | } |
2227 | |
2228 | case Builtin::BI__builtin_isnormal: { |
2229 | // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min |
2230 | Value *V = EmitScalarExpr(E->getArg(0)); |
2231 | Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); |
2232 | |
2233 | Value *Abs = EmitFAbs(*this, V); |
2234 | Value *IsLessThanInf = |
2235 | Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); |
2236 | APFloat Smallest = APFloat::getSmallestNormalized( |
2237 | getContext().getFloatTypeSemantics(E->getArg(0)->getType())); |
2238 | Value *IsNormal = |
2239 | Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), |
2240 | "isnormal"); |
2241 | V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); |
2242 | V = Builder.CreateAnd(V, IsNormal, "and"); |
2243 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
2244 | } |
2245 | |
2246 | case Builtin::BI__builtin_flt_rounds: { |
2247 | Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds); |
2248 | |
2249 | llvm::Type *ResultType = ConvertType(E->getType()); |
2250 | Value *Result = Builder.CreateCall(F); |
2251 | if (Result->getType() != ResultType) |
2252 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, |
2253 | "cast"); |
2254 | return RValue::get(Result); |
2255 | } |
2256 | |
2257 | case Builtin::BI__builtin_fpclassify: { |
2258 | Value *V = EmitScalarExpr(E->getArg(5)); |
2259 | llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); |
2260 | |
2261 | // Create Result |
2262 | BasicBlock *Begin = Builder.GetInsertBlock(); |
2263 | BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); |
2264 | Builder.SetInsertPoint(End); |
2265 | PHINode *Result = |
2266 | Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, |
2267 | "fpclassify_result"); |
2268 | |
2269 | // if (V==0) return FP_ZERO |
2270 | Builder.SetInsertPoint(Begin); |
2271 | Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), |
2272 | "iszero"); |
2273 | Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); |
2274 | BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); |
2275 | Builder.CreateCondBr(IsZero, End, NotZero); |
2276 | Result->addIncoming(ZeroLiteral, Begin); |
2277 | |
2278 | // if (V != V) return FP_NAN |
2279 | Builder.SetInsertPoint(NotZero); |
2280 | Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); |
2281 | Value *NanLiteral = EmitScalarExpr(E->getArg(0)); |
2282 | BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); |
2283 | Builder.CreateCondBr(IsNan, End, NotNan); |
2284 | Result->addIncoming(NanLiteral, NotZero); |
2285 | |
2286 | // if (fabs(V) == infinity) return FP_INFINITY |
2287 | Builder.SetInsertPoint(NotNan); |
2288 | Value *VAbs = EmitFAbs(*this, V); |
2289 | Value *IsInf = |
2290 | Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), |
2291 | "isinf"); |
2292 | Value *InfLiteral = EmitScalarExpr(E->getArg(1)); |
2293 | BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); |
2294 | Builder.CreateCondBr(IsInf, End, NotInf); |
2295 | Result->addIncoming(InfLiteral, NotNan); |
2296 | |
2297 | // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL |
2298 | Builder.SetInsertPoint(NotInf); |
2299 | APFloat Smallest = APFloat::getSmallestNormalized( |
2300 | getContext().getFloatTypeSemantics(E->getArg(5)->getType())); |
2301 | Value *IsNormal = |
2302 | Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), |
2303 | "isnormal"); |
2304 | Value *NormalResult = |
2305 | Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), |
2306 | EmitScalarExpr(E->getArg(3))); |
2307 | Builder.CreateBr(End); |
2308 | Result->addIncoming(NormalResult, NotInf); |
2309 | |
2310 | // return Result |
2311 | Builder.SetInsertPoint(End); |
2312 | return RValue::get(Result); |
2313 | } |
2314 | |
2315 | case Builtin::BIalloca: |
2316 | case Builtin::BI_alloca: |
2317 | case Builtin::BI__builtin_alloca: { |
2318 | Value *Size = EmitScalarExpr(E->getArg(0)); |
2319 | const TargetInfo &TI = getContext().getTargetInfo(); |
2320 | // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. |
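     | // For illustration: on a typical x86-64 target __BIGGEST_ALIGNMENT__ is 16,
     | // so 'char *p = __builtin_alloca(n);' emits roughly
     | //   %p = alloca i8, i64 %n, align 16
     | // (the alignment value is target-dependent).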
2321 | unsigned SuitableAlignmentInBytes = |
2322 | CGM.getContext() |
2323 | .toCharUnitsFromBits(TI.getSuitableAlign()) |
2324 | .getQuantity(); |
2325 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
2326 | AI->setAlignment(SuitableAlignmentInBytes); |
2327 | initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes); |
2328 | return RValue::get(AI); |
2329 | } |
2330 | |
2331 | case Builtin::BI__builtin_alloca_with_align: { |
2332 | Value *Size = EmitScalarExpr(E->getArg(0)); |
2333 | Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); |
2334 | auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue); |
2335 | unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); |
2336 | unsigned AlignmentInBytes = |
2337 | CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity(); |
2338 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
2339 | AI->setAlignment(AlignmentInBytes); |
2340 | initializeAlloca(*this, AI, Size, AlignmentInBytes); |
2341 | return RValue::get(AI); |
2342 | } |
2343 | |
2344 | case Builtin::BIbzero: |
2345 | case Builtin::BI__builtin_bzero: { |
2346 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2347 | Value *SizeVal = EmitScalarExpr(E->getArg(1)); |
2348 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
2349 | E->getArg(0)->getExprLoc(), FD, 0); |
2350 | Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); |
2351 | return RValue::get(nullptr); |
2352 | } |
2353 | case Builtin::BImemcpy: |
2354 | case Builtin::BI__builtin_memcpy: { |
2355 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2356 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
2357 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
2358 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
2359 | E->getArg(0)->getExprLoc(), FD, 0); |
2360 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
2361 | E->getArg(1)->getExprLoc(), FD, 1); |
2362 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
2363 | return RValue::get(Dest.getPointer()); |
2364 | } |
2365 | |
2366 | case Builtin::BI__builtin_char_memchr: |
2367 | BuiltinID = Builtin::BI__builtin_memchr; |
2368 | break; |
2369 | |
2370 | case Builtin::BI__builtin___memcpy_chk: { |
2371 | // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. |
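     | // e.g. __builtin___memcpy_chk(d, s, 16, 32) becomes a plain 16-byte memcpy,
     | // since the copy size (16) is known not to exceed the object size (32).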
2372 | Expr::EvalResult SizeResult, DstSizeResult; |
2373 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
2374 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
2375 | break; |
2376 | llvm::APSInt Size = SizeResult.Val.getInt(); |
2377 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
2378 | if (Size.ugt(DstSize)) |
2379 | break; |
2380 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2381 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
2382 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
2383 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
2384 | return RValue::get(Dest.getPointer()); |
2385 | } |
2386 | |
2387 | case Builtin::BI__builtin_objc_memmove_collectable: { |
2388 | Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); |
2389 | Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); |
2390 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
2391 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, |
2392 | DestAddr, SrcAddr, SizeVal); |
2393 | return RValue::get(DestAddr.getPointer()); |
2394 | } |
2395 | |
2396 | case Builtin::BI__builtin___memmove_chk: { |
2397 | // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2. |
2398 | Expr::EvalResult SizeResult, DstSizeResult; |
2399 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
2400 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
2401 | break; |
2402 | llvm::APSInt Size = SizeResult.Val.getInt(); |
2403 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
2404 | if (Size.ugt(DstSize)) |
2405 | break; |
2406 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2407 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
2408 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
2409 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
2410 | return RValue::get(Dest.getPointer()); |
2411 | } |
2412 | |
2413 | case Builtin::BImemmove: |
2414 | case Builtin::BI__builtin_memmove: { |
2415 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2416 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
2417 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
2418 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
2419 | E->getArg(0)->getExprLoc(), FD, 0); |
2420 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
2421 | E->getArg(1)->getExprLoc(), FD, 1); |
2422 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
2423 | return RValue::get(Dest.getPointer()); |
2424 | } |
2425 | case Builtin::BImemset: |
2426 | case Builtin::BI__builtin_memset: { |
2427 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2428 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
2429 | Builder.getInt8Ty()); |
2430 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
2431 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
2432 | E->getArg(0)->getExprLoc(), FD, 0); |
2433 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
2434 | return RValue::get(Dest.getPointer()); |
2435 | } |
2436 | case Builtin::BI__builtin___memset_chk: { |
2437 | // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. |
2438 | Expr::EvalResult SizeResult, DstSizeResult; |
2439 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
2440 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
2441 | break; |
2442 | llvm::APSInt Size = SizeResult.Val.getInt(); |
2443 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
2444 | if (Size.ugt(DstSize)) |
2445 | break; |
2446 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
2447 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
2448 | Builder.getInt8Ty()); |
2449 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
2450 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
2451 | return RValue::get(Dest.getPointer()); |
2452 | } |
2453 | case Builtin::BI__builtin_wmemcmp: { |
2454 | // The MSVC runtime library does not provide a definition of wmemcmp, so we |
2455 | // need an inline implementation. |
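     | // The control flow emitted below is roughly equivalent to this C sketch
     | // (illustrative only; wchar_t is an unsigned 16-bit type on MSVC targets,
     | // hence the unsigned comparisons):
     | //   int wmemcmp(const wchar_t *s1, const wchar_t *s2, size_t n) {
     | //     for (; n; --n, ++s1, ++s2) {
     | //       if (*s1 > *s2) return 1;
     | //       if (*s1 < *s2) return -1;
     | //     }
     | //     return 0;
     | //   }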
2456 | if (!getTarget().getTriple().isOSMSVCRT()) |
2457 | break; |
2458 | |
2459 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
2460 | |
2461 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
2462 | Value *Src = EmitScalarExpr(E->getArg(1)); |
2463 | Value *Size = EmitScalarExpr(E->getArg(2)); |
2464 | |
2465 | BasicBlock *Entry = Builder.GetInsertBlock(); |
2466 | BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt"); |
2467 | BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt"); |
2468 | BasicBlock *Next = createBasicBlock("wmemcmp.next"); |
2469 | BasicBlock *Exit = createBasicBlock("wmemcmp.exit"); |
2470 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
2471 | Builder.CreateCondBr(SizeEq0, Exit, CmpGT); |
2472 | |
2473 | EmitBlock(CmpGT); |
2474 | PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2); |
2475 | DstPhi->addIncoming(Dst, Entry); |
2476 | PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2); |
2477 | SrcPhi->addIncoming(Src, Entry); |
2478 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
2479 | SizePhi->addIncoming(Size, Entry); |
2480 | CharUnits WCharAlign = |
2481 | getContext().getTypeAlignInChars(getContext().WCharTy); |
2482 | Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign); |
2483 | Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign); |
2484 | Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh); |
2485 | Builder.CreateCondBr(DstGtSrc, Exit, CmpLT); |
2486 | |
2487 | EmitBlock(CmpLT); |
2488 | Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh); |
2489 | Builder.CreateCondBr(DstLtSrc, Exit, Next); |
2490 | |
2491 | EmitBlock(Next); |
2492 | Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1); |
2493 | Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1); |
2494 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
2495 | Value *NextSizeEq0 = |
2496 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
2497 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT); |
2498 | DstPhi->addIncoming(NextDst, Next); |
2499 | SrcPhi->addIncoming(NextSrc, Next); |
2500 | SizePhi->addIncoming(NextSize, Next); |
2501 | |
2502 | EmitBlock(Exit); |
2503 | PHINode *Ret = Builder.CreatePHI(IntTy, 4); |
2504 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry); |
2505 | Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT); |
2506 | Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT); |
2507 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Next); |
2508 | return RValue::get(Ret); |
2509 | } |
2510 | case Builtin::BI__builtin_dwarf_cfa: { |
2511 | // The offset in bytes from the first argument to the CFA. |
2512 | // |
2513 | // Why on earth is this in the frontend? Is there any reason at |
2514 | // all that the backend can't reasonably determine this while |
2515 | // lowering llvm.eh.dwarf.cfa()? |
2516 | // |
2517 | // TODO: If there's a satisfactory reason, add a target hook for |
2518 | // this instead of hard-coding 0, which is correct for most targets. |
2519 | int32_t Offset = 0; |
2520 | |
2521 | Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); |
2522 | return RValue::get(Builder.CreateCall(F, |
2523 | llvm::ConstantInt::get(Int32Ty, Offset))); |
2524 | } |
2525 | case Builtin::BI__builtin_return_address: { |
2526 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
2527 | getContext().UnsignedIntTy); |
2528 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
2529 | return RValue::get(Builder.CreateCall(F, Depth)); |
2530 | } |
2531 | case Builtin::BI_ReturnAddress: { |
2532 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
2533 | return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); |
2534 | } |
2535 | case Builtin::BI__builtin_frame_address: { |
2536 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
2537 | getContext().UnsignedIntTy); |
2538 | Function *F = CGM.getIntrinsic(Intrinsic::frameaddress); |
2539 | return RValue::get(Builder.CreateCall(F, Depth)); |
2540 | } |
2541 | case Builtin::BI__builtin_extract_return_addr: { |
2542 | Value *Address = EmitScalarExpr(E->getArg(0)); |
2543 | Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); |
2544 | return RValue::get(Result); |
2545 | } |
2546 | case Builtin::BI__builtin_frob_return_addr: { |
2547 | Value *Address = EmitScalarExpr(E->getArg(0)); |
2548 | Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); |
2549 | return RValue::get(Result); |
2550 | } |
2551 | case Builtin::BI__builtin_dwarf_sp_column: { |
2552 | llvm::IntegerType *Ty |
2553 | = cast<llvm::IntegerType>(ConvertType(E->getType())); |
2554 | int Column = getTargetHooks().getDwarfEHStackPointer(CGM); |
2555 | if (Column == -1) { |
2556 | CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); |
2557 | return RValue::get(llvm::UndefValue::get(Ty)); |
2558 | } |
2559 | return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); |
2560 | } |
2561 | case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
2562 | Value *Address = EmitScalarExpr(E->getArg(0)); |
2563 | if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) |
2564 | CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); |
2565 | return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); |
2566 | } |
2567 | case Builtin::BI__builtin_eh_return: { |
2568 | Value *Int = EmitScalarExpr(E->getArg(0)); |
2569 | Value *Ptr = EmitScalarExpr(E->getArg(1)); |
2570 | |
2571 | llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); |
2572 | assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
2573 | "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
2574 | Function *F = |
2575 | CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 |
2576 | : Intrinsic::eh_return_i64); |
2577 | Builder.CreateCall(F, {Int, Ptr}); |
2578 | Builder.CreateUnreachable(); |
2579 | |
2580 | // We do need to preserve an insertion point. |
2581 | EmitBlock(createBasicBlock("builtin_eh_return.cont")); |
2582 | |
2583 | return RValue::get(nullptr); |
2584 | } |
2585 | case Builtin::BI__builtin_unwind_init: { |
2586 | Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); |
2587 | return RValue::get(Builder.CreateCall(F)); |
2588 | } |
2589 | case Builtin::BI__builtin_extend_pointer: { |
2590 | // Extends a pointer to the size of an _Unwind_Word, which is |
2591 | // uint64_t on all platforms. Generally this gets poked into a |
2592 | // register and eventually used as an address, so if the |
2593 | // addressing registers are wider than pointers and the platform |
2594 | // doesn't implicitly ignore high-order bits when doing |
2595 | // addressing, we need to make sure we zext / sext based on |
2596 | // the platform's expectations. |
2597 | // |
2598 | // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html |
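     | // For illustration, on a 32-bit target that zero-extends pointers (the
     | // common case), this emits roughly:
     | //   %i   = ptrtoint i8* %p to i32
     | //   %ext = zext i32 %i to i64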
2599 | |
2600 | // Cast the pointer to intptr_t. |
2601 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
2602 | Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); |
2603 | |
2604 | // If that's 64 bits, we're done. |
2605 | if (IntPtrTy->getBitWidth() == 64) |
2606 | return RValue::get(Result); |
2607 | |
2608 | // Otherwise, ask the codegen data what to do. |
2609 | if (getTargetHooks().extendPointerWithSExt()) |
2610 | return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); |
2611 | else |
2612 | return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); |
2613 | } |
2614 | case Builtin::BI__builtin_setjmp: { |
2615 | // Buffer is a void**. |
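     | // The stores below fill in the buffer layout that llvm.eh.sjlj.setjmp
     | // expects: slot 0 holds the frame address and slot 2 the saved stack
     | // pointer; slot 1 is reserved for the resume address filled in when the
     | // intrinsic is lowered.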
2616 | Address Buf = EmitPointerWithAlignment(E->getArg(0)); |
2617 | |
2618 | // Store the frame pointer to the setjmp buffer. |
2619 | Value *FrameAddr = |
2620 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), |
2621 | ConstantInt::get(Int32Ty, 0)); |
2622 | Builder.CreateStore(FrameAddr, Buf); |
2623 | |
2624 | // Store the stack pointer to the setjmp buffer. |
2625 | Value *StackAddr = |
2626 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); |
2627 | Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); |
2628 | Builder.CreateStore(StackAddr, StackSaveSlot); |
2629 | |
2630 | // Call LLVM's EH setjmp, which is lightweight. |
2631 | Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); |
2632 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
2633 | return RValue::get(Builder.CreateCall(F, Buf.getPointer())); |
2634 | } |
2635 | case Builtin::BI__builtin_longjmp: { |
2636 | Value *Buf = EmitScalarExpr(E->getArg(0)); |
2637 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
2638 | |
2639 | // Call LLVM's EH longjmp, which is lightweight. |
2640 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); |
2641 | |
2642 | // longjmp doesn't return; mark this as unreachable. |
2643 | Builder.CreateUnreachable(); |
2644 | |
2645 | // We do need to preserve an insertion point. |
2646 | EmitBlock(createBasicBlock("longjmp.cont")); |
2647 | |
2648 | return RValue::get(nullptr); |
2649 | } |
2650 | case Builtin::BI__builtin_launder: { |
2651 | const Expr *Arg = E->getArg(0); |
2652 | QualType ArgTy = Arg->getType()->getPointeeType(); |
2653 | Value *Ptr = EmitScalarExpr(Arg); |
2654 | if (TypeRequiresBuiltinLaunder(CGM, ArgTy)) |
2655 | Ptr = Builder.CreateLaunderInvariantGroup(Ptr); |
2656 | |
2657 | return RValue::get(Ptr); |
2658 | } |
2659 | case Builtin::BI__sync_fetch_and_add: |
2660 | case Builtin::BI__sync_fetch_and_sub: |
2661 | case Builtin::BI__sync_fetch_and_or: |
2662 | case Builtin::BI__sync_fetch_and_and: |
2663 | case Builtin::BI__sync_fetch_and_xor: |
2664 | case Builtin::BI__sync_fetch_and_nand: |
2665 | case Builtin::BI__sync_add_and_fetch: |
2666 | case Builtin::BI__sync_sub_and_fetch: |
2667 | case Builtin::BI__sync_and_and_fetch: |
2668 | case Builtin::BI__sync_or_and_fetch: |
2669 | case Builtin::BI__sync_xor_and_fetch: |
2670 | case Builtin::BI__sync_nand_and_fetch: |
2671 | case Builtin::BI__sync_val_compare_and_swap: |
2672 | case Builtin::BI__sync_bool_compare_and_swap: |
2673 | case Builtin::BI__sync_lock_test_and_set: |
2674 | case Builtin::BI__sync_lock_release: |
2675 | case Builtin::BI__sync_swap: |
2676 | llvm_unreachable("Shouldn't make it through sema")::llvm::llvm_unreachable_internal("Shouldn't make it through sema" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 2676); |
2677 | case Builtin::BI__sync_fetch_and_add_1: |
2678 | case Builtin::BI__sync_fetch_and_add_2: |
2679 | case Builtin::BI__sync_fetch_and_add_4: |
2680 | case Builtin::BI__sync_fetch_and_add_8: |
2681 | case Builtin::BI__sync_fetch_and_add_16: |
2682 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); |
2683 | case Builtin::BI__sync_fetch_and_sub_1: |
2684 | case Builtin::BI__sync_fetch_and_sub_2: |
2685 | case Builtin::BI__sync_fetch_and_sub_4: |
2686 | case Builtin::BI__sync_fetch_and_sub_8: |
2687 | case Builtin::BI__sync_fetch_and_sub_16: |
2688 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); |
2689 | case Builtin::BI__sync_fetch_and_or_1: |
2690 | case Builtin::BI__sync_fetch_and_or_2: |
2691 | case Builtin::BI__sync_fetch_and_or_4: |
2692 | case Builtin::BI__sync_fetch_and_or_8: |
2693 | case Builtin::BI__sync_fetch_and_or_16: |
2694 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); |
2695 | case Builtin::BI__sync_fetch_and_and_1: |
2696 | case Builtin::BI__sync_fetch_and_and_2: |
2697 | case Builtin::BI__sync_fetch_and_and_4: |
2698 | case Builtin::BI__sync_fetch_and_and_8: |
2699 | case Builtin::BI__sync_fetch_and_and_16: |
2700 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); |
2701 | case Builtin::BI__sync_fetch_and_xor_1: |
2702 | case Builtin::BI__sync_fetch_and_xor_2: |
2703 | case Builtin::BI__sync_fetch_and_xor_4: |
2704 | case Builtin::BI__sync_fetch_and_xor_8: |
2705 | case Builtin::BI__sync_fetch_and_xor_16: |
2706 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); |
2707 | case Builtin::BI__sync_fetch_and_nand_1: |
2708 | case Builtin::BI__sync_fetch_and_nand_2: |
2709 | case Builtin::BI__sync_fetch_and_nand_4: |
2710 | case Builtin::BI__sync_fetch_and_nand_8: |
2711 | case Builtin::BI__sync_fetch_and_nand_16: |
2712 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); |
2713 | |
2714 | // Clang extensions: not overloaded yet. |
2715 | case Builtin::BI__sync_fetch_and_min: |
2716 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); |
2717 | case Builtin::BI__sync_fetch_and_max: |
2718 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); |
2719 | case Builtin::BI__sync_fetch_and_umin: |
2720 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); |
2721 | case Builtin::BI__sync_fetch_and_umax: |
2722 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); |
2723 | |
2724 | case Builtin::BI__sync_add_and_fetch_1: |
2725 | case Builtin::BI__sync_add_and_fetch_2: |
2726 | case Builtin::BI__sync_add_and_fetch_4: |
2727 | case Builtin::BI__sync_add_and_fetch_8: |
2728 | case Builtin::BI__sync_add_and_fetch_16: |
2729 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, |
2730 | llvm::Instruction::Add); |
2731 | case Builtin::BI__sync_sub_and_fetch_1: |
2732 | case Builtin::BI__sync_sub_and_fetch_2: |
2733 | case Builtin::BI__sync_sub_and_fetch_4: |
2734 | case Builtin::BI__sync_sub_and_fetch_8: |
2735 | case Builtin::BI__sync_sub_and_fetch_16: |
2736 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, |
2737 | llvm::Instruction::Sub); |
2738 | case Builtin::BI__sync_and_and_fetch_1: |
2739 | case Builtin::BI__sync_and_and_fetch_2: |
2740 | case Builtin::BI__sync_and_and_fetch_4: |
2741 | case Builtin::BI__sync_and_and_fetch_8: |
2742 | case Builtin::BI__sync_and_and_fetch_16: |
2743 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, |
2744 | llvm::Instruction::And); |
2745 | case Builtin::BI__sync_or_and_fetch_1: |
2746 | case Builtin::BI__sync_or_and_fetch_2: |
2747 | case Builtin::BI__sync_or_and_fetch_4: |
2748 | case Builtin::BI__sync_or_and_fetch_8: |
2749 | case Builtin::BI__sync_or_and_fetch_16: |
2750 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, |
2751 | llvm::Instruction::Or); |
2752 | case Builtin::BI__sync_xor_and_fetch_1: |
2753 | case Builtin::BI__sync_xor_and_fetch_2: |
2754 | case Builtin::BI__sync_xor_and_fetch_4: |
2755 | case Builtin::BI__sync_xor_and_fetch_8: |
2756 | case Builtin::BI__sync_xor_and_fetch_16: |
2757 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, |
2758 | llvm::Instruction::Xor); |
2759 | case Builtin::BI__sync_nand_and_fetch_1: |
2760 | case Builtin::BI__sync_nand_and_fetch_2: |
2761 | case Builtin::BI__sync_nand_and_fetch_4: |
2762 | case Builtin::BI__sync_nand_and_fetch_8: |
2763 | case Builtin::BI__sync_nand_and_fetch_16: |
2764 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, |
2765 | llvm::Instruction::And, true); |
2766 | |
2767 | case Builtin::BI__sync_val_compare_and_swap_1: |
2768 | case Builtin::BI__sync_val_compare_and_swap_2: |
2769 | case Builtin::BI__sync_val_compare_and_swap_4: |
2770 | case Builtin::BI__sync_val_compare_and_swap_8: |
2771 | case Builtin::BI__sync_val_compare_and_swap_16: |
2772 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); |
2773 | |
2774 | case Builtin::BI__sync_bool_compare_and_swap_1: |
2775 | case Builtin::BI__sync_bool_compare_and_swap_2: |
2776 | case Builtin::BI__sync_bool_compare_and_swap_4: |
2777 | case Builtin::BI__sync_bool_compare_and_swap_8: |
2778 | case Builtin::BI__sync_bool_compare_and_swap_16: |
2779 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); |
2780 | |
2781 | case Builtin::BI__sync_swap_1: |
2782 | case Builtin::BI__sync_swap_2: |
2783 | case Builtin::BI__sync_swap_4: |
2784 | case Builtin::BI__sync_swap_8: |
2785 | case Builtin::BI__sync_swap_16: |
2786 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
2787 | |
2788 | case Builtin::BI__sync_lock_test_and_set_1: |
2789 | case Builtin::BI__sync_lock_test_and_set_2: |
2790 | case Builtin::BI__sync_lock_test_and_set_4: |
2791 | case Builtin::BI__sync_lock_test_and_set_8: |
2792 | case Builtin::BI__sync_lock_test_and_set_16: |
2793 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
2794 | |
2795 | case Builtin::BI__sync_lock_release_1: |
2796 | case Builtin::BI__sync_lock_release_2: |
2797 | case Builtin::BI__sync_lock_release_4: |
2798 | case Builtin::BI__sync_lock_release_8: |
2799 | case Builtin::BI__sync_lock_release_16: { |
2800 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
2801 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
2802 | CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); |
2803 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), |
2804 | StoreSize.getQuantity() * 8); |
2805 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
2806 | llvm::StoreInst *Store = |
2807 | Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, |
2808 | StoreSize); |
2809 | Store->setAtomic(llvm::AtomicOrdering::Release); |
2810 | return RValue::get(nullptr); |
2811 | } |
2812 | |
2813 | case Builtin::BI__sync_synchronize: { |
2814 | // We assume this is supposed to correspond to a C++0x-style |
2815 | // sequentially-consistent fence (i.e. this is only usable for |
2816 | // synchronization, not device I/O or anything like that). This intrinsic |
2817 | // is really badly designed in the sense that in theory, there isn't |
2818 | // any way to safely use it... but in practice, it mostly works |
2819 | // to use it with non-atomic loads and stores to get acquire/release |
2820 | // semantics. |
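     | // In IR terms this is a single instruction:
     | //   fence seq_cst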
2821 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); |
2822 | return RValue::get(nullptr); |
2823 | } |
2824 | |
2825 | case Builtin::BI__builtin_nontemporal_load: |
2826 | return RValue::get(EmitNontemporalLoad(*this, E)); |
2827 | case Builtin::BI__builtin_nontemporal_store: |
2828 | return RValue::get(EmitNontemporalStore(*this, E)); |
2829 | case Builtin::BI__c11_atomic_is_lock_free: |
2830 | case Builtin::BI__atomic_is_lock_free: { |
2831 | // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the |
2832 | // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since |
2833 | // _Atomic(T) is always properly-aligned. |
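     | // e.g. __c11_atomic_is_lock_free(sizeof(long)) emits a call equivalent to
     | // __atomic_is_lock_free(sizeof(long), /*ptr=*/NULL).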
2834 | const char *LibCallName = "__atomic_is_lock_free"; |
2835 | CallArgList Args; |
2836 | Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), |
2837 | getContext().getSizeType()); |
2838 | if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
2839 | Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), |
2840 | getContext().VoidPtrTy); |
2841 | else |
2842 | Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), |
2843 | getContext().VoidPtrTy); |
2844 | const CGFunctionInfo &FuncInfo = |
2845 | CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); |
2846 | llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); |
2847 | llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName); |
2848 | return EmitCall(FuncInfo, CGCallee::forDirect(Func), |
2849 | ReturnValueSlot(), Args); |
2850 | } |
2851 | |
2852 | case Builtin::BI__atomic_test_and_set: { |
2853 | // Look at the argument type to determine whether this is a volatile |
2854 | // operation. The parameter type is always volatile. |
2855 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
2856 | bool Volatile = |
2857 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
2858 | |
2859 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
2860 | unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); |
2861 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
2862 | Value *NewVal = Builder.getInt8(1); |
2863 | Value *Order = EmitScalarExpr(E->getArg(1)); |
2864 | if (isa<llvm::ConstantInt>(Order)) { |
2865 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
2866 | AtomicRMWInst *Result = nullptr; |
2867 | switch (ord) { |
2868 | case 0: // memory_order_relaxed |
2869 | default: // invalid order |
2870 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
2871 | llvm::AtomicOrdering::Monotonic); |
2872 | break; |
2873 | case 1: // memory_order_consume |
2874 | case 2: // memory_order_acquire |
2875 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
2876 | llvm::AtomicOrdering::Acquire); |
2877 | break; |
2878 | case 3: // memory_order_release |
2879 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
2880 | llvm::AtomicOrdering::Release); |
2881 | break; |
2882 | case 4: // memory_order_acq_rel |
2883 | |
2884 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
2885 | llvm::AtomicOrdering::AcquireRelease); |
2886 | break; |
2887 | case 5: // memory_order_seq_cst |
2888 | Result = Builder.CreateAtomicRMW( |
2889 | llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
2890 | llvm::AtomicOrdering::SequentiallyConsistent); |
2891 | break; |
2892 | } |
2893 | Result->setVolatile(Volatile); |
2894 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
2895 | } |
2896 | |
2897 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
2898 | |
2899 | llvm::BasicBlock *BBs[5] = { |
2900 | createBasicBlock("monotonic", CurFn), |
2901 | createBasicBlock("acquire", CurFn), |
2902 | createBasicBlock("release", CurFn), |
2903 | createBasicBlock("acqrel", CurFn), |
2904 | createBasicBlock("seqcst", CurFn) |
2905 | }; |
2906 | llvm::AtomicOrdering Orders[5] = { |
2907 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
2908 | llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
2909 | llvm::AtomicOrdering::SequentiallyConsistent}; |
2910 | |
2911 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
2912 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
2913 | |
2914 | Builder.SetInsertPoint(ContBB); |
2915 | PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); |
2916 | |
2917 | for (unsigned i = 0; i < 5; ++i) { |
2918 | Builder.SetInsertPoint(BBs[i]); |
2919 | AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, |
2920 | Ptr, NewVal, Orders[i]); |
2921 | RMW->setVolatile(Volatile); |
2922 | Result->addIncoming(RMW, BBs[i]); |
2923 | Builder.CreateBr(ContBB); |
2924 | } |
2925 | |
2926 | SI->addCase(Builder.getInt32(0), BBs[0]); |
2927 | SI->addCase(Builder.getInt32(1), BBs[1]); |
2928 | SI->addCase(Builder.getInt32(2), BBs[1]); |
2929 | SI->addCase(Builder.getInt32(3), BBs[2]); |
2930 | SI->addCase(Builder.getInt32(4), BBs[3]); |
2931 | SI->addCase(Builder.getInt32(5), BBs[4]); |
2932 | |
2933 | Builder.SetInsertPoint(ContBB); |
2934 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
2935 | } |
2936 | |
2937 | case Builtin::BI__atomic_clear: { |
2938 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
2939 | bool Volatile = |
2940 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
2941 | |
2942 | Address Ptr = EmitPointerWithAlignment(E->getArg(0)); |
2943 | unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); |
2944 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
2945 | Value *NewVal = Builder.getInt8(0); |
2946 | Value *Order = EmitScalarExpr(E->getArg(1)); |
2947 | if (isa<llvm::ConstantInt>(Order)) { |
2948 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
2949 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
2950 | switch (ord) { |
2951 | case 0: // memory_order_relaxed |
2952 | default: // invalid order |
2953 | Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
2954 | break; |
2955 | case 3: // memory_order_release |
2956 | Store->setOrdering(llvm::AtomicOrdering::Release); |
2957 | break; |
2958 | case 5: // memory_order_seq_cst |
2959 | Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
2960 | break; |
2961 | } |
2962 | return RValue::get(nullptr); |
2963 | } |
2964 | |
2965 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
2966 | |
2967 | llvm::BasicBlock *BBs[3] = { |
2968 | createBasicBlock("monotonic", CurFn), |
2969 | createBasicBlock("release", CurFn), |
2970 | createBasicBlock("seqcst", CurFn) |
2971 | }; |
2972 | llvm::AtomicOrdering Orders[3] = { |
2973 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
2974 | llvm::AtomicOrdering::SequentiallyConsistent}; |
2975 | |
2976 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
2977 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
2978 | |
2979 | for (unsigned i = 0; i < 3; ++i) { |
2980 | Builder.SetInsertPoint(BBs[i]); |
2981 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
2982 | Store->setOrdering(Orders[i]); |
2983 | Builder.CreateBr(ContBB); |
2984 | } |
2985 | |
2986 | SI->addCase(Builder.getInt32(0), BBs[0]); |
2987 | SI->addCase(Builder.getInt32(3), BBs[1]); |
2988 | SI->addCase(Builder.getInt32(5), BBs[2]); |
2989 | |
2990 | Builder.SetInsertPoint(ContBB); |
2991 | return RValue::get(nullptr); |
2992 | } |
2993 | |
2994 | case Builtin::BI__atomic_thread_fence: |
2995 | case Builtin::BI__atomic_signal_fence: |
2996 | case Builtin::BI__c11_atomic_thread_fence: |
2997 | case Builtin::BI__c11_atomic_signal_fence: { |
2998 | llvm::SyncScope::ID SSID; |
2999 | if (BuiltinID == Builtin::BI__atomic_signal_fence || |
3000 | BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
3001 | SSID = llvm::SyncScope::SingleThread; |
3002 | else |
3003 | SSID = llvm::SyncScope::System; |
3004 | Value *Order = EmitScalarExpr(E->getArg(0)); |
3005 | if (isa<llvm::ConstantInt>(Order)) { |
3006 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
3007 | switch (ord) { |
3008 | case 0: // memory_order_relaxed |
3009 | default: // invalid order |
3010 | break; |
3011 | case 1: // memory_order_consume |
3012 | case 2: // memory_order_acquire |
3013 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
3014 | break; |
3015 | case 3: // memory_order_release |
3016 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
3017 | break; |
3018 | case 4: // memory_order_acq_rel |
3019 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
3020 | break; |
3021 | case 5: // memory_order_seq_cst |
3022 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
3023 | break; |
3024 | } |
3025 | return RValue::get(nullptr); |
3026 | } |
3027 | |
3028 | llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
3029 | AcquireBB = createBasicBlock("acquire", CurFn); |
3030 | ReleaseBB = createBasicBlock("release", CurFn); |
3031 | AcqRelBB = createBasicBlock("acqrel", CurFn); |
3032 | SeqCstBB = createBasicBlock("seqcst", CurFn); |
3033 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
3034 | |
3035 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
3036 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); |
3037 | |
3038 | Builder.SetInsertPoint(AcquireBB); |
3039 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
3040 | Builder.CreateBr(ContBB); |
3041 | SI->addCase(Builder.getInt32(1), AcquireBB); |
3042 | SI->addCase(Builder.getInt32(2), AcquireBB); |
3043 | |
3044 | Builder.SetInsertPoint(ReleaseBB); |
3045 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
3046 | Builder.CreateBr(ContBB); |
3047 | SI->addCase(Builder.getInt32(3), ReleaseBB); |
3048 | |
3049 | Builder.SetInsertPoint(AcqRelBB); |
3050 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
3051 | Builder.CreateBr(ContBB); |
3052 | SI->addCase(Builder.getInt32(4), AcqRelBB); |
3053 | |
3054 | Builder.SetInsertPoint(SeqCstBB); |
3055 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
3056 | Builder.CreateBr(ContBB); |
3057 | SI->addCase(Builder.getInt32(5), SeqCstBB); |
3058 | |
3059 | Builder.SetInsertPoint(ContBB); |
3060 | return RValue::get(nullptr); |
3061 | } |
3062 | |
3063 | case Builtin::BI__builtin_signbit: |
3064 | case Builtin::BI__builtin_signbitf: |
3065 | case Builtin::BI__builtin_signbitl: { |
3066 | return RValue::get( |
3067 | Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), |
3068 | ConvertType(E->getType()))); |
3069 | } |
3070 | case Builtin::BI__annotation: { |
3071 | // Re-encode each wide string to UTF8 and make an MDString. |
3072 | SmallVector<Metadata *, 1> Strings; |
3073 | for (const Expr *Arg : E->arguments()) { |
3074 | const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); |
3075 | assert(Str->getCharByteWidth() == 2);
3076 | StringRef WideBytes = Str->getBytes(); |
3077 | std::string StrUtf8; |
3078 | if (!convertUTF16ToUTF8String( |
3079 | makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) { |
3080 | CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); |
3081 | continue; |
3082 | } |
3083 | Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8)); |
3084 | } |
3085 | |
3086 | // Build an MDTuple of MDStrings and emit the intrinsic call.
3087 | llvm::Function *F = |
3088 | CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {}); |
3089 | MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings); |
3090 | Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple)); |
3091 | return RValue::getIgnored(); |
3092 | } |
3093 | case Builtin::BI__builtin_annotation: { |
3094 | llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); |
3095 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, |
3096 | AnnVal->getType()); |
3097 | |
3098 | // Get the annotation string, go through casts. Sema requires this to be a |
3099 | // non-wide string literal, potentially cast, so the cast<> is safe.
3100 | const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); |
3101 | StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); |
3102 | return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc())); |
3103 | } |
3104 | case Builtin::BI__builtin_addcb: |
3105 | case Builtin::BI__builtin_addcs: |
3106 | case Builtin::BI__builtin_addc: |
3107 | case Builtin::BI__builtin_addcl: |
3108 | case Builtin::BI__builtin_addcll: |
3109 | case Builtin::BI__builtin_subcb: |
3110 | case Builtin::BI__builtin_subcs: |
3111 | case Builtin::BI__builtin_subc: |
3112 | case Builtin::BI__builtin_subcl: |
3113 | case Builtin::BI__builtin_subcll: { |
3114 | |
3115 | // We translate all of these builtins from expressions of the form: |
3116 | // int x = ..., y = ..., carryin = ..., carryout, result; |
3117 | // result = __builtin_addc(x, y, carryin, &carryout); |
3118 | // |
3119 | // to LLVM IR of the form: |
3120 | // |
3121 | // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) |
3122 | // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 |
3123 | // %carry1 = extractvalue {i32, i1} %tmp1, 1 |
3124 | // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, |
3125 | // i32 %carryin) |
3126 | // %result = extractvalue {i32, i1} %tmp2, 0 |
3127 | // %carry2 = extractvalue {i32, i1} %tmp2, 1 |
3128 | // %tmp3 = or i1 %carry1, %carry2 |
3129 | // %tmp4 = zext i1 %tmp3 to i32 |
3130 | // store i32 %tmp4, i32* %carryout |
3131 | |
3132 | // Scalarize our inputs. |
3133 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
3134 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
3135 | llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); |
3136 | Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
3137 | |
3138 | // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. |
3139 | llvm::Intrinsic::ID IntrinsicId; |
3140 | switch (BuiltinID) { |
3141 | default: llvm_unreachable("Unknown multiprecision builtin id.");
3142 | case Builtin::BI__builtin_addcb: |
3143 | case Builtin::BI__builtin_addcs: |
3144 | case Builtin::BI__builtin_addc: |
3145 | case Builtin::BI__builtin_addcl: |
3146 | case Builtin::BI__builtin_addcll: |
3147 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
3148 | break; |
3149 | case Builtin::BI__builtin_subcb: |
3150 | case Builtin::BI__builtin_subcs: |
3151 | case Builtin::BI__builtin_subc: |
3152 | case Builtin::BI__builtin_subcl: |
3153 | case Builtin::BI__builtin_subcll: |
3154 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
3155 | break; |
3156 | } |
3157 | |
3158 | // Construct our resulting LLVM IR expression. |
3159 | llvm::Value *Carry1; |
3160 | llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, |
3161 | X, Y, Carry1); |
3162 | llvm::Value *Carry2; |
3163 | llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, |
3164 | Sum1, Carryin, Carry2); |
3165 | llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), |
3166 | X->getType()); |
3167 | Builder.CreateStore(CarryOut, CarryOutPtr); |
3168 | return RValue::get(Sum2); |
3169 | } |
3170 | |
3171 | case Builtin::BI__builtin_add_overflow: |
3172 | case Builtin::BI__builtin_sub_overflow: |
3173 | case Builtin::BI__builtin_mul_overflow: { |
3174 | const clang::Expr *LeftArg = E->getArg(0); |
3175 | const clang::Expr *RightArg = E->getArg(1); |
3176 | const clang::Expr *ResultArg = E->getArg(2); |
3177 | |
3178 | clang::QualType ResultQTy = |
3179 | ResultArg->getType()->castAs<PointerType>()->getPointeeType(); |
3180 | |
3181 | WidthAndSignedness LeftInfo = |
3182 | getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); |
3183 | WidthAndSignedness RightInfo = |
3184 | getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); |
3185 | WidthAndSignedness ResultInfo = |
3186 | getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy); |
3187 | |
3188 | // Handle mixed-sign multiplication as a special case, because adding |
3189 | // runtime or backend support for our generic irgen would be too expensive. |
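     | // (e.g. __builtin_mul_overflow on an 'int' and an 'unsigned' operand with an
     | // 'int' result, which has no single [su]mul.with.overflow form.)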
3190 | if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo)) |
3191 | return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg, |
3192 | RightInfo, ResultArg, ResultQTy, |
3193 | ResultInfo); |
3194 | |
3195 | WidthAndSignedness EncompassingInfo = |
3196 | EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); |
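     | // For illustration: if all three types are 'unsigned int', the encompassing
     | // type is a 32-bit unsigned integer and the operation lowers to
     | // llvm.uadd.with.overflow.i32; mixed signedness or widths instead yield a
     | // wider (possibly odd-width) signed encompassing type.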
3197 | |
3198 | llvm::Type *EncompassingLLVMTy = |
3199 | llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); |
3200 | |
3201 | llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy); |
3202 | |
3203 | llvm::Intrinsic::ID IntrinsicId; |
3204 | switch (BuiltinID) { |
3205 | default: |
3206 | llvm_unreachable("Unknown overflow builtin id.")::llvm::llvm_unreachable_internal("Unknown overflow builtin id." , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3206); |
3207 | case Builtin::BI__builtin_add_overflow: |
3208 | IntrinsicId = EncompassingInfo.Signed |
3209 | ? llvm::Intrinsic::sadd_with_overflow |
3210 | : llvm::Intrinsic::uadd_with_overflow; |
3211 | break; |
3212 | case Builtin::BI__builtin_sub_overflow: |
3213 | IntrinsicId = EncompassingInfo.Signed |
3214 | ? llvm::Intrinsic::ssub_with_overflow |
3215 | : llvm::Intrinsic::usub_with_overflow; |
3216 | break; |
3217 | case Builtin::BI__builtin_mul_overflow: |
3218 | IntrinsicId = EncompassingInfo.Signed |
3219 | ? llvm::Intrinsic::smul_with_overflow |
3220 | : llvm::Intrinsic::umul_with_overflow; |
3221 | break; |
3222 | } |
3223 | |
3224 | llvm::Value *Left = EmitScalarExpr(LeftArg); |
3225 | llvm::Value *Right = EmitScalarExpr(RightArg); |
3226 | Address ResultPtr = EmitPointerWithAlignment(ResultArg); |
3227 | |
3228 | // Extend each operand to the encompassing type. |
3229 | Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed); |
3230 | Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed); |
3231 | |
3232 | // Perform the operation on the extended values. |
3233 | llvm::Value *Overflow, *Result; |
3234 | Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow); |
3235 | |
3236 | if (EncompassingInfo.Width > ResultInfo.Width) { |
3237 | // The encompassing type is wider than the result type, so we need to |
3238 | // truncate it. |
3239 | llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy); |
3240 | |
3241 | // To see if the truncation caused an overflow, we will extend |
3242 | // the result and then compare it to the original result. |
3243 | llvm::Value *ResultTruncExt = Builder.CreateIntCast( |
3244 | ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed); |
3245 | llvm::Value *TruncationOverflow = |
3246 | Builder.CreateICmpNE(Result, ResultTruncExt); |
3247 | |
3248 | Overflow = Builder.CreateOr(Overflow, TruncationOverflow); |
3249 | Result = ResultTrunc; |
3250 | } |
3251 | |
3252 | // Finally, store the result using the pointer. |
3253 | bool isVolatile = |
3254 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
3255 | Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); |
3256 | |
3257 | return RValue::get(Overflow); |
3258 | } |
3259 | |
3260 | case Builtin::BI__builtin_uadd_overflow: |
3261 | case Builtin::BI__builtin_uaddl_overflow: |
3262 | case Builtin::BI__builtin_uaddll_overflow: |
3263 | case Builtin::BI__builtin_usub_overflow: |
3264 | case Builtin::BI__builtin_usubl_overflow: |
3265 | case Builtin::BI__builtin_usubll_overflow: |
3266 | case Builtin::BI__builtin_umul_overflow: |
3267 | case Builtin::BI__builtin_umull_overflow: |
3268 | case Builtin::BI__builtin_umulll_overflow: |
3269 | case Builtin::BI__builtin_sadd_overflow: |
3270 | case Builtin::BI__builtin_saddl_overflow: |
3271 | case Builtin::BI__builtin_saddll_overflow: |
3272 | case Builtin::BI__builtin_ssub_overflow: |
3273 | case Builtin::BI__builtin_ssubl_overflow: |
3274 | case Builtin::BI__builtin_ssubll_overflow: |
3275 | case Builtin::BI__builtin_smul_overflow: |
3276 | case Builtin::BI__builtin_smull_overflow: |
3277 | case Builtin::BI__builtin_smulll_overflow: { |
3278 | |
3279 | // We translate all of these builtins directly to the relevant llvm IR node. |
3280 | |
3281 | // Scalarize our inputs. |
3282 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
3283 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
3284 | Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
3285 | |
3286 | // Decide which of the overflow intrinsics we are lowering to: |
3287 | llvm::Intrinsic::ID IntrinsicId; |
3288 | switch (BuiltinID) { |
3289 | default: llvm_unreachable("Unknown overflow builtin id.");
3290 | case Builtin::BI__builtin_uadd_overflow: |
3291 | case Builtin::BI__builtin_uaddl_overflow: |
3292 | case Builtin::BI__builtin_uaddll_overflow: |
3293 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
3294 | break; |
3295 | case Builtin::BI__builtin_usub_overflow: |
3296 | case Builtin::BI__builtin_usubl_overflow: |
3297 | case Builtin::BI__builtin_usubll_overflow: |
3298 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
3299 | break; |
3300 | case Builtin::BI__builtin_umul_overflow: |
3301 | case Builtin::BI__builtin_umull_overflow: |
3302 | case Builtin::BI__builtin_umulll_overflow: |
3303 | IntrinsicId = llvm::Intrinsic::umul_with_overflow; |
3304 | break; |
3305 | case Builtin::BI__builtin_sadd_overflow: |
3306 | case Builtin::BI__builtin_saddl_overflow: |
3307 | case Builtin::BI__builtin_saddll_overflow: |
3308 | IntrinsicId = llvm::Intrinsic::sadd_with_overflow; |
3309 | break; |
3310 | case Builtin::BI__builtin_ssub_overflow: |
3311 | case Builtin::BI__builtin_ssubl_overflow: |
3312 | case Builtin::BI__builtin_ssubll_overflow: |
3313 | IntrinsicId = llvm::Intrinsic::ssub_with_overflow; |
3314 | break; |
3315 | case Builtin::BI__builtin_smul_overflow: |
3316 | case Builtin::BI__builtin_smull_overflow: |
3317 | case Builtin::BI__builtin_smulll_overflow: |
3318 | IntrinsicId = llvm::Intrinsic::smul_with_overflow; |
3319 | break; |
3320 | } |
3321 | |
3322 | |
3323 | llvm::Value *Carry; |
3324 | llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); |
3325 | Builder.CreateStore(Sum, SumOutPtr); |
3326 | |
3327 | return RValue::get(Carry); |
3328 | } |
3329 | case Builtin::BI__builtin_addressof: |
3330 | return RValue::get(EmitLValue(E->getArg(0)).getPointer()); |
3331 | case Builtin::BI__builtin_operator_new: |
3332 | return EmitBuiltinNewDeleteCall( |
3333 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); |
3334 | case Builtin::BI__builtin_operator_delete: |
3335 | return EmitBuiltinNewDeleteCall( |
3336 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true); |
3337 | |
3338 | case Builtin::BI__noop: |
3339 | // __noop always evaluates to an integer literal zero. |
3340 | return RValue::get(ConstantInt::get(IntTy, 0)); |
3341 | case Builtin::BI__builtin_call_with_static_chain: { |
3342 | const CallExpr *Call = cast<CallExpr>(E->getArg(0)); |
3343 | const Expr *Chain = E->getArg(1); |
3344 | return EmitCall(Call->getCallee()->getType(), |
3345 | EmitCallee(Call->getCallee()), Call, ReturnValue, |
3346 | EmitScalarExpr(Chain)); |
3347 | } |
3348 | case Builtin::BI_InterlockedExchange8: |
3349 | case Builtin::BI_InterlockedExchange16: |
3350 | case Builtin::BI_InterlockedExchange: |
3351 | case Builtin::BI_InterlockedExchangePointer: |
3352 | return RValue::get( |
3353 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E)); |
3354 | case Builtin::BI_InterlockedCompareExchangePointer: |
3355 | case Builtin::BI_InterlockedCompareExchangePointer_nf: { |
3356 | llvm::Type *RTy; |
3357 | llvm::IntegerType *IntType = |
3358 | IntegerType::get(getLLVMContext(), |
3359 | getContext().getTypeSize(E->getType())); |
3360 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
3361 | |
3362 | llvm::Value *Destination = |
3363 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); |
3364 | |
3365 | llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); |
3366 | RTy = Exchange->getType(); |
3367 | Exchange = Builder.CreatePtrToInt(Exchange, IntType); |
3368 | |
3369 | llvm::Value *Comparand = |
3370 | Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); |
3371 | |
3372 | auto Ordering = |
3373 | BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ? |
3374 | AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent; |
3375 | |
3376 | auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
3377 | Ordering, Ordering); |
3378 | Result->setVolatile(true); |
3379 | |
3380 | return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, |
3381 | 0), |
3382 | RTy)); |
3383 | } |
3384 | case Builtin::BI_InterlockedCompareExchange8: |
3385 | case Builtin::BI_InterlockedCompareExchange16: |
3386 | case Builtin::BI_InterlockedCompareExchange: |
3387 | case Builtin::BI_InterlockedCompareExchange64: |
3388 | return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E)); |
3389 | case Builtin::BI_InterlockedIncrement16: |
3390 | case Builtin::BI_InterlockedIncrement: |
3391 | return RValue::get( |
3392 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E)); |
3393 | case Builtin::BI_InterlockedDecrement16: |
3394 | case Builtin::BI_InterlockedDecrement: |
3395 | return RValue::get( |
3396 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E)); |
3397 | case Builtin::BI_InterlockedAnd8: |
3398 | case Builtin::BI_InterlockedAnd16: |
3399 | case Builtin::BI_InterlockedAnd: |
3400 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E)); |
3401 | case Builtin::BI_InterlockedExchangeAdd8: |
3402 | case Builtin::BI_InterlockedExchangeAdd16: |
3403 | case Builtin::BI_InterlockedExchangeAdd: |
3404 | return RValue::get( |
3405 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E)); |
3406 | case Builtin::BI_InterlockedExchangeSub8: |
3407 | case Builtin::BI_InterlockedExchangeSub16: |
3408 | case Builtin::BI_InterlockedExchangeSub: |
3409 | return RValue::get( |
3410 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E)); |
3411 | case Builtin::BI_InterlockedOr8: |
3412 | case Builtin::BI_InterlockedOr16: |
3413 | case Builtin::BI_InterlockedOr: |
3414 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E)); |
3415 | case Builtin::BI_InterlockedXor8: |
3416 | case Builtin::BI_InterlockedXor16: |
3417 | case Builtin::BI_InterlockedXor: |
3418 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E)); |
3419 | |
3420 | case Builtin::BI_bittest64: |
3421 | case Builtin::BI_bittest: |
3422 | case Builtin::BI_bittestandcomplement64: |
3423 | case Builtin::BI_bittestandcomplement: |
3424 | case Builtin::BI_bittestandreset64: |
3425 | case Builtin::BI_bittestandreset: |
3426 | case Builtin::BI_bittestandset64: |
3427 | case Builtin::BI_bittestandset: |
3428 | case Builtin::BI_interlockedbittestandreset: |
3429 | case Builtin::BI_interlockedbittestandreset64: |
3430 | case Builtin::BI_interlockedbittestandset64: |
3431 | case Builtin::BI_interlockedbittestandset: |
3432 | case Builtin::BI_interlockedbittestandset_acq: |
3433 | case Builtin::BI_interlockedbittestandset_rel: |
3434 | case Builtin::BI_interlockedbittestandset_nf: |
3435 | case Builtin::BI_interlockedbittestandreset_acq: |
3436 | case Builtin::BI_interlockedbittestandreset_rel: |
3437 | case Builtin::BI_interlockedbittestandreset_nf: |
3438 | return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E)); |
3439 | |
3440 | // These builtins exist to emit regular volatile loads and stores not |
3441 | // affected by the -fms-volatile setting. |
3442 | case Builtin::BI__iso_volatile_load8: |
3443 | case Builtin::BI__iso_volatile_load16: |
3444 | case Builtin::BI__iso_volatile_load32: |
3445 | case Builtin::BI__iso_volatile_load64: |
3446 | return RValue::get(EmitISOVolatileLoad(*this, E)); |
3447 | case Builtin::BI__iso_volatile_store8: |
3448 | case Builtin::BI__iso_volatile_store16: |
3449 | case Builtin::BI__iso_volatile_store32: |
3450 | case Builtin::BI__iso_volatile_store64: |
3451 | return RValue::get(EmitISOVolatileStore(*this, E)); |
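// Illustrative contrast (assumed example, not from this file): under
// -fms-volatile a plain volatile access may pick up acquire/release ordering,
// whereas
//   int v = __iso_volatile_load32(p);
// is still emitted as an ordinary volatile load with no extra ordering:
//   %v = load volatile i32, i32* %p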
3452 | |
3453 | case Builtin::BI__exception_code: |
3454 | case Builtin::BI_exception_code: |
3455 | return RValue::get(EmitSEHExceptionCode()); |
3456 | case Builtin::BI__exception_info: |
3457 | case Builtin::BI_exception_info: |
3458 | return RValue::get(EmitSEHExceptionInfo()); |
3459 | case Builtin::BI__abnormal_termination: |
3460 | case Builtin::BI_abnormal_termination: |
3461 | return RValue::get(EmitSEHAbnormalTermination()); |
3462 | case Builtin::BI_setjmpex: |
3463 | if (getTarget().getTriple().isOSMSVCRT()) |
3464 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
3465 | break; |
3466 | case Builtin::BI_setjmp: |
3467 | if (getTarget().getTriple().isOSMSVCRT()) { |
3468 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) |
3469 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E); |
3470 | else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64) |
3471 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
3472 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E); |
3473 | } |
3474 | break; |
3475 | |
3476 | case Builtin::BI__GetExceptionInfo: { |
3477 | if (llvm::GlobalVariable *GV = |
3478 | CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) |
3479 | return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); |
3480 | break; |
3481 | } |
3482 | |
3483 | case Builtin::BI__fastfail: |
3484 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E)); |
3485 | |
3486 | case Builtin::BI__builtin_coro_size: { |
3487 | auto &Context = getContext();
3488 | auto SizeTy = Context.getSizeType(); |
3489 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
3490 | Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T); |
3491 | return RValue::get(Builder.CreateCall(F)); |
3492 | } |
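// Sketch (assuming a 64-bit size_t): inside a coroutine,
//   size_t n = __builtin_coro_size();
// becomes a call to the size intrinsic instantiated at the size_t width:
//   %n = call i64 @llvm.coro.size.i64()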
3493 | |
3494 | case Builtin::BI__builtin_coro_id: |
3495 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_id); |
3496 | case Builtin::BI__builtin_coro_promise: |
3497 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise); |
3498 | case Builtin::BI__builtin_coro_resume: |
3499 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume); |
3500 | case Builtin::BI__builtin_coro_frame: |
3501 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame); |
3502 | case Builtin::BI__builtin_coro_noop: |
3503 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop); |
3504 | case Builtin::BI__builtin_coro_free: |
3505 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_free); |
3506 | case Builtin::BI__builtin_coro_destroy: |
3507 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy); |
3508 | case Builtin::BI__builtin_coro_done: |
3509 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_done); |
3510 | case Builtin::BI__builtin_coro_alloc: |
3511 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc); |
3512 | case Builtin::BI__builtin_coro_begin: |
3513 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin); |
3514 | case Builtin::BI__builtin_coro_end: |
3515 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_end); |
3516 | case Builtin::BI__builtin_coro_suspend: |
3517 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend); |
3518 | case Builtin::BI__builtin_coro_param: |
3519 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_param); |
3520 | |
3521 | // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions |
3522 | case Builtin::BIread_pipe: |
3523 | case Builtin::BIwrite_pipe: { |
3524 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
3525 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
3526 | CGOpenCLRuntime OpenCLRT(CGM); |
3527 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
3528 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
3529 | |
3530 | // Type of the generic packet parameter. |
3531 | unsigned GenericAS = |
3532 | getContext().getTargetAddressSpace(LangAS::opencl_generic); |
3533 | llvm::Type *I8PTy = llvm::PointerType::get( |
3534 | llvm::Type::getInt8Ty(getLLVMContext()), GenericAS); |
3535 | |
3536 | // Determine which overloaded version of the call to generate.
3537 | if (2U == E->getNumArgs()) { |
3538 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" |
3539 | : "__write_pipe_2"; |
3540 | // Create a generic function type so the call works with any builtin or
3541 | // user-defined type.
3542 | llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; |
3543 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3544 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3545 | Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy); |
3546 | return RValue::get( |
3547 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3548 | {Arg0, BCast, PacketSize, PacketAlign})); |
3549 | } else { |
3550 | assert(4 == E->getNumArgs() &&
3551 |        "Illegal number of parameters to pipe function");
3552 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" |
3553 | : "__write_pipe_4"; |
3554 | |
3555 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, |
3556 | Int32Ty, Int32Ty}; |
3557 | Value *Arg2 = EmitScalarExpr(E->getArg(2)), |
3558 | *Arg3 = EmitScalarExpr(E->getArg(3)); |
3559 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3560 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3561 | Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy); |
3562 | // We know the third argument is an integer type, but we may need to cast |
3563 | // it to i32. |
3564 | if (Arg2->getType() != Int32Ty) |
3565 | Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty); |
3566 | return RValue::get(Builder.CreateCall( |
3567 | CGM.CreateRuntimeFunction(FTy, Name), |
3568 | {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign})); |
3569 | } |
3570 | } |
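// Hedged sketch of the two overloads handled above (OpenCL caller code, shown
// only for illustration):
//   read_pipe(p, &val);              // 2 args -> __read_pipe_2(p, ptr, size, align)
//   read_pipe(p, rid, idx, &val);    // 4 args -> __read_pipe_4(p, rid, idx, ptr, size, align)
// where size and align are the packet size and alignment obtained from
// CGOpenCLRuntime, and ptr is the user pointer cast to a generic i8*.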
3571 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
3572 | // functions |
3573 | case Builtin::BIreserve_read_pipe: |
3574 | case Builtin::BIreserve_write_pipe: |
3575 | case Builtin::BIwork_group_reserve_read_pipe: |
3576 | case Builtin::BIwork_group_reserve_write_pipe: |
3577 | case Builtin::BIsub_group_reserve_read_pipe: |
3578 | case Builtin::BIsub_group_reserve_write_pipe: { |
3579 | // Composing the mangled name for the function. |
3580 | const char *Name; |
3581 | if (BuiltinID == Builtin::BIreserve_read_pipe) |
3582 | Name = "__reserve_read_pipe"; |
3583 | else if (BuiltinID == Builtin::BIreserve_write_pipe) |
3584 | Name = "__reserve_write_pipe"; |
3585 | else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) |
3586 | Name = "__work_group_reserve_read_pipe"; |
3587 | else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) |
3588 | Name = "__work_group_reserve_write_pipe"; |
3589 | else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) |
3590 | Name = "__sub_group_reserve_read_pipe"; |
3591 | else |
3592 | Name = "__sub_group_reserve_write_pipe"; |
3593 | |
3594 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
3595 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
3596 | llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy); |
3597 | CGOpenCLRuntime OpenCLRT(CGM); |
3598 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
3599 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
3600 | |
3601 | // Building the generic function prototype. |
3602 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; |
3603 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3604 | ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3605 | // We know the second argument is an integer type, but we may need to cast |
3606 | // it to i32. |
3607 | if (Arg1->getType() != Int32Ty) |
3608 | Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty); |
3609 | return RValue::get( |
3610 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3611 | {Arg0, Arg1, PacketSize, PacketAlign})); |
3612 | } |
3613 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write |
3614 | // functions |
3615 | case Builtin::BIcommit_read_pipe: |
3616 | case Builtin::BIcommit_write_pipe: |
3617 | case Builtin::BIwork_group_commit_read_pipe: |
3618 | case Builtin::BIwork_group_commit_write_pipe: |
3619 | case Builtin::BIsub_group_commit_read_pipe: |
3620 | case Builtin::BIsub_group_commit_write_pipe: { |
3621 | const char *Name; |
3622 | if (BuiltinID == Builtin::BIcommit_read_pipe) |
3623 | Name = "__commit_read_pipe"; |
3624 | else if (BuiltinID == Builtin::BIcommit_write_pipe) |
3625 | Name = "__commit_write_pipe"; |
3626 | else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) |
3627 | Name = "__work_group_commit_read_pipe"; |
3628 | else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) |
3629 | Name = "__work_group_commit_write_pipe"; |
3630 | else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) |
3631 | Name = "__sub_group_commit_read_pipe"; |
3632 | else |
3633 | Name = "__sub_group_commit_write_pipe"; |
3634 | |
3635 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
3636 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
3637 | CGOpenCLRuntime OpenCLRT(CGM); |
3638 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
3639 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
3640 | |
3641 | // Building the generic function prototype. |
3642 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; |
3643 | llvm::FunctionType *FTy = |
3644 | llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), |
3645 | llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3646 | |
3647 | return RValue::get( |
3648 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3649 | {Arg0, Arg1, PacketSize, PacketAlign})); |
3650 | } |
3651 | // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions |
3652 | case Builtin::BIget_pipe_num_packets: |
3653 | case Builtin::BIget_pipe_max_packets: { |
3654 | const char *BaseName; |
3655 | const PipeType *PipeTy = E->getArg(0)->getType()->getAs<PipeType>(); |
3656 | if (BuiltinID == Builtin::BIget_pipe_num_packets) |
3657 | BaseName = "__get_pipe_num_packets"; |
3658 | else |
3659 | BaseName = "__get_pipe_max_packets"; |
3660 | auto Name = std::string(BaseName) + |
3661 | std::string(PipeTy->isReadOnly() ? "_ro" : "_wo"); |
3662 | |
3663 | // Building the generic function prototype. |
3664 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
3665 | CGOpenCLRuntime OpenCLRT(CGM); |
3666 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
3667 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
3668 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; |
3669 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3670 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3671 | |
3672 | return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3673 | {Arg0, PacketSize, PacketAlign})); |
3674 | } |
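// Illustrative mapping: get_pipe_num_packets(p) on a read_only pipe lowers to
//   __get_pipe_num_packets_ro(p, packet_size, packet_align)
// and to the _wo variant for a write_only pipe; get_pipe_max_packets behaves
// the same way with its own base name.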
3675 | |
3676 | // OpenCL v2.0 s6.13.9 - Address space qualifier functions. |
3677 | case Builtin::BIto_global: |
3678 | case Builtin::BIto_local: |
3679 | case Builtin::BIto_private: { |
3680 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
3681 | auto NewArgT = llvm::PointerType::get(Int8Ty, |
3682 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
3683 | auto NewRetT = llvm::PointerType::get(Int8Ty, |
3684 | CGM.getContext().getTargetAddressSpace( |
3685 | E->getType()->getPointeeType().getAddressSpace())); |
3686 | auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); |
3687 | llvm::Value *NewArg; |
3688 | if (Arg0->getType()->getPointerAddressSpace() != |
3689 | NewArgT->getPointerAddressSpace()) |
3690 | NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT); |
3691 | else |
3692 | NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT); |
3693 | auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); |
3694 | auto NewCall = |
3695 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); |
3696 | return RValue::get(Builder.CreateBitOrPointerCast(NewCall, |
3697 | ConvertType(E->getType()))); |
3698 | } |
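// Illustrative sketch: for
//   __global int *g = to_global(p);   // p is a generic-address-space pointer
// the code above casts p to a generic i8*, calls the runtime helper
// __to_global, and casts the result back to the source-level return type.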
3699 | |
3700 | // OpenCL v2.0, s6.13.17 - Enqueue kernel function. |
3701 | // It contains four different overload formats specified in Table 6.13.17.1. |
3702 | case Builtin::BIenqueue_kernel: { |
3703 | StringRef Name; // Generated function call name |
3704 | unsigned NumArgs = E->getNumArgs(); |
3705 | |
3706 | llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy); |
3707 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
3708 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
3709 | |
3710 | llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); |
3711 | llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); |
3712 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); |
3713 | llvm::Value *Range = NDRangeL.getAddress().getPointer(); |
3714 | llvm::Type *RangeTy = NDRangeL.getAddress().getType(); |
3715 | |
3716 | if (NumArgs == 4) { |
3717 | // The most basic form of the call with parameters: |
3718 | // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void) |
3719 | Name = "__enqueue_kernel_basic"; |
3720 | llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy, |
3721 | GenericVoidPtrTy}; |
3722 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3723 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3724 | |
3725 | auto Info = |
3726 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
3727 | llvm::Value *Kernel = |
3728 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
3729 | llvm::Value *Block = |
3730 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
3731 | |
3732 | AttrBuilder B; |
3733 | B.addAttribute(Attribute::ByVal); |
3734 | llvm::AttributeList ByValAttrSet = |
3735 | llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B); |
3736 | |
3737 | auto RTCall = |
3738 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet), |
3739 | {Queue, Flags, Range, Kernel, Block}); |
3740 | RTCall->setAttributes(ByValAttrSet); |
3741 | return RValue::get(RTCall); |
3742 | } |
3743 | assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
3744 | |
3745 | // Create a temporary array to hold the sizes of local pointer arguments |
3746 | // for the block. \p First is the position of the first size argument. |
3747 | auto CreateArrayForSizeVar = [=](unsigned First) |
3748 | -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> { |
3749 | llvm::APInt ArraySize(32, NumArgs - First); |
3750 | QualType SizeArrayTy = getContext().getConstantArrayType( |
3751 | getContext().getSizeType(), ArraySize, ArrayType::Normal, |
3752 | /*IndexTypeQuals=*/0); |
3753 | auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes"); |
3754 | llvm::Value *TmpPtr = Tmp.getPointer(); |
3755 | llvm::Value *TmpSize = EmitLifetimeStart( |
3756 | CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr); |
3757 | llvm::Value *ElemPtr; |
3758 | // Each of the following arguments specifies the size of the corresponding |
3759 | // argument passed to the enqueued block. |
3760 | auto *Zero = llvm::ConstantInt::get(IntTy, 0); |
3761 | for (unsigned I = First; I < NumArgs; ++I) { |
3762 | auto *Index = llvm::ConstantInt::get(IntTy, I - First); |
3763 | auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index}); |
3764 | if (I == First) |
3765 | ElemPtr = GEP; |
3766 | auto *V = |
3767 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); |
3768 | Builder.CreateAlignedStore( |
3769 | V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy)); |
3770 | } |
3771 | return std::tie(ElemPtr, TmpSize, TmpPtr); |
3772 | }; |
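// Hypothetical example of the layout built above: for
//   enqueue_kernel(q, flags, range, ^(local int *a, local float *b){ ... }, 16, 32)
// the trailing size arguments {16, 32} are stored into the temporary array and
// ElemPtr points at its first element.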
3773 | |
3774 | // Could have events and/or varargs. |
3775 | if (E->getArg(3)->getType()->isBlockPointerType()) { |
3776 | // No events passed, but has variadic arguments. |
3777 | Name = "__enqueue_kernel_varargs"; |
3778 | auto Info = |
3779 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
3780 | llvm::Value *Kernel = |
3781 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
3782 | auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
3783 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
3784 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4); |
3785 | |
3786 | // Create a vector of the arguments, as well as a constant value to |
3787 | // express to the runtime the number of variadic arguments. |
3788 | std::vector<llvm::Value *> Args = { |
3789 | Queue, Flags, Range, |
3790 | Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4), |
3791 | ElemPtr}; |
3792 | std::vector<llvm::Type *> ArgTys = { |
3793 | QueueTy, IntTy, RangeTy, GenericVoidPtrTy, |
3794 | GenericVoidPtrTy, IntTy, ElemPtr->getType()}; |
3795 | |
3796 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3797 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3798 | auto Call = |
3799 | RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3800 | llvm::ArrayRef<llvm::Value *>(Args))); |
3801 | if (TmpSize) |
3802 | EmitLifetimeEnd(TmpSize, TmpPtr); |
3803 | return Call; |
3804 | } |
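// Illustrative call shape for the varargs form built above (a sketch, not the
// exact textual IR):
//   i32 __enqueue_kernel_varargs(queue_t q, i32 flags, ndrange_t *range,
//                                i8 addrspace(4)* kernel, i8 addrspace(4)* block,
//                                i32 num_local_args, size_t *local_sizes)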
3805 | // All remaining forms take event arguments.
3806 | if (NumArgs >= 7) { |
3807 | llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy); |
3808 | llvm::PointerType *EventPtrTy = EventTy->getPointerTo( |
3809 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
3810 | |
3811 | llvm::Value *NumEvents = |
3812 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty); |
3813 | |
3814 | // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth arguments
3815 | // to be null pointer constants (including a `0` literal), we can take that
3816 | // into account and emit a null pointer directly.
3817 | llvm::Value *EventWaitList = nullptr; |
3818 | if (E->getArg(4)->isNullPointerConstant( |
3819 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
3820 | EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy); |
3821 | } else { |
3822 | EventWaitList = E->getArg(4)->getType()->isArrayType() |
3823 | ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() |
3824 | : EmitScalarExpr(E->getArg(4)); |
3825 | // Convert to generic address space. |
3826 | EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy); |
3827 | } |
3828 | llvm::Value *EventRet = nullptr; |
3829 | if (E->getArg(5)->isNullPointerConstant( |
3830 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
3831 | EventRet = llvm::ConstantPointerNull::get(EventPtrTy); |
3832 | } else { |
3833 | EventRet = |
3834 | Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy); |
3835 | } |
3836 | |
3837 | auto Info = |
3838 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6)); |
3839 | llvm::Value *Kernel = |
3840 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
3841 | llvm::Value *Block = |
3842 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
3843 | |
3844 | std::vector<llvm::Type *> ArgTys = { |
3845 | QueueTy, Int32Ty, RangeTy, Int32Ty, |
3846 | EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy}; |
3847 | |
3848 | std::vector<llvm::Value *> Args = {Queue, Flags, Range, |
3849 | NumEvents, EventWaitList, EventRet, |
3850 | Kernel, Block}; |
3851 | |
3852 | if (NumArgs == 7) { |
3853 | // Has events but no variadics. |
3854 | Name = "__enqueue_kernel_basic_events"; |
3855 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3856 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3857 | return RValue::get( |
3858 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3859 | llvm::ArrayRef<llvm::Value *>(Args))); |
3860 | } |
3861 | // Has event info and variadics |
3862 | // Pass the number of variadics to the runtime function too. |
3863 | Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); |
3864 | ArgTys.push_back(Int32Ty); |
3865 | Name = "__enqueue_kernel_events_varargs"; |
3866 | |
3867 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
3868 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7); |
3869 | Args.push_back(ElemPtr); |
3870 | ArgTys.push_back(ElemPtr->getType()); |
3871 | |
3872 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
3873 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
3874 | auto Call = |
3875 | RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), |
3876 | llvm::ArrayRef<llvm::Value *>(Args))); |
3877 | if (TmpSize) |
3878 | EmitLifetimeEnd(TmpSize, TmpPtr); |
3879 | return Call; |
3880 | } |
3881 | LLVM_FALLTHROUGH;
3882 | } |
3883 | // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block |
3884 | // parameter. |
3885 | case Builtin::BIget_kernel_work_group_size: { |
3886 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
3887 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
3888 | auto Info = |
3889 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
3890 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
3891 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
3892 | return RValue::get(Builder.CreateCall( |
3893 | CGM.CreateRuntimeFunction( |
3894 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
3895 | false), |
3896 | "__get_kernel_work_group_size_impl"), |
3897 | {Kernel, Arg})); |
3898 | } |
3899 | case Builtin::BIget_kernel_preferred_work_group_size_multiple: { |
3900 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
3901 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
3902 | auto Info = |
3903 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
3904 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
3905 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
3906 | return RValue::get(Builder.CreateCall( |
3907 | CGM.CreateRuntimeFunction( |
3908 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
3909 | false), |
3910 | "__get_kernel_preferred_work_group_size_multiple_impl"), |
3911 | {Kernel, Arg})); |
3912 | } |
3913 | case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: |
3914 | case Builtin::BIget_kernel_sub_group_count_for_ndrange: { |
3915 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
3916 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
3917 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); |
3918 | llvm::Value *NDRange = NDRangeL.getAddress().getPointer(); |
3919 | auto Info = |
3920 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); |
3921 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
3922 | Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
3923 | const char *Name = |
3924 | BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange |
3925 | ? "__get_kernel_max_sub_group_size_for_ndrange_impl" |
3926 | : "__get_kernel_sub_group_count_for_ndrange_impl"; |
3927 | return RValue::get(Builder.CreateCall( |
3928 | CGM.CreateRuntimeFunction( |
3929 | llvm::FunctionType::get( |
3930 | IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, |
3931 | false), |
3932 | Name), |
3933 | {NDRange, Kernel, Block})); |
3934 | } |
3935 | |
3936 | case Builtin::BI__builtin_store_half: |
3937 | case Builtin::BI__builtin_store_halff: { |
3938 | Value *Val = EmitScalarExpr(E->getArg(0)); |
3939 | Address Address = EmitPointerWithAlignment(E->getArg(1)); |
3940 | Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy()); |
3941 | return RValue::get(Builder.CreateStore(HalfVal, Address)); |
3942 | } |
3943 | case Builtin::BI__builtin_load_half: { |
3944 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
3945 | Value *HalfVal = Builder.CreateLoad(Address); |
3946 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy())); |
3947 | } |
3948 | case Builtin::BI__builtin_load_halff: { |
3949 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
3950 | Value *HalfVal = Builder.CreateLoad(Address); |
3951 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy())); |
3952 | } |
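// Minimal sketch of the half-precision helpers above (assumed caller code):
//   __builtin_store_halff(f, p);          // fptrunc float -> half, then store
//   double d = __builtin_load_half(p);    // load half, then fpext to double
//   float  g = __builtin_load_halff(p);   // load half, then fpext to float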
3953 | case Builtin::BIprintf: |
3954 | if (getTarget().getTriple().isNVPTX()) |
3955 | return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue); |
3956 | break; |
3957 | case Builtin::BI__builtin_canonicalize: |
3958 | case Builtin::BI__builtin_canonicalizef: |
3959 | case Builtin::BI__builtin_canonicalizel: |
3960 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize)); |
3961 | |
3962 | case Builtin::BI__builtin_thread_pointer: { |
3963 | if (!getContext().getTargetInfo().isTLSSupported()) |
3964 | CGM.ErrorUnsupported(E, "__builtin_thread_pointer"); |
3965 | // Fall through - it's already mapped to the intrinsic by GCCBuiltin. |
3966 | break; |
3967 | } |
3968 | case Builtin::BI__builtin_os_log_format: |
3969 | return emitBuiltinOSLogFormat(*E); |
3970 | |
3971 | case Builtin::BI__xray_customevent: { |
3972 | if (!ShouldXRayInstrumentFunction()) |
3973 | return RValue::getIgnored(); |
3974 | |
3975 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
3976 | XRayInstrKind::Custom)) |
3977 | return RValue::getIgnored(); |
3978 | |
3979 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
3980 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) |
3981 | return RValue::getIgnored(); |
3982 | |
3983 | Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent); |
3984 | auto FTy = F->getFunctionType(); |
3985 | auto Arg0 = E->getArg(0); |
3986 | auto Arg0Val = EmitScalarExpr(Arg0); |
3987 | auto Arg0Ty = Arg0->getType(); |
3988 | auto PTy0 = FTy->getParamType(0); |
3989 | if (PTy0 != Arg0Val->getType()) { |
3990 | if (Arg0Ty->isArrayType()) |
3991 | Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer(); |
3992 | else |
3993 | Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0); |
3994 | } |
3995 | auto Arg1 = EmitScalarExpr(E->getArg(1)); |
3996 | auto PTy1 = FTy->getParamType(1); |
3997 | if (PTy1 != Arg1->getType()) |
3998 | Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1); |
3999 | return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1})); |
4000 | } |
4001 | |
4002 | case Builtin::BI__xray_typedevent: { |
4003 | // TODO: There should be a way to always emit events even if the current |
4004 | // function is not instrumented. Losing events in a stream can cripple |
4005 | // a trace. |
4006 | if (!ShouldXRayInstrumentFunction()) |
4007 | return RValue::getIgnored(); |
4008 | |
4009 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
4010 | XRayInstrKind::Typed)) |
4011 | return RValue::getIgnored(); |
4012 | |
4013 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
4014 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) |
4015 | return RValue::getIgnored(); |
4016 | |
4017 | Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent); |
4018 | auto FTy = F->getFunctionType(); |
4019 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
4020 | auto PTy0 = FTy->getParamType(0); |
4021 | if (PTy0 != Arg0->getType()) |
4022 | Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0); |
4023 | auto Arg1 = E->getArg(1); |
4024 | auto Arg1Val = EmitScalarExpr(Arg1); |
4025 | auto Arg1Ty = Arg1->getType(); |
4026 | auto PTy1 = FTy->getParamType(1); |
4027 | if (PTy1 != Arg1Val->getType()) { |
4028 | if (Arg1Ty->isArrayType()) |
4029 | Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer(); |
4030 | else |
4031 | Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1); |
4032 | } |
4033 | auto Arg2 = EmitScalarExpr(E->getArg(2)); |
4034 | auto PTy2 = FTy->getParamType(2); |
4035 | if (PTy2 != Arg2->getType()) |
4036 | Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2); |
4037 | return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2})); |
4038 | } |
4039 | |
4040 | case Builtin::BI__builtin_ms_va_start: |
4041 | case Builtin::BI__builtin_ms_va_end: |
4042 | return RValue::get( |
4043 | EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), |
4044 | BuiltinID == Builtin::BI__builtin_ms_va_start)); |
4045 | |
4046 | case Builtin::BI__builtin_ms_va_copy: { |
4047 | // Lower this manually. We can't reliably determine whether or not any |
4048 | // given va_copy() is for a Win64 va_list from the calling convention |
4049 | // alone, because it's legal to do this from a System V ABI function. |
4050 | // With opaque pointer types, we won't have enough information in LLVM |
4051 | // IR to determine this from the argument types, either. Best to do it |
4052 | // now, while we have enough information. |
4053 | Address DestAddr = EmitMSVAListRef(E->getArg(0)); |
4054 | Address SrcAddr = EmitMSVAListRef(E->getArg(1)); |
4055 | |
4056 | llvm::Type *BPP = Int8PtrPtrTy; |
4057 | |
4058 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"), |
4059 | DestAddr.getAlignment()); |
4060 | SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"), |
4061 | SrcAddr.getAlignment()); |
4062 | |
4063 | Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val"); |
4064 | return RValue::get(Builder.CreateStore(ArgPtr, DestAddr)); |
4065 | } |
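// Hedged sketch: on Win64 a va_list is a single char*, so
//   __builtin_ms_va_copy(dst, src);
// reduces to loading the pointer held in src and storing it into dst, which is
// exactly the bitcast/load/store sequence emitted above.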
4066 | } |
4067 | |
4068 | // If this is an alias for a lib function (e.g. __builtin_sin), emit |
4069 | // the call using the normal call path, but using the unmangled |
4070 | // version of the function name. |
4071 | if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) |
4072 | return emitLibraryCall(*this, FD, E, |
4073 | CGM.getBuiltinLibFunction(FD, BuiltinID)); |
4074 | |
4075 | // If this is a predefined lib function (e.g. malloc), emit the call |
4076 | // using exactly the normal call path. |
4077 | if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) |
4078 | return emitLibraryCall(*this, FD, E, |
4079 | cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); |
4080 | |
4081 | // Check that a call to a target specific builtin has the correct target |
4082 | // features. |
4083 | // This is down here to avoid non-target specific builtins, however, if |
4084 | // generic builtins start to require generic target features then we |
4085 | // can move this up to the beginning of the function. |
4086 | checkTargetFeatures(E, FD); |
4087 | |
4088 | if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) |
4089 | LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth); |
4090 | |
4091 | // See if we have a target specific intrinsic. |
4092 | const char *Name = getContext().BuiltinInfo.getName(BuiltinID); |
4093 | Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; |
4094 | StringRef Prefix = |
4095 | llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); |
4096 | if (!Prefix.empty()) { |
4097 | IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name); |
4098 | // NOTE: we don't need to perform a compatibility flag check here, since the
4099 | // MS builtins are declared in Builtins*.def via LANGBUILTIN with
4100 | // ALL_MS_LANGUAGES and are filtered earlier.
4101 | if (IntrinsicID == Intrinsic::not_intrinsic) |
4102 | IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); |
4103 | } |
4104 | |
4105 | if (IntrinsicID != Intrinsic::not_intrinsic) { |
4106 | SmallVector<Value*, 16> Args; |
4107 | |
4108 | // Find out if any arguments are required to be integer constant |
4109 | // expressions. |
4110 | unsigned ICEArguments = 0; |
4111 | ASTContext::GetBuiltinTypeError Error; |
4112 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
4113 | assert(Error == ASTContext::GE_None && "Should not codegen an error");
4114 | |
4115 | Function *F = CGM.getIntrinsic(IntrinsicID); |
4116 | llvm::FunctionType *FTy = F->getFunctionType(); |
4117 | |
4118 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { |
4119 | Value *ArgValue; |
4120 | // If this is a normal argument, just emit it as a scalar. |
4121 | if ((ICEArguments & (1 << i)) == 0) { |
4122 | ArgValue = EmitScalarExpr(E->getArg(i)); |
4123 | } else { |
4124 | // If this is required to be a constant, constant fold it so that we |
4125 | // know that the generated intrinsic gets a ConstantInt. |
4126 | llvm::APSInt Result; |
4127 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
4128 | assert(IsConst && "Constant arg isn't actually constant?");
4129 | (void)IsConst; |
4130 | ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result); |
4131 | } |
4132 | |
4133 | // If the intrinsic arg type is different from the builtin arg type |
4134 | // we need to do a bit cast. |
4135 | llvm::Type *PTy = FTy->getParamType(i); |
4136 | if (PTy != ArgValue->getType()) { |
4137 | // XXX - vector of pointers? |
4138 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) { |
4139 | if (PtrTy->getAddressSpace() != |
4140 | ArgValue->getType()->getPointerAddressSpace()) { |
4141 | ArgValue = Builder.CreateAddrSpaceCast( |
4142 | ArgValue, |
4143 | ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace())); |
4144 | } |
4145 | } |
4146 | |
4147 | assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
4148 |        "Must be able to losslessly bit cast to param");
4149 | ArgValue = Builder.CreateBitCast(ArgValue, PTy); |
4150 | } |
4151 | |
4152 | Args.push_back(ArgValue); |
4153 | } |
4154 | |
4155 | Value *V = Builder.CreateCall(F, Args); |
4156 | QualType BuiltinRetType = E->getType(); |
4157 | |
4158 | llvm::Type *RetTy = VoidTy; |
4159 | if (!BuiltinRetType->isVoidType()) |
4160 | RetTy = ConvertType(BuiltinRetType); |
4161 | |
4162 | if (RetTy != V->getType()) { |
4163 | // XXX - vector of pointers? |
4164 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) { |
4165 | if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { |
4166 | V = Builder.CreateAddrSpaceCast( |
4167 | V, V->getType()->getPointerTo(PtrTy->getAddressSpace())); |
4168 | } |
4169 | } |
4170 | |
4171 | assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
4172 |        "Must be able to losslessly bit cast result type");
4173 | V = Builder.CreateBitCast(V, RetTy); |
4174 | } |
4175 | |
4176 | return RValue::get(V); |
4177 | } |
4178 | |
4179 | // See if we have a target specific builtin that needs to be lowered. |
4180 | if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E)) |
4181 | return RValue::get(V); |
4182 | |
4183 | ErrorUnsupported(E, "builtin function"); |
4184 | |
4185 | // Unknown builtin, for now just dump it out and return undef. |
4186 | return GetUndefRValue(E->getType()); |
4187 | } |
4188 | |
4189 | static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, |
4190 | unsigned BuiltinID, const CallExpr *E, |
4191 | llvm::Triple::ArchType Arch) { |
4192 | switch (Arch) { |
4193 | case llvm::Triple::arm: |
4194 | case llvm::Triple::armeb: |
4195 | case llvm::Triple::thumb: |
4196 | case llvm::Triple::thumbeb: |
4197 | return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch); |
4198 | case llvm::Triple::aarch64: |
4199 | case llvm::Triple::aarch64_be: |
4200 | return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); |
4201 | case llvm::Triple::x86: |
4202 | case llvm::Triple::x86_64: |
4203 | return CGF->EmitX86BuiltinExpr(BuiltinID, E); |
4204 | case llvm::Triple::ppc: |
4205 | case llvm::Triple::ppc64: |
4206 | case llvm::Triple::ppc64le: |
4207 | return CGF->EmitPPCBuiltinExpr(BuiltinID, E); |
4208 | case llvm::Triple::r600: |
4209 | case llvm::Triple::amdgcn: |
4210 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); |
4211 | case llvm::Triple::systemz: |
4212 | return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); |
4213 | case llvm::Triple::nvptx: |
4214 | case llvm::Triple::nvptx64: |
4215 | return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); |
4216 | case llvm::Triple::wasm32: |
4217 | case llvm::Triple::wasm64: |
4218 | return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); |
4219 | case llvm::Triple::hexagon: |
4220 | return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); |
4221 | default: |
4222 | return nullptr; |
4223 | } |
4224 | } |
4225 | |
4226 | Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, |
4227 | const CallExpr *E) { |
4228 | if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { |
4229 | assert(getContext().getAuxTargetInfo() && "Missing aux target info");
4230 | return EmitTargetArchBuiltinExpr( |
4231 | this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, |
4232 | getContext().getAuxTargetInfo()->getTriple().getArch()); |
4233 | } |
4234 | |
4235 | return EmitTargetArchBuiltinExpr(this, BuiltinID, E, |
4236 | getTarget().getTriple().getArch()); |
4237 | } |
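// Hedged example: in a CUDA device compilation the host is the aux target, so
// a host (e.g. x86) builtin referenced from this translation unit is
// dispatched here using the aux triple's architecture rather than the device
// (NVPTX) triple.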
4238 | |
4239 | static llvm::VectorType *GetNeonType(CodeGenFunction *CGF, |
4240 | NeonTypeFlags TypeFlags, |
4241 | bool HasLegalHalfType=true, |
4242 | bool V1Ty=false) { |
4243 | int IsQuad = TypeFlags.isQuad(); |
4244 | switch (TypeFlags.getEltType()) { |
4245 | case NeonTypeFlags::Int8: |
4246 | case NeonTypeFlags::Poly8: |
4247 | return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); |
4248 | case NeonTypeFlags::Int16: |
4249 | case NeonTypeFlags::Poly16: |
4250 | return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
4251 | case NeonTypeFlags::Float16: |
4252 | if (HasLegalHalfType) |
4253 | return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad)); |
4254 | else |
4255 | return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
4256 | case NeonTypeFlags::Int32: |
4257 | return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); |
4258 | case NeonTypeFlags::Int64: |
4259 | case NeonTypeFlags::Poly64: |
4260 | return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); |
4261 | case NeonTypeFlags::Poly128: |
4262 | // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
4263 | // much of the i128 and f128 API is missing, so we use v16i8 to
4264 | // represent poly128 and rely on pattern matching.
4265 | return llvm::VectorType::get(CGF->Int8Ty, 16); |
4266 | case NeonTypeFlags::Float32: |
4267 | return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); |
4268 | case NeonTypeFlags::Float64: |
4269 | return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); |
4270 | } |
4271 | llvm_unreachable("Unknown vector element type!");
4272 | } |
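// A few concrete mappings produced above (illustrative): Int8 with the quad
// flag set yields <16 x i8>, Float32 without it yields <2 x float>, and
// Float16 falls back to <4 x i16> when half is not a legal type.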
4273 | |
4274 | static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF, |
4275 | NeonTypeFlags IntTypeFlags) { |
4276 | int IsQuad = IntTypeFlags.isQuad(); |
4277 | switch (IntTypeFlags.getEltType()) { |
4278 | case NeonTypeFlags::Int16: |
4279 | return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad)); |
4280 | case NeonTypeFlags::Int32: |
4281 | return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad)); |
4282 | case NeonTypeFlags::Int64: |
4283 | return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad)); |
4284 | default: |
4285 | llvm_unreachable("Type can't be converted to floating-point!");
4286 | } |
4287 | } |
4288 | |
4289 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { |
4290 | unsigned nElts = V->getType()->getVectorNumElements(); |
4291 | Value* SV = llvm::ConstantVector::getSplat(nElts, C); |
4292 | return Builder.CreateShuffleVector(V, V, SV, "lane"); |
4293 | } |
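// Sketch: for a <4 x i32> input and C == 2 this builds the splat mask
// <2, 2, 2, 2>, i.e.
//   %lane = shufflevector <4 x i32> %v, <4 x i32> %v, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
// duplicating lane 2 across every element.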
4294 | |
4295 | Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, |
4296 | const char *name, |
4297 | unsigned shift, bool rightshift) { |
4298 | unsigned j = 0; |
4299 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
4300 | ai != ae; ++ai, ++j) |
4301 | if (shift > 0 && shift == j) |
4302 | Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); |
4303 | else |
4304 | Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); |
4305 | |
4306 | return Builder.CreateCall(F, Ops, name); |
4307 | } |
4308 | |
4309 | Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, |
4310 | bool neg) { |
4311 | int SV = cast<ConstantInt>(V)->getSExtValue(); |
4312 | return ConstantInt::get(Ty, neg ? -SV : SV); |
4313 | } |
4314 | |
4315 | // Right-shift a vector by a constant. |
4316 | Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift, |
4317 | llvm::Type *Ty, bool usgn, |
4318 | const char *name) { |
4319 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); |
4320 | |
4321 | int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); |
4322 | int EltSize = VTy->getScalarSizeInBits(); |
4323 | |
4324 | Vec = Builder.CreateBitCast(Vec, Ty); |
4325 | |
4326 | // lshr/ashr are undefined when the shift amount is equal to the vector |
4327 | // element size. |
4328 | if (ShiftAmt == EltSize) { |
4329 | if (usgn) { |
4330 | // Right-shifting an unsigned value by its size yields 0. |
4331 | return llvm::ConstantAggregateZero::get(VTy); |
4332 | } else { |
4333 | // Right-shifting a signed value by its size is equivalent |
4334 | // to a shift of size-1. |
4335 | --ShiftAmt; |
4336 | Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); |
4337 | } |
4338 | } |
4339 | |
4340 | Shift = EmitNeonShiftVector(Shift, Ty, false); |
4341 | if (usgn) |
4342 | return Builder.CreateLShr(Vec, Shift, name); |
4343 | else |
4344 | return Builder.CreateAShr(Vec, Shift, name); |
4345 | } |
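// Example of the edge case handled above: an unsigned right shift of a
// <4 x i32> vector by 32 folds to zeroinitializer, while a signed one is
// rewritten as an arithmetic shift by 31 so the result keeps the sign bits.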
4346 | |
4347 | enum { |
4348 | AddRetType = (1 << 0), |
4349 | Add1ArgType = (1 << 1), |
4350 | Add2ArgTypes = (1 << 2), |
4351 | |
4352 | VectorizeRetType = (1 << 3), |
4353 | VectorizeArgTypes = (1 << 4), |
4354 | |
4355 | InventFloatType = (1 << 5), |
4356 | UnsignedAlts = (1 << 6), |
4357 | |
4358 | Use64BitVectors = (1 << 7), |
4359 | Use128BitVectors = (1 << 8), |
4360 | |
4361 | Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, |
4362 | VectorRet = AddRetType | VectorizeRetType, |
4363 | VectorRetGetArgs01 = |
4364 | AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, |
4365 | FpCmpzModifiers = |
4366 | AddRetType | VectorizeRetType | Add1ArgType | InventFloatType |
4367 | }; |
4368 | |
4369 | namespace { |
4370 | struct NeonIntrinsicInfo { |
4371 | const char *NameHint; |
4372 | unsigned BuiltinID; |
4373 | unsigned LLVMIntrinsic; |
4374 | unsigned AltLLVMIntrinsic; |
4375 | unsigned TypeModifier; |
4376 | |
4377 | bool operator<(unsigned RHSBuiltinID) const { |
4378 | return BuiltinID < RHSBuiltinID; |
4379 | } |
4380 | bool operator<(const NeonIntrinsicInfo &TE) const { |
4381 | return BuiltinID < TE.BuiltinID; |
4382 | } |
4383 | }; |
4384 | } // end anonymous namespace |
4385 | |
4386 | #define NEONMAP0(NameBase) \ |
4387 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } |
4388 | |
4389 | #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
4390 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ |
4391 | Intrinsic::LLVMIntrinsic, 0, TypeModifier } |
4392 | |
4393 | #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ |
4394 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ |
4395 | Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ |
4396 | TypeModifier } |
4397 | |
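// For instance, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the entry
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// mapping the Clang builtin directly to its ARM NEON intrinsic with no type
// modifiers.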
4398 | static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
4399 | NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
4400 | NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
4401 | NEONMAP1(vabs_v, arm_neon_vabs, 0), |
4402 | NEONMAP1(vabsq_v, arm_neon_vabs, 0), |
4403 | NEONMAP0(vaddhn_v), |
4404 | NEONMAP1(vaesdq_v, arm_neon_aesd, 0), |
4405 | NEONMAP1(vaeseq_v, arm_neon_aese, 0), |
4406 | NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), |
4407 | NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), |
4408 | NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), |
4409 | NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), |
4410 | NEONMAP1(vcage_v, arm_neon_vacge, 0), |
4411 | NEONMAP1(vcageq_v, arm_neon_vacge, 0), |
4412 | NEONMAP1(vcagt_v, arm_neon_vacgt, 0), |
4413 | NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), |
4414 | NEONMAP1(vcale_v, arm_neon_vacge, 0), |
4415 | NEONMAP1(vcaleq_v, arm_neon_vacge, 0), |
4416 | NEONMAP1(vcalt_v, arm_neon_vacgt, 0), |
4417 | NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), |
4418 | NEONMAP0(vceqz_v), |
4419 | NEONMAP0(vceqzq_v), |
4420 | NEONMAP0(vcgez_v), |
4421 | NEONMAP0(vcgezq_v), |
4422 | NEONMAP0(vcgtz_v), |
4423 | NEONMAP0(vcgtzq_v), |
4424 | NEONMAP0(vclez_v), |
4425 | NEONMAP0(vclezq_v), |
4426 | NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), |
4427 | NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), |
4428 | NEONMAP0(vcltz_v), |
4429 | NEONMAP0(vcltzq_v), |
4430 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
4431 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
4432 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
4433 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
4434 | NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), |
4435 | NEONMAP0(vcvt_f16_v), |
4436 | NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), |
4437 | NEONMAP0(vcvt_f32_v), |
4438 | NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
4439 | NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
4440 | NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
4441 | NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
4442 | NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
4443 | NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
4444 | NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
4445 | NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
4446 | NEONMAP0(vcvt_s16_v), |
4447 | NEONMAP0(vcvt_s32_v), |
4448 | NEONMAP0(vcvt_s64_v), |
4449 | NEONMAP0(vcvt_u16_v), |
4450 | NEONMAP0(vcvt_u32_v), |
4451 | NEONMAP0(vcvt_u64_v), |
4452 | NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), |
4453 | NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), |
4454 | NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), |
4455 | NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), |
4456 | NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), |
4457 | NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), |
4458 | NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), |
4459 | NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), |
4460 | NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), |
4461 | NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), |
4462 | NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), |
4463 | NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), |
4464 | NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), |
4465 | NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), |
4466 | NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), |
4467 | NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), |
4468 | NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), |
4469 | NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), |
4470 | NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), |
4471 | NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), |
4472 | NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), |
4473 | NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), |
4474 | NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), |
4475 | NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), |
4476 | NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), |
4477 | NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), |
4478 | NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), |
4479 | NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), |
4480 | NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), |
4481 | NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), |
4482 | NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), |
4483 | NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), |
4484 | NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), |
4485 | NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), |
4486 | NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), |
4487 | NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), |
4488 | NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), |
4489 | NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), |
4490 | NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), |
4491 | NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), |
4492 | NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), |
4493 | NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), |
4494 | NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), |
4495 | NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), |
4496 | NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), |
4497 | NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), |
4498 | NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), |
4499 | NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), |
4500 | NEONMAP0(vcvtq_f16_v), |
4501 | NEONMAP0(vcvtq_f32_v), |
4502 | NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
4503 | NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
4504 | NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
4505 | NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
4506 | NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
4507 | NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
4508 | NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
4509 | NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
4510 | NEONMAP0(vcvtq_s16_v), |
4511 | NEONMAP0(vcvtq_s32_v), |
4512 | NEONMAP0(vcvtq_s64_v), |
4513 | NEONMAP0(vcvtq_u16_v), |
4514 | NEONMAP0(vcvtq_u32_v), |
4515 | NEONMAP0(vcvtq_u64_v), |
4516 | NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), |
4517 | NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), |
4518 | NEONMAP0(vext_v), |
4519 | NEONMAP0(vextq_v), |
4520 | NEONMAP0(vfma_v), |
4521 | NEONMAP0(vfmaq_v), |
4522 | NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
4523 | NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
4524 | NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
4525 | NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
4526 | NEONMAP0(vld1_dup_v), |
4527 | NEONMAP1(vld1_v, arm_neon_vld1, 0), |
4528 | NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), |
4529 | NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), |
4530 | NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), |
4531 | NEONMAP0(vld1q_dup_v), |
4532 | NEONMAP1(vld1q_v, arm_neon_vld1, 0), |
4533 | NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), |
4534 | NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), |
4535 | NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), |
4536 | NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), |
4537 | NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), |
4538 | NEONMAP1(vld2_v, arm_neon_vld2, 0), |
4539 | NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), |
4540 | NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), |
4541 | NEONMAP1(vld2q_v, arm_neon_vld2, 0), |
4542 | NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), |
4543 | NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), |
4544 | NEONMAP1(vld3_v, arm_neon_vld3, 0), |
4545 | NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), |
4546 | NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), |
4547 | NEONMAP1(vld3q_v, arm_neon_vld3, 0), |
4548 | NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), |
4549 | NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), |
4550 | NEONMAP1(vld4_v, arm_neon_vld4, 0), |
4551 | NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), |
4552 | NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), |
4553 | NEONMAP1(vld4q_v, arm_neon_vld4, 0), |
4554 | NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
4555 | NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), |
4556 | NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), |
4557 | NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
4558 | NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
4559 | NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), |
4560 | NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), |
4561 | NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
4562 | NEONMAP0(vmovl_v), |
4563 | NEONMAP0(vmovn_v), |
4564 | NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), |
4565 | NEONMAP0(vmull_v), |
4566 | NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), |
4567 | NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
4568 | NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
4569 | NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), |
4570 | NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
4571 | NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
4572 | NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), |
4573 | NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), |
4574 | NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), |
4575 | NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), |
4576 | NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), |
4577 | NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts), |
4578 | NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts), |
4579 | NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0), |
4580 | NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0), |
4581 | NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), |
4582 | NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), |
4583 | NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), |
4584 | NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), |
4585 | NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), |
4586 | NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), |
4587 | NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), |
4588 | NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), |
4589 | NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), |
4590 | NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
4591 | NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
4592 | NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
4593 | NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
4594 | NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
4595 | NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
4596 | NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), |
4597 | NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), |
4598 | NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts), |
4599 | NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts), |
4600 | NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), |
4601 | NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
4602 | NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
4603 | NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), |
4604 | NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), |
4605 | NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
4606 | NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
4607 | NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), |
4608 | NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), |
4609 | NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), |
4610 | NEONMAP0(vrndi_v), |
4611 | NEONMAP0(vrndiq_v), |
4612 | NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), |
4613 | NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), |
4614 | NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), |
4615 | NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), |
4616 | NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), |
4617 | NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), |
4618 | NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), |
4619 | NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), |
4620 | NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), |
4621 | NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
4622 | NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
4623 | NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
4624 | NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
4625 | NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
4626 | NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
4627 | NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), |
4628 | NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), |
4629 | NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), |
4630 | NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), |
4631 | NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), |
4632 | NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), |
4633 | NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), |
4634 | NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), |
4635 | NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), |
4636 | NEONMAP0(vshl_n_v), |
4637 | NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
4638 | NEONMAP0(vshll_n_v), |
4639 | NEONMAP0(vshlq_n_v), |
4640 | NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
4641 | NEONMAP0(vshr_n_v), |
4642 | NEONMAP0(vshrn_n_v), |
4643 | NEONMAP0(vshrq_n_v), |
4644 | NEONMAP1(vst1_v, arm_neon_vst1, 0), |
4645 | NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), |
4646 | NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), |
4647 | NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), |
4648 | NEONMAP1(vst1q_v, arm_neon_vst1, 0), |
4649 | NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), |
4650 | NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), |
4651 | NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), |
4652 | NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), |
4653 | NEONMAP1(vst2_v, arm_neon_vst2, 0), |
4654 | NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), |
4655 | NEONMAP1(vst2q_v, arm_neon_vst2, 0), |
4656 | NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), |
4657 | NEONMAP1(vst3_v, arm_neon_vst3, 0), |
4658 | NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), |
4659 | NEONMAP1(vst3q_v, arm_neon_vst3, 0), |
4660 | NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), |
4661 | NEONMAP1(vst4_v, arm_neon_vst4, 0), |
4662 | NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), |
4663 | NEONMAP1(vst4q_v, arm_neon_vst4, 0), |
4664 | NEONMAP0(vsubhn_v), |
4665 | NEONMAP0(vtrn_v), |
4666 | NEONMAP0(vtrnq_v), |
4667 | NEONMAP0(vtst_v), |
4668 | NEONMAP0(vtstq_v), |
4669 | NEONMAP0(vuzp_v), |
4670 | NEONMAP0(vuzpq_v), |
4671 | NEONMAP0(vzip_v), |
4672 | NEONMAP0(vzipq_v) |
4673 | }; |
4674 | |
4675 | static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = { |
4676 | NEONMAP1(vabs_v, aarch64_neon_abs, 0), |
4677 | NEONMAP1(vabsq_v, aarch64_neon_abs, 0), |
4678 | NEONMAP0(vaddhn_v), |
4679 | NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), |
4680 | NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), |
4681 | NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), |
4682 | NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), |
4683 | NEONMAP1(vcage_v, aarch64_neon_facge, 0), |
4684 | NEONMAP1(vcageq_v, aarch64_neon_facge, 0), |
4685 | NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), |
4686 | NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), |
4687 | NEONMAP1(vcale_v, aarch64_neon_facge, 0), |
4688 | NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), |
4689 | NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), |
4690 | NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), |
4691 | NEONMAP0(vceqz_v), |
4692 | NEONMAP0(vceqzq_v), |
4693 | NEONMAP0(vcgez_v), |
4694 | NEONMAP0(vcgezq_v), |
4695 | NEONMAP0(vcgtz_v), |
4696 | NEONMAP0(vcgtzq_v), |
4697 | NEONMAP0(vclez_v), |
4698 | NEONMAP0(vclezq_v), |
4699 | NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), |
4700 | NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), |
4701 | NEONMAP0(vcltz_v), |
4702 | NEONMAP0(vcltzq_v), |
4703 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
4704 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
4705 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
4706 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
4707 | NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), |
4708 | NEONMAP0(vcvt_f16_v), |
4709 | NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), |
4710 | NEONMAP0(vcvt_f32_v), |
4711 | NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
4712 | NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
4713 | NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
4714 | NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
4715 | NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
4716 | NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
4717 | NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
4718 | NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
4719 | NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
4720 | NEONMAP0(vcvtq_f16_v), |
4721 | NEONMAP0(vcvtq_f32_v), |
4722 | NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
4723 | NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
4724 | NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
4725 | NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
4726 | NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
4727 | NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
4728 | NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
4729 | NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
4730 | NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
4731 | NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), |
4732 | NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
4733 | NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
4734 | NEONMAP0(vext_v), |
4735 | NEONMAP0(vextq_v), |
4736 | NEONMAP0(vfma_v), |
4737 | NEONMAP0(vfmaq_v), |
4738 | NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0), |
4739 | NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0), |
4740 | NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0), |
4741 | NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0), |
4742 | NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0), |
4743 | NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0), |
4744 | NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0), |
4745 | NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0), |
4746 | NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
4747 | NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
4748 | NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
4749 | NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
4750 | NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), |
4751 | NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), |
4752 | NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), |
4753 | NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), |
4754 | NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), |
4755 | NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), |
4756 | NEONMAP0(vmovl_v), |
4757 | NEONMAP0(vmovn_v), |
4758 | NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), |
4759 | NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), |
4760 | NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), |
4761 | NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
4762 | NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
4763 | NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), |
4764 | NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), |
4765 | NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), |
4766 | NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
4767 | NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
4768 | NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), |
4769 | NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), |
4770 | NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), |
4771 | NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), |
4772 | NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), |
4773 | NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), |
4774 | NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), |
4775 | NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), |
4776 | NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), |
4777 | NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), |
4778 | NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), |
4779 | NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
4780 | NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
4781 | NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
4782 | NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
4783 | NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
4784 | NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
4785 | NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), |
4786 | NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), |
4787 | NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
4788 | NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
4789 | NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), |
4790 | NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
4791 | NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
4792 | NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), |
4793 | NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), |
4794 | NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
4795 | NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
4796 | NEONMAP0(vrndi_v), |
4797 | NEONMAP0(vrndiq_v), |
4798 | NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
4799 | NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
4800 | NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
4801 | NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
4802 | NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
4803 | NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
4804 | NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), |
4805 | NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), |
4806 | NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), |
4807 | NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), |
4808 | NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), |
4809 | NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), |
4810 | NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), |
4811 | NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), |
4812 | NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), |
4813 | NEONMAP0(vshl_n_v), |
4814 | NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
4815 | NEONMAP0(vshll_n_v), |
4816 | NEONMAP0(vshlq_n_v), |
4817 | NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
4818 | NEONMAP0(vshr_n_v), |
4819 | NEONMAP0(vshrn_n_v), |
4820 | NEONMAP0(vshrq_n_v), |
4821 | NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), |
4822 | NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), |
4823 | NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), |
4824 | NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), |
4825 | NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), |
4826 | NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), |
4827 | NEONMAP0(vsubhn_v), |
4828 | NEONMAP0(vtst_v), |
4829 | NEONMAP0(vtstq_v), |
4830 | }; |
4831 | |
4832 | static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = { |
4833 | NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), |
4834 | NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), |
4835 | NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), |
4836 | NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
4837 | NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
4838 | NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
4839 | NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
4840 | NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
4841 | NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
4842 | NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
4843 | NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
4844 | NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), |
4845 | NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
4846 | NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), |
4847 | NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
4848 | NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
4849 | NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
4850 | NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
4851 | NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
4852 | NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
4853 | NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
4854 | NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
4855 | NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
4856 | NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
4857 | NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
4858 | NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
4859 | NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
4860 | NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
4861 | NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
4862 | NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
4863 | NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
4864 | NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
4865 | NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
4866 | NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
4867 | NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
4868 | NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
4869 | NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
4870 | NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
4871 | NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
4872 | NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
4873 | NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
4874 | NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
4875 | NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
4876 | NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
4877 | NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
4878 | NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
4879 | NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
4880 | NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
4881 | NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), |
4882 | NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
4883 | NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
4884 | NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
4885 | NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
4886 | NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
4887 | NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
4888 | NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
4889 | NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
4890 | NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
4891 | NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
4892 | NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
4893 | NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
4894 | NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
4895 | NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
4896 | NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
4897 | NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
4898 | NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
4899 | NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
4900 | NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
4901 | NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
4902 | NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), |
4903 | NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), |
4904 | NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), |
4905 | NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
4906 | NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
4907 | NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
4908 | NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
4909 | NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
4910 | NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
4911 | NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
4912 | NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
4913 | NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
4914 | NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
4915 | NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
4916 | NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), |
4917 | NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
4918 | NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), |
4919 | NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
4920 | NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
4921 | NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), |
4922 | NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), |
4923 | NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
4924 | NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
4925 | NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), |
4926 | NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), |
4927 | NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), |
4928 | NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), |
4929 | NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), |
4930 | NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), |
4931 | NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), |
4932 | NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), |
4933 | NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
4934 | NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
4935 | NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
4936 | NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
4937 | NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), |
4938 | NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
4939 | NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
4940 | NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
4941 | NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), |
4942 | NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
4943 | NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), |
4944 | NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), |
4945 | NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), |
4946 | NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
4947 | NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
4948 | NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), |
4949 | NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), |
4950 | NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
4951 | NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
4952 | NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), |
4953 | NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), |
4954 | NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), |
4955 | NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), |
4956 | NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
4957 | NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
4958 | NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
4959 | NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
4960 | NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), |
4961 | NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
4962 | NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
4963 | NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
4964 | NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
4965 | NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
4966 | NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
4967 | NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), |
4968 | NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), |
4969 | NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
4970 | NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
4971 | NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
4972 | NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
4973 | NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), |
4974 | NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), |
4975 | NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), |
4976 | NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), |
4977 | NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
4978 | NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
4979 | NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), |
4980 | NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), |
4981 | NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), |
4982 | NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
4983 | NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
4984 | NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
4985 | NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
4986 | NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), |
4987 | NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
4988 | NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
4989 | NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
4990 | NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
4991 | NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), |
4992 | NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), |
4993 | NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
4994 | NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
4995 | NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), |
4996 | NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), |
4997 | NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), |
4998 | NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), |
4999 | NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), |
5000 | NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), |
5001 | NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), |
5002 | NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), |
5003 | NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), |
5004 | NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), |
5005 | NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), |
5006 | NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), |
5007 | NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), |
5008 | NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), |
5009 | NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), |
5010 | NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), |
5011 | NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), |
5012 | NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), |
5013 | NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), |
5014 | NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), |
5015 | NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
5016 | NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), |
5017 | NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
5018 | NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), |
5019 | NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), |
5020 | NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), |
5021 | NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
5022 | NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), |
5023 | NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
5024 | NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), |
5025 | // FP16 scalar intrinsics go here. |
5026 | NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), |
5027 | NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
5028 | NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
5029 | NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
5030 | NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
5031 | NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
5032 | NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
5033 | NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
5034 | NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
5035 | NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
5036 | NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
5037 | NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
5038 | NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
5039 | NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
5040 | NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
5041 | NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
5042 | NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
5043 | NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
5044 | NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
5045 | NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
5046 | NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
5047 | NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
5048 | NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
5049 | NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
5050 | NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
5051 | NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), |
5052 | NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), |
5053 | NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), |
5054 | NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), |
5055 | NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), |
5056 | }; |
5057 | |
5058 | #undef NEONMAP0 |
5059 | #undef NEONMAP1 |
5060 | #undef NEONMAP2 |
5061 | |
5062 | static bool NEONSIMDIntrinsicsProvenSorted = false; |
5063 | |
5064 | static bool AArch64SIMDIntrinsicsProvenSorted = false; |
5065 | static bool AArch64SISDIntrinsicsProvenSorted = false; |
5066 | |
5067 | |
5068 | static const NeonIntrinsicInfo * |
5069 | findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap, |
5070 | unsigned BuiltinID, bool &MapProvenSorted) { |
5071 | |
5072 | #ifndef NDEBUG |
5073 | if (!MapProvenSorted) { |
5074 | assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap))); |
5075 | MapProvenSorted = true; |
5076 | } |
5077 | #endif |
5078 | |
5079 | const NeonIntrinsicInfo *Builtin = |
5080 | std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID); |
5081 | |
5082 | if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) |
5083 | return Builtin; |
5084 | |
5085 | return nullptr; |
5086 | } |
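 | // Illustrative note (not in the original source): a typical call site later in |
 | // this file looks roughly like |
 | //   findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, |
 | //                          AArch64SIMDIntrinsicsProvenSorted) |
 | // i.e. each table above is paired with its *ProvenSorted flag so the sortedness |
 | // check runs at most once per table in asserts builds. |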
5087 | |
5088 | Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, |
5089 | unsigned Modifier, |
5090 | llvm::Type *ArgType, |
5091 | const CallExpr *E) { |
5092 | int VectorSize = 0; |
5093 | if (Modifier & Use64BitVectors) |
5094 | VectorSize = 64; |
5095 | else if (Modifier & Use128BitVectors) |
5096 | VectorSize = 128; |
5097 | |
5098 | // Return type. |
5099 | SmallVector<llvm::Type *, 3> Tys; |
5100 | if (Modifier & AddRetType) { |
5101 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
5102 | if (Modifier & VectorizeRetType) |
5103 | Ty = llvm::VectorType::get( |
5104 | Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); |
5105 | |
5106 | Tys.push_back(Ty); |
5107 | } |
5108 | |
5109 | // Arguments. |
5110 | if (Modifier & VectorizeArgTypes) { |
5111 | int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; |
5112 | ArgType = llvm::VectorType::get(ArgType, Elts); |
5113 | } |
5114 | |
5115 | if (Modifier & (Add1ArgType | Add2ArgTypes)) |
5116 | Tys.push_back(ArgType); |
5117 | |
5118 | if (Modifier & Add2ArgTypes) |
5119 | Tys.push_back(ArgType); |
5120 | |
5121 | if (Modifier & InventFloatType) |
5122 | Tys.push_back(FloatTy); |
5123 | |
5124 | return CGM.getIntrinsic(IntrinsicID, Tys); |
5125 | } |
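 | // Illustrative note (not in the original source): for an entry such as |
 | //   NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType) |
 | // Tys ends up as { return type, argument type }, so the intrinsic is retrieved |
 | // as an overload on both types via CGM.getIntrinsic(IntrinsicID, Tys). |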
5126 | |
5127 | static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF, |
5128 | const NeonIntrinsicInfo &SISDInfo, |
5129 | SmallVectorImpl<Value *> &Ops, |
5130 | const CallExpr *E) { |
5131 | unsigned BuiltinID = SISDInfo.BuiltinID; |
5132 | unsigned int Int = SISDInfo.LLVMIntrinsic; |
5133 | unsigned Modifier = SISDInfo.TypeModifier; |
5134 | const char *s = SISDInfo.NameHint; |
5135 | |
5136 | switch (BuiltinID) { |
5137 | case NEON::BI__builtin_neon_vcled_s64: |
5138 | case NEON::BI__builtin_neon_vcled_u64: |
5139 | case NEON::BI__builtin_neon_vcles_f32: |
5140 | case NEON::BI__builtin_neon_vcled_f64: |
5141 | case NEON::BI__builtin_neon_vcltd_s64: |
5142 | case NEON::BI__builtin_neon_vcltd_u64: |
5143 | case NEON::BI__builtin_neon_vclts_f32: |
5144 | case NEON::BI__builtin_neon_vcltd_f64: |
5145 | case NEON::BI__builtin_neon_vcales_f32: |
5146 | case NEON::BI__builtin_neon_vcaled_f64: |
5147 | case NEON::BI__builtin_neon_vcalts_f32: |
5148 | case NEON::BI__builtin_neon_vcaltd_f64: |
5149 | // Only one direction of these comparisons actually exists: cmle is a cmge |
5150 | // with swapped operands. The table gives us the right intrinsic, but we |
5151 | // still need to do the swap. |
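 | // (Illustrative addition, not in the original source: vcle(a, b) is equivalent |
 | // to vcge(b, a), and vclt(a, b) to vcgt(b, a), so swapping Ops[0] and Ops[1] |
 | // lets the cmge/cmgt/facge/facgt intrinsics cover the 'le'/'lt' builtins.) |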
5152 | std::swap(Ops[0], Ops[1]); |
5153 | break; |
5154 | } |
5155 | |
5156 | assert(Int && "Generic code assumes a valid intrinsic"); |
5157 | |
5158 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
5159 | const Expr *Arg = E->getArg(0); |
5160 | llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); |
5161 | Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E); |
5162 | |
5163 | int j = 0; |
5164 | ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0); |
5165 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
5166 | ai != ae; ++ai, ++j) { |
5167 | llvm::Type *ArgTy = ai->getType(); |
5168 | if (Ops[j]->getType()->getPrimitiveSizeInBits() == |
5169 | ArgTy->getPrimitiveSizeInBits()) |
5170 | continue; |
5171 | |
5172 | assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); |
5173 | // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate |
5174 | // it before inserting. |
5175 | Ops[j] = |
5176 | CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType()); |
5177 | Ops[j] = |
5178 | CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0); |
5179 | } |
5180 | |
5181 | Value *Result = CGF.EmitNeonCall(F, Ops, s); |
5182 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
5183 | if (ResultType->getPrimitiveSizeInBits() < |
5184 | Result->getType()->getPrimitiveSizeInBits()) |
5185 | return CGF.Builder.CreateExtractElement(Result, C0); |
5186 | |
5187 | return CGF.Builder.CreateBitCast(Result, ResultType, s); |
5188 | } |
5189 | |
5190 | Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( |
5191 | unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, |
5192 | const char *NameHint, unsigned Modifier, const CallExpr *E, |
5193 | SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1, |
5194 | llvm::Triple::ArchType Arch) { |
5195 | // Get the last argument, which specifies the vector type. |
5196 | llvm::APSInt NeonTypeConst; |
5197 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
5198 | if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext())) |
5199 | return nullptr; |
5200 | |
5201 | // Determine the type of this overloaded NEON intrinsic. |
5202 | NeonTypeFlags Type(NeonTypeConst.getZExtValue()); |
5203 | bool Usgn = Type.isUnsigned(); |
5204 | bool Quad = Type.isQuad(); |
5205 | const bool HasLegalHalfType = getTarget().hasLegalHalfType(); |
5206 | |
5207 | llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType); |
5208 | llvm::Type *Ty = VTy; |
5209 | if (!Ty) |
5210 | return nullptr; |
5211 | |
5212 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
5213 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
5214 | }; |
5215 | |
5216 | unsigned Int = LLVMIntrinsic; |
5217 | if ((Modifier & UnsignedAlts) && !Usgn) |
5218 | Int = AltLLVMIntrinsic; |
5219 | |
5220 | switch (BuiltinID) { |
5221 | default: break; |
5222 | case NEON::BI__builtin_neon_vpadd_v: |
5223 | case NEON::BI__builtin_neon_vpaddq_v: |
5224 | // We don't allow fp/int overloading of intrinsics. |
5225 | if (VTy->getElementType()->isFloatingPointTy() && |
5226 | Int == Intrinsic::aarch64_neon_addp) |
5227 | Int = Intrinsic::aarch64_neon_faddp; |
5228 | break; |
5229 | case NEON::BI__builtin_neon_vabs_v: |
5230 | case NEON::BI__builtin_neon_vabsq_v: |
5231 | if (VTy->getElementType()->isFloatingPointTy()) |
5232 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs"); |
5233 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs"); |
5234 | case NEON::BI__builtin_neon_vaddhn_v: { |
5235 | llvm::VectorType *SrcTy = |
5236 | llvm::VectorType::getExtendedElementVectorType(VTy); |
5237 | |
5238 | // %sum = add <4 x i32> %lhs, %rhs |
5239 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
5240 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
5241 | Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn"); |
5242 | |
5243 | // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> |
5244 | Constant *ShiftAmt = |
5245 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
5246 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn"); |
5247 | |
5248 | // %res = trunc <4 x i32> %high to <4 x i16> |
5249 | return Builder.CreateTrunc(Ops[0], VTy, "vaddhn"); |
5250 | } |
5251 | case NEON::BI__builtin_neon_vcale_v: |
5252 | case NEON::BI__builtin_neon_vcaleq_v: |
5253 | case NEON::BI__builtin_neon_vcalt_v: |
5254 | case NEON::BI__builtin_neon_vcaltq_v: |
5255 | std::swap(Ops[0], Ops[1]); |
5256 | LLVM_FALLTHROUGH; |
5257 | case NEON::BI__builtin_neon_vcage_v: |
5258 | case NEON::BI__builtin_neon_vcageq_v: |
5259 | case NEON::BI__builtin_neon_vcagt_v: |
5260 | case NEON::BI__builtin_neon_vcagtq_v: { |
5261 | llvm::Type *Ty; |
5262 | switch (VTy->getScalarSizeInBits()) { |
5263 | default: llvm_unreachable("unexpected type"); |
5264 | case 32: |
5265 | Ty = FloatTy; |
5266 | break; |
5267 | case 64: |
5268 | Ty = DoubleTy; |
5269 | break; |
5270 | case 16: |
5271 | Ty = HalfTy; |
5272 | break; |
5273 | } |
5274 | llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements()); |
5275 | llvm::Type *Tys[] = { VTy, VecFlt }; |
5276 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
5277 | return EmitNeonCall(F, Ops, NameHint); |
5278 | } |
5279 | case NEON::BI__builtin_neon_vceqz_v: |
5280 | case NEON::BI__builtin_neon_vceqzq_v: |
5281 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ, |
5282 | ICmpInst::ICMP_EQ, "vceqz"); |
5283 | case NEON::BI__builtin_neon_vcgez_v: |
5284 | case NEON::BI__builtin_neon_vcgezq_v: |
5285 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE, |
5286 | ICmpInst::ICMP_SGE, "vcgez"); |
5287 | case NEON::BI__builtin_neon_vclez_v: |
5288 | case NEON::BI__builtin_neon_vclezq_v: |
5289 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE, |
5290 | ICmpInst::ICMP_SLE, "vclez"); |
5291 | case NEON::BI__builtin_neon_vcgtz_v: |
5292 | case NEON::BI__builtin_neon_vcgtzq_v: |
5293 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT, |
5294 | ICmpInst::ICMP_SGT, "vcgtz"); |
5295 | case NEON::BI__builtin_neon_vcltz_v: |
5296 | case NEON::BI__builtin_neon_vcltzq_v: |
5297 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT, |
5298 | ICmpInst::ICMP_SLT, "vcltz"); |
5299 | case NEON::BI__builtin_neon_vclz_v: |
5300 | case NEON::BI__builtin_neon_vclzq_v: |
5301 | // We generate a target-independent intrinsic, which needs a second argument |
5302 | // indicating whether or not clz of zero is undefined; on ARM it isn't. |
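 | // Illustrative note (not in the original source): the ARM CLZ instruction is |
 | // defined for a zero input (it returns the operand width), so the target |
 | // reports isCLZForZeroUndef() == false and the call below becomes |
 | // llvm.ctlz(x, i1 false). |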
5303 | Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef())); |
5304 | break; |
5305 | case NEON::BI__builtin_neon_vcvt_f32_v: |
5306 | case NEON::BI__builtin_neon_vcvtq_f32_v: |
5307 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5308 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), |
5309 | HasLegalHalfType); |
5310 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
5311 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
5312 | case NEON::BI__builtin_neon_vcvt_f16_v: |
5313 | case NEON::BI__builtin_neon_vcvtq_f16_v: |
5314 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5315 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), |
5316 | HasLegalHalfType); |
5317 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
5318 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
5319 | case NEON::BI__builtin_neon_vcvt_n_f16_v: |
5320 | case NEON::BI__builtin_neon_vcvt_n_f32_v: |
5321 | case NEON::BI__builtin_neon_vcvt_n_f64_v: |
5322 | case NEON::BI__builtin_neon_vcvtq_n_f16_v: |
5323 | case NEON::BI__builtin_neon_vcvtq_n_f32_v: |
5324 | case NEON::BI__builtin_neon_vcvtq_n_f64_v: { |
5325 | llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty }; |
5326 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
5327 | Function *F = CGM.getIntrinsic(Int, Tys); |
5328 | return EmitNeonCall(F, Ops, "vcvt_n"); |
5329 | } |
5330 | case NEON::BI__builtin_neon_vcvt_n_s16_v: |
5331 | case NEON::BI__builtin_neon_vcvt_n_s32_v: |
5332 | case NEON::BI__builtin_neon_vcvt_n_u16_v: |
5333 | case NEON::BI__builtin_neon_vcvt_n_u32_v: |
5334 | case NEON::BI__builtin_neon_vcvt_n_s64_v: |
5335 | case NEON::BI__builtin_neon_vcvt_n_u64_v: |
5336 | case NEON::BI__builtin_neon_vcvtq_n_s16_v: |
5337 | case NEON::BI__builtin_neon_vcvtq_n_s32_v: |
5338 | case NEON::BI__builtin_neon_vcvtq_n_u16_v: |
5339 | case NEON::BI__builtin_neon_vcvtq_n_u32_v: |
5340 | case NEON::BI__builtin_neon_vcvtq_n_s64_v: |
5341 | case NEON::BI__builtin_neon_vcvtq_n_u64_v: { |
5342 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
5343 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
5344 | return EmitNeonCall(F, Ops, "vcvt_n"); |
5345 | } |
5346 | case NEON::BI__builtin_neon_vcvt_s32_v: |
5347 | case NEON::BI__builtin_neon_vcvt_u32_v: |
5348 | case NEON::BI__builtin_neon_vcvt_s64_v: |
5349 | case NEON::BI__builtin_neon_vcvt_u64_v: |
5350 | case NEON::BI__builtin_neon_vcvt_s16_v: |
5351 | case NEON::BI__builtin_neon_vcvt_u16_v: |
5352 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
5353 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
5354 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
5355 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
5356 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
5357 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
5358 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); |
5359 | return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") |
5360 | : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); |
5361 | } |
5362 | case NEON::BI__builtin_neon_vcvta_s16_v: |
5363 | case NEON::BI__builtin_neon_vcvta_s32_v: |
5364 | case NEON::BI__builtin_neon_vcvta_s64_v: |
5365 | case NEON::BI__builtin_neon_vcvta_u16_v: |
5366 | case NEON::BI__builtin_neon_vcvta_u32_v: |
5367 | case NEON::BI__builtin_neon_vcvta_u64_v: |
5368 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
5369 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
5370 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
5371 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
5372 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
5373 | case NEON::BI__builtin_neon_vcvtaq_u64_v: |
5374 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
5375 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
5376 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
5377 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
5378 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
5379 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
5380 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
5381 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
5382 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
5383 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
5384 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
5385 | case NEON::BI__builtin_neon_vcvtnq_u64_v: |
5386 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
5387 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
5388 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
5389 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
5390 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
5391 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
5392 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
5393 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
5394 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
5395 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
5396 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
5397 | case NEON::BI__builtin_neon_vcvtpq_u64_v: |
5398 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
5399 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
5400 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
5401 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
5402 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
5403 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
5404 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
5405 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
5406 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
5407 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
5408 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
5409 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
5410 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
5411 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
5412 | } |
5413 | case NEON::BI__builtin_neon_vext_v: |
5414 | case NEON::BI__builtin_neon_vextq_v: { |
5415 | int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); |
5416 | SmallVector<uint32_t, 16> Indices; |
5417 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
5418 | Indices.push_back(i+CV); |
5419 | |
5420 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5421 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
5422 | return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext"); |
5423 | } |
5424 | case NEON::BI__builtin_neon_vfma_v: |
5425 | case NEON::BI__builtin_neon_vfmaq_v: { |
5426 | Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty); |
5427 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5428 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
5429 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
5430 | |
5431 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
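 | // Illustrative note (not in the original source): vfma(acc, a, b) computes |
 | // acc + a * b, while llvm.fma(x, y, z) computes x * y + z, hence the |
 | // reordered operands in the call below. |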
5432 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); |
5433 | } |
5434 | case NEON::BI__builtin_neon_vld1_v: |
5435 | case NEON::BI__builtin_neon_vld1q_v: { |
5436 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
5437 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
5438 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1"); |
5439 | } |
5440 | case NEON::BI__builtin_neon_vld1_x2_v: |
5441 | case NEON::BI__builtin_neon_vld1q_x2_v: |
5442 | case NEON::BI__builtin_neon_vld1_x3_v: |
5443 | case NEON::BI__builtin_neon_vld1q_x3_v: |
5444 | case NEON::BI__builtin_neon_vld1_x4_v: |
5445 | case NEON::BI__builtin_neon_vld1q_x4_v: { |
5446 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType()); |
5447 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
5448 | llvm::Type *Tys[2] = { VTy, PTy }; |
5449 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
5450 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN"); |
5451 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
5452 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5453 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
5454 | } |
5455 | case NEON::BI__builtin_neon_vld2_v: |
5456 | case NEON::BI__builtin_neon_vld2q_v: |
5457 | case NEON::BI__builtin_neon_vld3_v: |
5458 | case NEON::BI__builtin_neon_vld3q_v: |
5459 | case NEON::BI__builtin_neon_vld4_v: |
5460 | case NEON::BI__builtin_neon_vld4q_v: |
5461 | case NEON::BI__builtin_neon_vld2_dup_v: |
5462 | case NEON::BI__builtin_neon_vld2q_dup_v: |
5463 | case NEON::BI__builtin_neon_vld3_dup_v: |
5464 | case NEON::BI__builtin_neon_vld3q_dup_v: |
5465 | case NEON::BI__builtin_neon_vld4_dup_v: |
5466 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
5467 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
5468 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
5469 | Value *Align = getAlignmentValue32(PtrOp1); |
5470 | Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint); |
5471 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
5472 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5473 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
5474 | } |
5475 | case NEON::BI__builtin_neon_vld1_dup_v: |
5476 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
5477 | Value *V = UndefValue::get(Ty); |
5478 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
5479 | PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty); |
5480 | LoadInst *Ld = Builder.CreateLoad(PtrOp0); |
5481 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
5482 | Ops[0] = Builder.CreateInsertElement(V, Ld, CI); |
5483 | return EmitNeonSplat(Ops[0], CI); |
5484 | } |
5485 | case NEON::BI__builtin_neon_vld2_lane_v: |
5486 | case NEON::BI__builtin_neon_vld2q_lane_v: |
5487 | case NEON::BI__builtin_neon_vld3_lane_v: |
5488 | case NEON::BI__builtin_neon_vld3q_lane_v: |
5489 | case NEON::BI__builtin_neon_vld4_lane_v: |
5490 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
5491 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
5492 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
5493 | for (unsigned I = 2; I < Ops.size() - 1; ++I) |
5494 | Ops[I] = Builder.CreateBitCast(Ops[I], Ty); |
5495 | Ops.push_back(getAlignmentValue32(PtrOp1)); |
5496 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint); |
5497 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
5498 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5499 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
5500 | } |
5501 | case NEON::BI__builtin_neon_vmovl_v: { |
5502 | llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy); |
5503 | Ops[0] = Builder.CreateBitCast(Ops[0], DTy); |
5504 | if (Usgn) |
5505 | return Builder.CreateZExt(Ops[0], Ty, "vmovl"); |
5506 | return Builder.CreateSExt(Ops[0], Ty, "vmovl"); |
5507 | } |
5508 | case NEON::BI__builtin_neon_vmovn_v: { |
5509 | llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy); |
5510 | Ops[0] = Builder.CreateBitCast(Ops[0], QTy); |
5511 | return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); |
5512 | } |
5513 | case NEON::BI__builtin_neon_vmull_v: |
5514 | // FIXME: the integer vmull operations could be emitted in terms of pure |
5515 | // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of |
5516 | // hoisting the exts outside loops. Until global ISel comes along that can |
5517 | // see through such movement, this leads to bad CodeGen. So we need an |
5518 | // intrinsic for now. |
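 | // Illustrative sketch of the pure-IR form mentioned above (not in the original |
 | // source): widen both operands with sext/zext to the result vector type, then |
 | // emit a plain 'mul'; the target intrinsic is used instead to avoid the |
 | // hoisting problem described in the FIXME. |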
5519 | Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; |
5520 | Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; |
5521 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
5522 | case NEON::BI__builtin_neon_vpadal_v: |
5523 | case NEON::BI__builtin_neon_vpadalq_v: { |
5524 | // The source operand type has twice as many elements of half the size. |
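 | // Illustrative example (not in the original source): for a <4 x i32> result |
 | // type, NarrowTy below is <8 x i16>. |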
5525 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
5526 | llvm::Type *EltTy = |
5527 | llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
5528 | llvm::Type *NarrowTy = |
5529 | llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); |
5530 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
5531 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
5532 | } |
5533 | case NEON::BI__builtin_neon_vpaddl_v: |
5534 | case NEON::BI__builtin_neon_vpaddlq_v: { |
5535 | // The source operand type has twice as many elements of half the size. |
5536 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
5537 | llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
5538 | llvm::Type *NarrowTy = |
5539 | llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); |
5540 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
5541 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); |
5542 | } |
5543 | case NEON::BI__builtin_neon_vqdmlal_v: |
5544 | case NEON::BI__builtin_neon_vqdmlsl_v: { |
5545 | SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end()); |
5546 | Ops[1] = |
5547 | EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal"); |
5548 | Ops.resize(2); |
5549 | return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint); |
5550 | } |
5551 | case NEON::BI__builtin_neon_vqshl_n_v: |
5552 | case NEON::BI__builtin_neon_vqshlq_n_v: |
5553 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", |
5554 | 1, false); |
5555 | case NEON::BI__builtin_neon_vqshlu_n_v: |
5556 | case NEON::BI__builtin_neon_vqshluq_n_v: |
5557 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", |
5558 | 1, false); |
5559 | case NEON::BI__builtin_neon_vrecpe_v: |
5560 | case NEON::BI__builtin_neon_vrecpeq_v: |
5561 | case NEON::BI__builtin_neon_vrsqrte_v: |
5562 | case NEON::BI__builtin_neon_vrsqrteq_v: |
5563 | Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; |
5564 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
5565 | case NEON::BI__builtin_neon_vrndi_v: |
5566 | case NEON::BI__builtin_neon_vrndiq_v: |
5567 | Int = Intrinsic::nearbyint; |
5568 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
5569 | case NEON::BI__builtin_neon_vrshr_n_v: |
5570 | case NEON::BI__builtin_neon_vrshrq_n_v: |
5571 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", |
5572 | 1, true); |
5573 | case NEON::BI__builtin_neon_vshl_n_v: |
5574 | case NEON::BI__builtin_neon_vshlq_n_v: |
5575 | Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); |
5576 | return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1], |
5577 | "vshl_n"); |
5578 | case NEON::BI__builtin_neon_vshll_n_v: { |
5579 | llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy); |
5580 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
5581 | if (Usgn) |
5582 | Ops[0] = Builder.CreateZExt(Ops[0], VTy); |
5583 | else |
5584 | Ops[0] = Builder.CreateSExt(Ops[0], VTy); |
5585 | Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false); |
5586 | return Builder.CreateShl(Ops[0], Ops[1], "vshll_n"); |
5587 | } |
5588 | case NEON::BI__builtin_neon_vshrn_n_v: { |
5589 | llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy); |
5590 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
5591 | Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); |
5592 | if (Usgn) |
5593 | Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]); |
5594 | else |
5595 | Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]); |
5596 | return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n"); |
5597 | } |
5598 | case NEON::BI__builtin_neon_vshr_n_v: |
5599 | case NEON::BI__builtin_neon_vshrq_n_v: |
5600 | return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n"); |
5601 | case NEON::BI__builtin_neon_vst1_v: |
5602 | case NEON::BI__builtin_neon_vst1q_v: |
5603 | case NEON::BI__builtin_neon_vst2_v: |
5604 | case NEON::BI__builtin_neon_vst2q_v: |
5605 | case NEON::BI__builtin_neon_vst3_v: |
5606 | case NEON::BI__builtin_neon_vst3q_v: |
5607 | case NEON::BI__builtin_neon_vst4_v: |
5608 | case NEON::BI__builtin_neon_vst4q_v: |
5609 | case NEON::BI__builtin_neon_vst2_lane_v: |
5610 | case NEON::BI__builtin_neon_vst2q_lane_v: |
5611 | case NEON::BI__builtin_neon_vst3_lane_v: |
5612 | case NEON::BI__builtin_neon_vst3q_lane_v: |
5613 | case NEON::BI__builtin_neon_vst4_lane_v: |
5614 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
5615 | llvm::Type *Tys[] = {Int8PtrTy, Ty}; |
5616 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
5617 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, ""); |
5618 | } |
5619 | case NEON::BI__builtin_neon_vst1_x2_v: |
5620 | case NEON::BI__builtin_neon_vst1q_x2_v: |
5621 | case NEON::BI__builtin_neon_vst1_x3_v: |
5622 | case NEON::BI__builtin_neon_vst1q_x3_v: |
5623 | case NEON::BI__builtin_neon_vst1_x4_v: |
5624 | case NEON::BI__builtin_neon_vst1q_x4_v: { |
5625 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType()); |
5626 | // TODO: Currently in AArch32 mode the pointer operand comes first, whereas |
5627 | // in AArch64 it comes last. We may want to standardize on one or the other. |
5628 | if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) { |
5629 | llvm::Type *Tys[2] = { VTy, PTy }; |
5630 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
5631 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
5632 | } |
5633 | llvm::Type *Tys[2] = { PTy, VTy }; |
5634 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
5635 | } |
5636 | case NEON::BI__builtin_neon_vsubhn_v: { |
5637 | llvm::VectorType *SrcTy = |
5638 | llvm::VectorType::getExtendedElementVectorType(VTy); |
5639 | |
5640 | // %diff = sub <4 x i32> %lhs, %rhs |
5641 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
5642 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
5643 | Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn"); |
5644 | |
5645 | // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16> |
5646 | Constant *ShiftAmt = |
5647 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
5648 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn"); |
5649 | |
5650 | // %res = trunc <4 x i32> %high to <4 x i16> |
5651 | return Builder.CreateTrunc(Ops[0], VTy, "vsubhn"); |
5652 | } |
5653 | case NEON::BI__builtin_neon_vtrn_v: |
5654 | case NEON::BI__builtin_neon_vtrnq_v: { |
5655 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
5656 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
5657 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
5658 | Value *SV = nullptr; |
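 | // vtrn returns two vectors through the pointer in Ops[0]; each loop |
 | // iteration builds one half of the transpose with a shuffle and stores |
 | // it at index vi. |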
5659 | |
5660 | for (unsigned vi = 0; vi != 2; ++vi) { |
5661 | SmallVector<uint32_t, 16> Indices; |
5662 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
5663 | Indices.push_back(i+vi); |
5664 | Indices.push_back(i+e+vi); |
5665 | } |
5666 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
5667 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
5668 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
5669 | } |
5670 | return SV; |
5671 | } |
5672 | case NEON::BI__builtin_neon_vtst_v: |
5673 | case NEON::BI__builtin_neon_vtstq_v: { |
5674 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
5675 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
5676 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
5677 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
5678 | ConstantAggregateZero::get(Ty)); |
5679 | return Builder.CreateSExt(Ops[0], Ty, "vtst"); |
5680 | } |
5681 | case NEON::BI__builtin_neon_vuzp_v: |
5682 | case NEON::BI__builtin_neon_vuzpq_v: { |
5683 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
5684 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
5685 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
5686 | Value *SV = nullptr; |
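 | // The two shuffle masks select the even-indexed and odd-indexed |
 | // elements of the concatenated inputs, de-interleaving them. |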
5687 | |
5688 | for (unsigned vi = 0; vi != 2; ++vi) { |
5689 | SmallVector<uint32_t, 16> Indices; |
5690 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
5691 | Indices.push_back(2*i+vi); |
5692 | |
5693 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
5694 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
5695 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
5696 | } |
5697 | return SV; |
5698 | } |
5699 | case NEON::BI__builtin_neon_vzip_v: |
5700 | case NEON::BI__builtin_neon_vzipq_v: { |
5701 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
5702 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
5703 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
5704 | Value *SV = nullptr; |
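 | // E.g. for a 4-element vector this builds masks {0,4,1,5} and |
 | // {2,6,3,7}: the low and high halves of the interleaved result. |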
5705 | |
5706 | for (unsigned vi = 0; vi != 2; ++vi) { |
5707 | SmallVector<uint32_t, 16> Indices; |
5708 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
5709 | Indices.push_back((i + vi*e) >> 1); |
5710 | Indices.push_back(((i + vi*e) >> 1)+e); |
5711 | } |
5712 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
5713 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
5714 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
5715 | } |
5716 | return SV; |
5717 | } |
5718 | case NEON::BI__builtin_neon_vdot_v: |
5719 | case NEON::BI__builtin_neon_vdotq_v: { |
5720 | llvm::Type *InputTy = |
5721 | llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
5722 | llvm::Type *Tys[2] = { Ty, InputTy }; |
5723 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
5724 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot"); |
5725 | } |
5726 | case NEON::BI__builtin_neon_vfmlal_low_v: |
5727 | case NEON::BI__builtin_neon_vfmlalq_low_v: { |
5728 | llvm::Type *InputTy = |
5729 | llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
5730 | llvm::Type *Tys[2] = { Ty, InputTy }; |
5731 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low"); |
5732 | } |
5733 | case NEON::BI__builtin_neon_vfmlsl_low_v: |
5734 | case NEON::BI__builtin_neon_vfmlslq_low_v: { |
5735 | llvm::Type *InputTy = |
5736 | llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
5737 | llvm::Type *Tys[2] = { Ty, InputTy }; |
5738 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low"); |
5739 | } |
5740 | case NEON::BI__builtin_neon_vfmlal_high_v: |
5741 | case NEON::BI__builtin_neon_vfmlalq_high_v: { |
5742 | llvm::Type *InputTy = |
5743 | llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
5744 | llvm::Type *Tys[2] = { Ty, InputTy }; |
5745 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high"); |
5746 | } |
5747 | case NEON::BI__builtin_neon_vfmlsl_high_v: |
5748 | case NEON::BI__builtin_neon_vfmlslq_high_v: { |
5749 | llvm::Type *InputTy = |
5750 | llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
5751 | llvm::Type *Tys[2] = { Ty, InputTy }; |
5752 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high"); |
5753 | } |
5754 | } |
5755 | |
5756 | assert(Int && "Expected valid intrinsic number"); |
5757 | |
5758 | // Determine the type(s) of this overloaded AArch64 intrinsic. |
5759 | Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E); |
5760 | |
5761 | Value *Result = EmitNeonCall(F, Ops, NameHint); |
5762 | llvm::Type *ResultType = ConvertType(E->getType()); |
5763 | // An AArch64 intrinsic result that is a one-element vector is cast back |
5764 | // to the scalar type expected by the builtin. |
5765 | return Builder.CreateBitCast(Result, ResultType, NameHint); |
5766 | } |
5767 | |
5768 | Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr( |
5769 | Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp, |
5770 | const CmpInst::Predicate Ip, const Twine &Name) { |
5771 | llvm::Type *OTy = Op->getType(); |
5772 | |
5773 | // FIXME: this is utterly horrific. We should not be looking at previous |
5774 | // codegen context to find out what needs doing. Unfortunately TableGen |
5775 | // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32 |
5776 | // (etc). |
5777 | if (BitCastInst *BI = dyn_cast<BitCastInst>(Op)) |
5778 | OTy = BI->getOperand(0)->getType(); |
5779 | |
5780 | Op = Builder.CreateBitCast(Op, OTy); |
5781 | if (OTy->getScalarType()->isFloatingPointTy()) { |
5782 | Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy)); |
5783 | } else { |
5784 | Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy)); |
5785 | } |
5786 | return Builder.CreateSExt(Op, Ty, Name); |
5787 | } |
5788 | |
5789 | static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
5790 | Value *ExtOp, Value *IndexOp, |
5791 | llvm::Type *ResTy, unsigned IntID, |
5792 | const char *Name) { |
5793 | SmallVector<Value *, 2> TblOps; |
5794 | if (ExtOp) |
5795 | TblOps.push_back(ExtOp); |
5796 | |
5797 | // Build a vector containing sequential numbers like (0, 1, 2, ..., 15). |
5798 | SmallVector<uint32_t, 16> Indices; |
5799 | llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType()); |
5800 | for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { |
5801 | Indices.push_back(2*i); |
5802 | Indices.push_back(2*i+1); |
5803 | } |
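 | // Concatenate the 64-bit table operands pairwise via shuffles; the |
 | // AArch64 tbl/tbx intrinsics operate on 128-bit table vectors. |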
5804 | |
5805 | int PairPos = 0, End = Ops.size() - 1; |
5806 | while (PairPos < End) { |
5807 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
5808 | Ops[PairPos+1], Indices, |
5809 | Name)); |
5810 | PairPos += 2; |
5811 | } |
5812 | |
5813 | // If there's an odd number of 64-bit lookup tables, fill the high 64 bits |
5814 | // of the last 128-bit lookup table with zero. |
5815 | if (PairPos == End) { |
5816 | Value *ZeroTbl = ConstantAggregateZero::get(TblTy); |
5817 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
5818 | ZeroTbl, Indices, Name)); |
5819 | } |
5820 | |
5821 | Function *TblF; |
5822 | TblOps.push_back(IndexOp); |
5823 | TblF = CGF.CGM.getIntrinsic(IntID, ResTy); |
5824 | |
5825 | return CGF.EmitNeonCall(TblF, TblOps, Name); |
5826 | } |
5827 | |
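 | /// Map the ARM hint builtins (nop, yield/__yield, wfe, wfi, sev, sevl) |
 | /// to the llvm.arm.hint intrinsic with the matching immediate, or |
 | /// return nullptr if BuiltinID is not a hint builtin. |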
5828 | Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { |
5829 | unsigned Value; |
5830 | switch (BuiltinID) { |
5831 | default: |
5832 | return nullptr; |
5833 | case ARM::BI__builtin_arm_nop: |
5834 | Value = 0; |
5835 | break; |
5836 | case ARM::BI__builtin_arm_yield: |
5837 | case ARM::BI__yield: |
5838 | Value = 1; |
5839 | break; |
5840 | case ARM::BI__builtin_arm_wfe: |
5841 | case ARM::BI__wfe: |
5842 | Value = 2; |
5843 | break; |
5844 | case ARM::BI__builtin_arm_wfi: |
5845 | case ARM::BI__wfi: |
5846 | Value = 3; |
5847 | break; |
5848 | case ARM::BI__builtin_arm_sev: |
5849 | case ARM::BI__sev: |
5850 | Value = 4; |
5851 | break; |
5852 | case ARM::BI__builtin_arm_sevl: |
5853 | case ARM::BI__sevl: |
5854 | Value = 5; |
5855 | break; |
5856 | } |
5857 | |
5858 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint), |
5859 | llvm::ConstantInt::get(Int32Ty, Value)); |
5860 | } |
5861 | |
5862 | // Generates the IR for the read/write special register builtin. |
5863 | // ValueType is the type of the value that is to be written or read; |
5864 | // RegisterType is the type of the register being written to or read from. |
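 | // When SysReg is empty, the register name is taken from the string |
 | // literal passed as the builtin's first argument. |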
5865 | static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF, |
5866 | const CallExpr *E, |
5867 | llvm::Type *RegisterType, |
5868 | llvm::Type *ValueType, |
5869 | bool IsRead, |
5870 | StringRef SysReg = "") { |
5871 | // The read and write register intrinsics only support 32- and 64-bit operations. |
5872 | assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64)) |
5873 | && "Unsupported size for register."); |
5874 | |
5875 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
5876 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
5877 | LLVMContext &Context = CGM.getLLVMContext(); |
5878 | |
5879 | if (SysReg.empty()) { |
5880 | const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); |
5881 | SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString(); |
5882 | } |
5883 | |
5884 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; |
5885 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
5886 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
5887 | |
5888 | llvm::Type *Types[] = { RegisterType }; |
5889 | |
5890 | bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); |
5891 | assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) |
5892 | && "Can't fit 64-bit value in 32-bit register"); |
5893 | |
5894 | if (IsRead) { |
5895 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
5896 | llvm::Value *Call = Builder.CreateCall(F, Metadata); |
5897 | |
5898 | if (MixedTypes) |
5899 | // Read into 64 bit register and then truncate result to 32 bit. |
5900 | return Builder.CreateTrunc(Call, ValueType); |
5901 | |
5902 | if (ValueType->isPointerTy()) |
5903 | // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*). |
5904 | return Builder.CreateIntToPtr(Call, ValueType); |
5905 | |
5906 | return Call; |
5907 | } |
5908 | |
5909 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
5910 | llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); |
5911 | if (MixedTypes) { |
5912 | // Extend 32 bit write value to 64 bit to pass to write. |
5913 | ArgValue = Builder.CreateZExt(ArgValue, RegisterType); |
5914 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
5915 | } |
5916 | |
5917 | if (ValueType->isPointerTy()) { |
5918 | // Have a VoidPtrTy ArgValue but need an i32/i64 to pass to the write. |
5919 | ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType); |
5920 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
5921 | } |
5922 | |
5923 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
5924 | } |
5925 | |
5926 | /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra |
5927 | /// argument that specifies the vector type. |
5928 | static bool HasExtraNeonArgument(unsigned BuiltinID) { |
5929 | switch (BuiltinID) { |
5930 | default: break; |
5931 | case NEON::BI__builtin_neon_vget_lane_i8: |
5932 | case NEON::BI__builtin_neon_vget_lane_i16: |
5933 | case NEON::BI__builtin_neon_vget_lane_i32: |
5934 | case NEON::BI__builtin_neon_vget_lane_i64: |
5935 | case NEON::BI__builtin_neon_vget_lane_f32: |
5936 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
5937 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
5938 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
5939 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
5940 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
5941 | case NEON::BI__builtin_neon_vset_lane_i8: |
5942 | case NEON::BI__builtin_neon_vset_lane_i16: |
5943 | case NEON::BI__builtin_neon_vset_lane_i32: |
5944 | case NEON::BI__builtin_neon_vset_lane_i64: |
5945 | case NEON::BI__builtin_neon_vset_lane_f32: |
5946 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
5947 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
5948 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
5949 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
5950 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
5951 | case NEON::BI__builtin_neon_vsha1h_u32: |
5952 | case NEON::BI__builtin_neon_vsha1cq_u32: |
5953 | case NEON::BI__builtin_neon_vsha1pq_u32: |
5954 | case NEON::BI__builtin_neon_vsha1mq_u32: |
5955 | case clang::ARM::BI_MoveToCoprocessor: |
5956 | case clang::ARM::BI_MoveToCoprocessor2: |
5957 | return false; |
5958 | } |
5959 | return true; |
5960 | } |
5961 | |
5962 | Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, |
5963 | const CallExpr *E, |
5964 | llvm::Triple::ArchType Arch) { |
5965 | if (auto Hint = GetValueForARMHint(BuiltinID)) |
5966 | return Hint; |
5967 | |
5968 | if (BuiltinID == ARM::BI__emit) { |
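 | // __emit encodes a constant instruction word directly as inline |
 | // assembly: the Thumb .inst.n directive for 16-bit encodings, .inst |
 | // for 32-bit ARM encodings. |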
5969 | bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb; |
5970 | llvm::FunctionType *FTy = |
5971 | llvm::FunctionType::get(VoidTy, /*Variadic=*/false); |
5972 | |
5973 | Expr::EvalResult Result; |
5974 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
5975 | llvm_unreachable("Sema will ensure that the parameter is constant"); |
5976 | |
5977 | llvm::APSInt Value = Result.Val.getInt(); |
5978 | uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue(); |
5979 | |
5980 | llvm::InlineAsm *Emit = |
5981 | IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "", |
5982 | /*SideEffects=*/true) |
5983 | : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", |
5984 | /*SideEffects=*/true); |
5985 | |
5986 | return Builder.CreateCall(Emit); |
5987 | } |
5988 | |
5989 | if (BuiltinID == ARM::BI__builtin_arm_dbg) { |
5990 | Value *Option = EmitScalarExpr(E->getArg(0)); |
5991 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option); |
5992 | } |
5993 | |
5994 | if (BuiltinID == ARM::BI__builtin_arm_prefetch) { |
5995 | Value *Address = EmitScalarExpr(E->getArg(0)); |
5996 | Value *RW = EmitScalarExpr(E->getArg(1)); |
5997 | Value *IsData = EmitScalarExpr(E->getArg(2)); |
5998 | |
5999 | // Locality is not supported on the ARM target. |
6000 | Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); |
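 | // Locality 3 is the strongest temporal-locality hint llvm.prefetch accepts. |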
6001 | |
6002 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch); |
6003 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
6004 | } |
6005 | |
6006 | if (BuiltinID == ARM::BI__builtin_arm_rbit) { |
6007 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
6008 | return Builder.CreateCall( |
6009 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
6010 | } |
6011 | |
6012 | if (BuiltinID == ARM::BI__clear_cache) { |
6013 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); |
6014 | const FunctionDecl *FD = E->getDirectCallee(); |
6015 | Value *Ops[2]; |
6016 | for (unsigned i = 0; i < 2; i++) |
6017 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
6018 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
6019 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
6020 | StringRef Name = FD->getName(); |
6021 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
6022 | } |
6023 | |
6024 | if (BuiltinID == ARM::BI__builtin_arm_mcrr || |
6025 | BuiltinID == ARM::BI__builtin_arm_mcrr2) { |
6026 | Function *F; |
6027 | |
6028 | switch (BuiltinID) { |
6029 | default: llvm_unreachable("unexpected builtin"); |
6030 | case ARM::BI__builtin_arm_mcrr: |
6031 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr); |
6032 | break; |
6033 | case ARM::BI__builtin_arm_mcrr2: |
6034 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); |
6035 | break; |
6036 | } |
6037 | |
6038 | // The MCRR{2} instruction has 5 operands, |
6039 | // but the builtin takes only 4: Rt and Rt2 |
6040 | // are passed to the builtin as a single |
6041 | // unsigned 64-bit integer, which is split |
6042 | // here into the two 32-bit values that the |
6043 | // LLVM intrinsic expects. |
6044 | |
6045 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
6046 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
6047 | Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); |
6048 | Value *CRm = EmitScalarExpr(E->getArg(3)); |
6049 | |
6050 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
6051 | Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty); |
6052 | Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1); |
6053 | Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty); |
6054 | |
6055 | return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm}); |
6056 | } |
6057 | |
6058 | if (BuiltinID == ARM::BI__builtin_arm_mrrc || |
6059 | BuiltinID == ARM::BI__builtin_arm_mrrc2) { |
6060 | Function *F; |
6061 | |
6062 | switch (BuiltinID) { |
6063 | default: llvm_unreachable("unexpected builtin"); |
6064 | case ARM::BI__builtin_arm_mrrc: |
6065 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc); |
6066 | break; |
6067 | case ARM::BI__builtin_arm_mrrc2: |
6068 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); |
6069 | break; |
6070 | } |
6071 | |
6072 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
6073 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
6074 | Value *CRm = EmitScalarExpr(E->getArg(2)); |
6075 | Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm}); |
6076 | |
6077 | // Returns an unsigned 64-bit integer, represented |
6078 | // as two 32-bit integers. |
6079 | |
6080 | Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1); |
6081 | Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0); |
6082 | Rt = Builder.CreateZExt(Rt, Int64Ty); |
6083 | Rt1 = Builder.CreateZExt(Rt1, Int64Ty); |
6084 | |
6085 | Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32); |
6086 | RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true); |
6087 | RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1); |
6088 | |
6089 | return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); |
6090 | } |
6091 | |
6092 | if (BuiltinID == ARM::BI__builtin_arm_ldrexd || |
6093 | ((BuiltinID == ARM::BI__builtin_arm_ldrex || |
6094 | BuiltinID == ARM::BI__builtin_arm_ldaex) && |
6095 | getContext().getTypeSize(E->getType()) == 64) || |
6096 | BuiltinID == ARM::BI__ldrexd) { |
6097 | Function *F; |
6098 | |
6099 | switch (BuiltinID) { |
6100 | default: llvm_unreachable("unexpected builtin"); |
6101 | case ARM::BI__builtin_arm_ldaex: |
6102 | F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); |
6103 | break; |
6104 | case ARM::BI__builtin_arm_ldrexd: |
6105 | case ARM::BI__builtin_arm_ldrex: |
6106 | case ARM::BI__ldrexd: |
6107 | F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); |
6108 | break; |
6109 | } |
6110 | |
6111 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
6112 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
6113 | "ldrexd"); |
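 | // The exclusive-load intrinsics return an {i32, i32} pair; recombine it |
 | // into a single 64-bit value, treating the second element as the high word. |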
6114 | |
6115 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
6116 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
6117 | Val0 = Builder.CreateZExt(Val0, Int64Ty); |
6118 | Val1 = Builder.CreateZExt(Val1, Int64Ty); |
6119 | |
6120 | Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); |
6121 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); |
6122 | Val = Builder.CreateOr(Val, Val1); |
6123 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
6124 | } |
6125 | |
6126 | if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
6127 | BuiltinID == ARM::BI__builtin_arm_ldaex) { |
6128 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
6129 | |
6130 | QualType Ty = E->getType(); |
6131 | llvm::Type *RealResTy = ConvertType(Ty); |
6132 | llvm::Type *PtrTy = llvm::IntegerType::get( |
6133 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); |
6134 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
6135 | |
6136 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex |
6137 | ? Intrinsic::arm_ldaex |
6138 | : Intrinsic::arm_ldrex, |
6139 | PtrTy); |
6140 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex"); |
6141 | |
6142 | if (RealResTy->isPointerTy()) |
6143 | return Builder.CreateIntToPtr(Val, RealResTy); |
6144 | else { |
6145 | llvm::Type *IntResTy = llvm::IntegerType::get( |
6146 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
6147 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); |
6148 | return Builder.CreateBitCast(Val, RealResTy); |
6149 | } |
6150 | } |
6151 | |
6152 | if (BuiltinID == ARM::BI__builtin_arm_strexd || |
6153 | ((BuiltinID == ARM::BI__builtin_arm_stlex || |
6154 | BuiltinID == ARM::BI__builtin_arm_strex) && |
6155 | getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { |
6156 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
6157 | ? Intrinsic::arm_stlexd |
6158 | : Intrinsic::arm_strexd); |
6159 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty); |
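 | // Split the 64-bit store value into two 32-bit halves by spilling it to |
 | // a temporary and reloading it as an {i32, i32} struct; the halves become |
 | // the intrinsic's first two operands. |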
6160 | |
6161 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
6162 | Value *Val = EmitScalarExpr(E->getArg(0)); |
6163 | Builder.CreateStore(Val, Tmp); |
6164 | |
6165 | Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy)); |
6166 | Val = Builder.CreateLoad(LdPtr); |
6167 | |
6168 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
6169 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
6170 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); |
6171 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); |
6172 | } |
6173 | |
6174 | if (BuiltinID == ARM::BI__builtin_arm_strex || |
6175 | BuiltinID == ARM::BI__builtin_arm_stlex) { |
6176 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
6177 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
6178 | |
6179 | QualType Ty = E->getArg(0)->getType(); |
6180 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
6181 | getContext().getTypeSize(Ty)); |
6182 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
6183 | |
6184 | if (StoreVal->getType()->isPointerTy()) |
6185 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty); |
6186 | else { |
6187 | llvm::Type *IntTy = llvm::IntegerType::get( |
6188 | getLLVMContext(), |
6189 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
6190 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
6191 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty); |
6192 | } |
6193 | |
6194 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
6195 | ? Intrinsic::arm_stlex |
6196 | : Intrinsic::arm_strex, |
6197 | StoreAddr->getType()); |
6198 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex"); |
6199 | } |
6200 | |
6201 | if (BuiltinID == ARM::BI__builtin_arm_clrex) { |
6202 | Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); |
6203 | return Builder.CreateCall(F); |
6204 | } |
6205 | |
6206 | // CRC32 |
6207 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
6208 | switch (BuiltinID) { |
6209 | case ARM::BI__builtin_arm_crc32b: |
6210 | CRCIntrinsicID = Intrinsic::arm_crc32b; break; |
6211 | case ARM::BI__builtin_arm_crc32cb: |
6212 | CRCIntrinsicID = Intrinsic::arm_crc32cb; break; |
6213 | case ARM::BI__builtin_arm_crc32h: |
6214 | CRCIntrinsicID = Intrinsic::arm_crc32h; break; |
6215 | case ARM::BI__builtin_arm_crc32ch: |
6216 | CRCIntrinsicID = Intrinsic::arm_crc32ch; break; |
6217 | case ARM::BI__builtin_arm_crc32w: |
6218 | case ARM::BI__builtin_arm_crc32d: |
6219 | CRCIntrinsicID = Intrinsic::arm_crc32w; break; |
6220 | case ARM::BI__builtin_arm_crc32cw: |
6221 | case ARM::BI__builtin_arm_crc32cd: |
6222 | CRCIntrinsicID = Intrinsic::arm_crc32cw; break; |
6223 | } |
6224 | |
6225 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
6226 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
6227 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
6228 | |
6229 | // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w |
6230 | // intrinsics, hence we need different codegen for these cases. |
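 | // That is, crc32{c,}d(a, b) becomes crc32{c,}w(crc32{c,}w(a, lo32(b)), hi32(b)). |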
6231 | if (BuiltinID == ARM::BI__builtin_arm_crc32d || |
6232 | BuiltinID == ARM::BI__builtin_arm_crc32cd) { |
6233 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
6234 | Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty); |
6235 | Value *Arg1b = Builder.CreateLShr(Arg1, C1); |
6236 | Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); |
6237 | |
6238 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
6239 | Value *Res = Builder.CreateCall(F, {Arg0, Arg1a}); |
6240 | return Builder.CreateCall(F, {Res, Arg1b}); |
6241 | } else { |
6242 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); |
6243 | |
6244 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
6245 | return Builder.CreateCall(F, {Arg0, Arg1}); |
6246 | } |
6247 | } |
6248 | |
6249 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
6250 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
6251 | BuiltinID == ARM::BI__builtin_arm_rsrp || |
6252 | BuiltinID == ARM::BI__builtin_arm_wsr || |
6253 | BuiltinID == ARM::BI__builtin_arm_wsr64 || |
6254 | BuiltinID == ARM::BI__builtin_arm_wsrp) { |
6255 | |
6256 | bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr || |
6257 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
6258 | BuiltinID == ARM::BI__builtin_arm_rsrp; |
6259 | |
6260 | bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp || |
6261 | BuiltinID == ARM::BI__builtin_arm_wsrp; |
6262 | |
6263 | bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 || |
6264 | BuiltinID == ARM::BI__builtin_arm_wsr64; |
6265 | |
6266 | llvm::Type *ValueType; |
6267 | llvm::Type *RegisterType; |
6268 | if (IsPointerBuiltin) { |
6269 | ValueType = VoidPtrTy; |
6270 | RegisterType = Int32Ty; |
6271 | } else if (Is64Bit) { |
6272 | ValueType = RegisterType = Int64Ty; |
6273 | } else { |
6274 | ValueType = RegisterType = Int32Ty; |
6275 | } |
6276 | |
6277 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead); |
6278 | } |
6279 | |
6280 | // Find out if any arguments are required to be integer constant |
6281 | // expressions. |
6282 | unsigned ICEArguments = 0; |
6283 | ASTContext::GetBuiltinTypeError Error; |
6284 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
6285 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
6286 | |
6287 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
6288 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
6289 | }; |
6290 | |
6291 | Address PtrOp0 = Address::invalid(); |
6292 | Address PtrOp1 = Address::invalid(); |
6293 | SmallVector<Value*, 4> Ops; |
6294 | bool HasExtraArg = HasExtraNeonArgument(BuiltinID); |
6295 | unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); |
6296 | for (unsigned i = 0, e = NumArgs; i != e; i++) { |
6297 | if (i == 0) { |
6298 | switch (BuiltinID) { |
6299 | case NEON::BI__builtin_neon_vld1_v: |
6300 | case NEON::BI__builtin_neon_vld1q_v: |
6301 | case NEON::BI__builtin_neon_vld1q_lane_v: |
6302 | case NEON::BI__builtin_neon_vld1_lane_v: |
6303 | case NEON::BI__builtin_neon_vld1_dup_v: |
6304 | case NEON::BI__builtin_neon_vld1q_dup_v: |
6305 | case NEON::BI__builtin_neon_vst1_v: |
6306 | case NEON::BI__builtin_neon_vst1q_v: |
6307 | case NEON::BI__builtin_neon_vst1q_lane_v: |
6308 | case NEON::BI__builtin_neon_vst1_lane_v: |
6309 | case NEON::BI__builtin_neon_vst2_v: |
6310 | case NEON::BI__builtin_neon_vst2q_v: |
6311 | case NEON::BI__builtin_neon_vst2_lane_v: |
6312 | case NEON::BI__builtin_neon_vst2q_lane_v: |
6313 | case NEON::BI__builtin_neon_vst3_v: |
6314 | case NEON::BI__builtin_neon_vst3q_v: |
6315 | case NEON::BI__builtin_neon_vst3_lane_v: |
6316 | case NEON::BI__builtin_neon_vst3q_lane_v: |
6317 | case NEON::BI__builtin_neon_vst4_v: |
6318 | case NEON::BI__builtin_neon_vst4q_v: |
6319 | case NEON::BI__builtin_neon_vst4_lane_v: |
6320 | case NEON::BI__builtin_neon_vst4q_lane_v: |
6321 | // Get the alignment for the argument in addition to the value; |
6322 | // we'll use it later. |
6323 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
6324 | Ops.push_back(PtrOp0.getPointer()); |
6325 | continue; |
6326 | } |
6327 | } |
6328 | if (i == 1) { |
6329 | switch (BuiltinID) { |
6330 | case NEON::BI__builtin_neon_vld2_v: |
6331 | case NEON::BI__builtin_neon_vld2q_v: |
6332 | case NEON::BI__builtin_neon_vld3_v: |
6333 | case NEON::BI__builtin_neon_vld3q_v: |
6334 | case NEON::BI__builtin_neon_vld4_v: |
6335 | case NEON::BI__builtin_neon_vld4q_v: |
6336 | case NEON::BI__builtin_neon_vld2_lane_v: |
6337 | case NEON::BI__builtin_neon_vld2q_lane_v: |
6338 | case NEON::BI__builtin_neon_vld3_lane_v: |
6339 | case NEON::BI__builtin_neon_vld3q_lane_v: |
6340 | case NEON::BI__builtin_neon_vld4_lane_v: |
6341 | case NEON::BI__builtin_neon_vld4q_lane_v: |
6342 | case NEON::BI__builtin_neon_vld2_dup_v: |
6343 | case NEON::BI__builtin_neon_vld2q_dup_v: |
6344 | case NEON::BI__builtin_neon_vld3_dup_v: |
6345 | case NEON::BI__builtin_neon_vld3q_dup_v: |
6346 | case NEON::BI__builtin_neon_vld4_dup_v: |
6347 | case NEON::BI__builtin_neon_vld4q_dup_v: |
6348 | // Get the alignment for the argument in addition to the value; |
6349 | // we'll use it later. |
6350 | PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); |
6351 | Ops.push_back(PtrOp1.getPointer()); |
6352 | continue; |
6353 | } |
6354 | } |
6355 | |
6356 | if ((ICEArguments & (1 << i)) == 0) { |
6357 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
6358 | } else { |
6359 | // If this is required to be a constant, constant fold it so that we know |
6360 | // that the generated intrinsic gets a ConstantInt. |
6361 | llvm::APSInt Result; |
6362 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); |
6363 | assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst; |
6364 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); |
6365 | } |
6366 | } |
6367 | |
6368 | switch (BuiltinID) { |
6369 | default: break; |
6370 | |
6371 | case NEON::BI__builtin_neon_vget_lane_i8: |
6372 | case NEON::BI__builtin_neon_vget_lane_i16: |
6373 | case NEON::BI__builtin_neon_vget_lane_i32: |
6374 | case NEON::BI__builtin_neon_vget_lane_i64: |
6375 | case NEON::BI__builtin_neon_vget_lane_f32: |
6376 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
6377 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
6378 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
6379 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
6380 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
6381 | return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane"); |
6382 | |
6383 | case NEON::BI__builtin_neon_vrndns_f32: { |
6384 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
6385 | llvm::Type *Tys[] = {Arg->getType()}; |
6386 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys); |
6387 | return Builder.CreateCall(F, {Arg}, "vrndn"); } |
6388 | |
6389 | case NEON::BI__builtin_neon_vset_lane_i8: |
6390 | case NEON::BI__builtin_neon_vset_lane_i16: |
6391 | case NEON::BI__builtin_neon_vset_lane_i32: |
6392 | case NEON::BI__builtin_neon_vset_lane_i64: |
6393 | case NEON::BI__builtin_neon_vset_lane_f32: |
6394 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
6395 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
6396 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
6397 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
6398 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
6399 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
6400 | |
6401 | case NEON::BI__builtin_neon_vsha1h_u32: |
6402 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops, |
6403 | "vsha1h"); |
6404 | case NEON::BI__builtin_neon_vsha1cq_u32: |
6405 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops, |
6406 | "vsha1c"); |
6407 | case NEON::BI__builtin_neon_vsha1pq_u32: |
6408 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops, |
6409 | "vsha1p"); |
6410 | case NEON::BI__builtin_neon_vsha1mq_u32: |
6411 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops, |
6412 | "vsha1m"); |
6413 | |
6414 | // The ARM _MoveToCoprocessor builtins put the input register value as |
6415 | // the first argument, but the LLVM intrinsic expects it as the third one. |
6416 | case ARM::BI_MoveToCoprocessor: |
6417 | case ARM::BI_MoveToCoprocessor2: { |
6418 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ? |
6419 | Intrinsic::arm_mcr : Intrinsic::arm_mcr2); |
6420 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0], |
6421 | Ops[3], Ops[4], Ops[5]}); |
6422 | } |
6423 | case ARM::BI_BitScanForward: |
6424 | case ARM::BI_BitScanForward64: |
6425 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E); |
6426 | case ARM::BI_BitScanReverse: |
6427 | case ARM::BI_BitScanReverse64: |
6428 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E); |
6429 | |
6430 | case ARM::BI_InterlockedAnd64: |
6431 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E); |
6432 | case ARM::BI_InterlockedExchange64: |
6433 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E); |
6434 | case ARM::BI_InterlockedExchangeAdd64: |
6435 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E); |
6436 | case ARM::BI_InterlockedExchangeSub64: |
6437 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E); |
6438 | case ARM::BI_InterlockedOr64: |
6439 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E); |
6440 | case ARM::BI_InterlockedXor64: |
6441 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E); |
6442 | case ARM::BI_InterlockedDecrement64: |
6443 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E); |
6444 | case ARM::BI_InterlockedIncrement64: |
6445 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E); |
6446 | case ARM::BI_InterlockedExchangeAdd8_acq: |
6447 | case ARM::BI_InterlockedExchangeAdd16_acq: |
6448 | case ARM::BI_InterlockedExchangeAdd_acq: |
6449 | case ARM::BI_InterlockedExchangeAdd64_acq: |
6450 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E); |
6451 | case ARM::BI_InterlockedExchangeAdd8_rel: |
6452 | case ARM::BI_InterlockedExchangeAdd16_rel: |
6453 | case ARM::BI_InterlockedExchangeAdd_rel: |
6454 | case ARM::BI_InterlockedExchangeAdd64_rel: |
6455 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E); |
6456 | case ARM::BI_InterlockedExchangeAdd8_nf: |
6457 | case ARM::BI_InterlockedExchangeAdd16_nf: |
6458 | case ARM::BI_InterlockedExchangeAdd_nf: |
6459 | case ARM::BI_InterlockedExchangeAdd64_nf: |
6460 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E); |
6461 | case ARM::BI_InterlockedExchange8_acq: |
6462 | case ARM::BI_InterlockedExchange16_acq: |
6463 | case ARM::BI_InterlockedExchange_acq: |
6464 | case ARM::BI_InterlockedExchange64_acq: |
6465 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E); |
6466 | case ARM::BI_InterlockedExchange8_rel: |
6467 | case ARM::BI_InterlockedExchange16_rel: |
6468 | case ARM::BI_InterlockedExchange_rel: |
6469 | case ARM::BI_InterlockedExchange64_rel: |
6470 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E); |
6471 | case ARM::BI_InterlockedExchange8_nf: |
6472 | case ARM::BI_InterlockedExchange16_nf: |
6473 | case ARM::BI_InterlockedExchange_nf: |
6474 | case ARM::BI_InterlockedExchange64_nf: |
6475 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E); |
6476 | case ARM::BI_InterlockedCompareExchange8_acq: |
6477 | case ARM::BI_InterlockedCompareExchange16_acq: |
6478 | case ARM::BI_InterlockedCompareExchange_acq: |
6479 | case ARM::BI_InterlockedCompareExchange64_acq: |
6480 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E); |
6481 | case ARM::BI_InterlockedCompareExchange8_rel: |
6482 | case ARM::BI_InterlockedCompareExchange16_rel: |
6483 | case ARM::BI_InterlockedCompareExchange_rel: |
6484 | case ARM::BI_InterlockedCompareExchange64_rel: |
6485 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E); |
6486 | case ARM::BI_InterlockedCompareExchange8_nf: |
6487 | case ARM::BI_InterlockedCompareExchange16_nf: |
6488 | case ARM::BI_InterlockedCompareExchange_nf: |
6489 | case ARM::BI_InterlockedCompareExchange64_nf: |
6490 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E); |
6491 | case ARM::BI_InterlockedOr8_acq: |
6492 | case ARM::BI_InterlockedOr16_acq: |
6493 | case ARM::BI_InterlockedOr_acq: |
6494 | case ARM::BI_InterlockedOr64_acq: |
6495 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E); |
6496 | case ARM::BI_InterlockedOr8_rel: |
6497 | case ARM::BI_InterlockedOr16_rel: |
6498 | case ARM::BI_InterlockedOr_rel: |
6499 | case ARM::BI_InterlockedOr64_rel: |
6500 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E); |
6501 | case ARM::BI_InterlockedOr8_nf: |
6502 | case ARM::BI_InterlockedOr16_nf: |
6503 | case ARM::BI_InterlockedOr_nf: |
6504 | case ARM::BI_InterlockedOr64_nf: |
6505 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E); |
6506 | case ARM::BI_InterlockedXor8_acq: |
6507 | case ARM::BI_InterlockedXor16_acq: |
6508 | case ARM::BI_InterlockedXor_acq: |
6509 | case ARM::BI_InterlockedXor64_acq: |
6510 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E); |
6511 | case ARM::BI_InterlockedXor8_rel: |
6512 | case ARM::BI_InterlockedXor16_rel: |
6513 | case ARM::BI_InterlockedXor_rel: |
6514 | case ARM::BI_InterlockedXor64_rel: |
6515 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E); |
6516 | case ARM::BI_InterlockedXor8_nf: |
6517 | case ARM::BI_InterlockedXor16_nf: |
6518 | case ARM::BI_InterlockedXor_nf: |
6519 | case ARM::BI_InterlockedXor64_nf: |
6520 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E); |
6521 | case ARM::BI_InterlockedAnd8_acq: |
6522 | case ARM::BI_InterlockedAnd16_acq: |
6523 | case ARM::BI_InterlockedAnd_acq: |
6524 | case ARM::BI_InterlockedAnd64_acq: |
6525 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E); |
6526 | case ARM::BI_InterlockedAnd8_rel: |
6527 | case ARM::BI_InterlockedAnd16_rel: |
6528 | case ARM::BI_InterlockedAnd_rel: |
6529 | case ARM::BI_InterlockedAnd64_rel: |
6530 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E); |
6531 | case ARM::BI_InterlockedAnd8_nf: |
6532 | case ARM::BI_InterlockedAnd16_nf: |
6533 | case ARM::BI_InterlockedAnd_nf: |
6534 | case ARM::BI_InterlockedAnd64_nf: |
6535 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E); |
6536 | case ARM::BI_InterlockedIncrement16_acq: |
6537 | case ARM::BI_InterlockedIncrement_acq: |
6538 | case ARM::BI_InterlockedIncrement64_acq: |
6539 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E); |
6540 | case ARM::BI_InterlockedIncrement16_rel: |
6541 | case ARM::BI_InterlockedIncrement_rel: |
6542 | case ARM::BI_InterlockedIncrement64_rel: |
6543 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E); |
6544 | case ARM::BI_InterlockedIncrement16_nf: |
6545 | case ARM::BI_InterlockedIncrement_nf: |
6546 | case ARM::BI_InterlockedIncrement64_nf: |
6547 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E); |
6548 | case ARM::BI_InterlockedDecrement16_acq: |
6549 | case ARM::BI_InterlockedDecrement_acq: |
6550 | case ARM::BI_InterlockedDecrement64_acq: |
6551 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E); |
6552 | case ARM::BI_InterlockedDecrement16_rel: |
6553 | case ARM::BI_InterlockedDecrement_rel: |
6554 | case ARM::BI_InterlockedDecrement64_rel: |
6555 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E); |
6556 | case ARM::BI_InterlockedDecrement16_nf: |
6557 | case ARM::BI_InterlockedDecrement_nf: |
6558 | case ARM::BI_InterlockedDecrement64_nf: |
6559 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E); |
6560 | } |
6561 | |
6562 | // Get the last argument, which specifies the vector type. |
6563 | assert(HasExtraArg); |
6564 | llvm::APSInt Result; |
6565 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
6566 | if (!Arg->isIntegerConstantExpr(Result, getContext())) |
6567 | return nullptr; |
6568 | |
6569 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || |
6570 | BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { |
6571 | // Determine the overloaded type of this builtin. |
6572 | llvm::Type *Ty; |
6573 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) |
6574 | Ty = FloatTy; |
6575 | else |
6576 | Ty = DoubleTy; |
6577 | |
6578 | // Determine whether this is an unsigned conversion or not. |
6579 | bool usgn = Result.getZExtValue() == 1; |
6580 | unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; |
6581 | |
6582 | // Call the appropriate intrinsic. |
6583 | Function *F = CGM.getIntrinsic(Int, Ty); |
6584 | return Builder.CreateCall(F, Ops, "vcvtr"); |
6585 | } |
6586 | |
6587 | // Determine the type of this overloaded NEON intrinsic. |
6588 | NeonTypeFlags Type(Result.getZExtValue()); |
6589 | bool usgn = Type.isUnsigned(); |
6590 | bool rightShift = false; |
6591 | |
6592 | llvm::VectorType *VTy = GetNeonType(this, Type, |
6593 | getTarget().hasLegalHalfType()); |
6594 | llvm::Type *Ty = VTy; |
6595 | if (!Ty) |
6596 | return nullptr; |
6597 | |
6598 | // Many NEON builtins have identical semantics and uses in ARM and |
6599 | // AArch64. Emit these in a single function. |
6600 | auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap); |
6601 | const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap( |
6602 | IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted); |
6603 | if (Builtin) |
6604 | return EmitCommonNeonBuiltinExpr( |
6605 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
6606 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); |
6607 | |
6608 | unsigned Int; |
6609 | switch (BuiltinID) { |
6610 | default: return nullptr; |
6611 | case NEON::BI__builtin_neon_vld1q_lane_v: |
6612 | // Handle 64-bit integer elements as a special case. Use shuffles of |
6613 | // one-element vectors to avoid poor code for i64 in the backend. |
6614 | if (VTy->getElementType()->isIntegerTy(64)) { |
6615 | // Extract the other lane. |
6616 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6617 | uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); |
6618 | Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); |
6619 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
6620 | // Load the value as a one-element vector. |
6621 | Ty = llvm::VectorType::get(VTy->getElementType(), 1); |
6622 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6623 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys); |
6624 | Value *Align = getAlignmentValue32(PtrOp0); |
6625 | Value *Ld = Builder.CreateCall(F, {Ops[0], Align}); |
6626 | // Combine them. |
6627 | uint32_t Indices[] = {1 - Lane, Lane}; |
6628 | SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices); |
6629 | return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane"); |
6630 | } |
6631 | LLVM_FALLTHROUGH; |
6632 | case NEON::BI__builtin_neon_vld1_lane_v: { |
6633 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6634 | PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); |
6635 | Value *Ld = Builder.CreateLoad(PtrOp0); |
6636 | return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane"); |
6637 | } |
6638 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
6639 | Int = |
6640 | usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; |
6641 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", |
6642 | 1, true); |
6643 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
6644 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), |
6645 | Ops, "vqrshrun_n", 1, true); |
6646 | case NEON::BI__builtin_neon_vqshrn_n_v: |
6647 | Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; |
6648 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", |
6649 | 1, true); |
6650 | case NEON::BI__builtin_neon_vqshrun_n_v: |
6651 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), |
6652 | Ops, "vqshrun_n", 1, true); |
6653 | case NEON::BI__builtin_neon_vrecpe_v: |
6654 | case NEON::BI__builtin_neon_vrecpeq_v: |
6655 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), |
6656 | Ops, "vrecpe"); |
6657 | case NEON::BI__builtin_neon_vrshrn_n_v: |
6658 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), |
6659 | Ops, "vrshrn_n", 1, true); |
6660 | case NEON::BI__builtin_neon_vrsra_n_v: |
6661 | case NEON::BI__builtin_neon_vrsraq_n_v: |
6662 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6663 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6664 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); |
6665 | Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; |
6666 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]}); |
6667 | return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); |
6668 | case NEON::BI__builtin_neon_vsri_n_v: |
6669 | case NEON::BI__builtin_neon_vsriq_n_v: |
6670 | rightShift = true; |
6671 | LLVM_FALLTHROUGH; |
6672 | case NEON::BI__builtin_neon_vsli_n_v: |
6673 | case NEON::BI__builtin_neon_vsliq_n_v: |
6674 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); |
6675 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), |
6676 | Ops, "vsli_n"); |
6677 | case NEON::BI__builtin_neon_vsra_n_v: |
6678 | case NEON::BI__builtin_neon_vsraq_n_v: |
6679 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6680 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
6681 | return Builder.CreateAdd(Ops[0], Ops[1]); |
6682 | case NEON::BI__builtin_neon_vst1q_lane_v: |
6683 | // Handle 64-bit integer elements as a special case. Use a shuffle to get |
6684 | // a one-element vector and avoid poor code for i64 in the backend. |
6685 | if (VTy->getElementType()->isIntegerTy(64)) { |
6686 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6687 | Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2])); |
6688 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
6689 | Ops[2] = getAlignmentValue32(PtrOp0); |
6690 | llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; |
6691 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, |
6692 | Tys), Ops); |
6693 | } |
6694 | LLVM_FALLTHROUGH; |
6695 | case NEON::BI__builtin_neon_vst1_lane_v: { |
6696 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6697 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
6698 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6699 | auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty)); |
6700 | return St; |
6701 | } |
6702 | case NEON::BI__builtin_neon_vtbl1_v: |
6703 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), |
6704 | Ops, "vtbl1"); |
6705 | case NEON::BI__builtin_neon_vtbl2_v: |
6706 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), |
6707 | Ops, "vtbl2"); |
6708 | case NEON::BI__builtin_neon_vtbl3_v: |
6709 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), |
6710 | Ops, "vtbl3"); |
6711 | case NEON::BI__builtin_neon_vtbl4_v: |
6712 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), |
6713 | Ops, "vtbl4"); |
6714 | case NEON::BI__builtin_neon_vtbx1_v: |
6715 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), |
6716 | Ops, "vtbx1"); |
6717 | case NEON::BI__builtin_neon_vtbx2_v: |
6718 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), |
6719 | Ops, "vtbx2"); |
6720 | case NEON::BI__builtin_neon_vtbx3_v: |
6721 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), |
6722 | Ops, "vtbx3"); |
6723 | case NEON::BI__builtin_neon_vtbx4_v: |
6724 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), |
6725 | Ops, "vtbx4"); |
6726 | } |
6727 | } |
6728 | |
6729 | static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, |
6730 | const CallExpr *E, |
6731 | SmallVectorImpl<Value *> &Ops, |
6732 | llvm::Triple::ArchType Arch) { |
6733 | unsigned int Int = 0; |
6734 | const char *s = nullptr; |
6735 | |
6736 | switch (BuiltinID) { |
6737 | default: |
6738 | return nullptr; |
6739 | case NEON::BI__builtin_neon_vtbl1_v: |
6740 | case NEON::BI__builtin_neon_vqtbl1_v: |
6741 | case NEON::BI__builtin_neon_vqtbl1q_v: |
6742 | case NEON::BI__builtin_neon_vtbl2_v: |
6743 | case NEON::BI__builtin_neon_vqtbl2_v: |
6744 | case NEON::BI__builtin_neon_vqtbl2q_v: |
6745 | case NEON::BI__builtin_neon_vtbl3_v: |
6746 | case NEON::BI__builtin_neon_vqtbl3_v: |
6747 | case NEON::BI__builtin_neon_vqtbl3q_v: |
6748 | case NEON::BI__builtin_neon_vtbl4_v: |
6749 | case NEON::BI__builtin_neon_vqtbl4_v: |
6750 | case NEON::BI__builtin_neon_vqtbl4q_v: |
6751 | break; |
6752 | case NEON::BI__builtin_neon_vtbx1_v: |
6753 | case NEON::BI__builtin_neon_vqtbx1_v: |
6754 | case NEON::BI__builtin_neon_vqtbx1q_v: |
6755 | case NEON::BI__builtin_neon_vtbx2_v: |
6756 | case NEON::BI__builtin_neon_vqtbx2_v: |
6757 | case NEON::BI__builtin_neon_vqtbx2q_v: |
6758 | case NEON::BI__builtin_neon_vtbx3_v: |
6759 | case NEON::BI__builtin_neon_vqtbx3_v: |
6760 | case NEON::BI__builtin_neon_vqtbx3q_v: |
6761 | case NEON::BI__builtin_neon_vtbx4_v: |
6762 | case NEON::BI__builtin_neon_vqtbx4_v: |
6763 | case NEON::BI__builtin_neon_vqtbx4q_v: |
6764 | break; |
6765 | } |
6766 | |
6767 | assert(E->getNumArgs() >= 3); |
6768 | |
6769 | // Get the last argument, which specifies the vector type. |
6770 | llvm::APSInt Result; |
6771 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
6772 | if (!Arg->isIntegerConstantExpr(Result, CGF.getContext())) |
6773 | return nullptr; |
6774 | |
6775 | // Determine the type of this overloaded NEON intrinsic. |
6776 | NeonTypeFlags Type(Result.getZExtValue()); |
6777 | llvm::VectorType *Ty = GetNeonType(&CGF, Type); |
6778 | if (!Ty) |
6779 | return nullptr; |
6780 | |
6781 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
6782 | |
6783 | // AArch64 scalar builtins are not overloaded; they do not have an extra |
6784 | // argument that specifies the vector type, so each case must be handled here. |
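| // The D-register tbl/tbx builtins are emulated below with the Q-register |
| // AArch64 intrinsics: packTBLDVectorList concatenates the 64-bit table |
| // operands into 128-bit vectors, which is why, for example, vtbl2_v is |
| // lowered through a single tbl1 call. |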
6785 | switch (BuiltinID) { |
6786 | case NEON::BI__builtin_neon_vtbl1_v: { |
6787 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr, |
6788 | Ops[1], Ty, Intrinsic::aarch64_neon_tbl1, |
6789 | "vtbl1"); |
6790 | } |
6791 | case NEON::BI__builtin_neon_vtbl2_v: { |
6792 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr, |
6793 | Ops[2], Ty, Intrinsic::aarch64_neon_tbl1, |
6794 | "vtbl1"); |
6795 | } |
6796 | case NEON::BI__builtin_neon_vtbl3_v: { |
6797 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr, |
6798 | Ops[3], Ty, Intrinsic::aarch64_neon_tbl2, |
6799 | "vtbl2"); |
6800 | } |
6801 | case NEON::BI__builtin_neon_vtbl4_v: { |
6802 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr, |
6803 | Ops[4], Ty, Intrinsic::aarch64_neon_tbl2, |
6804 | "vtbl2"); |
6805 | } |
6806 | case NEON::BI__builtin_neon_vtbx1_v: { |
6807 | Value *TblRes = |
6808 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2], |
6809 | Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1"); |
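| // A single 8-byte table only covers indices 0..7; vtbx semantics require |
| // lanes with an out-of-range index to keep the destination value. The |
| // compare/sext below builds a lane mask used to blend the original |
| // elements (Ops[0]) with the tbl1 result. |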
6810 | |
6811 | llvm::Constant *EightV = ConstantInt::get(Ty, 8); |
6812 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV); |
6813 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
6814 | |
6815 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
6816 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
6817 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
6818 | } |
6819 | case NEON::BI__builtin_neon_vtbx2_v: { |
6820 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0], |
6821 | Ops[3], Ty, Intrinsic::aarch64_neon_tbx1, |
6822 | "vtbx1"); |
6823 | } |
6824 | case NEON::BI__builtin_neon_vtbx3_v: { |
6825 | Value *TblRes = |
6826 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4], |
6827 | Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); |
6828 | |
6829 | llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); |
6830 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], |
6831 | TwentyFourV); |
6832 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
6833 | |
6834 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
6835 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
6836 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
6837 | } |
6838 | case NEON::BI__builtin_neon_vtbx4_v: { |
6839 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0], |
6840 | Ops[5], Ty, Intrinsic::aarch64_neon_tbx2, |
6841 | "vtbx2"); |
6842 | } |
6843 | case NEON::BI__builtin_neon_vqtbl1_v: |
6844 | case NEON::BI__builtin_neon_vqtbl1q_v: |
6845 | Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; |
6846 | case NEON::BI__builtin_neon_vqtbl2_v: |
6847 | case NEON::BI__builtin_neon_vqtbl2q_v: |
6848 | Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; |
6849 | case NEON::BI__builtin_neon_vqtbl3_v: |
6850 | case NEON::BI__builtin_neon_vqtbl3q_v: |
6851 | Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; |
6852 | case NEON::BI__builtin_neon_vqtbl4_v: |
6853 | case NEON::BI__builtin_neon_vqtbl4q_v: |
6854 | Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; |
6855 | case NEON::BI__builtin_neon_vqtbx1_v: |
6856 | case NEON::BI__builtin_neon_vqtbx1q_v: |
6857 | Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; |
6858 | case NEON::BI__builtin_neon_vqtbx2_v: |
6859 | case NEON::BI__builtin_neon_vqtbx2q_v: |
6860 | Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; |
6861 | case NEON::BI__builtin_neon_vqtbx3_v: |
6862 | case NEON::BI__builtin_neon_vqtbx3q_v: |
6863 | Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; |
6864 | case NEON::BI__builtin_neon_vqtbx4_v: |
6865 | case NEON::BI__builtin_neon_vqtbx4q_v: |
6866 | Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; |
6867 | } |
6869 | |
6870 | if (!Int) |
6871 | return nullptr; |
6872 | |
6873 | Function *F = CGF.CGM.getIntrinsic(Int, Ty); |
6874 | return CGF.EmitNeonCall(F, Ops, s); |
6875 | } |
6876 | |
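| // Wrap a scalar i16 as lane 0 of an undef <4 x i16> vector so that |
| // vector-only NEON intrinsics (e.g. sqdmull) can be applied to scalar |
| // operands; used by the scalar saturating multiply-accumulate cases below. |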
6877 | Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { |
6878 | llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4); |
6879 | Op = Builder.CreateBitCast(Op, Int16Ty); |
6880 | Value *V = UndefValue::get(VTy); |
6881 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
6882 | Op = Builder.CreateInsertElement(V, Op, CI); |
6883 | return Op; |
6884 | } |
6885 | |
6886 | Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, |
6887 | const CallExpr *E, |
6888 | llvm::Triple::ArchType Arch) { |
6889 | unsigned HintID = static_cast<unsigned>(-1); |
6890 | switch (BuiltinID) { |
6891 | default: break; |
6892 | case AArch64::BI__builtin_arm_nop: |
6893 | HintID = 0; |
6894 | break; |
6895 | case AArch64::BI__builtin_arm_yield: |
6896 | case AArch64::BI__yield: |
6897 | HintID = 1; |
6898 | break; |
6899 | case AArch64::BI__builtin_arm_wfe: |
6900 | case AArch64::BI__wfe: |
6901 | HintID = 2; |
6902 | break; |
6903 | case AArch64::BI__builtin_arm_wfi: |
6904 | case AArch64::BI__wfi: |
6905 | HintID = 3; |
6906 | break; |
6907 | case AArch64::BI__builtin_arm_sev: |
6908 | case AArch64::BI__sev: |
6909 | HintID = 4; |
6910 | break; |
6911 | case AArch64::BI__builtin_arm_sevl: |
6912 | case AArch64::BI__sevl: |
6913 | HintID = 5; |
6914 | break; |
6915 | } |
6916 | |
6917 | if (HintID != static_cast<unsigned>(-1)) { |
6918 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint); |
6919 | return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID)); |
6920 | } |
6921 | |
6922 | if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { |
6923 | Value *Address = EmitScalarExpr(E->getArg(0)); |
6924 | Value *RW = EmitScalarExpr(E->getArg(1)); |
6925 | Value *CacheLevel = EmitScalarExpr(E->getArg(2)); |
6926 | Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); |
6927 | Value *IsData = EmitScalarExpr(E->getArg(4)); |
6928 | |
6929 | Value *Locality = nullptr; |
6930 | if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) { |
6931 | // Temporal fetch; convert the cache level into an llvm.prefetch locality. |
6932 | Locality = llvm::ConstantInt::get(Int32Ty, |
6933 | -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3); |
6934 | } else { |
6935 | // Streaming fetch. |
6936 | Locality = llvm::ConstantInt::get(Int32Ty, 0); |
6937 | } |
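| // The mapping above is Locality = 3 - CacheLevel: the closer the requested |
| // cache level, the higher the llvm.prefetch locality hint (3 means keep in |
| // all levels of the cache hierarchy, 0 means non-temporal). |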
6938 | |
6939 | // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify |
6940 | // PLDL3STRM or PLDL2STRM. |
6941 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch); |
6942 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
6943 | } |
6944 | |
6945 | if (BuiltinID == AArch64::BI__builtin_arm_rbit) { |
6946 | assert((getContext().getTypeSize(E->getType()) == 32) && |
6947 | "rbit of unusual size!"); |
6948 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
6949 | return Builder.CreateCall( |
6950 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
6951 | } |
6952 | if (BuiltinID == AArch64::BI__builtin_arm_rbit64) { |
6953 | assert((getContext().getTypeSize(E->getType()) == 64) && |
6954 | "rbit of unusual size!"); |
6955 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
6956 | return Builder.CreateCall( |
6957 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
6958 | } |
6959 | |
6960 | if (BuiltinID == AArch64::BI__clear_cache) { |
6961 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); |
6962 | const FunctionDecl *FD = E->getDirectCallee(); |
6963 | Value *Ops[2]; |
6964 | for (unsigned i = 0; i < 2; i++) |
6965 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
6966 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
6967 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
6968 | StringRef Name = FD->getName(); |
6969 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
6970 | } |
6971 | |
6972 | if ((BuiltinID == AArch64::BI__builtin_arm_ldrex || |
6973 | BuiltinID == AArch64::BI__builtin_arm_ldaex) && |
6974 | getContext().getTypeSize(E->getType()) == 128) { |
6975 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
6976 | ? Intrinsic::aarch64_ldaxp |
6977 | : Intrinsic::aarch64_ldxp); |
6978 | |
6979 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
6980 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
6981 | "ldxp"); |
6982 | |
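| // ldxp/ldaxp return a pair of i64 halves; below, element 1 of the pair is |
| // zero-extended and shifted into the high 64 bits, or'd with element 0, |
| // and the resulting i128 is bitcast to the builtin's 128-bit result type. |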
6983 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
6984 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
6985 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
6986 | Val0 = Builder.CreateZExt(Val0, Int128Ty); |
6987 | Val1 = Builder.CreateZExt(Val1, Int128Ty); |
6988 | |
6989 | Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64); |
6990 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); |
6991 | Val = Builder.CreateOr(Val, Val1); |
6992 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
6993 | } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex || |
6994 | BuiltinID == AArch64::BI__builtin_arm_ldaex) { |
6995 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
6996 | |
6997 | QualType Ty = E->getType(); |
6998 | llvm::Type *RealResTy = ConvertType(Ty); |
6999 | llvm::Type *PtrTy = llvm::IntegerType::get( |
7000 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); |
7001 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
7002 | |
7003 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
7004 | ? Intrinsic::aarch64_ldaxr |
7005 | : Intrinsic::aarch64_ldxr, |
7006 | PtrTy); |
7007 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr"); |
7008 | |
7009 | if (RealResTy->isPointerTy()) |
7010 | return Builder.CreateIntToPtr(Val, RealResTy); |
7011 | |
7012 | llvm::Type *IntResTy = llvm::IntegerType::get( |
7013 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
7014 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); |
7015 | return Builder.CreateBitCast(Val, RealResTy); |
7016 | } |
7017 | |
7018 | if ((BuiltinID == AArch64::BI__builtin_arm_strex || |
7019 | BuiltinID == AArch64::BI__builtin_arm_stlex) && |
7020 | getContext().getTypeSize(E->getArg(0)->getType()) == 128) { |
7021 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
7022 | ? Intrinsic::aarch64_stlxp |
7023 | : Intrinsic::aarch64_stxp); |
7024 | llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty); |
7025 | |
7026 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
7027 | EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true); |
7028 | |
7029 | Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy)); |
7030 | llvm::Value *Val = Builder.CreateLoad(Tmp); |
7031 | |
7032 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
7033 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
7034 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), |
7035 | Int8PtrTy); |
7036 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); |
7037 | } |
7038 | |
7039 | if (BuiltinID == AArch64::BI__builtin_arm_strex || |
7040 | BuiltinID == AArch64::BI__builtin_arm_stlex) { |
7041 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
7042 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
7043 | |
7044 | QualType Ty = E->getArg(0)->getType(); |
7045 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
7046 | getContext().getTypeSize(Ty)); |
7047 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
7048 | |
7049 | if (StoreVal->getType()->isPointerTy()) |
7050 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty); |
7051 | else { |
7052 | llvm::Type *IntTy = llvm::IntegerType::get( |
7053 | getLLVMContext(), |
7054 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
7055 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
7056 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty); |
7057 | } |
7058 | |
7059 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
7060 | ? Intrinsic::aarch64_stlxr |
7061 | : Intrinsic::aarch64_stxr, |
7062 | StoreAddr->getType()); |
7063 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr"); |
7064 | } |
7065 | |
7066 | if (BuiltinID == AArch64::BI__getReg) { |
7067 | Expr::EvalResult Result; |
7068 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
7069 | llvm_unreachable("Sema will ensure that the parameter is constant"); |
7070 | |
7071 | llvm::APSInt Value = Result.Val.getInt(); |
7072 | LLVMContext &Context = CGM.getLLVMContext(); |
7073 | std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10); |
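| // Register number 31 names the stack pointer here; every other value maps |
| // onto the corresponding 64-bit general-purpose register name (x0..x30), |
| // which is handed to llvm.read_register via metadata below. |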
7074 | |
7075 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)}; |
7076 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
7077 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
7078 | |
7079 | llvm::Function *F = |
7080 | CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty}); |
7081 | return Builder.CreateCall(F, Metadata); |
7082 | } |
7083 | |
7084 | if (BuiltinID == AArch64::BI__builtin_arm_clrex) { |
7085 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); |
7086 | return Builder.CreateCall(F); |
7087 | } |
7088 | |
7089 | if (BuiltinID == AArch64::BI_ReadWriteBarrier) |
7090 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
7091 | llvm::SyncScope::SingleThread); |
7092 | |
7093 | // CRC32 |
7094 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
7095 | switch (BuiltinID) { |
7096 | case AArch64::BI__builtin_arm_crc32b: |
7097 | CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; |
7098 | case AArch64::BI__builtin_arm_crc32cb: |
7099 | CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; |
7100 | case AArch64::BI__builtin_arm_crc32h: |
7101 | CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; |
7102 | case AArch64::BI__builtin_arm_crc32ch: |
7103 | CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; |
7104 | case AArch64::BI__builtin_arm_crc32w: |
7105 | CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; |
7106 | case AArch64::BI__builtin_arm_crc32cw: |
7107 | CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; |
7108 | case AArch64::BI__builtin_arm_crc32d: |
7109 | CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; |
7110 | case AArch64::BI__builtin_arm_crc32cd: |
7111 | CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; |
7112 | } |
7113 | |
7114 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
7115 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
7116 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
7117 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7118 | |
7119 | llvm::Type *DataTy = F->getFunctionType()->getParamType(1); |
7120 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy); |
7121 | |
7122 | return Builder.CreateCall(F, {Arg0, Arg1}); |
7123 | } |
7124 | |
7125 | // Memory Tagging Extensions (MTE) Intrinsics |
7126 | Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic; |
7127 | switch (BuiltinID) { |
7128 | case AArch64::BI__builtin_arm_irg: |
7129 | MTEIntrinsicID = Intrinsic::aarch64_irg; break; |
7130 | case AArch64::BI__builtin_arm_addg: |
7131 | MTEIntrinsicID = Intrinsic::aarch64_addg; break; |
7132 | case AArch64::BI__builtin_arm_gmi: |
7133 | MTEIntrinsicID = Intrinsic::aarch64_gmi; break; |
7134 | case AArch64::BI__builtin_arm_ldg: |
7135 | MTEIntrinsicID = Intrinsic::aarch64_ldg; break; |
7136 | case AArch64::BI__builtin_arm_stg: |
7137 | MTEIntrinsicID = Intrinsic::aarch64_stg; break; |
7138 | case AArch64::BI__builtin_arm_subp: |
7139 | MTEIntrinsicID = Intrinsic::aarch64_subp; break; |
7140 | } |
7141 | |
7142 | if (MTEIntrinsicID != Intrinsic::not_intrinsic) { |
7143 | llvm::Type *T = ConvertType(E->getType()); |
7144 | |
7145 | if (MTEIntrinsicID == Intrinsic::aarch64_irg) { |
7146 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
7147 | Value *Mask = EmitScalarExpr(E->getArg(1)); |
7148 | |
7149 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
7150 | Mask = Builder.CreateZExt(Mask, Int64Ty); |
7151 | Value *RV = Builder.CreateCall( |
7152 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask}); |
7153 | return Builder.CreatePointerCast(RV, T); |
7154 | } |
7155 | if (MTEIntrinsicID == Intrinsic::aarch64_addg) { |
7156 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
7157 | Value *TagOffset = EmitScalarExpr(E->getArg(1)); |
7158 | |
7159 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
7160 | TagOffset = Builder.CreateZExt(TagOffset, Int64Ty); |
7161 | Value *RV = Builder.CreateCall( |
7162 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset}); |
7163 | return Builder.CreatePointerCast(RV, T); |
7164 | } |
7165 | if (MTEIntrinsicID == Intrinsic::aarch64_gmi) { |
7166 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
7167 | Value *ExcludedMask = EmitScalarExpr(E->getArg(1)); |
7168 | |
7169 | ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty); |
7170 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
7171 | return Builder.CreateCall( |
7172 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask}); |
7173 | } |
7174 | // Although it is possible to supply a different return |
7175 | // address (first arg) to this intrinsic, for now we set |
7176 | // the return address to be the same as the input address. |
7177 | if (MTEIntrinsicID == Intrinsic::aarch64_ldg) { |
7178 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
7179 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
7180 | Value *RV = Builder.CreateCall( |
7181 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
7182 | return Builder.CreatePointerCast(RV, T); |
7183 | } |
7184 | // Although it is possible to supply a different tag (to set) |
7185 | // to this intrinsic (as first arg), for now we supply |
7186 | // the tag that is in the input address argument (the common use case). |
7187 | if (MTEIntrinsicID == Intrinsic::aarch64_stg) { |
7188 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
7189 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
7190 | return Builder.CreateCall( |
7191 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
7192 | } |
7193 | if (MTEIntrinsicID == Intrinsic::aarch64_subp) { |
7194 | Value *PointerA = EmitScalarExpr(E->getArg(0)); |
7195 | Value *PointerB = EmitScalarExpr(E->getArg(1)); |
7196 | PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy); |
7197 | PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy); |
7198 | return Builder.CreateCall( |
7199 | CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB}); |
7200 | } |
7201 | } |
7202 | |
7203 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
7204 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
7205 | BuiltinID == AArch64::BI__builtin_arm_rsrp || |
7206 | BuiltinID == AArch64::BI__builtin_arm_wsr || |
7207 | BuiltinID == AArch64::BI__builtin_arm_wsr64 || |
7208 | BuiltinID == AArch64::BI__builtin_arm_wsrp) { |
7209 | |
7210 | bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr || |
7211 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
7212 | BuiltinID == AArch64::BI__builtin_arm_rsrp; |
7213 | |
7214 | bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp || |
7215 | BuiltinID == AArch64::BI__builtin_arm_wsrp; |
7216 | |
7217 | bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr && |
7218 | BuiltinID != AArch64::BI__builtin_arm_wsr; |
7219 | |
7220 | llvm::Type *ValueType; |
7221 | llvm::Type *RegisterType = Int64Ty; |
7222 | if (IsPointerBuiltin) { |
7223 | ValueType = VoidPtrTy; |
7224 | } else if (Is64Bit) { |
7225 | ValueType = Int64Ty; |
7226 | } else { |
7227 | ValueType = Int32Ty; |
7228 | } |
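| // The system register itself is always accessed as a 64-bit quantity |
| // (RegisterType); ValueType reflects the builtin's declared type, and |
| // EmitSpecialRegisterBuiltin is expected to insert any needed truncation |
| // or extension between the two. |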
7229 | |
7230 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead); |
7231 | } |
7232 | |
7233 | if (BuiltinID == AArch64::BI_ReadStatusReg || |
7234 | BuiltinID == AArch64::BI_WriteStatusReg) { |
7235 | LLVMContext &Context = CGM.getLLVMContext(); |
7236 | |
7237 | unsigned SysReg = |
7238 | E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
7239 | |
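| // The constant encodes the system-register operands as packed bit-fields; |
| // they are unpacked below into the "op0:op1:CRn:CRm:op2" string form used |
| // for AArch64 named registers (op0 is rebuilt as 0b1x from the single |
| // stored bit, hence the (1 << 1) | ... term). |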
7240 | std::string SysRegStr; |
7241 | llvm::raw_string_ostream(SysRegStr) << |
7242 | ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << |
7243 | ((SysReg >> 11) & 7) << ":" << |
7244 | ((SysReg >> 7) & 15) << ":" << |
7245 | ((SysReg >> 3) & 15) << ":" << |
7246 | ( SysReg & 7); |
7247 | |
7248 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) }; |
7249 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
7250 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
7251 | |
7252 | llvm::Type *RegisterType = Int64Ty; |
7253 | llvm::Type *Types[] = { RegisterType }; |
7254 | |
7255 | if (BuiltinID == AArch64::BI_ReadStatusReg) { |
7256 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
7257 | |
7258 | return Builder.CreateCall(F, Metadata); |
7259 | } |
7260 | |
7261 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
7262 | llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
7263 | |
7264 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7265 | } |
7266 | |
7267 | if (BuiltinID == AArch64::BI_AddressOfReturnAddress) { |
7268 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress); |
7269 | return Builder.CreateCall(F); |
7270 | } |
7271 | |
7272 | if (BuiltinID == AArch64::BI__builtin_sponentry) { |
7273 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry); |
7274 | return Builder.CreateCall(F); |
7275 | } |
7276 | |
7277 | // Find out if any arguments are required to be integer constant |
7278 | // expressions. |
7279 | unsigned ICEArguments = 0; |
7280 | ASTContext::GetBuiltinTypeError Error; |
7281 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
7282 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
7283 | |
7284 | llvm::SmallVector<Value*, 4> Ops; |
7285 | for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { |
7286 | if ((ICEArguments & (1 << i)) == 0) { |
7287 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
7288 | } else { |
7289 | // If this is required to be a constant, constant fold it so that we know |
7290 | // that the generated intrinsic gets a ConstantInt. |
7291 | llvm::APSInt Result; |
7292 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); |
7293 | assert(IsConst && "Constant arg isn't actually constant?"); |
7294 | (void)IsConst; |
7295 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); |
7296 | } |
7297 | } |
7298 | |
7299 | auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap); |
7300 | const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap( |
7301 | SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); |
7302 | |
7303 | if (Builtin) { |
7304 | Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); |
7305 | Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E); |
7306 | assert(Result && "SISD intrinsic should have been handled"); |
7307 | return Result; |
7308 | } |
7309 | |
7310 | llvm::APSInt Result; |
7311 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
7312 | NeonTypeFlags Type(0); |
7313 | if (Arg->isIntegerConstantExpr(Result, getContext())) |
7314 | // Determine the type of this overloaded NEON intrinsic. |
7315 | Type = NeonTypeFlags(Result.getZExtValue()); |
7316 | |
7317 | bool usgn = Type.isUnsigned(); |
7318 | bool quad = Type.isQuad(); |
7319 | |
7320 | // Handle non-overloaded intrinsics first. |
7321 | switch (BuiltinID) { |
7322 | default: break; |
7323 | case NEON::BI__builtin_neon_vabsh_f16: |
7324 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7325 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs"); |
7326 | case NEON::BI__builtin_neon_vldrq_p128: { |
7327 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
7328 | llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0); |
7329 | Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy); |
7330 | return Builder.CreateAlignedLoad(Int128Ty, Ptr, |
7331 | CharUnits::fromQuantity(16)); |
7332 | } |
7333 | case NEON::BI__builtin_neon_vstrq_p128: { |
7334 | llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128); |
7335 | Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy); |
7336 | return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); |
7337 | } |
7338 | case NEON::BI__builtin_neon_vcvts_u32_f32: |
7339 | case NEON::BI__builtin_neon_vcvtd_u64_f64: |
7340 | usgn = true; |
7341 | LLVM_FALLTHROUGH; |
7342 | case NEON::BI__builtin_neon_vcvts_s32_f32: |
7343 | case NEON::BI__builtin_neon_vcvtd_s64_f64: { |
7344 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7345 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; |
7346 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; |
7347 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; |
7348 | Ops[0] = Builder.CreateBitCast(Ops[0], FTy); |
7349 | if (usgn) |
7350 | return Builder.CreateFPToUI(Ops[0], InTy); |
7351 | return Builder.CreateFPToSI(Ops[0], InTy); |
7352 | } |
7353 | case NEON::BI__builtin_neon_vcvts_f32_u32: |
7354 | case NEON::BI__builtin_neon_vcvtd_f64_u64: |
7355 | usgn = true; |
7356 | LLVM_FALLTHROUGH; |
7357 | case NEON::BI__builtin_neon_vcvts_f32_s32: |
7358 | case NEON::BI__builtin_neon_vcvtd_f64_s64: { |
7359 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7360 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; |
7361 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; |
7362 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; |
7363 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
7364 | if (usgn) |
7365 | return Builder.CreateUIToFP(Ops[0], FTy); |
7366 | return Builder.CreateSIToFP(Ops[0], FTy); |
7367 | } |
7368 | case NEON::BI__builtin_neon_vcvth_f16_u16: |
7369 | case NEON::BI__builtin_neon_vcvth_f16_u32: |
7370 | case NEON::BI__builtin_neon_vcvth_f16_u64: |
7371 | usgn = true; |
7372 | LLVM_FALLTHROUGH; |
7373 | case NEON::BI__builtin_neon_vcvth_f16_s16: |
7374 | case NEON::BI__builtin_neon_vcvth_f16_s32: |
7375 | case NEON::BI__builtin_neon_vcvth_f16_s64: { |
7376 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7377 | llvm::Type *FTy = HalfTy; |
7378 | llvm::Type *InTy; |
7379 | if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) |
7380 | InTy = Int64Ty; |
7381 | else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) |
7382 | InTy = Int32Ty; |
7383 | else |
7384 | InTy = Int16Ty; |
7385 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
7386 | if (usgn) |
7387 | return Builder.CreateUIToFP(Ops[0], FTy); |
7388 | return Builder.CreateSIToFP(Ops[0], FTy); |
7389 | } |
7390 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
7391 | usgn = true; |
7392 | LLVM_FALLTHROUGH; |
7393 | case NEON::BI__builtin_neon_vcvth_s16_f16: { |
7394 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7395 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
7396 | if (usgn) |
7397 | return Builder.CreateFPToUI(Ops[0], Int16Ty); |
7398 | return Builder.CreateFPToSI(Ops[0], Int16Ty); |
7399 | } |
7400 | case NEON::BI__builtin_neon_vcvth_u32_f16: |
7401 | usgn = true; |
7402 | LLVM_FALLTHROUGH; |
7403 | case NEON::BI__builtin_neon_vcvth_s32_f16: { |
7404 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7405 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
7406 | if (usgn) |
7407 | return Builder.CreateFPToUI(Ops[0], Int32Ty); |
7408 | return Builder.CreateFPToSI(Ops[0], Int32Ty); |
7409 | } |
7410 | case NEON::BI__builtin_neon_vcvth_u64_f16: |
7411 | usgn = true; |
7412 | LLVM_FALLTHROUGH; |
7413 | case NEON::BI__builtin_neon_vcvth_s64_f16: { |
7414 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7415 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
7416 | if (usgn) |
7417 | return Builder.CreateFPToUI(Ops[0], Int64Ty); |
7418 | return Builder.CreateFPToSI(Ops[0], Int64Ty); |
7419 | } |
7420 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
7421 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
7422 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
7423 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
7424 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
7425 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
7426 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
7427 | case NEON::BI__builtin_neon_vcvtph_s16_f16: { |
7428 | unsigned Int; |
7429 | llvm::Type* InTy = Int32Ty; |
7430 | llvm::Type* FTy = HalfTy; |
7431 | llvm::Type *Tys[2] = {InTy, FTy}; |
7432 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7433 | switch (BuiltinID) { |
7434 | default: llvm_unreachable("missing builtin ID in switch!"); |
7435 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
7436 | Int = Intrinsic::aarch64_neon_fcvtau; break; |
7437 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
7438 | Int = Intrinsic::aarch64_neon_fcvtmu; break; |
7439 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
7440 | Int = Intrinsic::aarch64_neon_fcvtnu; break; |
7441 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
7442 | Int = Intrinsic::aarch64_neon_fcvtpu; break; |
7443 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
7444 | Int = Intrinsic::aarch64_neon_fcvtas; break; |
7445 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
7446 | Int = Intrinsic::aarch64_neon_fcvtms; break; |
7447 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
7448 | Int = Intrinsic::aarch64_neon_fcvtns; break; |
7449 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
7450 | Int = Intrinsic::aarch64_neon_fcvtps; break; |
7451 | } |
7452 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt"); |
7453 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
7454 | } |
7455 | case NEON::BI__builtin_neon_vcaleh_f16: |
7456 | case NEON::BI__builtin_neon_vcalth_f16: |
7457 | case NEON::BI__builtin_neon_vcageh_f16: |
7458 | case NEON::BI__builtin_neon_vcagth_f16: { |
7459 | unsigned Int; |
7460 | llvm::Type* InTy = Int32Ty; |
7461 | llvm::Type* FTy = HalfTy; |
7462 | llvm::Type *Tys[2] = {InTy, FTy}; |
7463 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7464 | switch (BuiltinID) { |
7465 | default: llvm_unreachable("missing builtin ID in switch!"); |
7466 | case NEON::BI__builtin_neon_vcageh_f16: |
7467 | Int = Intrinsic::aarch64_neon_facge; break; |
7468 | case NEON::BI__builtin_neon_vcagth_f16: |
7469 | Int = Intrinsic::aarch64_neon_facgt; break; |
7470 | case NEON::BI__builtin_neon_vcaleh_f16: |
7471 | Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break; |
7472 | case NEON::BI__builtin_neon_vcalth_f16: |
7473 | Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break; |
7474 | } |
7475 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg"); |
7476 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
7477 | } |
7478 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
7479 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: { |
7480 | unsigned Int; |
7481 | llvm::Type* InTy = Int32Ty; |
7482 | llvm::Type* FTy = HalfTy; |
7483 | llvm::Type *Tys[2] = {InTy, FTy}; |
7484 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7485 | switch (BuiltinID) { |
7486 | default: llvm_unreachable("missing builtin ID in switch!"); |
7487 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
7488 | Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; |
7489 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: |
7490 | Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; |
7491 | } |
7492 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
7493 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
7494 | } |
7495 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
7496 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: { |
7497 | unsigned Int; |
7498 | llvm::Type* FTy = HalfTy; |
7499 | llvm::Type* InTy = Int32Ty; |
7500 | llvm::Type *Tys[2] = {FTy, InTy}; |
7501 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7502 | switch (BuiltinID) { |
7503 | default: llvm_unreachable("missing builtin ID in switch!"); |
7504 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
7505 | Int = Intrinsic::aarch64_neon_vcvtfxs2fp; |
7506 | Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext"); |
7507 | break; |
7508 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: |
7509 | Int = Intrinsic::aarch64_neon_vcvtfxu2fp; |
7510 | Ops[0] = Builder.CreateZExt(Ops[0], InTy); |
7511 | break; |
7512 | } |
7513 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
7514 | } |
7515 | case NEON::BI__builtin_neon_vpaddd_s64: { |
7516 | llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2); |
7517 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
7518 | // The vector is v2i64, so make sure it's bitcast to that. |
7519 | Vec = Builder.CreateBitCast(Vec, Ty, "v2i64"); |
7520 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
7521 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
7522 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
7523 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
7524 | // Pairwise addition of a v2i64 into a scalar i64. |
7525 | return Builder.CreateAdd(Op0, Op1, "vpaddd"); |
7526 | } |
7527 | case NEON::BI__builtin_neon_vpaddd_f64: { |
7528 | llvm::Type *Ty = |
7529 | llvm::VectorType::get(DoubleTy, 2); |
7530 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
7531 | // The vector is v2f64, so make sure it's bitcast to that. |
7532 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f64"); |
7533 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
7534 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
7535 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
7536 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
7537 | // Pairwise addition of a v2f64 into a scalar f64. |
7538 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
7539 | } |
7540 | case NEON::BI__builtin_neon_vpadds_f32: { |
7541 | llvm::Type *Ty = |
7542 | llvm::VectorType::get(FloatTy, 2); |
7543 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
7544 | // The vector is v2f32, so make sure it's bitcast to that. |
7545 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f32"); |
7546 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
7547 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
7548 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
7549 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
7550 | // Pairwise addition of a v2f32 into a scalar f32. |
7551 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
7552 | } |
7553 | case NEON::BI__builtin_neon_vceqzd_s64: |
7554 | case NEON::BI__builtin_neon_vceqzd_f64: |
7555 | case NEON::BI__builtin_neon_vceqzs_f32: |
7556 | case NEON::BI__builtin_neon_vceqzh_f16: |
7557 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7558 | return EmitAArch64CompareBuiltinExpr( |
7559 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
7560 | ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz"); |
7561 | case NEON::BI__builtin_neon_vcgezd_s64: |
7562 | case NEON::BI__builtin_neon_vcgezd_f64: |
7563 | case NEON::BI__builtin_neon_vcgezs_f32: |
7564 | case NEON::BI__builtin_neon_vcgezh_f16: |
7565 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7566 | return EmitAArch64CompareBuiltinExpr( |
7567 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
7568 | ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez"); |
7569 | case NEON::BI__builtin_neon_vclezd_s64: |
7570 | case NEON::BI__builtin_neon_vclezd_f64: |
7571 | case NEON::BI__builtin_neon_vclezs_f32: |
7572 | case NEON::BI__builtin_neon_vclezh_f16: |
7573 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7574 | return EmitAArch64CompareBuiltinExpr( |
7575 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
7576 | ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez"); |
7577 | case NEON::BI__builtin_neon_vcgtzd_s64: |
7578 | case NEON::BI__builtin_neon_vcgtzd_f64: |
7579 | case NEON::BI__builtin_neon_vcgtzs_f32: |
7580 | case NEON::BI__builtin_neon_vcgtzh_f16: |
7581 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7582 | return EmitAArch64CompareBuiltinExpr( |
7583 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
7584 | ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz"); |
7585 | case NEON::BI__builtin_neon_vcltzd_s64: |
7586 | case NEON::BI__builtin_neon_vcltzd_f64: |
7587 | case NEON::BI__builtin_neon_vcltzs_f32: |
7588 | case NEON::BI__builtin_neon_vcltzh_f16: |
7589 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7590 | return EmitAArch64CompareBuiltinExpr( |
7591 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
7592 | ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz"); |
7593 | |
7594 | case NEON::BI__builtin_neon_vceqzd_u64: { |
7595 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
7596 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
7597 | Ops[0] = |
7598 | Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty)); |
7599 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd"); |
7600 | } |
7601 | case NEON::BI__builtin_neon_vceqd_f64: |
7602 | case NEON::BI__builtin_neon_vcled_f64: |
7603 | case NEON::BI__builtin_neon_vcltd_f64: |
7604 | case NEON::BI__builtin_neon_vcged_f64: |
7605 | case NEON::BI__builtin_neon_vcgtd_f64: { |
7606 | llvm::CmpInst::Predicate P; |
7607 | switch (BuiltinID) { |
7608 | default: llvm_unreachable("missing builtin ID in switch!"); |
7609 | case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; |
7610 | case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; |
7611 | case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; |
7612 | case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; |
7613 | case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; |
7614 | } |
7615 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7616 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
7617 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
7618 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
7619 | return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd"); |
7620 | } |
7621 | case NEON::BI__builtin_neon_vceqs_f32: |
7622 | case NEON::BI__builtin_neon_vcles_f32: |
7623 | case NEON::BI__builtin_neon_vclts_f32: |
7624 | case NEON::BI__builtin_neon_vcges_f32: |
7625 | case NEON::BI__builtin_neon_vcgts_f32: { |
7626 | llvm::CmpInst::Predicate P; |
7627 | switch (BuiltinID) { |
7628 | default: llvm_unreachable("missing builtin ID in switch!"); |
7629 | case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; |
7630 | case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; |
7631 | case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; |
7632 | case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; |
7633 | case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; |
7634 | } |
7635 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7636 | Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); |
7637 | Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy); |
7638 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
7639 | return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd"); |
7640 | } |
7641 | case NEON::BI__builtin_neon_vceqh_f16: |
7642 | case NEON::BI__builtin_neon_vcleh_f16: |
7643 | case NEON::BI__builtin_neon_vclth_f16: |
7644 | case NEON::BI__builtin_neon_vcgeh_f16: |
7645 | case NEON::BI__builtin_neon_vcgth_f16: { |
7646 | llvm::CmpInst::Predicate P; |
7647 | switch (BuiltinID) { |
7648 | default: llvm_unreachable("missing builtin ID in switch!"); |
7649 | case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; |
7650 | case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; |
7651 | case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; |
7652 | case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; |
7653 | case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; |
7654 | } |
7655 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7656 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
7657 | Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy); |
7658 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
7659 | return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd"); |
7660 | } |
7661 | case NEON::BI__builtin_neon_vceqd_s64: |
7662 | case NEON::BI__builtin_neon_vceqd_u64: |
7663 | case NEON::BI__builtin_neon_vcgtd_s64: |
7664 | case NEON::BI__builtin_neon_vcgtd_u64: |
7665 | case NEON::BI__builtin_neon_vcltd_s64: |
7666 | case NEON::BI__builtin_neon_vcltd_u64: |
7667 | case NEON::BI__builtin_neon_vcged_u64: |
7668 | case NEON::BI__builtin_neon_vcged_s64: |
7669 | case NEON::BI__builtin_neon_vcled_u64: |
7670 | case NEON::BI__builtin_neon_vcled_s64: { |
7671 | llvm::CmpInst::Predicate P; |
7672 | switch (BuiltinID) { |
7673 | default: llvm_unreachable("missing builtin ID in switch!"); |
7674 | case NEON::BI__builtin_neon_vceqd_s64: |
7675 | case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break; |
7676 | case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break; |
7677 | case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break; |
7678 | case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break; |
7679 | case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break; |
7680 | case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break; |
7681 | case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break; |
7682 | case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break; |
7683 | case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break; |
7684 | } |
7685 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7686 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
7687 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
7688 | Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]); |
7689 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd"); |
7690 | } |
7691 | case NEON::BI__builtin_neon_vtstd_s64: |
7692 | case NEON::BI__builtin_neon_vtstd_u64: { |
7693 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7694 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
7695 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
7696 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
7697 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
7698 | llvm::Constant::getNullValue(Int64Ty)); |
7699 | return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd"); |
7700 | } |
7701 | case NEON::BI__builtin_neon_vset_lane_i8: |
7702 | case NEON::BI__builtin_neon_vset_lane_i16: |
7703 | case NEON::BI__builtin_neon_vset_lane_i32: |
7704 | case NEON::BI__builtin_neon_vset_lane_i64: |
7705 | case NEON::BI__builtin_neon_vset_lane_f32: |
7706 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
7707 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
7708 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
7709 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
7710 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
7711 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
7712 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
7713 | case NEON::BI__builtin_neon_vset_lane_f64: |
7714 | // The vector type needs a cast for the v1f64 variant. |
7715 | Ops[1] = Builder.CreateBitCast(Ops[1], |
7716 | llvm::VectorType::get(DoubleTy, 1)); |
7717 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
7718 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
7719 | case NEON::BI__builtin_neon_vsetq_lane_f64: |
7720 | // The vector type needs a cast for the v2f64 variant. |
7721 | Ops[1] = Builder.CreateBitCast(Ops[1], |
7722 | llvm::VectorType::get(DoubleTy, 2)); |
7723 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
7724 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
7725 | |
7726 | case NEON::BI__builtin_neon_vget_lane_i8: |
7727 | case NEON::BI__builtin_neon_vdupb_lane_i8: |
7728 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8)); |
7729 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7730 | "vget_lane"); |
7731 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
7732 | case NEON::BI__builtin_neon_vdupb_laneq_i8: |
7733 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16)); |
7734 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7735 | "vgetq_lane"); |
7736 | case NEON::BI__builtin_neon_vget_lane_i16: |
7737 | case NEON::BI__builtin_neon_vduph_lane_i16: |
7738 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4)); |
7739 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7740 | "vget_lane"); |
7741 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
7742 | case NEON::BI__builtin_neon_vduph_laneq_i16: |
7743 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8)); |
7744 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7745 | "vgetq_lane"); |
7746 | case NEON::BI__builtin_neon_vget_lane_i32: |
7747 | case NEON::BI__builtin_neon_vdups_lane_i32: |
7748 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2)); |
7749 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7750 | "vget_lane"); |
7751 | case NEON::BI__builtin_neon_vdups_lane_f32: |
7752 | Ops[0] = Builder.CreateBitCast(Ops[0], |
7753 | llvm::VectorType::get(FloatTy, 2)); |
7754 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7755 | "vdups_lane"); |
7756 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
7757 | case NEON::BI__builtin_neon_vdups_laneq_i32: |
7758 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); |
7759 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7760 | "vgetq_lane"); |
7761 | case NEON::BI__builtin_neon_vget_lane_i64: |
7762 | case NEON::BI__builtin_neon_vdupd_lane_i64: |
7763 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1)); |
7764 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7765 | "vget_lane"); |
7766 | case NEON::BI__builtin_neon_vdupd_lane_f64: |
7767 | Ops[0] = Builder.CreateBitCast(Ops[0], |
7768 | llvm::VectorType::get(DoubleTy, 1)); |
7769 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7770 | "vdupd_lane"); |
7771 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
7772 | case NEON::BI__builtin_neon_vdupd_laneq_i64: |
7773 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); |
7774 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7775 | "vgetq_lane"); |
7776 | case NEON::BI__builtin_neon_vget_lane_f32: |
7777 | Ops[0] = Builder.CreateBitCast(Ops[0], |
7778 | llvm::VectorType::get(FloatTy, 2)); |
7779 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7780 | "vget_lane"); |
7781 | case NEON::BI__builtin_neon_vget_lane_f64: |
7782 | Ops[0] = Builder.CreateBitCast(Ops[0], |
7783 | llvm::VectorType::get(DoubleTy, 1)); |
7784 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7785 | "vget_lane"); |
7786 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
7787 | case NEON::BI__builtin_neon_vdups_laneq_f32: |
7788 | Ops[0] = Builder.CreateBitCast(Ops[0], |
7789 | llvm::VectorType::get(FloatTy, 4)); |
7790 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7791 | "vgetq_lane"); |
7792 | case NEON::BI__builtin_neon_vgetq_lane_f64: |
7793 | case NEON::BI__builtin_neon_vdupd_laneq_f64: |
7794 | Ops[0] = Builder.CreateBitCast(Ops[0], |
7795 | llvm::VectorType::get(DoubleTy, 2)); |
7796 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7797 | "vgetq_lane"); |
7798 | case NEON::BI__builtin_neon_vaddh_f16: |
7799 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7800 | return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh"); |
7801 | case NEON::BI__builtin_neon_vsubh_f16: |
7802 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7803 | return Builder.CreateFSub(Ops[0], Ops[1], "vsubh"); |
7804 | case NEON::BI__builtin_neon_vmulh_f16: |
7805 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7806 | return Builder.CreateFMul(Ops[0], Ops[1], "vmulh"); |
7807 | case NEON::BI__builtin_neon_vdivh_f16: |
7808 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7809 | return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh"); |
7810 | case NEON::BI__builtin_neon_vfmah_f16: { |
7811 | Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy); |
7812 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
7813 | return Builder.CreateCall(F, |
7814 | {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]}); |
7815 | } |
7816 | case NEON::BI__builtin_neon_vfmsh_f16: { |
7817 | Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy); |
7818 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy); |
7819 | Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh"); |
7820 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. |
7821 | return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]}); |
7822 | } |
7823 | case NEON::BI__builtin_neon_vaddd_s64: |
7824 | case NEON::BI__builtin_neon_vaddd_u64: |
7825 | return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); |
7826 | case NEON::BI__builtin_neon_vsubd_s64: |
7827 | case NEON::BI__builtin_neon_vsubd_u64: |
7828 | return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); |
7829 | case NEON::BI__builtin_neon_vqdmlalh_s16: |
7830 | case NEON::BI__builtin_neon_vqdmlslh_s16: { |
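| // A minimal sketch of the approach used here: the widening saturating |
| // multiply is done with the vector sqdmull intrinsic on <4 x i16> operands |
| // (built via vectorWrapScalar16); lane 0 of the <4 x i32> product is then |
| // extracted and accumulated with a scalar saturating add or subtract. |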
7831 | SmallVector<Value *, 2> ProductOps; |
7832 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
7833 | ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); |
7834 | llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); |
7835 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
7836 | ProductOps, "vqdmlXl"); |
7837 | Constant *CI = ConstantInt::get(SizeTy, 0); |
7838 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
7839 | |
7840 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 |
7841 | ? Intrinsic::aarch64_neon_sqadd |
7842 | : Intrinsic::aarch64_neon_sqsub; |
7843 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); |
7844 | } |
7845 | case NEON::BI__builtin_neon_vqshlud_n_s64: { |
7846 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7847 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
7848 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), |
7849 | Ops, "vqshlu_n"); |
7850 | } |
7851 | case NEON::BI__builtin_neon_vqshld_n_u64: |
7852 | case NEON::BI__builtin_neon_vqshld_n_s64: { |
7853 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 |
7854 | ? Intrinsic::aarch64_neon_uqshl |
7855 | : Intrinsic::aarch64_neon_sqshl; |
7856 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
7857 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
7858 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); |
7859 | } |
7860 | case NEON::BI__builtin_neon_vrshrd_n_u64: |
7861 | case NEON::BI__builtin_neon_vrshrd_n_s64: { |
7862 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 |
7863 | ? Intrinsic::aarch64_neon_urshl |
7864 | : Intrinsic::aarch64_neon_srshl; |
7865 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
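| // The rounding shift intrinsics only shift left; a rounding right shift by |
| // N is expressed as a (u|s)rshl by -N, so the constant amount is negated. |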
7866 | int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); |
7867 | Ops[1] = ConstantInt::get(Int64Ty, -SV); |
7868 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n"); |
7869 | } |
7870 | case NEON::BI__builtin_neon_vrsrad_n_u64: |
7871 | case NEON::BI__builtin_neon_vrsrad_n_s64: { |
7872 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 |
7873 | ? Intrinsic::aarch64_neon_urshl |
7874 | : Intrinsic::aarch64_neon_srshl; |
7875 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
7876 | Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); |
7877 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), |
7878 | {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)}); |
7879 | return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty)); |
7880 | } |
7881 | case NEON::BI__builtin_neon_vshld_n_s64: |
7882 | case NEON::BI__builtin_neon_vshld_n_u64: { |
7883 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
7884 | return Builder.CreateShl( |
7885 | Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); |
7886 | } |
7887 | case NEON::BI__builtin_neon_vshrd_n_s64: { |
7888 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
7889 | return Builder.CreateAShr( |
7890 | Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
7891 | Amt->getZExtValue())), |
7892 | "shrd_n"); |
7893 | } |
7894 | case NEON::BI__builtin_neon_vshrd_n_u64: { |
7895 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
7896 | uint64_t ShiftAmt = Amt->getZExtValue(); |
7897 | // Right-shifting an unsigned value by its size yields 0. |
7898 | if (ShiftAmt == 64) |
7899 | return ConstantInt::get(Int64Ty, 0); |
7900 | return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt), |
7901 | "shrd_n"); |
7902 | } |
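// Illustrative note (assumption about IR shift semantics, not original
// source): the explicit ShiftAmt == 64 check above is needed because an i64
// 'lshr' whose shift amount equals the bit width yields an undefined/poison
// value, whereas the AArch64 builtin defines a right shift by 64 as 0, e.g.
//   vshrd_n_u64(x, 64)  ==>  i64 0   // folded, no lshr emitted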
7903 | case NEON::BI__builtin_neon_vsrad_n_s64: { |
7904 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
7905 | Ops[1] = Builder.CreateAShr( |
7906 | Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
7907 | Amt->getZExtValue())), |
7908 | "shrd_n"); |
7909 | return Builder.CreateAdd(Ops[0], Ops[1]); |
7910 | } |
7911 | case NEON::BI__builtin_neon_vsrad_n_u64: { |
7912 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
7913 | uint64_t ShiftAmt = Amt->getZExtValue(); |
7914 | // Right-shifting an unsigned value by its size yields 0. |
7915 | // As Op + 0 = Op, return Ops[0] directly. |
7916 | if (ShiftAmt == 64) |
7917 | return Ops[0]; |
7918 | Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt), |
7919 | "shrd_n"); |
7920 | return Builder.CreateAdd(Ops[0], Ops[1]); |
7921 | } |
7922 | case NEON::BI__builtin_neon_vqdmlalh_lane_s16: |
7923 | case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: |
7924 | case NEON::BI__builtin_neon_vqdmlslh_lane_s16: |
7925 | case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { |
7926 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
7927 | "lane"); |
7928 | SmallVector<Value *, 2> ProductOps; |
7929 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
7930 | ProductOps.push_back(vectorWrapScalar16(Ops[2])); |
7931 | llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); |
7932 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
7933 | ProductOps, "vqdmlXl"); |
7934 | Constant *CI = ConstantInt::get(SizeTy, 0); |
7935 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
7936 | Ops.pop_back(); |
7937 | |
7938 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || |
7939 | BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) |
7940 | ? Intrinsic::aarch64_neon_sqadd |
7941 | : Intrinsic::aarch64_neon_sqsub; |
7942 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl"); |
7943 | } |
7944 | case NEON::BI__builtin_neon_vqdmlals_s32: |
7945 | case NEON::BI__builtin_neon_vqdmlsls_s32: { |
7946 | SmallVector<Value *, 2> ProductOps; |
7947 | ProductOps.push_back(Ops[1]); |
7948 | ProductOps.push_back(EmitScalarExpr(E->getArg(2))); |
7949 | Ops[1] = |
7950 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
7951 | ProductOps, "vqdmlXl"); |
7952 | |
7953 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 |
7954 | ? Intrinsic::aarch64_neon_sqadd |
7955 | : Intrinsic::aarch64_neon_sqsub; |
7956 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); |
7957 | } |
7958 | case NEON::BI__builtin_neon_vqdmlals_lane_s32: |
7959 | case NEON::BI__builtin_neon_vqdmlals_laneq_s32: |
7960 | case NEON::BI__builtin_neon_vqdmlsls_lane_s32: |
7961 | case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { |
7962 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
7963 | "lane"); |
7964 | SmallVector<Value *, 2> ProductOps; |
7965 | ProductOps.push_back(Ops[1]); |
7966 | ProductOps.push_back(Ops[2]); |
7967 | Ops[1] = |
7968 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
7969 | ProductOps, "vqdmlXl"); |
7970 | Ops.pop_back(); |
7971 | |
7972 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || |
7973 | BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) |
7974 | ? Intrinsic::aarch64_neon_sqadd |
7975 | : Intrinsic::aarch64_neon_sqsub; |
7976 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl"); |
7977 | } |
7978 | case NEON::BI__builtin_neon_vduph_lane_f16: { |
7979 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7980 | "vget_lane"); |
7981 | } |
7982 | case NEON::BI__builtin_neon_vduph_laneq_f16: { |
7983 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
7984 | "vgetq_lane"); |
7985 | } |
7986 | } |
7987 | |
7988 | llvm::VectorType *VTy = GetNeonType(this, Type); |
7989 | llvm::Type *Ty = VTy; |
7990 | if (!Ty) |
7991 | return nullptr; |
7992 | |
7993 | // Not all intrinsics handled by the common case work for AArch64 yet, so only |
7994 | // defer to common code if it's been added to our special map. |
7995 | Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, |
7996 | AArch64SIMDIntrinsicsProvenSorted); |
7997 | |
7998 | if (Builtin) |
7999 | return EmitCommonNeonBuiltinExpr( |
8000 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
8001 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, |
8002 | /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); |
8003 | |
8004 | if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) |
8005 | return V; |
8006 | |
8007 | unsigned Int; |
8008 | switch (BuiltinID) { |
8009 | default: return nullptr; |
8010 | case NEON::BI__builtin_neon_vbsl_v: |
8011 | case NEON::BI__builtin_neon_vbslq_v: { |
8012 | llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); |
8013 | Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl"); |
8014 | Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl"); |
8015 | Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl"); |
8016 | |
8017 | Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl"); |
8018 | Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl"); |
8019 | Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl"); |
8020 | return Builder.CreateBitCast(Ops[0], Ty); |
8021 | } |
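// Illustrative note (restating what the three bitwise ops above compute): the
// bitwise-select identity is
//   result = (mask & a) | (~mask & b)
// applied to the integer view of the vectors, so each result bit comes from
// 'a' where the corresponding mask bit is 1 and from 'b' where it is 0.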
8022 | case NEON::BI__builtin_neon_vfma_lane_v: |
8023 | case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types |
8024 | // The ARM builtins (and instructions) have the addend as the first |
8025 | // operand, but the 'fma' intrinsics have it last. Swap it around here. |
8026 | Value *Addend = Ops[0]; |
8027 | Value *Multiplicand = Ops[1]; |
8028 | Value *LaneSource = Ops[2]; |
8029 | Ops[0] = Multiplicand; |
8030 | Ops[1] = LaneSource; |
8031 | Ops[2] = Addend; |
8032 | |
8033 | // Now adjust things to handle the lane access. |
8034 | llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ? |
8035 | llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) : |
8036 | VTy; |
8037 | llvm::Constant *cst = cast<Constant>(Ops[3]); |
8038 | Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst); |
8039 | Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy); |
8040 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane"); |
8041 | |
8042 | Ops.pop_back(); |
8043 | Int = Intrinsic::fma; |
8044 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla"); |
8045 | } |
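// Rough sketch (illustrative only, operand names hypothetical): for
// vfma_lane_f32(a, b, v, 1) the swap and lane splat above produce roughly
//   %lane = shufflevector <2 x float> %v, <2 x float> %v, <2 x i32> <i32 1, i32 1>
//   %res  = call <2 x float> @llvm.fma.v2f32(<2 x float> %b, <2 x float> %lane, <2 x float> %a)
// placing the ARM-style accumulator 'a' last, as llvm.fma expects.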
8046 | case NEON::BI__builtin_neon_vfma_laneq_v: { |
8047 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); |
8048 | // v1f64 fma should be mapped to Neon scalar f64 fma |
8049 | if (VTy && VTy->getElementType() == DoubleTy) { |
8050 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
8051 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
8052 | llvm::Type *VTy = GetNeonType(this, |
8053 | NeonTypeFlags(NeonTypeFlags::Float64, false, true)); |
8054 | Ops[2] = Builder.CreateBitCast(Ops[2], VTy); |
8055 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
8056 | Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy); |
8057 | Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); |
8058 | return Builder.CreateBitCast(Result, Ty); |
8059 | } |
8060 | Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty); |
8061 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8062 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8063 | |
8064 | llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(), |
8065 | VTy->getNumElements() * 2); |
8066 | Ops[2] = Builder.CreateBitCast(Ops[2], STy); |
8067 | Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), |
8068 | cast<ConstantInt>(Ops[3])); |
8069 | Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane"); |
8070 | |
8071 | return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]}); |
8072 | } |
8073 | case NEON::BI__builtin_neon_vfmaq_laneq_v: { |
8074 | Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty); |
8075 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8076 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8077 | |
8078 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
8079 | Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3])); |
8080 | return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]}); |
8081 | } |
8082 | case NEON::BI__builtin_neon_vfmah_lane_f16: |
8083 | case NEON::BI__builtin_neon_vfmas_lane_f32: |
8084 | case NEON::BI__builtin_neon_vfmah_laneq_f16: |
8085 | case NEON::BI__builtin_neon_vfmas_laneq_f32: |
8086 | case NEON::BI__builtin_neon_vfmad_lane_f64: |
8087 | case NEON::BI__builtin_neon_vfmad_laneq_f64: { |
8088 | Ops.push_back(EmitScalarExpr(E->getArg(3))); |
8089 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
8090 | Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty); |
8091 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
8092 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); |
8093 | } |
8094 | case NEON::BI__builtin_neon_vmull_v: |
8095 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
8096 | Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; |
8097 | if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; |
8098 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
8099 | case NEON::BI__builtin_neon_vmax_v: |
8100 | case NEON::BI__builtin_neon_vmaxq_v: |
8101 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
8102 | Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; |
8103 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; |
8104 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); |
8105 | case NEON::BI__builtin_neon_vmaxh_f16: { |
8106 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8107 | Int = Intrinsic::aarch64_neon_fmax; |
8108 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax"); |
8109 | } |
8110 | case NEON::BI__builtin_neon_vmin_v: |
8111 | case NEON::BI__builtin_neon_vminq_v: |
8112 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
8113 | Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; |
8114 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; |
8115 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); |
8116 | case NEON::BI__builtin_neon_vminh_f16: { |
8117 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8118 | Int = Intrinsic::aarch64_neon_fmin; |
8119 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin"); |
8120 | } |
8121 | case NEON::BI__builtin_neon_vabd_v: |
8122 | case NEON::BI__builtin_neon_vabdq_v: |
8123 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
8124 | Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; |
8125 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; |
8126 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); |
8127 | case NEON::BI__builtin_neon_vpadal_v: |
8128 | case NEON::BI__builtin_neon_vpadalq_v: { |
8129 | unsigned ArgElts = VTy->getNumElements(); |
8130 | llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); |
8131 | unsigned BitWidth = EltTy->getBitWidth(); |
8132 | llvm::Type *ArgTy = llvm::VectorType::get( |
8133 | llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts); |
8134 | llvm::Type* Tys[2] = { VTy, ArgTy }; |
8135 | Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; |
8136 | SmallVector<llvm::Value*, 1> TmpOps; |
8137 | TmpOps.push_back(Ops[1]); |
8138 | Function *F = CGM.getIntrinsic(Int, Tys); |
8139 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal"); |
8140 | llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); |
8141 | return Builder.CreateAdd(tmp, addend); |
8142 | } |
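// Rough sketch (illustrative only, exact intrinsic mangling may differ): the
// pairwise add-and-accumulate is built from two simpler steps; for
// vpadal_s8(acc, v) the emitted IR is approximately
//   %pairs = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %v)
//   %res   = add <4 x i16> %pairs, %acc
// i.e. a widening pairwise add of the source followed by a plain vector add
// into the (bitcast) accumulator.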
8143 | case NEON::BI__builtin_neon_vpmin_v: |
8144 | case NEON::BI__builtin_neon_vpminq_v: |
8145 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
8146 | Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; |
8147 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; |
8148 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); |
8149 | case NEON::BI__builtin_neon_vpmax_v: |
8150 | case NEON::BI__builtin_neon_vpmaxq_v: |
8151 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. |
8152 | Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; |
8153 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; |
8154 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); |
8155 | case NEON::BI__builtin_neon_vminnm_v: |
8156 | case NEON::BI__builtin_neon_vminnmq_v: |
8157 | Int = Intrinsic::aarch64_neon_fminnm; |
8158 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); |
8159 | case NEON::BI__builtin_neon_vminnmh_f16: |
8160 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8161 | Int = Intrinsic::aarch64_neon_fminnm; |
8162 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm"); |
8163 | case NEON::BI__builtin_neon_vmaxnm_v: |
8164 | case NEON::BI__builtin_neon_vmaxnmq_v: |
8165 | Int = Intrinsic::aarch64_neon_fmaxnm; |
8166 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); |
8167 | case NEON::BI__builtin_neon_vmaxnmh_f16: |
8168 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8169 | Int = Intrinsic::aarch64_neon_fmaxnm; |
8170 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm"); |
8171 | case NEON::BI__builtin_neon_vrecpss_f32: { |
8172 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8173 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), |
8174 | Ops, "vrecps"); |
8175 | } |
8176 | case NEON::BI__builtin_neon_vrecpsd_f64: |
8177 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8178 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), |
8179 | Ops, "vrecps"); |
8180 | case NEON::BI__builtin_neon_vrecpsh_f16: |
8181 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
8182 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), |
8183 | Ops, "vrecps"); |
8184 | case NEON::BI__builtin_neon_vqshrun_n_v: |
8185 | Int = Intrinsic::aarch64_neon_sqshrun; |
8186 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); |
8187 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
8188 | Int = Intrinsic::aarch64_neon_sqrshrun; |
8189 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); |
8190 | case NEON::BI__builtin_neon_vqshrn_n_v: |
8191 | Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; |
8192 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); |
8193 | case NEON::BI__builtin_neon_vrshrn_n_v: |
8194 | Int = Intrinsic::aarch64_neon_rshrn; |
8195 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); |
8196 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
8197 | Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; |
8198 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); |
8199 | case NEON::BI__builtin_neon_vrndah_f16: { |
8200 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8201 | Int = Intrinsic::round; |
8202 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda"); |
8203 | } |
8204 | case NEON::BI__builtin_neon_vrnda_v: |
8205 | case NEON::BI__builtin_neon_vrndaq_v: { |
8206 | Int = Intrinsic::round; |
8207 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda"); |
8208 | } |
8209 | case NEON::BI__builtin_neon_vrndih_f16: { |
8210 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8211 | Int = Intrinsic::nearbyint; |
8212 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi"); |
8213 | } |
8214 | case NEON::BI__builtin_neon_vrndmh_f16: { |
8215 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8216 | Int = Intrinsic::floor; |
8217 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm"); |
8218 | } |
8219 | case NEON::BI__builtin_neon_vrndm_v: |
8220 | case NEON::BI__builtin_neon_vrndmq_v: { |
8221 | Int = Intrinsic::floor; |
8222 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm"); |
8223 | } |
8224 | case NEON::BI__builtin_neon_vrndnh_f16: { |
8225 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8226 | Int = Intrinsic::aarch64_neon_frintn; |
8227 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn"); |
8228 | } |
8229 | case NEON::BI__builtin_neon_vrndn_v: |
8230 | case NEON::BI__builtin_neon_vrndnq_v: { |
8231 | Int = Intrinsic::aarch64_neon_frintn; |
8232 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn"); |
8233 | } |
8234 | case NEON::BI__builtin_neon_vrndns_f32: { |
8235 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8236 | Int = Intrinsic::aarch64_neon_frintn; |
8237 | return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn"); |
8238 | } |
8239 | case NEON::BI__builtin_neon_vrndph_f16: { |
8240 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8241 | Int = Intrinsic::ceil; |
8242 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp"); |
8243 | } |
8244 | case NEON::BI__builtin_neon_vrndp_v: |
8245 | case NEON::BI__builtin_neon_vrndpq_v: { |
8246 | Int = Intrinsic::ceil; |
8247 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp"); |
8248 | } |
8249 | case NEON::BI__builtin_neon_vrndxh_f16: { |
8250 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8251 | Int = Intrinsic::rint; |
8252 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx"); |
8253 | } |
8254 | case NEON::BI__builtin_neon_vrndx_v: |
8255 | case NEON::BI__builtin_neon_vrndxq_v: { |
8256 | Int = Intrinsic::rint; |
8257 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx"); |
8258 | } |
8259 | case NEON::BI__builtin_neon_vrndh_f16: { |
8260 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8261 | Int = Intrinsic::trunc; |
8262 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz"); |
8263 | } |
8264 | case NEON::BI__builtin_neon_vrnd_v: |
8265 | case NEON::BI__builtin_neon_vrndq_v: { |
8266 | Int = Intrinsic::trunc; |
8267 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz"); |
8268 | } |
8269 | case NEON::BI__builtin_neon_vcvt_f64_v: |
8270 | case NEON::BI__builtin_neon_vcvtq_f64_v: |
8271 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8272 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad)); |
8273 | return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
8274 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
8275 | case NEON::BI__builtin_neon_vcvt_f64_f32: { |
8276 | assert(Type.getEltType() == NeonTypeFlags::Float64 && quad && |
8277 | "unexpected vcvt_f64_f32 builtin"); |
8278 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false); |
8279 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
8280 | |
8281 | return Builder.CreateFPExt(Ops[0], Ty, "vcvt"); |
8282 | } |
8283 | case NEON::BI__builtin_neon_vcvt_f32_f64: { |
8284 | assert(Type.getEltType() == NeonTypeFlags::Float32 && |
8285 | "unexpected vcvt_f32_f64 builtin"); |
8286 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true); |
8287 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
8288 | |
8289 | return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt"); |
8290 | } |
8291 | case NEON::BI__builtin_neon_vcvt_s32_v: |
8292 | case NEON::BI__builtin_neon_vcvt_u32_v: |
8293 | case NEON::BI__builtin_neon_vcvt_s64_v: |
8294 | case NEON::BI__builtin_neon_vcvt_u64_v: |
8295 | case NEON::BI__builtin_neon_vcvt_s16_v: |
8296 | case NEON::BI__builtin_neon_vcvt_u16_v: |
8297 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
8298 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
8299 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
8300 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
8301 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
8302 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
8303 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); |
8304 | if (usgn) |
8305 | return Builder.CreateFPToUI(Ops[0], Ty); |
8306 | return Builder.CreateFPToSI(Ops[0], Ty); |
8307 | } |
8308 | case NEON::BI__builtin_neon_vcvta_s16_v: |
8309 | case NEON::BI__builtin_neon_vcvta_u16_v: |
8310 | case NEON::BI__builtin_neon_vcvta_s32_v: |
8311 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
8312 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
8313 | case NEON::BI__builtin_neon_vcvta_u32_v: |
8314 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
8315 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
8316 | case NEON::BI__builtin_neon_vcvta_s64_v: |
8317 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
8318 | case NEON::BI__builtin_neon_vcvta_u64_v: |
8319 | case NEON::BI__builtin_neon_vcvtaq_u64_v: { |
8320 | Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; |
8321 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
8322 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta"); |
8323 | } |
8324 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
8325 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
8326 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
8327 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
8328 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
8329 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
8330 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
8331 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
8332 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
8333 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
8334 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
8335 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
8336 | Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; |
8337 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
8338 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm"); |
8339 | } |
8340 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
8341 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
8342 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
8343 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
8344 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
8345 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
8346 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
8347 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
8348 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
8349 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
8350 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
8351 | case NEON::BI__builtin_neon_vcvtnq_u64_v: { |
8352 | Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; |
8353 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
8354 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn"); |
8355 | } |
8356 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
8357 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
8358 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
8359 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
8360 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
8361 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
8362 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
8363 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
8364 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
8365 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
8366 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
8367 | case NEON::BI__builtin_neon_vcvtpq_u64_v: { |
8368 | Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; |
8369 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
8370 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp"); |
8371 | } |
8372 | case NEON::BI__builtin_neon_vmulx_v: |
8373 | case NEON::BI__builtin_neon_vmulxq_v: { |
8374 | Int = Intrinsic::aarch64_neon_fmulx; |
8375 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); |
8376 | } |
8377 | case NEON::BI__builtin_neon_vmulxh_lane_f16: |
8378 | case NEON::BI__builtin_neon_vmulxh_laneq_f16: { |
8379 | // vmulx_lane should be mapped to Neon scalar mulx after |
8380 | // extracting the scalar element |
8381 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
8382 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
8383 | Ops.pop_back(); |
8384 | Int = Intrinsic::aarch64_neon_fmulx; |
8385 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx"); |
8386 | } |
8387 | case NEON::BI__builtin_neon_vmul_lane_v: |
8388 | case NEON::BI__builtin_neon_vmul_laneq_v: { |
8389 | // v1f64 vmul_lane should be mapped to Neon scalar mul lane |
8390 | bool Quad = false; |
8391 | if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v) |
8392 | Quad = true; |
8393 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
8394 | llvm::Type *VTy = GetNeonType(this, |
8395 | NeonTypeFlags(NeonTypeFlags::Float64, false, Quad)); |
8396 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
8397 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
8398 | Value *Result = Builder.CreateFMul(Ops[0], Ops[1]); |
8399 | return Builder.CreateBitCast(Result, Ty); |
8400 | } |
8401 | case NEON::BI__builtin_neon_vnegd_s64: |
8402 | return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); |
8403 | case NEON::BI__builtin_neon_vnegh_f16: |
8404 | return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh"); |
8405 | case NEON::BI__builtin_neon_vpmaxnm_v: |
8406 | case NEON::BI__builtin_neon_vpmaxnmq_v: { |
8407 | Int = Intrinsic::aarch64_neon_fmaxnmp; |
8408 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); |
8409 | } |
8410 | case NEON::BI__builtin_neon_vpminnm_v: |
8411 | case NEON::BI__builtin_neon_vpminnmq_v: { |
8412 | Int = Intrinsic::aarch64_neon_fminnmp; |
8413 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); |
8414 | } |
8415 | case NEON::BI__builtin_neon_vsqrth_f16: { |
8416 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8417 | Int = Intrinsic::sqrt; |
8418 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt"); |
8419 | } |
8420 | case NEON::BI__builtin_neon_vsqrt_v: |
8421 | case NEON::BI__builtin_neon_vsqrtq_v: { |
8422 | Int = Intrinsic::sqrt; |
8423 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8424 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt"); |
8425 | } |
8426 | case NEON::BI__builtin_neon_vrbit_v: |
8427 | case NEON::BI__builtin_neon_vrbitq_v: { |
8428 | Int = Intrinsic::aarch64_neon_rbit; |
8429 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit"); |
8430 | } |
8431 | case NEON::BI__builtin_neon_vaddv_u8: |
8432 | // FIXME: These are handled by the AArch64 scalar code. |
8433 | usgn = true; |
8434 | LLVM_FALLTHROUGH; |
8435 | case NEON::BI__builtin_neon_vaddv_s8: { |
8436 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
8437 | Ty = Int32Ty; |
8438 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8439 | llvm::Type *Tys[2] = { Ty, VTy }; |
8440 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8441 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
8442 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8443 | } |
8444 | case NEON::BI__builtin_neon_vaddv_u16: |
8445 | usgn = true; |
8446 | LLVM_FALLTHROUGH; |
8447 | case NEON::BI__builtin_neon_vaddv_s16: { |
8448 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
8449 | Ty = Int32Ty; |
8450 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8451 | llvm::Type *Tys[2] = { Ty, VTy }; |
8452 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8453 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
8454 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8455 | } |
8456 | case NEON::BI__builtin_neon_vaddvq_u8: |
8457 | usgn = true; |
8458 | LLVM_FALLTHROUGH; |
8459 | case NEON::BI__builtin_neon_vaddvq_s8: { |
8460 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
8461 | Ty = Int32Ty; |
8462 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8463 | llvm::Type *Tys[2] = { Ty, VTy }; |
8464 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8465 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
8466 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8467 | } |
8468 | case NEON::BI__builtin_neon_vaddvq_u16: |
8469 | usgn = true; |
8470 | LLVM_FALLTHROUGH; |
8471 | case NEON::BI__builtin_neon_vaddvq_s16: { |
8472 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
8473 | Ty = Int32Ty; |
8474 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8475 | llvm::Type *Tys[2] = { Ty, VTy }; |
8476 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8477 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
8478 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8479 | } |
8480 | case NEON::BI__builtin_neon_vmaxv_u8: { |
8481 | Int = Intrinsic::aarch64_neon_umaxv; |
8482 | Ty = Int32Ty; |
8483 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8484 | llvm::Type *Tys[2] = { Ty, VTy }; |
8485 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8486 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8487 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8488 | } |
8489 | case NEON::BI__builtin_neon_vmaxv_u16: { |
8490 | Int = Intrinsic::aarch64_neon_umaxv; |
8491 | Ty = Int32Ty; |
8492 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8493 | llvm::Type *Tys[2] = { Ty, VTy }; |
8494 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8495 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8496 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8497 | } |
8498 | case NEON::BI__builtin_neon_vmaxvq_u8: { |
8499 | Int = Intrinsic::aarch64_neon_umaxv; |
8500 | Ty = Int32Ty; |
8501 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8502 | llvm::Type *Tys[2] = { Ty, VTy }; |
8503 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8504 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8505 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8506 | } |
8507 | case NEON::BI__builtin_neon_vmaxvq_u16: { |
8508 | Int = Intrinsic::aarch64_neon_umaxv; |
8509 | Ty = Int32Ty; |
8510 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8511 | llvm::Type *Tys[2] = { Ty, VTy }; |
8512 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8513 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8514 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8515 | } |
8516 | case NEON::BI__builtin_neon_vmaxv_s8: { |
8517 | Int = Intrinsic::aarch64_neon_smaxv; |
8518 | Ty = Int32Ty; |
8519 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8520 | llvm::Type *Tys[2] = { Ty, VTy }; |
8521 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8522 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8523 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8524 | } |
8525 | case NEON::BI__builtin_neon_vmaxv_s16: { |
8526 | Int = Intrinsic::aarch64_neon_smaxv; |
8527 | Ty = Int32Ty; |
8528 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8529 | llvm::Type *Tys[2] = { Ty, VTy }; |
8530 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8531 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8532 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8533 | } |
8534 | case NEON::BI__builtin_neon_vmaxvq_s8: { |
8535 | Int = Intrinsic::aarch64_neon_smaxv; |
8536 | Ty = Int32Ty; |
8537 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8538 | llvm::Type *Tys[2] = { Ty, VTy }; |
8539 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8540 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8541 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8542 | } |
8543 | case NEON::BI__builtin_neon_vmaxvq_s16: { |
8544 | Int = Intrinsic::aarch64_neon_smaxv; |
8545 | Ty = Int32Ty; |
8546 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8547 | llvm::Type *Tys[2] = { Ty, VTy }; |
8548 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8549 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8550 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8551 | } |
8552 | case NEON::BI__builtin_neon_vmaxv_f16: { |
8553 | Int = Intrinsic::aarch64_neon_fmaxv; |
8554 | Ty = HalfTy; |
8555 | VTy = llvm::VectorType::get(HalfTy, 4); |
8556 | llvm::Type *Tys[2] = { Ty, VTy }; |
8557 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8558 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8559 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8560 | } |
8561 | case NEON::BI__builtin_neon_vmaxvq_f16: { |
8562 | Int = Intrinsic::aarch64_neon_fmaxv; |
8563 | Ty = HalfTy; |
8564 | VTy = llvm::VectorType::get(HalfTy, 8); |
8565 | llvm::Type *Tys[2] = { Ty, VTy }; |
8566 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8567 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
8568 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8569 | } |
8570 | case NEON::BI__builtin_neon_vminv_u8: { |
8571 | Int = Intrinsic::aarch64_neon_uminv; |
8572 | Ty = Int32Ty; |
8573 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8574 | llvm::Type *Tys[2] = { Ty, VTy }; |
8575 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8576 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8577 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8578 | } |
8579 | case NEON::BI__builtin_neon_vminv_u16: { |
8580 | Int = Intrinsic::aarch64_neon_uminv; |
8581 | Ty = Int32Ty; |
8582 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8583 | llvm::Type *Tys[2] = { Ty, VTy }; |
8584 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8585 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8586 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8587 | } |
8588 | case NEON::BI__builtin_neon_vminvq_u8: { |
8589 | Int = Intrinsic::aarch64_neon_uminv; |
8590 | Ty = Int32Ty; |
8591 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8592 | llvm::Type *Tys[2] = { Ty, VTy }; |
8593 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8594 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8595 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8596 | } |
8597 | case NEON::BI__builtin_neon_vminvq_u16: { |
8598 | Int = Intrinsic::aarch64_neon_uminv; |
8599 | Ty = Int32Ty; |
8600 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8601 | llvm::Type *Tys[2] = { Ty, VTy }; |
8602 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8603 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8604 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8605 | } |
8606 | case NEON::BI__builtin_neon_vminv_s8: { |
8607 | Int = Intrinsic::aarch64_neon_sminv; |
8608 | Ty = Int32Ty; |
8609 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8610 | llvm::Type *Tys[2] = { Ty, VTy }; |
8611 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8612 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8613 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8614 | } |
8615 | case NEON::BI__builtin_neon_vminv_s16: { |
8616 | Int = Intrinsic::aarch64_neon_sminv; |
8617 | Ty = Int32Ty; |
8618 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8619 | llvm::Type *Tys[2] = { Ty, VTy }; |
8620 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8621 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8622 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8623 | } |
8624 | case NEON::BI__builtin_neon_vminvq_s8: { |
8625 | Int = Intrinsic::aarch64_neon_sminv; |
8626 | Ty = Int32Ty; |
8627 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8628 | llvm::Type *Tys[2] = { Ty, VTy }; |
8629 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8630 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8631 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
8632 | } |
8633 | case NEON::BI__builtin_neon_vminvq_s16: { |
8634 | Int = Intrinsic::aarch64_neon_sminv; |
8635 | Ty = Int32Ty; |
8636 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8637 | llvm::Type *Tys[2] = { Ty, VTy }; |
8638 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8639 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8640 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8641 | } |
8642 | case NEON::BI__builtin_neon_vminv_f16: { |
8643 | Int = Intrinsic::aarch64_neon_fminv; |
8644 | Ty = HalfTy; |
8645 | VTy = llvm::VectorType::get(HalfTy, 4); |
8646 | llvm::Type *Tys[2] = { Ty, VTy }; |
8647 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8648 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8649 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8650 | } |
8651 | case NEON::BI__builtin_neon_vminvq_f16: { |
8652 | Int = Intrinsic::aarch64_neon_fminv; |
8653 | Ty = HalfTy; |
8654 | VTy = llvm::VectorType::get(HalfTy, 8); |
8655 | llvm::Type *Tys[2] = { Ty, VTy }; |
8656 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8657 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
8658 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8659 | } |
8660 | case NEON::BI__builtin_neon_vmaxnmv_f16: { |
8661 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
8662 | Ty = HalfTy; |
8663 | VTy = llvm::VectorType::get(HalfTy, 4); |
8664 | llvm::Type *Tys[2] = { Ty, VTy }; |
8665 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8666 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
8667 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8668 | } |
8669 | case NEON::BI__builtin_neon_vmaxnmvq_f16: { |
8670 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
8671 | Ty = HalfTy; |
8672 | VTy = llvm::VectorType::get(HalfTy, 8); |
8673 | llvm::Type *Tys[2] = { Ty, VTy }; |
8674 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8675 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
8676 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8677 | } |
8678 | case NEON::BI__builtin_neon_vminnmv_f16: { |
8679 | Int = Intrinsic::aarch64_neon_fminnmv; |
8680 | Ty = HalfTy; |
8681 | VTy = llvm::VectorType::get(HalfTy, 4); |
8682 | llvm::Type *Tys[2] = { Ty, VTy }; |
8683 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8684 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
8685 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8686 | } |
8687 | case NEON::BI__builtin_neon_vminnmvq_f16: { |
8688 | Int = Intrinsic::aarch64_neon_fminnmv; |
8689 | Ty = HalfTy; |
8690 | VTy = llvm::VectorType::get(HalfTy, 8); |
8691 | llvm::Type *Tys[2] = { Ty, VTy }; |
8692 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8693 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
8694 | return Builder.CreateTrunc(Ops[0], HalfTy); |
8695 | } |
8696 | case NEON::BI__builtin_neon_vmul_n_f64: { |
8697 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
8698 | Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); |
8699 | return Builder.CreateFMul(Ops[0], RHS); |
8700 | } |
8701 | case NEON::BI__builtin_neon_vaddlv_u8: { |
8702 | Int = Intrinsic::aarch64_neon_uaddlv; |
8703 | Ty = Int32Ty; |
8704 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8705 | llvm::Type *Tys[2] = { Ty, VTy }; |
8706 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8707 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8708 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8709 | } |
8710 | case NEON::BI__builtin_neon_vaddlv_u16: { |
8711 | Int = Intrinsic::aarch64_neon_uaddlv; |
8712 | Ty = Int32Ty; |
8713 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8714 | llvm::Type *Tys[2] = { Ty, VTy }; |
8715 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8716 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8717 | } |
8718 | case NEON::BI__builtin_neon_vaddlvq_u8: { |
8719 | Int = Intrinsic::aarch64_neon_uaddlv; |
8720 | Ty = Int32Ty; |
8721 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8722 | llvm::Type *Tys[2] = { Ty, VTy }; |
8723 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8724 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8725 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8726 | } |
8727 | case NEON::BI__builtin_neon_vaddlvq_u16: { |
8728 | Int = Intrinsic::aarch64_neon_uaddlv; |
8729 | Ty = Int32Ty; |
8730 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8731 | llvm::Type *Tys[2] = { Ty, VTy }; |
8732 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8733 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8734 | } |
8735 | case NEON::BI__builtin_neon_vaddlv_s8: { |
8736 | Int = Intrinsic::aarch64_neon_saddlv; |
8737 | Ty = Int32Ty; |
8738 | VTy = llvm::VectorType::get(Int8Ty, 8); |
8739 | llvm::Type *Tys[2] = { Ty, VTy }; |
8740 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8741 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8742 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8743 | } |
8744 | case NEON::BI__builtin_neon_vaddlv_s16: { |
8745 | Int = Intrinsic::aarch64_neon_saddlv; |
8746 | Ty = Int32Ty; |
8747 | VTy = llvm::VectorType::get(Int16Ty, 4); |
8748 | llvm::Type *Tys[2] = { Ty, VTy }; |
8749 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8750 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8751 | } |
8752 | case NEON::BI__builtin_neon_vaddlvq_s8: { |
8753 | Int = Intrinsic::aarch64_neon_saddlv; |
8754 | Ty = Int32Ty; |
8755 | VTy = llvm::VectorType::get(Int8Ty, 16); |
8756 | llvm::Type *Tys[2] = { Ty, VTy }; |
8757 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8758 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8759 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
8760 | } |
8761 | case NEON::BI__builtin_neon_vaddlvq_s16: { |
8762 | Int = Intrinsic::aarch64_neon_saddlv; |
8763 | Ty = Int32Ty; |
8764 | VTy = llvm::VectorType::get(Int16Ty, 8); |
8765 | llvm::Type *Tys[2] = { Ty, VTy }; |
8766 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
8767 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
8768 | } |
8769 | case NEON::BI__builtin_neon_vsri_n_v: |
8770 | case NEON::BI__builtin_neon_vsriq_n_v: { |
8771 | Int = Intrinsic::aarch64_neon_vsri; |
8772 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
8773 | return EmitNeonCall(Intrin, Ops, "vsri_n"); |
8774 | } |
8775 | case NEON::BI__builtin_neon_vsli_n_v: |
8776 | case NEON::BI__builtin_neon_vsliq_n_v: { |
8777 | Int = Intrinsic::aarch64_neon_vsli; |
8778 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
8779 | return EmitNeonCall(Intrin, Ops, "vsli_n"); |
8780 | } |
8781 | case NEON::BI__builtin_neon_vsra_n_v: |
8782 | case NEON::BI__builtin_neon_vsraq_n_v: |
8783 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8784 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
8785 | return Builder.CreateAdd(Ops[0], Ops[1]); |
8786 | case NEON::BI__builtin_neon_vrsra_n_v: |
8787 | case NEON::BI__builtin_neon_vrsraq_n_v: { |
8788 | Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; |
8789 | SmallVector<llvm::Value*,2> TmpOps; |
8790 | TmpOps.push_back(Ops[1]); |
8791 | TmpOps.push_back(Ops[2]); |
8792 | Function* F = CGM.getIntrinsic(Int, Ty); |
8793 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true); |
8794 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
8795 | return Builder.CreateAdd(Ops[0], tmp); |
8796 | } |
8797 | case NEON::BI__builtin_neon_vld1_v: |
8798 | case NEON::BI__builtin_neon_vld1q_v: { |
8799 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
8800 | auto Alignment = CharUnits::fromQuantity( |
8801 | BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16); |
8802 | return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment); |
8803 | } |
8804 | case NEON::BI__builtin_neon_vst1_v: |
8805 | case NEON::BI__builtin_neon_vst1q_v: |
8806 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
8807 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
8808 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8809 | case NEON::BI__builtin_neon_vld1_lane_v: |
8810 | case NEON::BI__builtin_neon_vld1q_lane_v: { |
8811 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8812 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
8813 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8814 | auto Alignment = CharUnits::fromQuantity( |
8815 | BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16); |
8816 | Ops[0] = |
8817 | Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment); |
8818 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane"); |
8819 | } |
8820 | case NEON::BI__builtin_neon_vld1_dup_v: |
8821 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
8822 | Value *V = UndefValue::get(Ty); |
8823 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
8824 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8825 | auto Alignment = CharUnits::fromQuantity( |
8826 | BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16); |
8827 | Ops[0] = |
8828 | Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment); |
8829 | llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); |
8830 | Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); |
8831 | return EmitNeonSplat(Ops[0], CI); |
8832 | } |
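// Rough sketch (illustrative only, exact IR may differ): the
// load-and-duplicate lowering above loads a single element, inserts it into
// lane 0 of an undef vector, and splats that lane; for vld1_dup_f32(p) this
// is approximately
//   %e = load float, float* %p, align 8
//   %v = insertelement <2 x float> undef, float %e, i32 0
//   %r = shufflevector <2 x float> %v, <2 x float> %v, <2 x i32> zeroinitializer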
8833 | case NEON::BI__builtin_neon_vst1_lane_v: |
8834 | case NEON::BI__builtin_neon_vst1q_lane_v: |
8835 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8836 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
8837 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
8838 | return Builder.CreateDefaultAlignedStore(Ops[1], |
8839 | Builder.CreateBitCast(Ops[0], Ty)); |
8840 | case NEON::BI__builtin_neon_vld2_v: |
8841 | case NEON::BI__builtin_neon_vld2q_v: { |
8842 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
8843 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
8844 | llvm::Type *Tys[2] = { VTy, PTy }; |
8845 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); |
8846 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
8847 | Ops[0] = Builder.CreateBitCast(Ops[0], |
8848 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
8849 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8850 | } |
8851 | case NEON::BI__builtin_neon_vld3_v: |
8852 | case NEON::BI__builtin_neon_vld3q_v: { |
8853 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
8854 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
8855 | llvm::Type *Tys[2] = { VTy, PTy }; |
8856 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); |
8857 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
8858 | Ops[0] = Builder.CreateBitCast(Ops[0], |
8859 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
8860 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8861 | } |
8862 | case NEON::BI__builtin_neon_vld4_v: |
8863 | case NEON::BI__builtin_neon_vld4q_v: { |
8864 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
8865 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
8866 | llvm::Type *Tys[2] = { VTy, PTy }; |
8867 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); |
8868 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
8869 | Ops[0] = Builder.CreateBitCast(Ops[0], |
8870 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
8871 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8872 | } |
8873 | case NEON::BI__builtin_neon_vld2_dup_v: |
8874 | case NEON::BI__builtin_neon_vld2q_dup_v: { |
8875 | llvm::Type *PTy = |
8876 | llvm::PointerType::getUnqual(VTy->getElementType()); |
8877 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
8878 | llvm::Type *Tys[2] = { VTy, PTy }; |
8879 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); |
8880 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
8881 | Ops[0] = Builder.CreateBitCast(Ops[0], |
8882 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
8883 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8884 | } |
8885 | case NEON::BI__builtin_neon_vld3_dup_v: |
8886 | case NEON::BI__builtin_neon_vld3q_dup_v: { |
8887 | llvm::Type *PTy = |
8888 | llvm::PointerType::getUnqual(VTy->getElementType()); |
8889 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
8890 | llvm::Type *Tys[2] = { VTy, PTy }; |
8891 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); |
8892 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
8893 | Ops[0] = Builder.CreateBitCast(Ops[0], |
8894 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
8895 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8896 | } |
8897 | case NEON::BI__builtin_neon_vld4_dup_v: |
8898 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
8899 | llvm::Type *PTy = |
8900 | llvm::PointerType::getUnqual(VTy->getElementType()); |
8901 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
8902 | llvm::Type *Tys[2] = { VTy, PTy }; |
8903 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); |
8904 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
8905 | Ops[0] = Builder.CreateBitCast(Ops[0], |
8906 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
8907 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8908 | } |
8909 | case NEON::BI__builtin_neon_vld2_lane_v: |
8910 | case NEON::BI__builtin_neon_vld2q_lane_v: { |
8911 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
8912 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); |
8913 | Ops.push_back(Ops[1]); |
8914 | Ops.erase(Ops.begin()+1); |
8915 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8916 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
8917 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
8918 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane"); |
8919 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
8920 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8921 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8922 | } |
8923 | case NEON::BI__builtin_neon_vld3_lane_v: |
8924 | case NEON::BI__builtin_neon_vld3q_lane_v: { |
8925 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
8926 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); |
8927 | Ops.push_back(Ops[1]); |
8928 | Ops.erase(Ops.begin()+1); |
8929 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8930 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
8931 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
8932 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
8933 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane"); |
8934 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
8935 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8936 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8937 | } |
8938 | case NEON::BI__builtin_neon_vld4_lane_v: |
8939 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
8940 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
8941 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); |
8942 | Ops.push_back(Ops[1]); |
8943 | Ops.erase(Ops.begin()+1); |
8944 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
8945 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
8946 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
8947 | Ops[4] = Builder.CreateBitCast(Ops[4], Ty); |
8948 | Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty); |
8949 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane"); |
8950 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
8951 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
8952 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
8953 | } |
8954 | case NEON::BI__builtin_neon_vst2_v: |
8955 | case NEON::BI__builtin_neon_vst2q_v: { |
8956 | Ops.push_back(Ops[0]); |
8957 | Ops.erase(Ops.begin()); |
8958 | llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; |
8959 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), |
8960 | Ops, ""); |
8961 | } |
8962 | case NEON::BI__builtin_neon_vst2_lane_v: |
8963 | case NEON::BI__builtin_neon_vst2q_lane_v: { |
8964 | Ops.push_back(Ops[0]); |
8965 | Ops.erase(Ops.begin()); |
8966 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
8967 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
8968 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), |
8969 | Ops, ""); |
8970 | } |
8971 | case NEON::BI__builtin_neon_vst3_v: |
8972 | case NEON::BI__builtin_neon_vst3q_v: { |
8973 | Ops.push_back(Ops[0]); |
8974 | Ops.erase(Ops.begin()); |
8975 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
8976 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), |
8977 | Ops, ""); |
8978 | } |
8979 | case NEON::BI__builtin_neon_vst3_lane_v: |
8980 | case NEON::BI__builtin_neon_vst3q_lane_v: { |
8981 | Ops.push_back(Ops[0]); |
8982 | Ops.erase(Ops.begin()); |
8983 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
8984 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
8985 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), |
8986 | Ops, ""); |
8987 | } |
8988 | case NEON::BI__builtin_neon_vst4_v: |
8989 | case NEON::BI__builtin_neon_vst4q_v: { |
8990 | Ops.push_back(Ops[0]); |
8991 | Ops.erase(Ops.begin()); |
8992 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
8993 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), |
8994 | Ops, ""); |
8995 | } |
8996 | case NEON::BI__builtin_neon_vst4_lane_v: |
8997 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
8998 | Ops.push_back(Ops[0]); |
8999 | Ops.erase(Ops.begin()); |
9000 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
9001 | llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; |
9002 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), |
9003 | Ops, ""); |
9004 | } |
9005 | case NEON::BI__builtin_neon_vtrn_v: |
9006 | case NEON::BI__builtin_neon_vtrnq_v: { |
9007 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
9008 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
9009 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
9010 | Value *SV = nullptr; |
9011 | |
9012 | for (unsigned vi = 0; vi != 2; ++vi) { |
9013 | SmallVector<uint32_t, 16> Indices; |
9014 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
9015 | Indices.push_back(i+vi); |
9016 | Indices.push_back(i+e+vi); |
9017 | } |
9018 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
9019 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
9020 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
9021 | } |
9022 | return SV; |
9023 | } |
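// Illustrative note (worked example of the index math above): for 4-element
// vectors the two transpose shuffles use the masks
//   vi = 0 : <0, 4, 2, 6>   (even lanes of both inputs interleaved)
//   vi = 1 : <1, 5, 3, 7>   (odd lanes of both inputs interleaved)
// and each half is stored to the vi'th vector of the result pair at Ops[0].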
9024 | case NEON::BI__builtin_neon_vuzp_v: |
9025 | case NEON::BI__builtin_neon_vuzpq_v: { |
9026 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
9027 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
9028 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
9029 | Value *SV = nullptr; |
9030 | |
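     | // Build unzip shuffles; e.g. for 4-element vectors the two passes use indices
     | // {0,2,4,6} (even lanes) and {1,3,5,7} (odd lanes) taken across both inputs.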
9031 | for (unsigned vi = 0; vi != 2; ++vi) { |
9032 | SmallVector<uint32_t, 16> Indices; |
9033 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
9034 | Indices.push_back(2*i+vi); |
9035 | |
9036 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
9037 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
9038 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
9039 | } |
9040 | return SV; |
9041 | } |
9042 | case NEON::BI__builtin_neon_vzip_v: |
9043 | case NEON::BI__builtin_neon_vzipq_v: { |
9044 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
9045 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
9046 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
9047 | Value *SV = nullptr; |
9048 | |
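     | // Build zip (interleave) shuffles; e.g. for 4-element vectors the two passes
     | // use indices {0,4,1,5} and {2,6,3,7}, interleaving the low then high halves.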
9049 | for (unsigned vi = 0; vi != 2; ++vi) { |
9050 | SmallVector<uint32_t, 16> Indices; |
9051 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
9052 | Indices.push_back((i + vi*e) >> 1); |
9053 | Indices.push_back(((i + vi*e) >> 1)+e); |
9054 | } |
9055 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
9056 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
9057 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
9058 | } |
9059 | return SV; |
9060 | } |
9061 | case NEON::BI__builtin_neon_vqtbl1q_v: { |
9062 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), |
9063 | Ops, "vtbl1"); |
9064 | } |
9065 | case NEON::BI__builtin_neon_vqtbl2q_v: { |
9066 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), |
9067 | Ops, "vtbl2"); |
9068 | } |
9069 | case NEON::BI__builtin_neon_vqtbl3q_v: { |
9070 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), |
9071 | Ops, "vtbl3"); |
9072 | } |
9073 | case NEON::BI__builtin_neon_vqtbl4q_v: { |
9074 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), |
9075 | Ops, "vtbl4"); |
9076 | } |
9077 | case NEON::BI__builtin_neon_vqtbx1q_v: { |
9078 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), |
9079 | Ops, "vtbx1"); |
9080 | } |
9081 | case NEON::BI__builtin_neon_vqtbx2q_v: { |
9082 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), |
9083 | Ops, "vtbx2"); |
9084 | } |
9085 | case NEON::BI__builtin_neon_vqtbx3q_v: { |
9086 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), |
9087 | Ops, "vtbx3"); |
9088 | } |
9089 | case NEON::BI__builtin_neon_vqtbx4q_v: { |
9090 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), |
9091 | Ops, "vtbx4"); |
9092 | } |
9093 | case NEON::BI__builtin_neon_vsqadd_v: |
9094 | case NEON::BI__builtin_neon_vsqaddq_v: { |
9095 | Int = Intrinsic::aarch64_neon_usqadd; |
9096 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd"); |
9097 | } |
9098 | case NEON::BI__builtin_neon_vuqadd_v: |
9099 | case NEON::BI__builtin_neon_vuqaddq_v: { |
9100 | Int = Intrinsic::aarch64_neon_suqadd; |
9101 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd"); |
9102 | } |
9103 | case AArch64::BI_BitScanForward: |
9104 | case AArch64::BI_BitScanForward64: |
9105 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E); |
9106 | case AArch64::BI_BitScanReverse: |
9107 | case AArch64::BI_BitScanReverse64: |
9108 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E); |
9109 | case AArch64::BI_InterlockedAnd64: |
9110 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E); |
9111 | case AArch64::BI_InterlockedExchange64: |
9112 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E); |
9113 | case AArch64::BI_InterlockedExchangeAdd64: |
9114 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E); |
9115 | case AArch64::BI_InterlockedExchangeSub64: |
9116 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E); |
9117 | case AArch64::BI_InterlockedOr64: |
9118 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E); |
9119 | case AArch64::BI_InterlockedXor64: |
9120 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E); |
9121 | case AArch64::BI_InterlockedDecrement64: |
9122 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E); |
9123 | case AArch64::BI_InterlockedIncrement64: |
9124 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E); |
9125 | case AArch64::BI_InterlockedExchangeAdd8_acq: |
9126 | case AArch64::BI_InterlockedExchangeAdd16_acq: |
9127 | case AArch64::BI_InterlockedExchangeAdd_acq: |
9128 | case AArch64::BI_InterlockedExchangeAdd64_acq: |
9129 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E); |
9130 | case AArch64::BI_InterlockedExchangeAdd8_rel: |
9131 | case AArch64::BI_InterlockedExchangeAdd16_rel: |
9132 | case AArch64::BI_InterlockedExchangeAdd_rel: |
9133 | case AArch64::BI_InterlockedExchangeAdd64_rel: |
9134 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E); |
9135 | case AArch64::BI_InterlockedExchangeAdd8_nf: |
9136 | case AArch64::BI_InterlockedExchangeAdd16_nf: |
9137 | case AArch64::BI_InterlockedExchangeAdd_nf: |
9138 | case AArch64::BI_InterlockedExchangeAdd64_nf: |
9139 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E); |
9140 | case AArch64::BI_InterlockedExchange8_acq: |
9141 | case AArch64::BI_InterlockedExchange16_acq: |
9142 | case AArch64::BI_InterlockedExchange_acq: |
9143 | case AArch64::BI_InterlockedExchange64_acq: |
9144 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E); |
9145 | case AArch64::BI_InterlockedExchange8_rel: |
9146 | case AArch64::BI_InterlockedExchange16_rel: |
9147 | case AArch64::BI_InterlockedExchange_rel: |
9148 | case AArch64::BI_InterlockedExchange64_rel: |
9149 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E); |
9150 | case AArch64::BI_InterlockedExchange8_nf: |
9151 | case AArch64::BI_InterlockedExchange16_nf: |
9152 | case AArch64::BI_InterlockedExchange_nf: |
9153 | case AArch64::BI_InterlockedExchange64_nf: |
9154 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E); |
9155 | case AArch64::BI_InterlockedCompareExchange8_acq: |
9156 | case AArch64::BI_InterlockedCompareExchange16_acq: |
9157 | case AArch64::BI_InterlockedCompareExchange_acq: |
9158 | case AArch64::BI_InterlockedCompareExchange64_acq: |
9159 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E); |
9160 | case AArch64::BI_InterlockedCompareExchange8_rel: |
9161 | case AArch64::BI_InterlockedCompareExchange16_rel: |
9162 | case AArch64::BI_InterlockedCompareExchange_rel: |
9163 | case AArch64::BI_InterlockedCompareExchange64_rel: |
9164 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E); |
9165 | case AArch64::BI_InterlockedCompareExchange8_nf: |
9166 | case AArch64::BI_InterlockedCompareExchange16_nf: |
9167 | case AArch64::BI_InterlockedCompareExchange_nf: |
9168 | case AArch64::BI_InterlockedCompareExchange64_nf: |
9169 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E); |
9170 | case AArch64::BI_InterlockedOr8_acq: |
9171 | case AArch64::BI_InterlockedOr16_acq: |
9172 | case AArch64::BI_InterlockedOr_acq: |
9173 | case AArch64::BI_InterlockedOr64_acq: |
9174 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E); |
9175 | case AArch64::BI_InterlockedOr8_rel: |
9176 | case AArch64::BI_InterlockedOr16_rel: |
9177 | case AArch64::BI_InterlockedOr_rel: |
9178 | case AArch64::BI_InterlockedOr64_rel: |
9179 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E); |
9180 | case AArch64::BI_InterlockedOr8_nf: |
9181 | case AArch64::BI_InterlockedOr16_nf: |
9182 | case AArch64::BI_InterlockedOr_nf: |
9183 | case AArch64::BI_InterlockedOr64_nf: |
9184 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E); |
9185 | case AArch64::BI_InterlockedXor8_acq: |
9186 | case AArch64::BI_InterlockedXor16_acq: |
9187 | case AArch64::BI_InterlockedXor_acq: |
9188 | case AArch64::BI_InterlockedXor64_acq: |
9189 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E); |
9190 | case AArch64::BI_InterlockedXor8_rel: |
9191 | case AArch64::BI_InterlockedXor16_rel: |
9192 | case AArch64::BI_InterlockedXor_rel: |
9193 | case AArch64::BI_InterlockedXor64_rel: |
9194 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E); |
9195 | case AArch64::BI_InterlockedXor8_nf: |
9196 | case AArch64::BI_InterlockedXor16_nf: |
9197 | case AArch64::BI_InterlockedXor_nf: |
9198 | case AArch64::BI_InterlockedXor64_nf: |
9199 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E); |
9200 | case AArch64::BI_InterlockedAnd8_acq: |
9201 | case AArch64::BI_InterlockedAnd16_acq: |
9202 | case AArch64::BI_InterlockedAnd_acq: |
9203 | case AArch64::BI_InterlockedAnd64_acq: |
9204 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E); |
9205 | case AArch64::BI_InterlockedAnd8_rel: |
9206 | case AArch64::BI_InterlockedAnd16_rel: |
9207 | case AArch64::BI_InterlockedAnd_rel: |
9208 | case AArch64::BI_InterlockedAnd64_rel: |
9209 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E); |
9210 | case AArch64::BI_InterlockedAnd8_nf: |
9211 | case AArch64::BI_InterlockedAnd16_nf: |
9212 | case AArch64::BI_InterlockedAnd_nf: |
9213 | case AArch64::BI_InterlockedAnd64_nf: |
9214 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E); |
9215 | case AArch64::BI_InterlockedIncrement16_acq: |
9216 | case AArch64::BI_InterlockedIncrement_acq: |
9217 | case AArch64::BI_InterlockedIncrement64_acq: |
9218 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E); |
9219 | case AArch64::BI_InterlockedIncrement16_rel: |
9220 | case AArch64::BI_InterlockedIncrement_rel: |
9221 | case AArch64::BI_InterlockedIncrement64_rel: |
9222 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E); |
9223 | case AArch64::BI_InterlockedIncrement16_nf: |
9224 | case AArch64::BI_InterlockedIncrement_nf: |
9225 | case AArch64::BI_InterlockedIncrement64_nf: |
9226 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E); |
9227 | case AArch64::BI_InterlockedDecrement16_acq: |
9228 | case AArch64::BI_InterlockedDecrement_acq: |
9229 | case AArch64::BI_InterlockedDecrement64_acq: |
9230 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E); |
9231 | case AArch64::BI_InterlockedDecrement16_rel: |
9232 | case AArch64::BI_InterlockedDecrement_rel: |
9233 | case AArch64::BI_InterlockedDecrement64_rel: |
9234 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E); |
9235 | case AArch64::BI_InterlockedDecrement16_nf: |
9236 | case AArch64::BI_InterlockedDecrement_nf: |
9237 | case AArch64::BI_InterlockedDecrement64_nf: |
9238 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E); |
9239 | |
9240 | case AArch64::BI_InterlockedAdd: { |
9241 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
9242 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
9243 | AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
9244 | AtomicRMWInst::Add, Arg0, Arg1, |
9245 | llvm::AtomicOrdering::SequentiallyConsistent); |
9246 | return Builder.CreateAdd(RMWI, Arg1); |
9247 | } |
9248 | } |
9249 | } |
9250 | |
9251 | llvm::Value *CodeGenFunction:: |
9252 | BuildVector(ArrayRef<llvm::Value*> Ops) { |
9253 | assert((Ops.size() & (Ops.size() - 1)) == 0 &&
9254 |        "Not a power-of-two sized vector!");
9255 | bool AllConstants = true; |
9256 | for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) |
9257 | AllConstants &= isa<Constant>(Ops[i]); |
9258 | |
9259 | // If this is a constant vector, create a ConstantVector. |
9260 | if (AllConstants) { |
9261 | SmallVector<llvm::Constant*, 16> CstOps; |
9262 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
9263 | CstOps.push_back(cast<Constant>(Ops[i])); |
9264 | return llvm::ConstantVector::get(CstOps); |
9265 | } |
9266 | |
9267 | // Otherwise, insertelement the values to build the vector. |
9268 | Value *Result = |
9269 | llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size())); |
9270 | |
9271 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
9272 | Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i)); |
9273 | |
9274 | return Result; |
9275 | } |
9276 | |
9277 | // Convert the mask from an integer type to a vector of i1. |
9278 | static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask, |
9279 | unsigned NumElts) { |
9280 | |
9281 | llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(), |
9282 | cast<IntegerType>(Mask->getType())->getBitWidth()); |
9283 | Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy); |
9284 | |
9285 | // If we have fewer than 8 elements, then the starting mask was an i8 and
9286 | // we need to extract down to the right number of elements. |
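     | // Bit i of the integer mask governs lane i, so with NumElts == 4 an i8 mask
     | // of 0x0B becomes <8 x i1> and is shuffled down to <i1 1, 1, 0, 1>.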
9287 | if (NumElts < 8) { |
9288 | uint32_t Indices[4]; |
9289 | for (unsigned i = 0; i != NumElts; ++i) |
9290 | Indices[i] = i; |
9291 | MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec, |
9292 | makeArrayRef(Indices, NumElts), |
9293 | "extract"); |
9294 | } |
9295 | return MaskVec; |
9296 | } |
9297 | |
9298 | static Value *EmitX86MaskedStore(CodeGenFunction &CGF, |
9299 | ArrayRef<Value *> Ops, |
9300 | unsigned Align) { |
9301 | // Cast the pointer to the right type.
9302 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
9303 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
9304 | |
9305 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], |
9306 | Ops[1]->getType()->getVectorNumElements()); |
9307 | |
9308 | return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec); |
9309 | } |
9310 | |
9311 | static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, |
9312 | ArrayRef<Value *> Ops, unsigned Align) { |
9313 | // Cast the pointer to the right type.
9314 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
9315 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
9316 | |
9317 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], |
9318 | Ops[1]->getType()->getVectorNumElements()); |
9319 | |
9320 | return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]); |
9321 | } |
9322 | |
9323 | static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, |
9324 | ArrayRef<Value *> Ops) { |
9325 | llvm::Type *ResultTy = Ops[1]->getType(); |
9326 | llvm::Type *PtrTy = ResultTy->getVectorElementType(); |
9327 | |
9328 | // Cast the pointer to the element type.
9329 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
9330 | llvm::PointerType::getUnqual(PtrTy)); |
9331 | |
9332 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], |
9333 | ResultTy->getVectorNumElements()); |
9334 | |
9335 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, |
9336 | ResultTy); |
9337 | return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] }); |
9338 | } |
9339 | |
9340 | static Value *EmitX86CompressExpand(CodeGenFunction &CGF, |
9341 | ArrayRef<Value *> Ops, |
9342 | bool IsCompress) { |
9343 | llvm::Type *ResultTy = Ops[1]->getType(); |
9344 | |
9345 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], |
9346 | ResultTy->getVectorNumElements()); |
9347 | |
9348 | Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress |
9349 | : Intrinsic::x86_avx512_mask_expand; |
9350 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy); |
9351 | return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec }); |
9352 | } |
9353 | |
9354 | static Value *EmitX86CompressStore(CodeGenFunction &CGF, |
9355 | ArrayRef<Value *> Ops) { |
9356 | llvm::Type *ResultTy = Ops[1]->getType(); |
9357 | llvm::Type *PtrTy = ResultTy->getVectorElementType(); |
9358 | |
9359 | // Cast the pointer to the element type.
9360 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
9361 | llvm::PointerType::getUnqual(PtrTy)); |
9362 | |
9363 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], |
9364 | ResultTy->getVectorNumElements()); |
9365 | |
9366 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, |
9367 | ResultTy); |
9368 | return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec }); |
9369 | } |
9370 | |
9371 | static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, |
9372 | ArrayRef<Value *> Ops, |
9373 | bool InvertLHS = false) { |
9374 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
9375 | Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts); |
9376 | Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts); |
9377 | |
9378 | if (InvertLHS) |
9379 | LHS = CGF.Builder.CreateNot(LHS); |
9380 | |
9381 | return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS), |
9382 | Ops[0]->getType()); |
9383 | } |
9384 | |
9385 | static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1, |
9386 | Value *Amt, bool IsRight) { |
9387 | llvm::Type *Ty = Op0->getType(); |
9388 | |
9389 | // Amount may be scalar immediate, in which case create a splat vector. |
9390 | // Funnel shift amounts are taken modulo the bit width, and the types are all
9391 | // powers of two, so we only care about the lowest log2(bitwidth) bits anyway.
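     | // llvm.fshl(a, b, c) shifts the concatenation a:b left by c (mod bit width)
     | // and returns the high half; llvm.fshr returns the low half shifted right.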
9392 | if (Amt->getType() != Ty) { |
9393 | unsigned NumElts = Ty->getVectorNumElements(); |
9394 | Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false); |
9395 | Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt); |
9396 | } |
9397 | |
9398 | unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl; |
9399 | Function *F = CGF.CGM.getIntrinsic(IID, Ty); |
9400 | return CGF.Builder.CreateCall(F, {Op0, Op1, Amt}); |
9401 | } |
9402 | |
9403 | static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
9404 | bool IsSigned) { |
9405 | Value *Op0 = Ops[0]; |
9406 | Value *Op1 = Ops[1]; |
9407 | llvm::Type *Ty = Op0->getType(); |
9408 | uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
9409 | |
9410 | CmpInst::Predicate Pred; |
9411 | switch (Imm) { |
9412 | case 0x0: |
9413 | Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; |
9414 | break; |
9415 | case 0x1: |
9416 | Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
9417 | break; |
9418 | case 0x2: |
9419 | Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; |
9420 | break; |
9421 | case 0x3: |
9422 | Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; |
9423 | break; |
9424 | case 0x4: |
9425 | Pred = ICmpInst::ICMP_EQ; |
9426 | break; |
9427 | case 0x5: |
9428 | Pred = ICmpInst::ICMP_NE; |
9429 | break; |
9430 | case 0x6: |
9431 | return llvm::Constant::getNullValue(Ty); // FALSE |
9432 | case 0x7: |
9433 | return llvm::Constant::getAllOnesValue(Ty); // TRUE |
9434 | default: |
9435 | llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
9436 | } |
9437 | |
9438 | Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1); |
9439 | Value *Res = CGF.Builder.CreateSExt(Cmp, Ty); |
9440 | return Res; |
9441 | } |
9442 | |
9443 | static Value *EmitX86Select(CodeGenFunction &CGF, |
9444 | Value *Mask, Value *Op0, Value *Op1) { |
9445 | |
9446 | // If the mask is all ones, just return the first argument.
9447 | if (const auto *C = dyn_cast<Constant>(Mask)) |
9448 | if (C->isAllOnesValue()) |
9449 | return Op0; |
9450 | |
9451 | Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements()); |
9452 | |
9453 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
9454 | } |
9455 | |
9456 | static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, |
9457 | Value *Mask, Value *Op0, Value *Op1) { |
9458 | // If the mask is all ones, just return the first argument.
9459 | if (const auto *C = dyn_cast<Constant>(Mask)) |
9460 | if (C->isAllOnesValue()) |
9461 | return Op0; |
9462 | |
9463 | llvm::VectorType *MaskTy = |
9464 | llvm::VectorType::get(CGF.Builder.getInt1Ty(), |
9465 | Mask->getType()->getIntegerBitWidth()); |
9466 | Mask = CGF.Builder.CreateBitCast(Mask, MaskTy); |
9467 | Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0); |
9468 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
9469 | } |
9470 | |
9471 | static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp, |
9472 | unsigned NumElts, Value *MaskIn) { |
9473 | if (MaskIn) { |
9474 | const auto *C = dyn_cast<Constant>(MaskIn); |
9475 | if (!C || !C->isAllOnesValue()) |
9476 | Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts)); |
9477 | } |
9478 | |
9479 | if (NumElts < 8) { |
9480 | uint32_t Indices[8]; |
9481 | for (unsigned i = 0; i != NumElts; ++i) |
9482 | Indices[i] = i; |
9483 | for (unsigned i = NumElts; i != 8; ++i) |
9484 | Indices[i] = i % NumElts + NumElts; |
9485 | Cmp = CGF.Builder.CreateShuffleVector( |
9486 | Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); |
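     | // e.g. with NumElts == 4 the indices are {0,1,2,3,4,5,6,7}; lanes 4-7 read
     | // the null vector, so the widened result has its upper mask bits cleared.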
9487 | } |
9488 | |
9489 | return CGF.Builder.CreateBitCast(Cmp, |
9490 | IntegerType::get(CGF.getLLVMContext(), |
9491 | std::max(NumElts, 8U))); |
9492 | } |
9493 | |
9494 | static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, |
9495 | bool Signed, ArrayRef<Value *> Ops) { |
9496 | assert((Ops.size() == 2 || Ops.size() == 4) &&
9497 |        "Unexpected number of arguments");
9498 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
9499 | Value *Cmp; |
9500 | |
9501 | if (CC == 3) { |
9502 | Cmp = Constant::getNullValue( |
9503 | llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
9504 | } else if (CC == 7) { |
9505 | Cmp = Constant::getAllOnesValue( |
9506 | llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
9507 | } else { |
9508 | ICmpInst::Predicate Pred; |
9509 | switch (CC) { |
9510 | default: llvm_unreachable("Unknown condition code");
9511 | case 0: Pred = ICmpInst::ICMP_EQ; break; |
9512 | case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; |
9513 | case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; |
9514 | case 4: Pred = ICmpInst::ICMP_NE; break; |
9515 | case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; |
9516 | case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; |
9517 | } |
9518 | Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); |
9519 | } |
9520 | |
9521 | Value *MaskIn = nullptr; |
9522 | if (Ops.size() == 4) |
9523 | MaskIn = Ops[3]; |
9524 | |
9525 | return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn); |
9526 | } |
9527 | |
9528 | static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { |
9529 | Value *Zero = Constant::getNullValue(In->getType()); |
9530 | return EmitX86MaskedCompare(CGF, 1, true, { In, Zero }); |
9531 | } |
9532 | |
9533 | static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, |
9534 | ArrayRef<Value *> Ops, bool IsSigned) { |
9535 | unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue(); |
9536 | llvm::Type *Ty = Ops[1]->getType(); |
9537 | |
9538 | Value *Res; |
9539 | if (Rnd != 4) { |
9540 | Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round |
9541 | : Intrinsic::x86_avx512_uitofp_round; |
9542 | Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() }); |
9543 | Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] }); |
9544 | } else { |
9545 | Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty) |
9546 | : CGF.Builder.CreateUIToFP(Ops[0], Ty); |
9547 | } |
9548 | |
9549 | return EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
9550 | } |
9551 | |
9552 | static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) { |
9553 | |
9554 | llvm::Type *Ty = Ops[0]->getType(); |
9555 | Value *Zero = llvm::Constant::getNullValue(Ty); |
9556 | Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]); |
9557 | Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero); |
9558 | Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub); |
9559 | return Res; |
9560 | } |
9561 | |
9562 | static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred, |
9563 | ArrayRef<Value *> Ops) { |
9564 | Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); |
9565 | Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]); |
9566 | |
9567 | assert(Ops.size() == 2);
9568 | return Res; |
9569 | } |
9570 | |
9571 | // Lowers X86 FMA intrinsics to IR. |
9572 | static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
9573 | unsigned BuiltinID, bool IsAddSub) { |
9574 | |
9575 | bool Subtract = false; |
9576 | Intrinsic::ID IID = Intrinsic::not_intrinsic; |
9577 | switch (BuiltinID) { |
9578 | default: break; |
9579 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
9580 | Subtract = true; |
9581 | LLVM_FALLTHROUGH;
9582 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
9583 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
9584 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
9585 | IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; |
9586 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
9587 | Subtract = true; |
9588 | LLVM_FALLTHROUGH;
9589 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
9590 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
9591 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
9592 | IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; |
9593 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
9594 | Subtract = true; |
9595 | LLVM_FALLTHROUGH;
9596 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
9597 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
9598 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
9599 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512; |
9600 | break; |
9601 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
9602 | Subtract = true; |
9603 | LLVM_FALLTHROUGH;
9604 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
9605 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
9606 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
9607 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512; |
9608 | break; |
9609 | } |
9610 | |
9611 | Value *A = Ops[0]; |
9612 | Value *B = Ops[1]; |
9613 | Value *C = Ops[2]; |
9614 | |
9615 | if (Subtract) |
9616 | C = CGF.Builder.CreateFNeg(C); |
9617 | |
9618 | Value *Res; |
9619 | |
9620 | // Only use the rounding intrinsic if the mode is not _MM_FROUND_CUR_DIRECTION (4).
9621 | if (IID != Intrinsic::not_intrinsic && |
9622 | cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) { |
9623 | Function *Intr = CGF.CGM.getIntrinsic(IID); |
9624 | Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() }); |
9625 | } else { |
9626 | llvm::Type *Ty = A->getType(); |
9627 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty); |
9628 | Res = CGF.Builder.CreateCall(FMA, {A, B, C} ); |
9629 | |
9630 | if (IsAddSub) { |
9631 | // For *addsub, blend in a second FMA computed with -C so even lanes subtract.
9632 | unsigned NumElts = Ty->getVectorNumElements(); |
9633 | SmallVector<uint32_t, 16> Indices(NumElts); |
9634 | for (unsigned i = 0; i != NumElts; ++i) |
9635 | Indices[i] = i + (i % 2) * NumElts; |
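     | // e.g. with NumElts == 4 the blend indices are {0, 5, 2, 7}: even lanes come
     | // from the FMSub result below and odd lanes from the FMA result in Res.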
9636 | |
9637 | Value *NegC = CGF.Builder.CreateFNeg(C); |
9638 | Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} ); |
9639 | Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices); |
9640 | } |
9641 | } |
9642 | |
9643 | // Handle any required masking. |
9644 | Value *MaskFalseVal = nullptr; |
9645 | switch (BuiltinID) { |
9646 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
9647 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
9648 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
9649 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
9650 | MaskFalseVal = Ops[0]; |
9651 | break; |
9652 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
9653 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
9654 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
9655 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
9656 | MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); |
9657 | break; |
9658 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
9659 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
9660 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
9661 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
9662 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
9663 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
9664 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
9665 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
9666 | MaskFalseVal = Ops[2]; |
9667 | break; |
9668 | } |
9669 | |
9670 | if (MaskFalseVal) |
9671 | return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal); |
9672 | |
9673 | return Res; |
9674 | } |
9675 | |
9676 | static Value * |
9677 | EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops, |
9678 | Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0, |
9679 | bool NegAcc = false) { |
9680 | unsigned Rnd = 4; |
9681 | if (Ops.size() > 4) |
9682 | Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
9683 | |
9684 | if (NegAcc) |
9685 | Ops[2] = CGF.Builder.CreateFNeg(Ops[2]); |
9686 | |
9687 | Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
9688 | Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
9689 | Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
9690 | Value *Res; |
9691 | if (Rnd != 4) { |
9692 | Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ? |
9693 | Intrinsic::x86_avx512_vfmadd_f32 : |
9694 | Intrinsic::x86_avx512_vfmadd_f64; |
9695 | Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
9696 | {Ops[0], Ops[1], Ops[2], Ops[4]}); |
9697 | } else { |
9698 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType()); |
9699 | Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3)); |
9700 | } |
9701 | // If we have more than 3 arguments, we need to do masking. |
9702 | if (Ops.size() > 3) { |
9703 | Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) |
9704 | : Ops[PTIdx]; |
9705 | |
9706 | // If we negated the accumulator and it is also the PassThru value, we need
9707 | // to bypass the negate. Conveniently, Upper should be the same thing in this
9708 | // case.
9709 | if (NegAcc && PTIdx == 2) |
9710 | PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0); |
9711 | |
9712 | Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru); |
9713 | } |
9714 | return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0); |
9715 | } |
9716 | |
9717 | static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned, |
9718 | ArrayRef<Value *> Ops) { |
9719 | llvm::Type *Ty = Ops[0]->getType(); |
9720 | // Arguments have a vXi32 type so cast to vXi64. |
9721 | Ty = llvm::VectorType::get(CGF.Int64Ty, |
9722 | Ty->getPrimitiveSizeInBits() / 64); |
9723 | Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty); |
9724 | Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty); |
9725 | |
9726 | if (IsSigned) { |
9727 | // Shift left then arithmetic shift right. |
9728 | Constant *ShiftAmt = ConstantInt::get(Ty, 32); |
9729 | LHS = CGF.Builder.CreateShl(LHS, ShiftAmt); |
9730 | LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt); |
9731 | RHS = CGF.Builder.CreateShl(RHS, ShiftAmt); |
9732 | RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt); |
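     | // e.g. a low dword of 0x80000000 becomes 0xFFFFFFFF80000000 after the
     | // shl/ashr pair, i.e. the i32 value sign-extended to i64 as pmuldq expects.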
9733 | } else { |
9734 | // Clear the upper bits. |
9735 | Constant *Mask = ConstantInt::get(Ty, 0xffffffff); |
9736 | LHS = CGF.Builder.CreateAnd(LHS, Mask); |
9737 | RHS = CGF.Builder.CreateAnd(RHS, Mask); |
9738 | } |
9739 | |
9740 | return CGF.Builder.CreateMul(LHS, RHS); |
9741 | } |
9742 | |
9743 | // Emit a masked pternlog intrinsic. This only exists because the header has to |
9744 | // use a macro and we aren't able to pass the input argument to a pternlog |
9745 | // builtin and a select builtin without evaluating it twice. |
9746 | static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask, |
9747 | ArrayRef<Value *> Ops) { |
9748 | llvm::Type *Ty = Ops[0]->getType(); |
9749 | |
9750 | unsigned VecWidth = Ty->getPrimitiveSizeInBits(); |
9751 | unsigned EltWidth = Ty->getScalarSizeInBits(); |
9752 | Intrinsic::ID IID; |
9753 | if (VecWidth == 128 && EltWidth == 32) |
9754 | IID = Intrinsic::x86_avx512_pternlog_d_128; |
9755 | else if (VecWidth == 256 && EltWidth == 32) |
9756 | IID = Intrinsic::x86_avx512_pternlog_d_256; |
9757 | else if (VecWidth == 512 && EltWidth == 32) |
9758 | IID = Intrinsic::x86_avx512_pternlog_d_512; |
9759 | else if (VecWidth == 128 && EltWidth == 64) |
9760 | IID = Intrinsic::x86_avx512_pternlog_q_128; |
9761 | else if (VecWidth == 256 && EltWidth == 64) |
9762 | IID = Intrinsic::x86_avx512_pternlog_q_256; |
9763 | else if (VecWidth == 512 && EltWidth == 64) |
9764 | IID = Intrinsic::x86_avx512_pternlog_q_512; |
9765 | else |
9766 | llvm_unreachable("Unexpected intrinsic");
9767 | |
9768 | Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
9769 | Ops.drop_back()); |
9770 | Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; |
9771 | return EmitX86Select(CGF, Ops[4], Ternlog, PassThru); |
9772 | } |
9773 | |
9774 | static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, |
9775 | llvm::Type *DstTy) { |
9776 | unsigned NumberOfElements = DstTy->getVectorNumElements(); |
9777 | Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); |
9778 | return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); |
9779 | } |
9780 | |
9781 | // Emit addition or subtraction with signed/unsigned saturation. |
9782 | static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF, |
9783 | ArrayRef<Value *> Ops, bool IsSigned, |
9784 | bool IsAddition) { |
9785 | Intrinsic::ID IID = |
9786 | IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat) |
9787 | : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat); |
9788 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType()); |
9789 | return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]}); |
9790 | } |
9791 | |
9792 | Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { |
9793 | const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); |
9794 | StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); |
9795 | return EmitX86CpuIs(CPUStr); |
9796 | } |
9797 | |
9798 | Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { |
9799 | |
9800 | llvm::Type *Int32Ty = Builder.getInt32Ty(); |
9801 | |
9802 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
9803 | // filled in: |
9804 | // unsigned int __cpu_vendor; |
9805 | // unsigned int __cpu_type; |
9806 | // unsigned int __cpu_subtype; |
9807 | // unsigned int __cpu_features[1]; |
9808 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
9809 | llvm::ArrayType::get(Int32Ty, 1)); |
9810 | |
9811 | // Grab the global __cpu_model. |
9812 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
9813 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
9814 | |
9815 | // Calculate the index needed to access the correct field based on the |
9816 | // range. Also adjust the expected value. |
9817 | unsigned Index; |
9818 | unsigned Value; |
9819 | std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) |
9820 | #define X86_VENDOR(ENUM, STRING) \ |
9821 | .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)}) |
9822 | #define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \ |
9823 | .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
9824 | #define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \ |
9825 | .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
9826 | #define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \ |
9827 | .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) |
9828 | #include "llvm/Support/X86TargetParser.def" |
9829 | .Default({0, 0}); |
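     | // Index selects the __cpu_model field to load (0 = vendor, 1 = type,
     | // 2 = subtype) and Value is the enum constant compared against it.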
9830 | assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
9831 | |
9832 | // Grab the appropriate field from __cpu_model. |
9833 | llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), |
9834 | ConstantInt::get(Int32Ty, Index)}; |
9835 | llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs); |
9836 | CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4)); |
9837 | |
9838 | // Check the value of the field against the requested value. |
9839 | return Builder.CreateICmpEQ(CpuValue, |
9840 | llvm::ConstantInt::get(Int32Ty, Value)); |
9841 | } |
9842 | |
9843 | Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { |
9844 | const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); |
9845 | StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); |
9846 | return EmitX86CpuSupports(FeatureStr); |
9847 | } |
9848 | |
9849 | uint64_t |
9850 | CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) { |
9851 | // Processor features and mapping to processor feature value. |
9852 | uint64_t FeaturesMask = 0; |
9853 | for (const StringRef &FeatureStr : FeatureStrs) { |
9854 | unsigned Feature = |
9855 | StringSwitch<unsigned>(FeatureStr) |
9856 | #define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL) |
9857 | #include "llvm/Support/X86TargetParser.def" |
9858 | ; |
9859 | FeaturesMask |= (1ULL << Feature); |
9860 | } |
9861 | return FeaturesMask; |
9862 | } |
9863 | |
9864 | Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { |
9865 | return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs)); |
9866 | } |
9867 | |
9868 | llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) { |
9869 | uint32_t Features1 = Lo_32(FeaturesMask); |
9870 | uint32_t Features2 = Hi_32(FeaturesMask); |
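     | // Features 0-31 live in __cpu_model.__cpu_features[0]; features 32-63 live
     | // in the separate __cpu_features2 variable.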
9871 | |
9872 | Value *Result = Builder.getTrue(); |
9873 | |
9874 | if (Features1 != 0) { |
9875 | // Matching the struct layout from the compiler-rt/libgcc structure that is |
9876 | // filled in: |
9877 | // unsigned int __cpu_vendor; |
9878 | // unsigned int __cpu_type; |
9879 | // unsigned int __cpu_subtype; |
9880 | // unsigned int __cpu_features[1]; |
9881 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
9882 | llvm::ArrayType::get(Int32Ty, 1)); |
9883 | |
9884 | // Grab the global __cpu_model. |
9885 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
9886 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
9887 | |
9888 | // Grab the first (0th) element of the __cpu_features field (index 3) of the
9889 | // __cpu_model global of type STy.
9890 | Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3), |
9891 | Builder.getInt32(0)}; |
9892 | Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs); |
9893 | Value *Features = |
9894 | Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4)); |
9895 | |
9896 | // Check the value of the bit corresponding to the feature requested. |
9897 | Value *Mask = Builder.getInt32(Features1); |
9898 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
9899 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
9900 | Result = Builder.CreateAnd(Result, Cmp); |
9901 | } |
9902 | |
9903 | if (Features2 != 0) { |
9904 | llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty, |
9905 | "__cpu_features2"); |
9906 | cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true); |
9907 | |
9908 | Value *Features = |
9909 | Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4)); |
9910 | |
9911 | // Check the value of the bit corresponding to the feature requested. |
9912 | Value *Mask = Builder.getInt32(Features2); |
9913 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
9914 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
9915 | Result = Builder.CreateAnd(Result, Cmp); |
9916 | } |
9917 | |
9918 | return Result; |
9919 | } |
9920 | |
9921 | Value *CodeGenFunction::EmitX86CpuInit() { |
9922 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, |
9923 | /*Variadic*/ false); |
9924 | llvm::FunctionCallee Func = |
9925 | CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init"); |
9926 | cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); |
9927 | cast<llvm::GlobalValue>(Func.getCallee()) |
9928 | ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
9929 | return Builder.CreateCall(Func); |
9930 | } |
9931 | |
9932 | Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, |
9933 | const CallExpr *E) { |
9934 | if (BuiltinID == X86::BI__builtin_cpu_is) |
9935 | return EmitX86CpuIs(E); |
9936 | if (BuiltinID == X86::BI__builtin_cpu_supports) |
9937 | return EmitX86CpuSupports(E); |
9938 | if (BuiltinID == X86::BI__builtin_cpu_init) |
9939 | return EmitX86CpuInit(); |
9940 | |
9941 | SmallVector<Value*, 4> Ops; |
9942 | |
9943 | // Find out if any arguments are required to be integer constant expressions. |
9944 | unsigned ICEArguments = 0; |
9945 | ASTContext::GetBuiltinTypeError Error; |
9946 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
9947 | assert(Error == ASTContext::GE_None && "Should not codegen an error");
9948 | |
9949 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
9950 | // If this is a normal argument, just emit it as a scalar. |
9951 | if ((ICEArguments & (1 << i)) == 0) { |
9952 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
9953 | continue; |
9954 | } |
9955 | |
9956 | // If this is required to be a constant, constant fold it so that we know |
9957 | // that the generated intrinsic gets a ConstantInt. |
9958 | llvm::APSInt Result; |
9959 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); |
9960 | assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
9961 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); |
9962 | } |
9963 | |
9964 | // These exist so that the builtin that takes an immediate can be bounds |
9965 | // checked by clang to avoid passing bad immediates to the backend. Since |
9966 | // AVX has a larger immediate than SSE we would need separate builtins to |
9967 | // do the different bounds checking. Rather than create a clang-specific
9968 | // SSE-only builtin, this implements eight separate builtins to match the
9969 | // gcc implementation.
9970 | auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { |
9971 | Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm)); |
9972 | llvm::Function *F = CGM.getIntrinsic(ID); |
9973 | return Builder.CreateCall(F, Ops); |
9974 | }; |
9975 | |
9976 | // For the vector forms of FP comparisons, translate the builtins directly to |
9977 | // IR. |
9978 | // TODO: The builtins could be removed if the SSE header files used vector |
9979 | // extension comparisons directly (vector ordered/unordered may need |
9980 | // additional support via __builtin_isnan()). |
9981 | auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) { |
9982 | Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
9983 | llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); |
9984 | llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy); |
9985 | Value *Sext = Builder.CreateSExt(Cmp, IntVecTy); |
9986 | return Builder.CreateBitCast(Sext, FPVecTy); |
9987 | }; |
9988 | |
9989 | switch (BuiltinID) { |
9990 | default: return nullptr; |
9991 | case X86::BI_mm_prefetch: { |
9992 | Value *Address = Ops[0]; |
9993 | ConstantInt *C = cast<ConstantInt>(Ops[1]); |
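     | // The hint constant encodes write-intent in bit 2 and locality in bits 1:0;
     | // the trailing 1 passed below selects a data (not instruction) prefetch.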
9994 | Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1); |
9995 | Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3); |
9996 | Value *Data = ConstantInt::get(Int32Ty, 1); |
9997 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch); |
9998 | return Builder.CreateCall(F, {Address, RW, Locality, Data}); |
9999 | } |
10000 | case X86::BI_mm_clflush: { |
10001 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush), |
10002 | Ops[0]); |
10003 | } |
10004 | case X86::BI_mm_lfence: { |
10005 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence)); |
10006 | } |
10007 | case X86::BI_mm_mfence: { |
10008 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence)); |
10009 | } |
10010 | case X86::BI_mm_sfence: { |
10011 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence)); |
10012 | } |
10013 | case X86::BI_mm_pause: { |
10014 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause)); |
10015 | } |
10016 | case X86::BI__rdtsc: { |
10017 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc)); |
10018 | } |
10019 | case X86::BI__builtin_ia32_rdtscp: { |
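     | // llvm.x86.rdtscp returns { tsc, aux }: store the IA32_TSC_AUX value through
     | // the pointer argument and return the timestamp itself.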
10020 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp)); |
10021 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
10022 | Ops[0]); |
10023 | return Builder.CreateExtractValue(Call, 0); |
10024 | } |
10025 | case X86::BI__builtin_ia32_lzcnt_u16: |
10026 | case X86::BI__builtin_ia32_lzcnt_u32: |
10027 | case X86::BI__builtin_ia32_lzcnt_u64: { |
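     | // Pass false for is_zero_undef so ctlz/cttz are defined (returning the bit
     | // width) on a zero input, matching the lzcnt/tzcnt instructions.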
10028 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
10029 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
10030 | } |
10031 | case X86::BI__builtin_ia32_tzcnt_u16: |
10032 | case X86::BI__builtin_ia32_tzcnt_u32: |
10033 | case X86::BI__builtin_ia32_tzcnt_u64: { |
10034 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType()); |
10035 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
10036 | } |
10037 | case X86::BI__builtin_ia32_undef128: |
10038 | case X86::BI__builtin_ia32_undef256: |
10039 | case X86::BI__builtin_ia32_undef512: |
10040 | // The x86 definition of "undef" is not the same as the LLVM definition |
10041 | // (PR32176). We leave optimizing away an unnecessary zero constant to the |
10042 | // IR optimizer and backend. |
10043 | // TODO: If we had a "freeze" IR instruction to generate a fixed undef |
10044 | // value, we should use that here instead of a zero. |
10045 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
10046 | case X86::BI__builtin_ia32_vec_init_v8qi: |
10047 | case X86::BI__builtin_ia32_vec_init_v4hi: |
10048 | case X86::BI__builtin_ia32_vec_init_v2si: |
10049 | return Builder.CreateBitCast(BuildVector(Ops), |
10050 | llvm::Type::getX86_MMXTy(getLLVMContext())); |
10051 | case X86::BI__builtin_ia32_vec_ext_v2si: |
10052 | case X86::BI__builtin_ia32_vec_ext_v16qi: |
10053 | case X86::BI__builtin_ia32_vec_ext_v8hi: |
10054 | case X86::BI__builtin_ia32_vec_ext_v4si: |
10055 | case X86::BI__builtin_ia32_vec_ext_v4sf: |
10056 | case X86::BI__builtin_ia32_vec_ext_v2di: |
10057 | case X86::BI__builtin_ia32_vec_ext_v32qi: |
10058 | case X86::BI__builtin_ia32_vec_ext_v16hi: |
10059 | case X86::BI__builtin_ia32_vec_ext_v8si: |
10060 | case X86::BI__builtin_ia32_vec_ext_v4di: { |
10061 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
10062 | uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
10063 | Index &= NumElts - 1; |
10064 | // These builtins exist so we can ensure the index is an ICE and in range. |
10065 | // Otherwise we could just do this in the header file. |
10066 | return Builder.CreateExtractElement(Ops[0], Index); |
10067 | } |
10068 | case X86::BI__builtin_ia32_vec_set_v16qi: |
10069 | case X86::BI__builtin_ia32_vec_set_v8hi: |
10070 | case X86::BI__builtin_ia32_vec_set_v4si: |
10071 | case X86::BI__builtin_ia32_vec_set_v2di: |
10072 | case X86::BI__builtin_ia32_vec_set_v32qi: |
10073 | case X86::BI__builtin_ia32_vec_set_v16hi: |
10074 | case X86::BI__builtin_ia32_vec_set_v8si: |
10075 | case X86::BI__builtin_ia32_vec_set_v4di: { |
10076 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
10077 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
10078 | Index &= NumElts - 1; |
10079 | // These builtins exist so we can ensure the index is an ICE and in range. |
10080 | // Otherwise we could just do this in the header file. |
10081 | return Builder.CreateInsertElement(Ops[0], Ops[1], Index); |
10082 | } |
10083 | case X86::BI_mm_setcsr: |
10084 | case X86::BI__builtin_ia32_ldmxcsr: { |
10085 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
10086 | Builder.CreateStore(Ops[0], Tmp); |
10087 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), |
10088 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
10089 | } |
10090 | case X86::BI_mm_getcsr: |
10091 | case X86::BI__builtin_ia32_stmxcsr: { |
10092 | Address Tmp = CreateMemTemp(E->getType()); |
10093 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), |
10094 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
10095 | return Builder.CreateLoad(Tmp, "stmxcsr"); |
10096 | } |
10097 | case X86::BI__builtin_ia32_xsave: |
10098 | case X86::BI__builtin_ia32_xsave64: |
10099 | case X86::BI__builtin_ia32_xrstor: |
10100 | case X86::BI__builtin_ia32_xrstor64: |
10101 | case X86::BI__builtin_ia32_xsaveopt: |
10102 | case X86::BI__builtin_ia32_xsaveopt64: |
10103 | case X86::BI__builtin_ia32_xrstors: |
10104 | case X86::BI__builtin_ia32_xrstors64: |
10105 | case X86::BI__builtin_ia32_xsavec: |
10106 | case X86::BI__builtin_ia32_xsavec64: |
10107 | case X86::BI__builtin_ia32_xsaves: |
10108 | case X86::BI__builtin_ia32_xsaves64: |
10109 | case X86::BI__builtin_ia32_xsetbv: |
10110 | case X86::BI_xsetbv: { |
10111 | Intrinsic::ID ID; |
10112 | #define INTRINSIC_X86_XSAVE_ID(NAME) \ |
10113 | case X86::BI__builtin_ia32_##NAME: \ |
10114 | ID = Intrinsic::x86_##NAME; \ |
10115 | break |
10116 | switch (BuiltinID) { |
10117 | default: llvm_unreachable("Unsupported intrinsic!");
10118 | INTRINSIC_X86_XSAVE_ID(xsave); |
10119 | INTRINSIC_X86_XSAVE_ID(xsave64); |
10120 | INTRINSIC_X86_XSAVE_ID(xrstor); |
10121 | INTRINSIC_X86_XSAVE_ID(xrstor64); |
10122 | INTRINSIC_X86_XSAVE_ID(xsaveopt); |
10123 | INTRINSIC_X86_XSAVE_ID(xsaveopt64); |
10124 | INTRINSIC_X86_XSAVE_ID(xrstors); |
10125 | INTRINSIC_X86_XSAVE_ID(xrstors64); |
10126 | INTRINSIC_X86_XSAVE_ID(xsavec); |
10127 | INTRINSIC_X86_XSAVE_ID(xsavec64); |
10128 | INTRINSIC_X86_XSAVE_ID(xsaves); |
10129 | INTRINSIC_X86_XSAVE_ID(xsaves64); |
10130 | INTRINSIC_X86_XSAVE_ID(xsetbv); |
10131 | case X86::BI_xsetbv: |
10132 | ID = Intrinsic::x86_xsetbv; |
10133 | break; |
10134 | } |
10135 | #undef INTRINSIC_X86_XSAVE_ID |
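     | // The xsave/xrstor/xsetbv intrinsics take the 64-bit feature mask as two i32
     | // words (EDX:EAX), so split Ops[1] into its high (Mhi) and low (Mlo) halves.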
10136 | Value *Mhi = Builder.CreateTrunc( |
10137 | Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); |
10138 | Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty); |
10139 | Ops[1] = Mhi; |
10140 | Ops.push_back(Mlo); |
10141 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
10142 | } |
10143 | case X86::BI__builtin_ia32_xgetbv: |
10144 | case X86::BI_xgetbv: |
10145 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops); |
10146 | case X86::BI__builtin_ia32_storedqudi128_mask: |
10147 | case X86::BI__builtin_ia32_storedqusi128_mask: |
10148 | case X86::BI__builtin_ia32_storedquhi128_mask: |
10149 | case X86::BI__builtin_ia32_storedquqi128_mask: |
10150 | case X86::BI__builtin_ia32_storeupd128_mask: |
10151 | case X86::BI__builtin_ia32_storeups128_mask: |
10152 | case X86::BI__builtin_ia32_storedqudi256_mask: |
10153 | case X86::BI__builtin_ia32_storedqusi256_mask: |
10154 | case X86::BI__builtin_ia32_storedquhi256_mask: |
10155 | case X86::BI__builtin_ia32_storedquqi256_mask: |
10156 | case X86::BI__builtin_ia32_storeupd256_mask: |
10157 | case X86::BI__builtin_ia32_storeups256_mask: |
10158 | case X86::BI__builtin_ia32_storedqudi512_mask: |
10159 | case X86::BI__builtin_ia32_storedqusi512_mask: |
10160 | case X86::BI__builtin_ia32_storedquhi512_mask: |
10161 | case X86::BI__builtin_ia32_storedquqi512_mask: |
10162 | case X86::BI__builtin_ia32_storeupd512_mask: |
10163 | case X86::BI__builtin_ia32_storeups512_mask: |
10164 | return EmitX86MaskedStore(*this, Ops, 1); |
10165 | |
10166 | case X86::BI__builtin_ia32_storess128_mask: |
10167 | case X86::BI__builtin_ia32_storesd128_mask: { |
10168 | return EmitX86MaskedStore(*this, Ops, 1); |
10169 | } |
10170 | case X86::BI__builtin_ia32_vpopcntb_128: |
10171 | case X86::BI__builtin_ia32_vpopcntd_128: |
10172 | case X86::BI__builtin_ia32_vpopcntq_128: |
10173 | case X86::BI__builtin_ia32_vpopcntw_128: |
10174 | case X86::BI__builtin_ia32_vpopcntb_256: |
10175 | case X86::BI__builtin_ia32_vpopcntd_256: |
10176 | case X86::BI__builtin_ia32_vpopcntq_256: |
10177 | case X86::BI__builtin_ia32_vpopcntw_256: |
10178 | case X86::BI__builtin_ia32_vpopcntb_512: |
10179 | case X86::BI__builtin_ia32_vpopcntd_512: |
10180 | case X86::BI__builtin_ia32_vpopcntq_512: |
10181 | case X86::BI__builtin_ia32_vpopcntw_512: { |
10182 | llvm::Type *ResultType = ConvertType(E->getType()); |
10183 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
10184 | return Builder.CreateCall(F, Ops); |
10185 | } |
10186 | case X86::BI__builtin_ia32_cvtmask2b128: |
10187 | case X86::BI__builtin_ia32_cvtmask2b256: |
10188 | case X86::BI__builtin_ia32_cvtmask2b512: |
10189 | case X86::BI__builtin_ia32_cvtmask2w128: |
10190 | case X86::BI__builtin_ia32_cvtmask2w256: |
10191 | case X86::BI__builtin_ia32_cvtmask2w512: |
10192 | case X86::BI__builtin_ia32_cvtmask2d128: |
10193 | case X86::BI__builtin_ia32_cvtmask2d256: |
10194 | case X86::BI__builtin_ia32_cvtmask2d512: |
10195 | case X86::BI__builtin_ia32_cvtmask2q128: |
10196 | case X86::BI__builtin_ia32_cvtmask2q256: |
10197 | case X86::BI__builtin_ia32_cvtmask2q512: |
10198 | return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); |
10199 | |
10200 | case X86::BI__builtin_ia32_cvtb2mask128: |
10201 | case X86::BI__builtin_ia32_cvtb2mask256: |
10202 | case X86::BI__builtin_ia32_cvtb2mask512: |
10203 | case X86::BI__builtin_ia32_cvtw2mask128: |
10204 | case X86::BI__builtin_ia32_cvtw2mask256: |
10205 | case X86::BI__builtin_ia32_cvtw2mask512: |
10206 | case X86::BI__builtin_ia32_cvtd2mask128: |
10207 | case X86::BI__builtin_ia32_cvtd2mask256: |
10208 | case X86::BI__builtin_ia32_cvtd2mask512: |
10209 | case X86::BI__builtin_ia32_cvtq2mask128: |
10210 | case X86::BI__builtin_ia32_cvtq2mask256: |
10211 | case X86::BI__builtin_ia32_cvtq2mask512: |
10212 | return EmitX86ConvertToMask(*this, Ops[0]); |
10213 | |
10214 | case X86::BI__builtin_ia32_cvtdq2ps512_mask: |
10215 | case X86::BI__builtin_ia32_cvtqq2ps512_mask: |
10216 | case X86::BI__builtin_ia32_cvtqq2pd512_mask: |
10217 | return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true); |
10218 | case X86::BI__builtin_ia32_cvtudq2ps512_mask: |
10219 | case X86::BI__builtin_ia32_cvtuqq2ps512_mask: |
10220 | case X86::BI__builtin_ia32_cvtuqq2pd512_mask: |
10221 | return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false); |
10222 | |
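      | // Scalar FMA: the extra value passed to EmitScalarFMAExpr supplies the upper
      | // elements of the result (Ops[0] for the 3-operand forms, zero for the FMA4-style forms).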
10223 | case X86::BI__builtin_ia32_vfmaddss3: |
10224 | case X86::BI__builtin_ia32_vfmaddsd3: |
10225 | case X86::BI__builtin_ia32_vfmaddss3_mask: |
10226 | case X86::BI__builtin_ia32_vfmaddsd3_mask: |
10227 | return EmitScalarFMAExpr(*this, Ops, Ops[0]); |
10228 | case X86::BI__builtin_ia32_vfmaddss: |
10229 | case X86::BI__builtin_ia32_vfmaddsd: |
10230 | return EmitScalarFMAExpr(*this, Ops, |
10231 | Constant::getNullValue(Ops[0]->getType())); |
10232 | case X86::BI__builtin_ia32_vfmaddss3_maskz: |
10233 | case X86::BI__builtin_ia32_vfmaddsd3_maskz: |
10234 | return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true); |
10235 | case X86::BI__builtin_ia32_vfmaddss3_mask3: |
10236 | case X86::BI__builtin_ia32_vfmaddsd3_mask3: |
10237 | return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2); |
10238 | case X86::BI__builtin_ia32_vfmsubss3_mask3: |
10239 | case X86::BI__builtin_ia32_vfmsubsd3_mask3: |
10240 | return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2, |
10241 | /*NegAcc*/true); |
10242 | case X86::BI__builtin_ia32_vfmaddps: |
10243 | case X86::BI__builtin_ia32_vfmaddpd: |
10244 | case X86::BI__builtin_ia32_vfmaddps256: |
10245 | case X86::BI__builtin_ia32_vfmaddpd256: |
10246 | case X86::BI__builtin_ia32_vfmaddps512_mask: |
10247 | case X86::BI__builtin_ia32_vfmaddps512_maskz: |
10248 | case X86::BI__builtin_ia32_vfmaddps512_mask3: |
10249 | case X86::BI__builtin_ia32_vfmsubps512_mask3: |
10250 | case X86::BI__builtin_ia32_vfmaddpd512_mask: |
10251 | case X86::BI__builtin_ia32_vfmaddpd512_maskz: |
10252 | case X86::BI__builtin_ia32_vfmaddpd512_mask3: |
10253 | case X86::BI__builtin_ia32_vfmsubpd512_mask3: |
10254 | return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false); |
10255 | case X86::BI__builtin_ia32_vfmaddsubps: |
10256 | case X86::BI__builtin_ia32_vfmaddsubpd: |
10257 | case X86::BI__builtin_ia32_vfmaddsubps256: |
10258 | case X86::BI__builtin_ia32_vfmaddsubpd256: |
10259 | case X86::BI__builtin_ia32_vfmaddsubps512_mask: |
10260 | case X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
10261 | case X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
10262 | case X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
10263 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
10264 | case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
10265 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
10266 | case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
10267 | return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true); |
10268 | |
10269 | case X86::BI__builtin_ia32_movdqa32store128_mask: |
10270 | case X86::BI__builtin_ia32_movdqa64store128_mask: |
10271 | case X86::BI__builtin_ia32_storeaps128_mask: |
10272 | case X86::BI__builtin_ia32_storeapd128_mask: |
10273 | case X86::BI__builtin_ia32_movdqa32store256_mask: |
10274 | case X86::BI__builtin_ia32_movdqa64store256_mask: |
10275 | case X86::BI__builtin_ia32_storeaps256_mask: |
10276 | case X86::BI__builtin_ia32_storeapd256_mask: |
10277 | case X86::BI__builtin_ia32_movdqa32store512_mask: |
10278 | case X86::BI__builtin_ia32_movdqa64store512_mask: |
10279 | case X86::BI__builtin_ia32_storeaps512_mask: |
10280 | case X86::BI__builtin_ia32_storeapd512_mask: { |
10281 | unsigned Align = |
10282 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity(); |
10283 | return EmitX86MaskedStore(*this, Ops, Align); |
10284 | } |
10285 | case X86::BI__builtin_ia32_loadups128_mask: |
10286 | case X86::BI__builtin_ia32_loadups256_mask: |
10287 | case X86::BI__builtin_ia32_loadups512_mask: |
10288 | case X86::BI__builtin_ia32_loadupd128_mask: |
10289 | case X86::BI__builtin_ia32_loadupd256_mask: |
10290 | case X86::BI__builtin_ia32_loadupd512_mask: |
10291 | case X86::BI__builtin_ia32_loaddquqi128_mask: |
10292 | case X86::BI__builtin_ia32_loaddquqi256_mask: |
10293 | case X86::BI__builtin_ia32_loaddquqi512_mask: |
10294 | case X86::BI__builtin_ia32_loaddquhi128_mask: |
10295 | case X86::BI__builtin_ia32_loaddquhi256_mask: |
10296 | case X86::BI__builtin_ia32_loaddquhi512_mask: |
10297 | case X86::BI__builtin_ia32_loaddqusi128_mask: |
10298 | case X86::BI__builtin_ia32_loaddqusi256_mask: |
10299 | case X86::BI__builtin_ia32_loaddqusi512_mask: |
10300 | case X86::BI__builtin_ia32_loaddqudi128_mask: |
10301 | case X86::BI__builtin_ia32_loaddqudi256_mask: |
10302 | case X86::BI__builtin_ia32_loaddqudi512_mask: |
10303 | return EmitX86MaskedLoad(*this, Ops, 1); |
10304 | |
10305 | case X86::BI__builtin_ia32_loadss128_mask: |
10306 | case X86::BI__builtin_ia32_loadsd128_mask: |
10307 | return EmitX86MaskedLoad(*this, Ops, 1); |
10308 | |
10309 | case X86::BI__builtin_ia32_loadaps128_mask: |
10310 | case X86::BI__builtin_ia32_loadaps256_mask: |
10311 | case X86::BI__builtin_ia32_loadaps512_mask: |
10312 | case X86::BI__builtin_ia32_loadapd128_mask: |
10313 | case X86::BI__builtin_ia32_loadapd256_mask: |
10314 | case X86::BI__builtin_ia32_loadapd512_mask: |
10315 | case X86::BI__builtin_ia32_movdqa32load128_mask: |
10316 | case X86::BI__builtin_ia32_movdqa32load256_mask: |
10317 | case X86::BI__builtin_ia32_movdqa32load512_mask: |
10318 | case X86::BI__builtin_ia32_movdqa64load128_mask: |
10319 | case X86::BI__builtin_ia32_movdqa64load256_mask: |
10320 | case X86::BI__builtin_ia32_movdqa64load512_mask: { |
10321 | unsigned Align = |
10322 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity(); |
10323 | return EmitX86MaskedLoad(*this, Ops, Align); |
10324 | } |
10325 | |
10326 | case X86::BI__builtin_ia32_expandloaddf128_mask: |
10327 | case X86::BI__builtin_ia32_expandloaddf256_mask: |
10328 | case X86::BI__builtin_ia32_expandloaddf512_mask: |
10329 | case X86::BI__builtin_ia32_expandloadsf128_mask: |
10330 | case X86::BI__builtin_ia32_expandloadsf256_mask: |
10331 | case X86::BI__builtin_ia32_expandloadsf512_mask: |
10332 | case X86::BI__builtin_ia32_expandloaddi128_mask: |
10333 | case X86::BI__builtin_ia32_expandloaddi256_mask: |
10334 | case X86::BI__builtin_ia32_expandloaddi512_mask: |
10335 | case X86::BI__builtin_ia32_expandloadsi128_mask: |
10336 | case X86::BI__builtin_ia32_expandloadsi256_mask: |
10337 | case X86::BI__builtin_ia32_expandloadsi512_mask: |
10338 | case X86::BI__builtin_ia32_expandloadhi128_mask: |
10339 | case X86::BI__builtin_ia32_expandloadhi256_mask: |
10340 | case X86::BI__builtin_ia32_expandloadhi512_mask: |
10341 | case X86::BI__builtin_ia32_expandloadqi128_mask: |
10342 | case X86::BI__builtin_ia32_expandloadqi256_mask: |
10343 | case X86::BI__builtin_ia32_expandloadqi512_mask: |
10344 | return EmitX86ExpandLoad(*this, Ops); |
10345 | |
10346 | case X86::BI__builtin_ia32_compressstoredf128_mask: |
10347 | case X86::BI__builtin_ia32_compressstoredf256_mask: |
10348 | case X86::BI__builtin_ia32_compressstoredf512_mask: |
10349 | case X86::BI__builtin_ia32_compressstoresf128_mask: |
10350 | case X86::BI__builtin_ia32_compressstoresf256_mask: |
10351 | case X86::BI__builtin_ia32_compressstoresf512_mask: |
10352 | case X86::BI__builtin_ia32_compressstoredi128_mask: |
10353 | case X86::BI__builtin_ia32_compressstoredi256_mask: |
10354 | case X86::BI__builtin_ia32_compressstoredi512_mask: |
10355 | case X86::BI__builtin_ia32_compressstoresi128_mask: |
10356 | case X86::BI__builtin_ia32_compressstoresi256_mask: |
10357 | case X86::BI__builtin_ia32_compressstoresi512_mask: |
10358 | case X86::BI__builtin_ia32_compressstorehi128_mask: |
10359 | case X86::BI__builtin_ia32_compressstorehi256_mask: |
10360 | case X86::BI__builtin_ia32_compressstorehi512_mask: |
10361 | case X86::BI__builtin_ia32_compressstoreqi128_mask: |
10362 | case X86::BI__builtin_ia32_compressstoreqi256_mask: |
10363 | case X86::BI__builtin_ia32_compressstoreqi512_mask: |
10364 | return EmitX86CompressStore(*this, Ops); |
10365 | |
10366 | case X86::BI__builtin_ia32_expanddf128_mask: |
10367 | case X86::BI__builtin_ia32_expanddf256_mask: |
10368 | case X86::BI__builtin_ia32_expanddf512_mask: |
10369 | case X86::BI__builtin_ia32_expandsf128_mask: |
10370 | case X86::BI__builtin_ia32_expandsf256_mask: |
10371 | case X86::BI__builtin_ia32_expandsf512_mask: |
10372 | case X86::BI__builtin_ia32_expanddi128_mask: |
10373 | case X86::BI__builtin_ia32_expanddi256_mask: |
10374 | case X86::BI__builtin_ia32_expanddi512_mask: |
10375 | case X86::BI__builtin_ia32_expandsi128_mask: |
10376 | case X86::BI__builtin_ia32_expandsi256_mask: |
10377 | case X86::BI__builtin_ia32_expandsi512_mask: |
10378 | case X86::BI__builtin_ia32_expandhi128_mask: |
10379 | case X86::BI__builtin_ia32_expandhi256_mask: |
10380 | case X86::BI__builtin_ia32_expandhi512_mask: |
10381 | case X86::BI__builtin_ia32_expandqi128_mask: |
10382 | case X86::BI__builtin_ia32_expandqi256_mask: |
10383 | case X86::BI__builtin_ia32_expandqi512_mask: |
10384 | return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false); |
10385 | |
10386 | case X86::BI__builtin_ia32_compressdf128_mask: |
10387 | case X86::BI__builtin_ia32_compressdf256_mask: |
10388 | case X86::BI__builtin_ia32_compressdf512_mask: |
10389 | case X86::BI__builtin_ia32_compresssf128_mask: |
10390 | case X86::BI__builtin_ia32_compresssf256_mask: |
10391 | case X86::BI__builtin_ia32_compresssf512_mask: |
10392 | case X86::BI__builtin_ia32_compressdi128_mask: |
10393 | case X86::BI__builtin_ia32_compressdi256_mask: |
10394 | case X86::BI__builtin_ia32_compressdi512_mask: |
10395 | case X86::BI__builtin_ia32_compresssi128_mask: |
10396 | case X86::BI__builtin_ia32_compresssi256_mask: |
10397 | case X86::BI__builtin_ia32_compresssi512_mask: |
10398 | case X86::BI__builtin_ia32_compresshi128_mask: |
10399 | case X86::BI__builtin_ia32_compresshi256_mask: |
10400 | case X86::BI__builtin_ia32_compresshi512_mask: |
10401 | case X86::BI__builtin_ia32_compressqi128_mask: |
10402 | case X86::BI__builtin_ia32_compressqi256_mask: |
10403 | case X86::BI__builtin_ia32_compressqi512_mask: |
10404 | return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true); |
10405 | |
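      | // Gathers: pick the target-specific intrinsic, then size the mask to the
      | // narrower of the result and index vectors.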
10406 | case X86::BI__builtin_ia32_gather3div2df: |
10407 | case X86::BI__builtin_ia32_gather3div2di: |
10408 | case X86::BI__builtin_ia32_gather3div4df: |
10409 | case X86::BI__builtin_ia32_gather3div4di: |
10410 | case X86::BI__builtin_ia32_gather3div4sf: |
10411 | case X86::BI__builtin_ia32_gather3div4si: |
10412 | case X86::BI__builtin_ia32_gather3div8sf: |
10413 | case X86::BI__builtin_ia32_gather3div8si: |
10414 | case X86::BI__builtin_ia32_gather3siv2df: |
10415 | case X86::BI__builtin_ia32_gather3siv2di: |
10416 | case X86::BI__builtin_ia32_gather3siv4df: |
10417 | case X86::BI__builtin_ia32_gather3siv4di: |
10418 | case X86::BI__builtin_ia32_gather3siv4sf: |
10419 | case X86::BI__builtin_ia32_gather3siv4si: |
10420 | case X86::BI__builtin_ia32_gather3siv8sf: |
10421 | case X86::BI__builtin_ia32_gather3siv8si: |
10422 | case X86::BI__builtin_ia32_gathersiv8df: |
10423 | case X86::BI__builtin_ia32_gathersiv16sf: |
10424 | case X86::BI__builtin_ia32_gatherdiv8df: |
10425 | case X86::BI__builtin_ia32_gatherdiv16sf: |
10426 | case X86::BI__builtin_ia32_gathersiv8di: |
10427 | case X86::BI__builtin_ia32_gathersiv16si: |
10428 | case X86::BI__builtin_ia32_gatherdiv8di: |
10429 | case X86::BI__builtin_ia32_gatherdiv16si: { |
10430 | Intrinsic::ID IID; |
10431 | switch (BuiltinID) { |
10432 | default: llvm_unreachable("Unexpected builtin");
10433 | case X86::BI__builtin_ia32_gather3div2df: |
10434 | IID = Intrinsic::x86_avx512_mask_gather3div2_df; |
10435 | break; |
10436 | case X86::BI__builtin_ia32_gather3div2di: |
10437 | IID = Intrinsic::x86_avx512_mask_gather3div2_di; |
10438 | break; |
10439 | case X86::BI__builtin_ia32_gather3div4df: |
10440 | IID = Intrinsic::x86_avx512_mask_gather3div4_df; |
10441 | break; |
10442 | case X86::BI__builtin_ia32_gather3div4di: |
10443 | IID = Intrinsic::x86_avx512_mask_gather3div4_di; |
10444 | break; |
10445 | case X86::BI__builtin_ia32_gather3div4sf: |
10446 | IID = Intrinsic::x86_avx512_mask_gather3div4_sf; |
10447 | break; |
10448 | case X86::BI__builtin_ia32_gather3div4si: |
10449 | IID = Intrinsic::x86_avx512_mask_gather3div4_si; |
10450 | break; |
10451 | case X86::BI__builtin_ia32_gather3div8sf: |
10452 | IID = Intrinsic::x86_avx512_mask_gather3div8_sf; |
10453 | break; |
10454 | case X86::BI__builtin_ia32_gather3div8si: |
10455 | IID = Intrinsic::x86_avx512_mask_gather3div8_si; |
10456 | break; |
10457 | case X86::BI__builtin_ia32_gather3siv2df: |
10458 | IID = Intrinsic::x86_avx512_mask_gather3siv2_df; |
10459 | break; |
10460 | case X86::BI__builtin_ia32_gather3siv2di: |
10461 | IID = Intrinsic::x86_avx512_mask_gather3siv2_di; |
10462 | break; |
10463 | case X86::BI__builtin_ia32_gather3siv4df: |
10464 | IID = Intrinsic::x86_avx512_mask_gather3siv4_df; |
10465 | break; |
10466 | case X86::BI__builtin_ia32_gather3siv4di: |
10467 | IID = Intrinsic::x86_avx512_mask_gather3siv4_di; |
10468 | break; |
10469 | case X86::BI__builtin_ia32_gather3siv4sf: |
10470 | IID = Intrinsic::x86_avx512_mask_gather3siv4_sf; |
10471 | break; |
10472 | case X86::BI__builtin_ia32_gather3siv4si: |
10473 | IID = Intrinsic::x86_avx512_mask_gather3siv4_si; |
10474 | break; |
10475 | case X86::BI__builtin_ia32_gather3siv8sf: |
10476 | IID = Intrinsic::x86_avx512_mask_gather3siv8_sf; |
10477 | break; |
10478 | case X86::BI__builtin_ia32_gather3siv8si: |
10479 | IID = Intrinsic::x86_avx512_mask_gather3siv8_si; |
10480 | break; |
10481 | case X86::BI__builtin_ia32_gathersiv8df: |
10482 | IID = Intrinsic::x86_avx512_mask_gather_dpd_512; |
10483 | break; |
10484 | case X86::BI__builtin_ia32_gathersiv16sf: |
10485 | IID = Intrinsic::x86_avx512_mask_gather_dps_512; |
10486 | break; |
10487 | case X86::BI__builtin_ia32_gatherdiv8df: |
10488 | IID = Intrinsic::x86_avx512_mask_gather_qpd_512; |
10489 | break; |
10490 | case X86::BI__builtin_ia32_gatherdiv16sf: |
10491 | IID = Intrinsic::x86_avx512_mask_gather_qps_512; |
10492 | break; |
10493 | case X86::BI__builtin_ia32_gathersiv8di: |
10494 | IID = Intrinsic::x86_avx512_mask_gather_dpq_512; |
10495 | break; |
10496 | case X86::BI__builtin_ia32_gathersiv16si: |
10497 | IID = Intrinsic::x86_avx512_mask_gather_dpi_512; |
10498 | break; |
10499 | case X86::BI__builtin_ia32_gatherdiv8di: |
10500 | IID = Intrinsic::x86_avx512_mask_gather_qpq_512; |
10501 | break; |
10502 | case X86::BI__builtin_ia32_gatherdiv16si: |
10503 | IID = Intrinsic::x86_avx512_mask_gather_qpi_512; |
10504 | break; |
10505 | } |
10506 | |
10507 | unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(), |
10508 | Ops[2]->getType()->getVectorNumElements()); |
10509 | Ops[3] = getMaskVecValue(*this, Ops[3], MinElts); |
10510 | Function *Intr = CGM.getIntrinsic(IID); |
10511 | return Builder.CreateCall(Intr, Ops); |
10512 | } |
10513 | |
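      | // Scatters mirror the gather lowering: select the intrinsic and size the mask
      | // from the index and source vectors.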
10514 | case X86::BI__builtin_ia32_scattersiv8df: |
10515 | case X86::BI__builtin_ia32_scattersiv16sf: |
10516 | case X86::BI__builtin_ia32_scatterdiv8df: |
10517 | case X86::BI__builtin_ia32_scatterdiv16sf: |
10518 | case X86::BI__builtin_ia32_scattersiv8di: |
10519 | case X86::BI__builtin_ia32_scattersiv16si: |
10520 | case X86::BI__builtin_ia32_scatterdiv8di: |
10521 | case X86::BI__builtin_ia32_scatterdiv16si: |
10522 | case X86::BI__builtin_ia32_scatterdiv2df: |
10523 | case X86::BI__builtin_ia32_scatterdiv2di: |
10524 | case X86::BI__builtin_ia32_scatterdiv4df: |
10525 | case X86::BI__builtin_ia32_scatterdiv4di: |
10526 | case X86::BI__builtin_ia32_scatterdiv4sf: |
10527 | case X86::BI__builtin_ia32_scatterdiv4si: |
10528 | case X86::BI__builtin_ia32_scatterdiv8sf: |
10529 | case X86::BI__builtin_ia32_scatterdiv8si: |
10530 | case X86::BI__builtin_ia32_scattersiv2df: |
10531 | case X86::BI__builtin_ia32_scattersiv2di: |
10532 | case X86::BI__builtin_ia32_scattersiv4df: |
10533 | case X86::BI__builtin_ia32_scattersiv4di: |
10534 | case X86::BI__builtin_ia32_scattersiv4sf: |
10535 | case X86::BI__builtin_ia32_scattersiv4si: |
10536 | case X86::BI__builtin_ia32_scattersiv8sf: |
10537 | case X86::BI__builtin_ia32_scattersiv8si: { |
10538 | Intrinsic::ID IID; |
10539 | switch (BuiltinID) { |
10540 | default: llvm_unreachable("Unexpected builtin");
10541 | case X86::BI__builtin_ia32_scattersiv8df: |
10542 | IID = Intrinsic::x86_avx512_mask_scatter_dpd_512; |
10543 | break; |
10544 | case X86::BI__builtin_ia32_scattersiv16sf: |
10545 | IID = Intrinsic::x86_avx512_mask_scatter_dps_512; |
10546 | break; |
10547 | case X86::BI__builtin_ia32_scatterdiv8df: |
10548 | IID = Intrinsic::x86_avx512_mask_scatter_qpd_512; |
10549 | break; |
10550 | case X86::BI__builtin_ia32_scatterdiv16sf: |
10551 | IID = Intrinsic::x86_avx512_mask_scatter_qps_512; |
10552 | break; |
10553 | case X86::BI__builtin_ia32_scattersiv8di: |
10554 | IID = Intrinsic::x86_avx512_mask_scatter_dpq_512; |
10555 | break; |
10556 | case X86::BI__builtin_ia32_scattersiv16si: |
10557 | IID = Intrinsic::x86_avx512_mask_scatter_dpi_512; |
10558 | break; |
10559 | case X86::BI__builtin_ia32_scatterdiv8di: |
10560 | IID = Intrinsic::x86_avx512_mask_scatter_qpq_512; |
10561 | break; |
10562 | case X86::BI__builtin_ia32_scatterdiv16si: |
10563 | IID = Intrinsic::x86_avx512_mask_scatter_qpi_512; |
10564 | break; |
10565 | case X86::BI__builtin_ia32_scatterdiv2df: |
10566 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_df; |
10567 | break; |
10568 | case X86::BI__builtin_ia32_scatterdiv2di: |
10569 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_di; |
10570 | break; |
10571 | case X86::BI__builtin_ia32_scatterdiv4df: |
10572 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_df; |
10573 | break; |
10574 | case X86::BI__builtin_ia32_scatterdiv4di: |
10575 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_di; |
10576 | break; |
10577 | case X86::BI__builtin_ia32_scatterdiv4sf: |
10578 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf; |
10579 | break; |
10580 | case X86::BI__builtin_ia32_scatterdiv4si: |
10581 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_si; |
10582 | break; |
10583 | case X86::BI__builtin_ia32_scatterdiv8sf: |
10584 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf; |
10585 | break; |
10586 | case X86::BI__builtin_ia32_scatterdiv8si: |
10587 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_si; |
10588 | break; |
10589 | case X86::BI__builtin_ia32_scattersiv2df: |
10590 | IID = Intrinsic::x86_avx512_mask_scattersiv2_df; |
10591 | break; |
10592 | case X86::BI__builtin_ia32_scattersiv2di: |
10593 | IID = Intrinsic::x86_avx512_mask_scattersiv2_di; |
10594 | break; |
10595 | case X86::BI__builtin_ia32_scattersiv4df: |
10596 | IID = Intrinsic::x86_avx512_mask_scattersiv4_df; |
10597 | break; |
10598 | case X86::BI__builtin_ia32_scattersiv4di: |
10599 | IID = Intrinsic::x86_avx512_mask_scattersiv4_di; |
10600 | break; |
10601 | case X86::BI__builtin_ia32_scattersiv4sf: |
10602 | IID = Intrinsic::x86_avx512_mask_scattersiv4_sf; |
10603 | break; |
10604 | case X86::BI__builtin_ia32_scattersiv4si: |
10605 | IID = Intrinsic::x86_avx512_mask_scattersiv4_si; |
10606 | break; |
10607 | case X86::BI__builtin_ia32_scattersiv8sf: |
10608 | IID = Intrinsic::x86_avx512_mask_scattersiv8_sf; |
10609 | break; |
10610 | case X86::BI__builtin_ia32_scattersiv8si: |
10611 | IID = Intrinsic::x86_avx512_mask_scattersiv8_si; |
10612 | break; |
10613 | } |
10614 | |
10615 | unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(), |
10616 | Ops[3]->getType()->getVectorNumElements()); |
10617 | Ops[1] = getMaskVecValue(*this, Ops[1], MinElts); |
10618 | Function *Intr = CGM.getIntrinsic(IID); |
10619 | return Builder.CreateCall(Intr, Ops); |
10620 | } |
10621 | |
10622 | case X86::BI__builtin_ia32_storehps: |
10623 | case X86::BI__builtin_ia32_storelps: { |
10624 | llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty); |
10625 | llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); |
10626 | |
10627 | // Cast the value to v2i64.
10628 | Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); |
10629 | |
10630 | // Extract element 0 (storelps) or element 1 (storehps).
10631 | unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1; |
10632 | Ops[1] = Builder.CreateExtractElement(Ops[1], Index, "extract"); |
10633 | |
10634 | // Cast the pointer to i64* and store.
10635 | Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); |
10636 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
10637 | } |
10638 | case X86::BI__builtin_ia32_vextractf128_pd256: |
10639 | case X86::BI__builtin_ia32_vextractf128_ps256: |
10640 | case X86::BI__builtin_ia32_vextractf128_si256: |
10641 | case X86::BI__builtin_ia32_extract128i256: |
10642 | case X86::BI__builtin_ia32_extractf64x4_mask: |
10643 | case X86::BI__builtin_ia32_extractf32x4_mask: |
10644 | case X86::BI__builtin_ia32_extracti64x4_mask: |
10645 | case X86::BI__builtin_ia32_extracti32x4_mask: |
10646 | case X86::BI__builtin_ia32_extractf32x8_mask: |
10647 | case X86::BI__builtin_ia32_extracti32x8_mask: |
10648 | case X86::BI__builtin_ia32_extractf32x4_256_mask: |
10649 | case X86::BI__builtin_ia32_extracti32x4_256_mask: |
10650 | case X86::BI__builtin_ia32_extractf64x2_256_mask: |
10651 | case X86::BI__builtin_ia32_extracti64x2_256_mask: |
10652 | case X86::BI__builtin_ia32_extractf64x2_512_mask: |
10653 | case X86::BI__builtin_ia32_extracti64x2_512_mask: { |
10654 | llvm::Type *DstTy = ConvertType(E->getType()); |
10655 | unsigned NumElts = DstTy->getVectorNumElements(); |
10656 | unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements(); |
10657 | unsigned SubVectors = SrcNumElts / NumElts; |
10658 | unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
10659 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
10660 | Index &= SubVectors - 1; // Remove any extra bits. |
10661 | Index *= NumElts; |
10662 | |
10663 | uint32_t Indices[16]; |
10664 | for (unsigned i = 0; i != NumElts; ++i) |
10665 | Indices[i] = i + Index; |
10666 | |
10667 | Value *Res = Builder.CreateShuffleVector(Ops[0], |
10668 | UndefValue::get(Ops[0]->getType()), |
10669 | makeArrayRef(Indices, NumElts), |
10670 | "extract"); |
10671 | |
10672 | if (Ops.size() == 4) |
10673 | Res = EmitX86Select(*this, Ops[3], Res, Ops[2]); |
10674 | |
10675 | return Res; |
10676 | } |
10677 | case X86::BI__builtin_ia32_vinsertf128_pd256: |
10678 | case X86::BI__builtin_ia32_vinsertf128_ps256: |
10679 | case X86::BI__builtin_ia32_vinsertf128_si256: |
10680 | case X86::BI__builtin_ia32_insert128i256: |
10681 | case X86::BI__builtin_ia32_insertf64x4: |
10682 | case X86::BI__builtin_ia32_insertf32x4: |
10683 | case X86::BI__builtin_ia32_inserti64x4: |
10684 | case X86::BI__builtin_ia32_inserti32x4: |
10685 | case X86::BI__builtin_ia32_insertf32x8: |
10686 | case X86::BI__builtin_ia32_inserti32x8: |
10687 | case X86::BI__builtin_ia32_insertf32x4_256: |
10688 | case X86::BI__builtin_ia32_inserti32x4_256: |
10689 | case X86::BI__builtin_ia32_insertf64x2_256: |
10690 | case X86::BI__builtin_ia32_inserti64x2_256: |
10691 | case X86::BI__builtin_ia32_insertf64x2_512: |
10692 | case X86::BI__builtin_ia32_inserti64x2_512: { |
10693 | unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements(); |
10694 | unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements(); |
10695 | unsigned SubVectors = DstNumElts / SrcNumElts; |
10696 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
10697 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
10698 | Index &= SubVectors - 1; // Remove any extra bits. |
10699 | Index *= SrcNumElts; |
10700 | |
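      | // Widen the subvector to the destination width, then shuffle it over Ops[0]
      | // starting at the selected index.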
10701 | uint32_t Indices[16]; |
10702 | for (unsigned i = 0; i != DstNumElts; ++i) |
10703 | Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; |
10704 | |
10705 | Value *Op1 = Builder.CreateShuffleVector(Ops[1], |
10706 | UndefValue::get(Ops[1]->getType()), |
10707 | makeArrayRef(Indices, DstNumElts), |
10708 | "widen"); |
10709 | |
10710 | for (unsigned i = 0; i != DstNumElts; ++i) { |
10711 | if (i >= Index && i < (Index + SrcNumElts)) |
10712 | Indices[i] = (i - Index) + DstNumElts; |
10713 | else |
10714 | Indices[i] = i; |
10715 | } |
10716 | |
10717 | return Builder.CreateShuffleVector(Ops[0], Op1, |
10718 | makeArrayRef(Indices, DstNumElts), |
10719 | "insert"); |
10720 | } |
10721 | case X86::BI__builtin_ia32_pmovqd512_mask: |
10722 | case X86::BI__builtin_ia32_pmovwb512_mask: { |
10723 | Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
10724 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
10725 | } |
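      | // With an all-ones mask the truncating move is just a trunc; otherwise fall
      | // back to the masked pmov intrinsic.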
10726 | case X86::BI__builtin_ia32_pmovdb512_mask: |
10727 | case X86::BI__builtin_ia32_pmovdw512_mask: |
10728 | case X86::BI__builtin_ia32_pmovqw512_mask: { |
10729 | if (const auto *C = dyn_cast<Constant>(Ops[2])) |
10730 | if (C->isAllOnesValue()) |
10731 | return Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
10732 | |
10733 | Intrinsic::ID IID; |
10734 | switch (BuiltinID) { |
10735 | default: llvm_unreachable("Unsupported intrinsic!");
10736 | case X86::BI__builtin_ia32_pmovdb512_mask: |
10737 | IID = Intrinsic::x86_avx512_mask_pmov_db_512; |
10738 | break; |
10739 | case X86::BI__builtin_ia32_pmovdw512_mask: |
10740 | IID = Intrinsic::x86_avx512_mask_pmov_dw_512; |
10741 | break; |
10742 | case X86::BI__builtin_ia32_pmovqw512_mask: |
10743 | IID = Intrinsic::x86_avx512_mask_pmov_qw_512; |
10744 | break; |
10745 | } |
10746 | |
10747 | Function *Intr = CGM.getIntrinsic(IID); |
10748 | return Builder.CreateCall(Intr, Ops); |
10749 | } |
10750 | case X86::BI__builtin_ia32_pblendw128: |
10751 | case X86::BI__builtin_ia32_blendpd: |
10752 | case X86::BI__builtin_ia32_blendps: |
10753 | case X86::BI__builtin_ia32_blendpd256: |
10754 | case X86::BI__builtin_ia32_blendps256: |
10755 | case X86::BI__builtin_ia32_pblendw256: |
10756 | case X86::BI__builtin_ia32_pblendd128: |
10757 | case X86::BI__builtin_ia32_pblendd256: { |
10758 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
10759 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
10760 | |
10761 | uint32_t Indices[16]; |
10762 | // If there are more than 8 elements, the immediate is used twice so make |
10763 | // sure we handle that. |
10764 | for (unsigned i = 0; i != NumElts; ++i) |
10765 | Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i; |
10766 | |
10767 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
10768 | makeArrayRef(Indices, NumElts), |
10769 | "blend"); |
10770 | } |
10771 | case X86::BI__builtin_ia32_pshuflw: |
10772 | case X86::BI__builtin_ia32_pshuflw256: |
10773 | case X86::BI__builtin_ia32_pshuflw512: { |
10774 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
10775 | llvm::Type *Ty = Ops[0]->getType(); |
10776 | unsigned NumElts = Ty->getVectorNumElements(); |
10777 | |
10778 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
10779 | Imm = (Imm & 0xff) * 0x01010101; |
10780 | |
10781 | uint32_t Indices[32]; |
10782 | for (unsigned l = 0; l != NumElts; l += 8) { |
10783 | for (unsigned i = 0; i != 4; ++i) { |
10784 | Indices[l + i] = l + (Imm & 3); |
10785 | Imm >>= 2; |
10786 | } |
10787 | for (unsigned i = 4; i != 8; ++i) |
10788 | Indices[l + i] = l + i; |
10789 | } |
10790 | |
10791 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), |
10792 | makeArrayRef(Indices, NumElts), |
10793 | "pshuflw"); |
10794 | } |
10795 | case X86::BI__builtin_ia32_pshufhw: |
10796 | case X86::BI__builtin_ia32_pshufhw256: |
10797 | case X86::BI__builtin_ia32_pshufhw512: { |
10798 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
10799 | llvm::Type *Ty = Ops[0]->getType(); |
10800 | unsigned NumElts = Ty->getVectorNumElements(); |
10801 | |
10802 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
10803 | Imm = (Imm & 0xff) * 0x01010101; |
10804 | |
10805 | uint32_t Indices[32]; |
10806 | for (unsigned l = 0; l != NumElts; l += 8) { |
10807 | for (unsigned i = 0; i != 4; ++i) |
10808 | Indices[l + i] = l + i; |
10809 | for (unsigned i = 4; i != 8; ++i) { |
10810 | Indices[l + i] = l + 4 + (Imm & 3); |
10811 | Imm >>= 2; |
10812 | } |
10813 | } |
10814 | |
10815 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), |
10816 | makeArrayRef(Indices, NumElts), |
10817 | "pshufhw"); |
10818 | } |
10819 | case X86::BI__builtin_ia32_pshufd: |
10820 | case X86::BI__builtin_ia32_pshufd256: |
10821 | case X86::BI__builtin_ia32_pshufd512: |
10822 | case X86::BI__builtin_ia32_vpermilpd: |
10823 | case X86::BI__builtin_ia32_vpermilps: |
10824 | case X86::BI__builtin_ia32_vpermilpd256: |
10825 | case X86::BI__builtin_ia32_vpermilps256: |
10826 | case X86::BI__builtin_ia32_vpermilpd512: |
10827 | case X86::BI__builtin_ia32_vpermilps512: { |
10828 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
10829 | llvm::Type *Ty = Ops[0]->getType(); |
10830 | unsigned NumElts = Ty->getVectorNumElements(); |
10831 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
10832 | unsigned NumLaneElts = NumElts / NumLanes; |
10833 | |
10834 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
10835 | Imm = (Imm & 0xff) * 0x01010101; |
10836 | |
10837 | uint32_t Indices[16]; |
10838 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
10839 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
10840 | Indices[i + l] = (Imm % NumLaneElts) + l; |
10841 | Imm /= NumLaneElts; |
10842 | } |
10843 | } |
10844 | |
10845 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), |
10846 | makeArrayRef(Indices, NumElts), |
10847 | "permil"); |
10848 | } |
10849 | case X86::BI__builtin_ia32_shufpd: |
10850 | case X86::BI__builtin_ia32_shufpd256: |
10851 | case X86::BI__builtin_ia32_shufpd512: |
10852 | case X86::BI__builtin_ia32_shufps: |
10853 | case X86::BI__builtin_ia32_shufps256: |
10854 | case X86::BI__builtin_ia32_shufps512: { |
10855 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
10856 | llvm::Type *Ty = Ops[0]->getType(); |
10857 | unsigned NumElts = Ty->getVectorNumElements(); |
10858 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
10859 | unsigned NumLaneElts = NumElts / NumLanes; |
10860 | |
10861 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
10862 | Imm = (Imm & 0xff) * 0x01010101; |
10863 | |
10864 | uint32_t Indices[16]; |
10865 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
10866 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
10867 | unsigned Index = Imm % NumLaneElts; |
10868 | Imm /= NumLaneElts; |
10869 | if (i >= (NumLaneElts / 2)) |
10870 | Index += NumElts; |
10871 | Indices[l + i] = l + Index; |
10872 | } |
10873 | } |
10874 | |
10875 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
10876 | makeArrayRef(Indices, NumElts), |
10877 | "shufp"); |
10878 | } |
10879 | case X86::BI__builtin_ia32_permdi256: |
10880 | case X86::BI__builtin_ia32_permdf256: |
10881 | case X86::BI__builtin_ia32_permdi512: |
10882 | case X86::BI__builtin_ia32_permdf512: { |
10883 | unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
10884 | llvm::Type *Ty = Ops[0]->getType(); |
10885 | unsigned NumElts = Ty->getVectorNumElements(); |
10886 | |
10887 | // These intrinsics operate on 256-bit lanes of four 64-bit elements. |
10888 | uint32_t Indices[8]; |
10889 | for (unsigned l = 0; l != NumElts; l += 4) |
10890 | for (unsigned i = 0; i != 4; ++i) |
10891 | Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3); |
10892 | |
10893 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), |
10894 | makeArrayRef(Indices, NumElts), |
10895 | "perm"); |
10896 | } |
10897 | case X86::BI__builtin_ia32_palignr128: |
10898 | case X86::BI__builtin_ia32_palignr256: |
10899 | case X86::BI__builtin_ia32_palignr512: { |
10900 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
10901 | |
10902 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
10903 | assert(NumElts % 16 == 0);
10904 | |
10905 | // If palignr is shifting the pair of vectors more than the size of two |
10906 | // lanes, emit zero. |
10907 | if (ShiftVal >= 32) |
10908 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
10909 | |
10910 | // If palignr is shifting the pair of input vectors more than one lane, |
10911 | // but less than two lanes, convert to shifting in zeroes. |
10912 | if (ShiftVal > 16) { |
10913 | ShiftVal -= 16; |
10914 | Ops[1] = Ops[0]; |
10915 | Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); |
10916 | } |
10917 | |
10918 | uint32_t Indices[64]; |
10919 | // 256-bit palignr operates on 128-bit lanes so we need to handle that |
10920 | for (unsigned l = 0; l != NumElts; l += 16) { |
10921 | for (unsigned i = 0; i != 16; ++i) { |
10922 | unsigned Idx = ShiftVal + i; |
10923 | if (Idx >= 16) |
10924 | Idx += NumElts - 16; // End of lane, switch operand. |
10925 | Indices[l + i] = Idx + l; |
10926 | } |
10927 | } |
10928 | |
10929 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
10930 | makeArrayRef(Indices, NumElts), |
10931 | "palignr"); |
10932 | } |
10933 | case X86::BI__builtin_ia32_alignd128: |
10934 | case X86::BI__builtin_ia32_alignd256: |
10935 | case X86::BI__builtin_ia32_alignd512: |
10936 | case X86::BI__builtin_ia32_alignq128: |
10937 | case X86::BI__builtin_ia32_alignq256: |
10938 | case X86::BI__builtin_ia32_alignq512: { |
10939 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
10940 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
10941 | |
10942 | // Mask the shift amount to width of two vectors. |
10943 | ShiftVal &= (2 * NumElts) - 1; |
10944 | |
10945 | uint32_t Indices[16]; |
10946 | for (unsigned i = 0; i != NumElts; ++i) |
10947 | Indices[i] = i + ShiftVal; |
10948 | |
10949 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
10950 | makeArrayRef(Indices, NumElts), |
10951 | "valign"); |
10952 | } |
10953 | case X86::BI__builtin_ia32_shuf_f32x4_256: |
10954 | case X86::BI__builtin_ia32_shuf_f64x2_256: |
10955 | case X86::BI__builtin_ia32_shuf_i32x4_256: |
10956 | case X86::BI__builtin_ia32_shuf_i64x2_256: |
10957 | case X86::BI__builtin_ia32_shuf_f32x4: |
10958 | case X86::BI__builtin_ia32_shuf_f64x2: |
10959 | case X86::BI__builtin_ia32_shuf_i32x4: |
10960 | case X86::BI__builtin_ia32_shuf_i64x2: { |
10961 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
10962 | llvm::Type *Ty = Ops[0]->getType(); |
10963 | unsigned NumElts = Ty->getVectorNumElements(); |
10964 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; |
10965 | unsigned NumLaneElts = NumElts / NumLanes; |
10966 | |
10967 | uint32_t Indices[16]; |
10968 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
10969 | unsigned Index = (Imm % NumLanes) * NumLaneElts; |
10970 | Imm /= NumLanes; // Discard the bits we just used. |
10971 | if (l >= (NumElts / 2)) |
10972 | Index += NumElts; // Switch to other source. |
10973 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
10974 | Indices[l + i] = Index + i; |
10975 | } |
10976 | } |
10977 | |
10978 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
10979 | makeArrayRef(Indices, NumElts), |
10980 | "shuf"); |
10981 | } |
10982 | |
10983 | case X86::BI__builtin_ia32_vperm2f128_pd256: |
10984 | case X86::BI__builtin_ia32_vperm2f128_ps256: |
10985 | case X86::BI__builtin_ia32_vperm2f128_si256: |
10986 | case X86::BI__builtin_ia32_permti256: { |
10987 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
10988 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
10989 | |
10990 | // This takes a very simple approach since there are two lanes and a |
10991 | // shuffle can have 2 inputs. So we reserve the first input for the first |
10992 | // lane and the second input for the second lane. This may result in |
10993 | // duplicate sources, but this can be dealt with in the backend. |
10994 | |
10995 | Value *OutOps[2]; |
10996 | uint32_t Indices[8]; |
10997 | for (unsigned l = 0; l != 2; ++l) { |
10998 | // Determine the source for this lane. |
10999 | if (Imm & (1 << ((l * 4) + 3))) |
11000 | OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType()); |
11001 | else if (Imm & (1 << ((l * 4) + 1))) |
11002 | OutOps[l] = Ops[1]; |
11003 | else |
11004 | OutOps[l] = Ops[0]; |
11005 | |
11006 | for (unsigned i = 0; i != NumElts/2; ++i) { |
11007 | // Start with ith element of the source for this lane. |
11008 | unsigned Idx = (l * NumElts) + i; |
11009 | // If bit 0 of the immediate half is set, switch to the high half of |
11010 | // the source. |
11011 | if (Imm & (1 << (l * 4))) |
11012 | Idx += NumElts/2; |
11013 | Indices[(l * (NumElts/2)) + i] = Idx; |
11014 | } |
11015 | } |
11016 | |
11017 | return Builder.CreateShuffleVector(OutOps[0], OutOps[1], |
11018 | makeArrayRef(Indices, NumElts), |
11019 | "vperm"); |
11020 | } |
11021 | |
11022 | case X86::BI__builtin_ia32_pslldqi128_byteshift: |
11023 | case X86::BI__builtin_ia32_pslldqi256_byteshift: |
11024 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { |
11025 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
11026 | llvm::Type *ResultType = Ops[0]->getType(); |
11027 | // Builtin type is vXi64 so multiply by 8 to get bytes. |
11028 | unsigned NumElts = ResultType->getVectorNumElements() * 8; |
11029 | |
11030 | // If pslldq is shifting the vector more than 15 bytes, emit zero. |
11031 | if (ShiftVal >= 16) |
11032 | return llvm::Constant::getNullValue(ResultType); |
11033 | |
11034 | uint32_t Indices[64]; |
11035 | // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that |
11036 | for (unsigned l = 0; l != NumElts; l += 16) { |
11037 | for (unsigned i = 0; i != 16; ++i) { |
11038 | unsigned Idx = NumElts + i - ShiftVal; |
11039 | if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. |
11040 | Indices[l + i] = Idx + l; |
11041 | } |
11042 | } |
11043 | |
11044 | llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts); |
11045 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
11046 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
11047 | Value *SV = Builder.CreateShuffleVector(Zero, Cast, |
11048 | makeArrayRef(Indices, NumElts), |
11049 | "pslldq"); |
11050 | return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); |
11051 | } |
11052 | case X86::BI__builtin_ia32_psrldqi128_byteshift: |
11053 | case X86::BI__builtin_ia32_psrldqi256_byteshift: |
11054 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { |
11055 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
11056 | llvm::Type *ResultType = Ops[0]->getType(); |
11057 | // Builtin type is vXi64 so multiply by 8 to get bytes. |
11058 | unsigned NumElts = ResultType->getVectorNumElements() * 8; |
11059 | |
11060 | // If psrldq is shifting the vector more than 15 bytes, emit zero. |
11061 | if (ShiftVal >= 16) |
11062 | return llvm::Constant::getNullValue(ResultType); |
11063 | |
11064 | uint32_t Indices[64]; |
11065 | // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that |
11066 | for (unsigned l = 0; l != NumElts; l += 16) { |
11067 | for (unsigned i = 0; i != 16; ++i) { |
11068 | unsigned Idx = i + ShiftVal; |
11069 | if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. |
11070 | Indices[l + i] = Idx + l; |
11071 | } |
11072 | } |
11073 | |
11074 | llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts); |
11075 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
11076 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
11077 | Value *SV = Builder.CreateShuffleVector(Cast, Zero, |
11078 | makeArrayRef(Indices, NumElts), |
11079 | "psrldq"); |
11080 | return Builder.CreateBitCast(SV, ResultType, "cast"); |
11081 | } |
11082 | case X86::BI__builtin_ia32_kshiftliqi: |
11083 | case X86::BI__builtin_ia32_kshiftlihi: |
11084 | case X86::BI__builtin_ia32_kshiftlisi: |
11085 | case X86::BI__builtin_ia32_kshiftlidi: { |
11086 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
11087 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11088 | |
11089 | if (ShiftVal >= NumElts) |
11090 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
11091 | |
11092 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
11093 | |
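      | // kshiftl: shuffle from the concatenation {Zero, In} so zeros shift into the
      | // low mask bits.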
11094 | uint32_t Indices[64]; |
11095 | for (unsigned i = 0; i != NumElts; ++i) |
11096 | Indices[i] = NumElts + i - ShiftVal; |
11097 | |
11098 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
11099 | Value *SV = Builder.CreateShuffleVector(Zero, In, |
11100 | makeArrayRef(Indices, NumElts), |
11101 | "kshiftl"); |
11102 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
11103 | } |
11104 | case X86::BI__builtin_ia32_kshiftriqi: |
11105 | case X86::BI__builtin_ia32_kshiftrihi: |
11106 | case X86::BI__builtin_ia32_kshiftrisi: |
11107 | case X86::BI__builtin_ia32_kshiftridi: { |
11108 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
11109 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11110 | |
11111 | if (ShiftVal >= NumElts) |
11112 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
11113 | |
11114 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
11115 | |
11116 | uint32_t Indices[64]; |
11117 | for (unsigned i = 0; i != NumElts; ++i) |
11118 | Indices[i] = i + ShiftVal; |
11119 | |
11120 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
11121 | Value *SV = Builder.CreateShuffleVector(In, Zero, |
11122 | makeArrayRef(Indices, NumElts), |
11123 | "kshiftr"); |
11124 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
11125 | } |
11126 | case X86::BI__builtin_ia32_movnti: |
11127 | case X86::BI__builtin_ia32_movnti64: |
11128 | case X86::BI__builtin_ia32_movntsd: |
11129 | case X86::BI__builtin_ia32_movntss: { |
11130 | llvm::MDNode *Node = llvm::MDNode::get( |
11131 | getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
11132 | |
11133 | Value *Ptr = Ops[0]; |
11134 | Value *Src = Ops[1]; |
11135 | |
11136 | // Extract the 0'th element of the source vector. |
11137 | if (BuiltinID == X86::BI__builtin_ia32_movntsd || |
11138 | BuiltinID == X86::BI__builtin_ia32_movntss) |
11139 | Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract"); |
11140 | |
11141 | // Convert the type of the pointer to a pointer to the stored type. |
11142 | Value *BC = Builder.CreateBitCast( |
11143 | Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast"); |
11144 | |
11145 | // Unaligned nontemporal store of the scalar value. |
11146 | StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC); |
11147 | SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); |
11148 | SI->setAlignment(1); |
11149 | return SI; |
11150 | } |
11151 | // Rotate is a special case of funnel shift - 1st 2 args are the same. |
11152 | case X86::BI__builtin_ia32_vprotb: |
11153 | case X86::BI__builtin_ia32_vprotw: |
11154 | case X86::BI__builtin_ia32_vprotd: |
11155 | case X86::BI__builtin_ia32_vprotq: |
11156 | case X86::BI__builtin_ia32_vprotbi: |
11157 | case X86::BI__builtin_ia32_vprotwi: |
11158 | case X86::BI__builtin_ia32_vprotdi: |
11159 | case X86::BI__builtin_ia32_vprotqi: |
11160 | case X86::BI__builtin_ia32_prold128: |
11161 | case X86::BI__builtin_ia32_prold256: |
11162 | case X86::BI__builtin_ia32_prold512: |
11163 | case X86::BI__builtin_ia32_prolq128: |
11164 | case X86::BI__builtin_ia32_prolq256: |
11165 | case X86::BI__builtin_ia32_prolq512: |
11166 | case X86::BI__builtin_ia32_prolvd128: |
11167 | case X86::BI__builtin_ia32_prolvd256: |
11168 | case X86::BI__builtin_ia32_prolvd512: |
11169 | case X86::BI__builtin_ia32_prolvq128: |
11170 | case X86::BI__builtin_ia32_prolvq256: |
11171 | case X86::BI__builtin_ia32_prolvq512: |
11172 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false); |
11173 | case X86::BI__builtin_ia32_prord128: |
11174 | case X86::BI__builtin_ia32_prord256: |
11175 | case X86::BI__builtin_ia32_prord512: |
11176 | case X86::BI__builtin_ia32_prorq128: |
11177 | case X86::BI__builtin_ia32_prorq256: |
11178 | case X86::BI__builtin_ia32_prorq512: |
11179 | case X86::BI__builtin_ia32_prorvd128: |
11180 | case X86::BI__builtin_ia32_prorvd256: |
11181 | case X86::BI__builtin_ia32_prorvd512: |
11182 | case X86::BI__builtin_ia32_prorvq128: |
11183 | case X86::BI__builtin_ia32_prorvq256: |
11184 | case X86::BI__builtin_ia32_prorvq512: |
11185 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true); |
11186 | case X86::BI__builtin_ia32_selectb_128: |
11187 | case X86::BI__builtin_ia32_selectb_256: |
11188 | case X86::BI__builtin_ia32_selectb_512: |
11189 | case X86::BI__builtin_ia32_selectw_128: |
11190 | case X86::BI__builtin_ia32_selectw_256: |
11191 | case X86::BI__builtin_ia32_selectw_512: |
11192 | case X86::BI__builtin_ia32_selectd_128: |
11193 | case X86::BI__builtin_ia32_selectd_256: |
11194 | case X86::BI__builtin_ia32_selectd_512: |
11195 | case X86::BI__builtin_ia32_selectq_128: |
11196 | case X86::BI__builtin_ia32_selectq_256: |
11197 | case X86::BI__builtin_ia32_selectq_512: |
11198 | case X86::BI__builtin_ia32_selectps_128: |
11199 | case X86::BI__builtin_ia32_selectps_256: |
11200 | case X86::BI__builtin_ia32_selectps_512: |
11201 | case X86::BI__builtin_ia32_selectpd_128: |
11202 | case X86::BI__builtin_ia32_selectpd_256: |
11203 | case X86::BI__builtin_ia32_selectpd_512: |
11204 | return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]); |
11205 | case X86::BI__builtin_ia32_selectss_128: |
11206 | case X86::BI__builtin_ia32_selectsd_128: { |
11207 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
11208 | Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
11209 | A = EmitX86ScalarSelect(*this, Ops[0], A, B); |
11210 | return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0); |
11211 | } |
11212 | case X86::BI__builtin_ia32_cmpb128_mask: |
11213 | case X86::BI__builtin_ia32_cmpb256_mask: |
11214 | case X86::BI__builtin_ia32_cmpb512_mask: |
11215 | case X86::BI__builtin_ia32_cmpw128_mask: |
11216 | case X86::BI__builtin_ia32_cmpw256_mask: |
11217 | case X86::BI__builtin_ia32_cmpw512_mask: |
11218 | case X86::BI__builtin_ia32_cmpd128_mask: |
11219 | case X86::BI__builtin_ia32_cmpd256_mask: |
11220 | case X86::BI__builtin_ia32_cmpd512_mask: |
11221 | case X86::BI__builtin_ia32_cmpq128_mask: |
11222 | case X86::BI__builtin_ia32_cmpq256_mask: |
11223 | case X86::BI__builtin_ia32_cmpq512_mask: { |
11224 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
11225 | return EmitX86MaskedCompare(*this, CC, true, Ops); |
11226 | } |
11227 | case X86::BI__builtin_ia32_ucmpb128_mask: |
11228 | case X86::BI__builtin_ia32_ucmpb256_mask: |
11229 | case X86::BI__builtin_ia32_ucmpb512_mask: |
11230 | case X86::BI__builtin_ia32_ucmpw128_mask: |
11231 | case X86::BI__builtin_ia32_ucmpw256_mask: |
11232 | case X86::BI__builtin_ia32_ucmpw512_mask: |
11233 | case X86::BI__builtin_ia32_ucmpd128_mask: |
11234 | case X86::BI__builtin_ia32_ucmpd256_mask: |
11235 | case X86::BI__builtin_ia32_ucmpd512_mask: |
11236 | case X86::BI__builtin_ia32_ucmpq128_mask: |
11237 | case X86::BI__builtin_ia32_ucmpq256_mask: |
11238 | case X86::BI__builtin_ia32_ucmpq512_mask: { |
11239 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
11240 | return EmitX86MaskedCompare(*this, CC, false, Ops); |
11241 | } |
11242 | case X86::BI__builtin_ia32_vpcomb: |
11243 | case X86::BI__builtin_ia32_vpcomw: |
11244 | case X86::BI__builtin_ia32_vpcomd: |
11245 | case X86::BI__builtin_ia32_vpcomq: |
11246 | return EmitX86vpcom(*this, Ops, true); |
11247 | case X86::BI__builtin_ia32_vpcomub: |
11248 | case X86::BI__builtin_ia32_vpcomuw: |
11249 | case X86::BI__builtin_ia32_vpcomud: |
11250 | case X86::BI__builtin_ia32_vpcomuq: |
11251 | return EmitX86vpcom(*this, Ops, false); |
11252 | |
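      | // kortestc/kortestz: OR the two masks and test the result against all-ones or
      | // all-zeros respectively.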
11253 | case X86::BI__builtin_ia32_kortestcqi: |
11254 | case X86::BI__builtin_ia32_kortestchi: |
11255 | case X86::BI__builtin_ia32_kortestcsi: |
11256 | case X86::BI__builtin_ia32_kortestcdi: { |
11257 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
11258 | Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType()); |
11259 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
11260 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
11261 | } |
11262 | case X86::BI__builtin_ia32_kortestzqi: |
11263 | case X86::BI__builtin_ia32_kortestzhi: |
11264 | case X86::BI__builtin_ia32_kortestzsi: |
11265 | case X86::BI__builtin_ia32_kortestzdi: { |
11266 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
11267 | Value *C = llvm::Constant::getNullValue(Ops[0]->getType()); |
11268 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
11269 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
11270 | } |
11271 | |
11272 | case X86::BI__builtin_ia32_ktestcqi: |
11273 | case X86::BI__builtin_ia32_ktestzqi: |
11274 | case X86::BI__builtin_ia32_ktestchi: |
11275 | case X86::BI__builtin_ia32_ktestzhi: |
11276 | case X86::BI__builtin_ia32_ktestcsi: |
11277 | case X86::BI__builtin_ia32_ktestzsi: |
11278 | case X86::BI__builtin_ia32_ktestcdi: |
11279 | case X86::BI__builtin_ia32_ktestzdi: { |
11280 | Intrinsic::ID IID; |
11281 | switch (BuiltinID) { |
11282 | default: llvm_unreachable("Unsupported intrinsic!");
11283 | case X86::BI__builtin_ia32_ktestcqi: |
11284 | IID = Intrinsic::x86_avx512_ktestc_b; |
11285 | break; |
11286 | case X86::BI__builtin_ia32_ktestzqi: |
11287 | IID = Intrinsic::x86_avx512_ktestz_b; |
11288 | break; |
11289 | case X86::BI__builtin_ia32_ktestchi: |
11290 | IID = Intrinsic::x86_avx512_ktestc_w; |
11291 | break; |
11292 | case X86::BI__builtin_ia32_ktestzhi: |
11293 | IID = Intrinsic::x86_avx512_ktestz_w; |
11294 | break; |
11295 | case X86::BI__builtin_ia32_ktestcsi: |
11296 | IID = Intrinsic::x86_avx512_ktestc_d; |
11297 | break; |
11298 | case X86::BI__builtin_ia32_ktestzsi: |
11299 | IID = Intrinsic::x86_avx512_ktestz_d; |
11300 | break; |
11301 | case X86::BI__builtin_ia32_ktestcdi: |
11302 | IID = Intrinsic::x86_avx512_ktestc_q; |
11303 | break; |
11304 | case X86::BI__builtin_ia32_ktestzdi: |
11305 | IID = Intrinsic::x86_avx512_ktestz_q; |
11306 | break; |
11307 | } |
11308 | |
11309 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11310 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
11311 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
11312 | Function *Intr = CGM.getIntrinsic(IID); |
11313 | return Builder.CreateCall(Intr, {LHS, RHS}); |
11314 | } |
11315 | |
11316 | case X86::BI__builtin_ia32_kaddqi: |
11317 | case X86::BI__builtin_ia32_kaddhi: |
11318 | case X86::BI__builtin_ia32_kaddsi: |
11319 | case X86::BI__builtin_ia32_kadddi: { |
11320 | Intrinsic::ID IID; |
11321 | switch (BuiltinID) { |
11322 | default: llvm_unreachable("Unsupported intrinsic!");
11323 | case X86::BI__builtin_ia32_kaddqi: |
11324 | IID = Intrinsic::x86_avx512_kadd_b; |
11325 | break; |
11326 | case X86::BI__builtin_ia32_kaddhi: |
11327 | IID = Intrinsic::x86_avx512_kadd_w; |
11328 | break; |
11329 | case X86::BI__builtin_ia32_kaddsi: |
11330 | IID = Intrinsic::x86_avx512_kadd_d; |
11331 | break; |
11332 | case X86::BI__builtin_ia32_kadddi: |
11333 | IID = Intrinsic::x86_avx512_kadd_q; |
11334 | break; |
11335 | } |
11336 | |
11337 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11338 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
11339 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
11340 | Function *Intr = CGM.getIntrinsic(IID); |
11341 | Value *Res = Builder.CreateCall(Intr, {LHS, RHS}); |
11342 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
11343 | } |
11344 | case X86::BI__builtin_ia32_kandqi: |
11345 | case X86::BI__builtin_ia32_kandhi: |
11346 | case X86::BI__builtin_ia32_kandsi: |
11347 | case X86::BI__builtin_ia32_kanddi: |
11348 | return EmitX86MaskLogic(*this, Instruction::And, Ops); |
11349 | case X86::BI__builtin_ia32_kandnqi: |
11350 | case X86::BI__builtin_ia32_kandnhi: |
11351 | case X86::BI__builtin_ia32_kandnsi: |
11352 | case X86::BI__builtin_ia32_kandndi: |
11353 | return EmitX86MaskLogic(*this, Instruction::And, Ops, true); |
11354 | case X86::BI__builtin_ia32_korqi: |
11355 | case X86::BI__builtin_ia32_korhi: |
11356 | case X86::BI__builtin_ia32_korsi: |
11357 | case X86::BI__builtin_ia32_kordi: |
11358 | return EmitX86MaskLogic(*this, Instruction::Or, Ops); |
11359 | case X86::BI__builtin_ia32_kxnorqi: |
11360 | case X86::BI__builtin_ia32_kxnorhi: |
11361 | case X86::BI__builtin_ia32_kxnorsi: |
11362 | case X86::BI__builtin_ia32_kxnordi: |
11363 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true); |
11364 | case X86::BI__builtin_ia32_kxorqi: |
11365 | case X86::BI__builtin_ia32_kxorhi: |
11366 | case X86::BI__builtin_ia32_kxorsi: |
11367 | case X86::BI__builtin_ia32_kxordi: |
11368 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops); |
11369 | case X86::BI__builtin_ia32_knotqi: |
11370 | case X86::BI__builtin_ia32_knothi: |
11371 | case X86::BI__builtin_ia32_knotsi: |
11372 | case X86::BI__builtin_ia32_knotdi: { |
11373 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11374 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
11375 | return Builder.CreateBitCast(Builder.CreateNot(Res), |
11376 | Ops[0]->getType()); |
11377 | } |
11378 | case X86::BI__builtin_ia32_kmovb: |
11379 | case X86::BI__builtin_ia32_kmovw: |
11380 | case X86::BI__builtin_ia32_kmovd: |
11381 | case X86::BI__builtin_ia32_kmovq: { |
11382 | // Bitcast to vXi1 type and then back to integer. This gets the mask |
11383 | // register type into the IR, but might be optimized out depending on |
11384 | // what's around it. |
11385 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11386 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
11387 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
11388 | } |
11389 | |
11390 | case X86::BI__builtin_ia32_kunpckdi: |
11391 | case X86::BI__builtin_ia32_kunpcksi: |
11392 | case X86::BI__builtin_ia32_kunpckhi: { |
11393 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11394 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
11395 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
11396 | uint32_t Indices[64]; |
11397 | for (unsigned i = 0; i != NumElts; ++i) |
11398 | Indices[i] = i; |
11399 | |
11400 | // First extract half of each vector. This gives better codegen than |
11401 | // doing it in a single shuffle. |
11402 | LHS = Builder.CreateShuffleVector(LHS, LHS, |
11403 | makeArrayRef(Indices, NumElts / 2)); |
11404 | RHS = Builder.CreateShuffleVector(RHS, RHS, |
11405 | makeArrayRef(Indices, NumElts / 2)); |
11406 | // Concat the vectors. |
11407 | // NOTE: Operands are swapped to match the intrinsic definition. |
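// i.e. the result's low NumElts/2 bits come from Ops[1] and its high NumElts/2 bits from Ops[0]. |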
11408 | Value *Res = Builder.CreateShuffleVector(RHS, LHS, |
11409 | makeArrayRef(Indices, NumElts)); |
11410 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
11411 | } |
11412 | |
11413 | case X86::BI__builtin_ia32_vplzcntd_128: |
11414 | case X86::BI__builtin_ia32_vplzcntd_256: |
11415 | case X86::BI__builtin_ia32_vplzcntd_512: |
11416 | case X86::BI__builtin_ia32_vplzcntq_128: |
11417 | case X86::BI__builtin_ia32_vplzcntq_256: |
11418 | case X86::BI__builtin_ia32_vplzcntq_512: { |
11419 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
11420 | return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}); |
11421 | } |
11422 | case X86::BI__builtin_ia32_sqrtss: |
11423 | case X86::BI__builtin_ia32_sqrtsd: { |
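// Scalar sqrt: only element 0 is square-rooted; the upper elements of Ops[0] pass through unchanged. |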
11424 | Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
11425 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
11426 | A = Builder.CreateCall(F, {A}); |
11427 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
11428 | } |
11429 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
11430 | case X86::BI__builtin_ia32_sqrtss_round_mask: { |
11431 | unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
11432 | // Lower to a plain sqrt only if the rounding mode is 4 (AKA CUR_DIRECTION); |
11433 | // otherwise keep the intrinsic. |
11434 | if (CC != 4) { |
11435 | Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ? |
11436 | Intrinsic::x86_avx512_mask_sqrt_sd : |
11437 | Intrinsic::x86_avx512_mask_sqrt_ss; |
11438 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
11439 | } |
11440 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
11441 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
11442 | A = Builder.CreateCall(F, A); |
11443 | Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
11444 | A = EmitX86ScalarSelect(*this, Ops[3], A, Src); |
11445 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
11446 | } |
11447 | case X86::BI__builtin_ia32_sqrtpd256: |
11448 | case X86::BI__builtin_ia32_sqrtpd: |
11449 | case X86::BI__builtin_ia32_sqrtps256: |
11450 | case X86::BI__builtin_ia32_sqrtps: |
11451 | case X86::BI__builtin_ia32_sqrtps512: |
11452 | case X86::BI__builtin_ia32_sqrtpd512: { |
11453 | if (Ops.size() == 2) { |
11454 | unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
11455 | // Lower to a plain sqrt only if the rounding mode is 4 (AKA CUR_DIRECTION); |
11456 | // otherwise keep the intrinsic. |
11457 | if (CC != 4) { |
11458 | Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ? |
11459 | Intrinsic::x86_avx512_sqrt_ps_512 : |
11460 | Intrinsic::x86_avx512_sqrt_pd_512; |
11461 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
11462 | } |
11463 | } |
11464 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); |
11465 | return Builder.CreateCall(F, Ops[0]); |
11466 | } |
11467 | case X86::BI__builtin_ia32_pabsb128: |
11468 | case X86::BI__builtin_ia32_pabsw128: |
11469 | case X86::BI__builtin_ia32_pabsd128: |
11470 | case X86::BI__builtin_ia32_pabsb256: |
11471 | case X86::BI__builtin_ia32_pabsw256: |
11472 | case X86::BI__builtin_ia32_pabsd256: |
11473 | case X86::BI__builtin_ia32_pabsq128: |
11474 | case X86::BI__builtin_ia32_pabsq256: |
11475 | case X86::BI__builtin_ia32_pabsb512: |
11476 | case X86::BI__builtin_ia32_pabsw512: |
11477 | case X86::BI__builtin_ia32_pabsd512: |
11478 | case X86::BI__builtin_ia32_pabsq512: |
11479 | return EmitX86Abs(*this, Ops); |
11480 | |
11481 | case X86::BI__builtin_ia32_pmaxsb128: |
11482 | case X86::BI__builtin_ia32_pmaxsw128: |
11483 | case X86::BI__builtin_ia32_pmaxsd128: |
11484 | case X86::BI__builtin_ia32_pmaxsq128: |
11485 | case X86::BI__builtin_ia32_pmaxsb256: |
11486 | case X86::BI__builtin_ia32_pmaxsw256: |
11487 | case X86::BI__builtin_ia32_pmaxsd256: |
11488 | case X86::BI__builtin_ia32_pmaxsq256: |
11489 | case X86::BI__builtin_ia32_pmaxsb512: |
11490 | case X86::BI__builtin_ia32_pmaxsw512: |
11491 | case X86::BI__builtin_ia32_pmaxsd512: |
11492 | case X86::BI__builtin_ia32_pmaxsq512: |
11493 | return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops); |
11494 | case X86::BI__builtin_ia32_pmaxub128: |
11495 | case X86::BI__builtin_ia32_pmaxuw128: |
11496 | case X86::BI__builtin_ia32_pmaxud128: |
11497 | case X86::BI__builtin_ia32_pmaxuq128: |
11498 | case X86::BI__builtin_ia32_pmaxub256: |
11499 | case X86::BI__builtin_ia32_pmaxuw256: |
11500 | case X86::BI__builtin_ia32_pmaxud256: |
11501 | case X86::BI__builtin_ia32_pmaxuq256: |
11502 | case X86::BI__builtin_ia32_pmaxub512: |
11503 | case X86::BI__builtin_ia32_pmaxuw512: |
11504 | case X86::BI__builtin_ia32_pmaxud512: |
11505 | case X86::BI__builtin_ia32_pmaxuq512: |
11506 | return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops); |
11507 | case X86::BI__builtin_ia32_pminsb128: |
11508 | case X86::BI__builtin_ia32_pminsw128: |
11509 | case X86::BI__builtin_ia32_pminsd128: |
11510 | case X86::BI__builtin_ia32_pminsq128: |
11511 | case X86::BI__builtin_ia32_pminsb256: |
11512 | case X86::BI__builtin_ia32_pminsw256: |
11513 | case X86::BI__builtin_ia32_pminsd256: |
11514 | case X86::BI__builtin_ia32_pminsq256: |
11515 | case X86::BI__builtin_ia32_pminsb512: |
11516 | case X86::BI__builtin_ia32_pminsw512: |
11517 | case X86::BI__builtin_ia32_pminsd512: |
11518 | case X86::BI__builtin_ia32_pminsq512: |
11519 | return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops); |
11520 | case X86::BI__builtin_ia32_pminub128: |
11521 | case X86::BI__builtin_ia32_pminuw128: |
11522 | case X86::BI__builtin_ia32_pminud128: |
11523 | case X86::BI__builtin_ia32_pminuq128: |
11524 | case X86::BI__builtin_ia32_pminub256: |
11525 | case X86::BI__builtin_ia32_pminuw256: |
11526 | case X86::BI__builtin_ia32_pminud256: |
11527 | case X86::BI__builtin_ia32_pminuq256: |
11528 | case X86::BI__builtin_ia32_pminub512: |
11529 | case X86::BI__builtin_ia32_pminuw512: |
11530 | case X86::BI__builtin_ia32_pminud512: |
11531 | case X86::BI__builtin_ia32_pminuq512: |
11532 | return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops); |
11533 | |
11534 | case X86::BI__builtin_ia32_pmuludq128: |
11535 | case X86::BI__builtin_ia32_pmuludq256: |
11536 | case X86::BI__builtin_ia32_pmuludq512: |
11537 | return EmitX86Muldq(*this, /*IsSigned*/false, Ops); |
11538 | |
11539 | case X86::BI__builtin_ia32_pmuldq128: |
11540 | case X86::BI__builtin_ia32_pmuldq256: |
11541 | case X86::BI__builtin_ia32_pmuldq512: |
11542 | return EmitX86Muldq(*this, /*IsSigned*/true, Ops); |
11543 | |
11544 | case X86::BI__builtin_ia32_pternlogd512_mask: |
11545 | case X86::BI__builtin_ia32_pternlogq512_mask: |
11546 | case X86::BI__builtin_ia32_pternlogd128_mask: |
11547 | case X86::BI__builtin_ia32_pternlogd256_mask: |
11548 | case X86::BI__builtin_ia32_pternlogq128_mask: |
11549 | case X86::BI__builtin_ia32_pternlogq256_mask: |
11550 | return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops); |
11551 | |
11552 | case X86::BI__builtin_ia32_pternlogd512_maskz: |
11553 | case X86::BI__builtin_ia32_pternlogq512_maskz: |
11554 | case X86::BI__builtin_ia32_pternlogd128_maskz: |
11555 | case X86::BI__builtin_ia32_pternlogd256_maskz: |
11556 | case X86::BI__builtin_ia32_pternlogq128_maskz: |
11557 | case X86::BI__builtin_ia32_pternlogq256_maskz: |
11558 | return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops); |
11559 | |
11560 | case X86::BI__builtin_ia32_vpshldd128: |
11561 | case X86::BI__builtin_ia32_vpshldd256: |
11562 | case X86::BI__builtin_ia32_vpshldd512: |
11563 | case X86::BI__builtin_ia32_vpshldq128: |
11564 | case X86::BI__builtin_ia32_vpshldq256: |
11565 | case X86::BI__builtin_ia32_vpshldq512: |
11566 | case X86::BI__builtin_ia32_vpshldw128: |
11567 | case X86::BI__builtin_ia32_vpshldw256: |
11568 | case X86::BI__builtin_ia32_vpshldw512: |
11569 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
11570 | |
11571 | case X86::BI__builtin_ia32_vpshrdd128: |
11572 | case X86::BI__builtin_ia32_vpshrdd256: |
11573 | case X86::BI__builtin_ia32_vpshrdd512: |
11574 | case X86::BI__builtin_ia32_vpshrdq128: |
11575 | case X86::BI__builtin_ia32_vpshrdq256: |
11576 | case X86::BI__builtin_ia32_vpshrdq512: |
11577 | case X86::BI__builtin_ia32_vpshrdw128: |
11578 | case X86::BI__builtin_ia32_vpshrdw256: |
11579 | case X86::BI__builtin_ia32_vpshrdw512: |
11580 | // Ops 0 and 1 are swapped. |
11581 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
11582 | |
11583 | case X86::BI__builtin_ia32_vpshldvd128: |
11584 | case X86::BI__builtin_ia32_vpshldvd256: |
11585 | case X86::BI__builtin_ia32_vpshldvd512: |
11586 | case X86::BI__builtin_ia32_vpshldvq128: |
11587 | case X86::BI__builtin_ia32_vpshldvq256: |
11588 | case X86::BI__builtin_ia32_vpshldvq512: |
11589 | case X86::BI__builtin_ia32_vpshldvw128: |
11590 | case X86::BI__builtin_ia32_vpshldvw256: |
11591 | case X86::BI__builtin_ia32_vpshldvw512: |
11592 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
11593 | |
11594 | case X86::BI__builtin_ia32_vpshrdvd128: |
11595 | case X86::BI__builtin_ia32_vpshrdvd256: |
11596 | case X86::BI__builtin_ia32_vpshrdvd512: |
11597 | case X86::BI__builtin_ia32_vpshrdvq128: |
11598 | case X86::BI__builtin_ia32_vpshrdvq256: |
11599 | case X86::BI__builtin_ia32_vpshrdvq512: |
11600 | case X86::BI__builtin_ia32_vpshrdvw128: |
11601 | case X86::BI__builtin_ia32_vpshrdvw256: |
11602 | case X86::BI__builtin_ia32_vpshrdvw512: |
11603 | // Ops 0 and 1 are swapped. |
11604 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
11605 | |
11606 | // 3DNow! |
11607 | case X86::BI__builtin_ia32_pswapdsf: |
11608 | case X86::BI__builtin_ia32_pswapdsi: { |
11609 | llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext()); |
11610 | Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast"); |
11611 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); |
11612 | return Builder.CreateCall(F, Ops, "pswapd"); |
11613 | } |
11614 | case X86::BI__builtin_ia32_rdrand16_step: |
11615 | case X86::BI__builtin_ia32_rdrand32_step: |
11616 | case X86::BI__builtin_ia32_rdrand64_step: |
11617 | case X86::BI__builtin_ia32_rdseed16_step: |
11618 | case X86::BI__builtin_ia32_rdseed32_step: |
11619 | case X86::BI__builtin_ia32_rdseed64_step: { |
11620 | Intrinsic::ID ID; |
11621 | switch (BuiltinID) { |
11622 | default: llvm_unreachable("Unsupported intrinsic!"); |
11623 | case X86::BI__builtin_ia32_rdrand16_step: |
11624 | ID = Intrinsic::x86_rdrand_16; |
11625 | break; |
11626 | case X86::BI__builtin_ia32_rdrand32_step: |
11627 | ID = Intrinsic::x86_rdrand_32; |
11628 | break; |
11629 | case X86::BI__builtin_ia32_rdrand64_step: |
11630 | ID = Intrinsic::x86_rdrand_64; |
11631 | break; |
11632 | case X86::BI__builtin_ia32_rdseed16_step: |
11633 | ID = Intrinsic::x86_rdseed_16; |
11634 | break; |
11635 | case X86::BI__builtin_ia32_rdseed32_step: |
11636 | ID = Intrinsic::x86_rdseed_32; |
11637 | break; |
11638 | case X86::BI__builtin_ia32_rdseed64_step: |
11639 | ID = Intrinsic::x86_rdseed_64; |
11640 | break; |
11641 | } |
11642 | |
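// Each of these intrinsics returns {random value, carry flag}: the value is stored through the pointer in Ops[0] and the flag is returned to signal success. |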
11643 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID)); |
11644 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0), |
11645 | Ops[0]); |
11646 | return Builder.CreateExtractValue(Call, 1); |
11647 | } |
11648 | case X86::BI__builtin_ia32_addcarryx_u32: |
11649 | case X86::BI__builtin_ia32_addcarryx_u64: |
11650 | case X86::BI__builtin_ia32_subborrow_u32: |
11651 | case X86::BI__builtin_ia32_subborrow_u64: { |
11652 | Intrinsic::ID IID; |
11653 | switch (BuiltinID) { |
11654 | default: llvm_unreachable("Unsupported intrinsic!"); |
11655 | case X86::BI__builtin_ia32_addcarryx_u32: |
11656 | IID = Intrinsic::x86_addcarry_32; |
11657 | break; |
11658 | case X86::BI__builtin_ia32_addcarryx_u64: |
11659 | IID = Intrinsic::x86_addcarry_64; |
11660 | break; |
11661 | case X86::BI__builtin_ia32_subborrow_u32: |
11662 | IID = Intrinsic::x86_subborrow_32; |
11663 | break; |
11664 | case X86::BI__builtin_ia32_subborrow_u64: |
11665 | IID = Intrinsic::x86_subborrow_64; |
11666 | break; |
11667 | } |
11668 | |
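// The addcarry/subborrow intrinsics return {carry-out, result}: the result is stored through the out pointer Ops[3] and the carry-out is returned. |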
11669 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), |
11670 | { Ops[0], Ops[1], Ops[2] }); |
11671 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
11672 | Ops[3]); |
11673 | return Builder.CreateExtractValue(Call, 0); |
11674 | } |
11675 | |
11676 | case X86::BI__builtin_ia32_fpclassps128_mask: |
11677 | case X86::BI__builtin_ia32_fpclassps256_mask: |
11678 | case X86::BI__builtin_ia32_fpclassps512_mask: |
11679 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
11680 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
11681 | case X86::BI__builtin_ia32_fpclasspd512_mask: { |
11682 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
11683 | Value *MaskIn = Ops[2]; |
11684 | Ops.erase(&Ops[2]); |
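// The mask operand is peeled off here and reapplied to the compare result after the intrinsic call. |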
11685 | |
11686 | Intrinsic::ID ID; |
11687 | switch (BuiltinID) { |
11688 | default: llvm_unreachable("Unsupported intrinsic!"); |
11689 | case X86::BI__builtin_ia32_fpclassps128_mask: |
11690 | ID = Intrinsic::x86_avx512_fpclass_ps_128; |
11691 | break; |
11692 | case X86::BI__builtin_ia32_fpclassps256_mask: |
11693 | ID = Intrinsic::x86_avx512_fpclass_ps_256; |
11694 | break; |
11695 | case X86::BI__builtin_ia32_fpclassps512_mask: |
11696 | ID = Intrinsic::x86_avx512_fpclass_ps_512; |
11697 | break; |
11698 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
11699 | ID = Intrinsic::x86_avx512_fpclass_pd_128; |
11700 | break; |
11701 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
11702 | ID = Intrinsic::x86_avx512_fpclass_pd_256; |
11703 | break; |
11704 | case X86::BI__builtin_ia32_fpclasspd512_mask: |
11705 | ID = Intrinsic::x86_avx512_fpclass_pd_512; |
11706 | break; |
11707 | } |
11708 | |
11709 | Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
11710 | return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn); |
11711 | } |
11712 | |
11713 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
11714 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
11715 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
11716 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
11717 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
11718 | case X86::BI__builtin_ia32_vp2intersect_d_128: { |
11719 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
11720 | Intrinsic::ID ID; |
11721 | |
11722 | switch (BuiltinID) { |
11723 | default: llvm_unreachable("Unsupported intrinsic!"); |
11724 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
11725 | ID = Intrinsic::x86_avx512_vp2intersect_q_512; |
11726 | break; |
11727 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
11728 | ID = Intrinsic::x86_avx512_vp2intersect_q_256; |
11729 | break; |
11730 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
11731 | ID = Intrinsic::x86_avx512_vp2intersect_q_128; |
11732 | break; |
11733 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
11734 | ID = Intrinsic::x86_avx512_vp2intersect_d_512; |
11735 | break; |
11736 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
11737 | ID = Intrinsic::x86_avx512_vp2intersect_d_256; |
11738 | break; |
11739 | case X86::BI__builtin_ia32_vp2intersect_d_128: |
11740 | ID = Intrinsic::x86_avx512_vp2intersect_d_128; |
11741 | break; |
11742 | } |
11743 | |
11744 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]}); |
11745 | Value *Result = Builder.CreateExtractValue(Call, 0); |
11746 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
11747 | Value *Store = Builder.CreateDefaultAlignedStore(Result, Ops[2]); |
Value stored to 'Store' during its initialization is never read | |
11748 | |
11749 | Result = Builder.CreateExtractValue(Call, 1); |
11750 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
11751 | Store = Builder.CreateDefaultAlignedStore(Result, Ops[3]); |
11752 | return Store; |
11753 | } |
11754 | |
11755 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
11756 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
11757 | case X86::BI__builtin_ia32_vpmultishiftqb512: { |
11758 | Intrinsic::ID ID; |
11759 | switch (BuiltinID) { |
11760 | default: llvm_unreachable("Unsupported intrinsic!"); |
11761 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
11762 | ID = Intrinsic::x86_avx512_pmultishift_qb_128; |
11763 | break; |
11764 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
11765 | ID = Intrinsic::x86_avx512_pmultishift_qb_256; |
11766 | break; |
11767 | case X86::BI__builtin_ia32_vpmultishiftqb512: |
11768 | ID = Intrinsic::x86_avx512_pmultishift_qb_512; |
11769 | break; |
11770 | } |
11771 | |
11772 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
11773 | } |
11774 | |
11775 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
11776 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
11777 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { |
11778 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
11779 | Value *MaskIn = Ops[2]; |
11780 | Ops.erase(&Ops[2]); |
11781 | |
11782 | Intrinsic::ID ID; |
11783 | switch (BuiltinID) { |
11784 | default: llvm_unreachable("Unsupported intrinsic!"); |
11785 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
11786 | ID = Intrinsic::x86_avx512_vpshufbitqmb_128; |
11787 | break; |
11788 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
11789 | ID = Intrinsic::x86_avx512_vpshufbitqmb_256; |
11790 | break; |
11791 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: |
11792 | ID = Intrinsic::x86_avx512_vpshufbitqmb_512; |
11793 | break; |
11794 | } |
11795 | |
11796 | Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
11797 | return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn); |
11798 | } |
11799 | |
11800 | // packed comparison intrinsics |
11801 | case X86::BI__builtin_ia32_cmpeqps: |
11802 | case X86::BI__builtin_ia32_cmpeqpd: |
11803 | return getVectorFCmpIR(CmpInst::FCMP_OEQ); |
11804 | case X86::BI__builtin_ia32_cmpltps: |
11805 | case X86::BI__builtin_ia32_cmpltpd: |
11806 | return getVectorFCmpIR(CmpInst::FCMP_OLT); |
11807 | case X86::BI__builtin_ia32_cmpleps: |
11808 | case X86::BI__builtin_ia32_cmplepd: |
11809 | return getVectorFCmpIR(CmpInst::FCMP_OLE); |
11810 | case X86::BI__builtin_ia32_cmpunordps: |
11811 | case X86::BI__builtin_ia32_cmpunordpd: |
11812 | return getVectorFCmpIR(CmpInst::FCMP_UNO); |
11813 | case X86::BI__builtin_ia32_cmpneqps: |
11814 | case X86::BI__builtin_ia32_cmpneqpd: |
11815 | return getVectorFCmpIR(CmpInst::FCMP_UNE); |
11816 | case X86::BI__builtin_ia32_cmpnltps: |
11817 | case X86::BI__builtin_ia32_cmpnltpd: |
11818 | return getVectorFCmpIR(CmpInst::FCMP_UGE); |
11819 | case X86::BI__builtin_ia32_cmpnleps: |
11820 | case X86::BI__builtin_ia32_cmpnlepd: |
11821 | return getVectorFCmpIR(CmpInst::FCMP_UGT); |
11822 | case X86::BI__builtin_ia32_cmpordps: |
11823 | case X86::BI__builtin_ia32_cmpordpd: |
11824 | return getVectorFCmpIR(CmpInst::FCMP_ORD); |
11825 | case X86::BI__builtin_ia32_cmpps: |
11826 | case X86::BI__builtin_ia32_cmpps256: |
11827 | case X86::BI__builtin_ia32_cmppd: |
11828 | case X86::BI__builtin_ia32_cmppd256: |
11829 | case X86::BI__builtin_ia32_cmpps128_mask: |
11830 | case X86::BI__builtin_ia32_cmpps256_mask: |
11831 | case X86::BI__builtin_ia32_cmpps512_mask: |
11832 | case X86::BI__builtin_ia32_cmppd128_mask: |
11833 | case X86::BI__builtin_ia32_cmppd256_mask: |
11834 | case X86::BI__builtin_ia32_cmppd512_mask: { |
11835 | // Lowering vector comparisons to fcmp instructions, while |
11836 | // ignoring the requested signalling behaviour |
11837 | // and the requested rounding mode. |
11838 | // This is only possible as long as FENV_ACCESS is not implemented. |
11839 | // See also: https://reviews.llvm.org/D45616 |
11840 | |
11841 | // The third argument is the comparison condition, an integer in the |
11842 | // range [0, 31]. |
11843 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f; |
11844 | |
11845 | // Lowering to IR fcmp instruction. |
11846 | // Ignoring requested signaling behaviour, |
11847 | // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT. |
11848 | FCmpInst::Predicate Pred; |
11849 | switch (CC) { |
11850 | case 0x00: Pred = FCmpInst::FCMP_OEQ; break; |
11851 | case 0x01: Pred = FCmpInst::FCMP_OLT; break; |
11852 | case 0x02: Pred = FCmpInst::FCMP_OLE; break; |
11853 | case 0x03: Pred = FCmpInst::FCMP_UNO; break; |
11854 | case 0x04: Pred = FCmpInst::FCMP_UNE; break; |
11855 | case 0x05: Pred = FCmpInst::FCMP_UGE; break; |
11856 | case 0x06: Pred = FCmpInst::FCMP_UGT; break; |
11857 | case 0x07: Pred = FCmpInst::FCMP_ORD; break; |
11858 | case 0x08: Pred = FCmpInst::FCMP_UEQ; break; |
11859 | case 0x09: Pred = FCmpInst::FCMP_ULT; break; |
11860 | case 0x0a: Pred = FCmpInst::FCMP_ULE; break; |
11861 | case 0x0b: Pred = FCmpInst::FCMP_FALSE; break; |
11862 | case 0x0c: Pred = FCmpInst::FCMP_ONE; break; |
11863 | case 0x0d: Pred = FCmpInst::FCMP_OGE; break; |
11864 | case 0x0e: Pred = FCmpInst::FCMP_OGT; break; |
11865 | case 0x0f: Pred = FCmpInst::FCMP_TRUE; break; |
11866 | case 0x10: Pred = FCmpInst::FCMP_OEQ; break; |
11867 | case 0x11: Pred = FCmpInst::FCMP_OLT; break; |
11868 | case 0x12: Pred = FCmpInst::FCMP_OLE; break; |
11869 | case 0x13: Pred = FCmpInst::FCMP_UNO; break; |
11870 | case 0x14: Pred = FCmpInst::FCMP_UNE; break; |
11871 | case 0x15: Pred = FCmpInst::FCMP_UGE; break; |
11872 | case 0x16: Pred = FCmpInst::FCMP_UGT; break; |
11873 | case 0x17: Pred = FCmpInst::FCMP_ORD; break; |
11874 | case 0x18: Pred = FCmpInst::FCMP_UEQ; break; |
11875 | case 0x19: Pred = FCmpInst::FCMP_ULT; break; |
11876 | case 0x1a: Pred = FCmpInst::FCMP_ULE; break; |
11877 | case 0x1b: Pred = FCmpInst::FCMP_FALSE; break; |
11878 | case 0x1c: Pred = FCmpInst::FCMP_ONE; break; |
11879 | case 0x1d: Pred = FCmpInst::FCMP_OGE; break; |
11880 | case 0x1e: Pred = FCmpInst::FCMP_OGT; break; |
11881 | case 0x1f: Pred = FCmpInst::FCMP_TRUE; break; |
11882 | default: llvm_unreachable("Unhandled CC")::llvm::llvm_unreachable_internal("Unhandled CC", "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11882); |
11883 | } |
11884 | |
11885 | // Builtins without the _mask suffix return a vector of integers |
11886 | // of the same width as the input vectors |
11887 | switch (BuiltinID) { |
11888 | case X86::BI__builtin_ia32_cmpps512_mask: |
11889 | case X86::BI__builtin_ia32_cmppd512_mask: |
11890 | case X86::BI__builtin_ia32_cmpps128_mask: |
11891 | case X86::BI__builtin_ia32_cmpps256_mask: |
11892 | case X86::BI__builtin_ia32_cmppd128_mask: |
11893 | case X86::BI__builtin_ia32_cmppd256_mask: { |
11894 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); |
11895 | Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
11896 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]); |
11897 | } |
11898 | default: |
11899 | return getVectorFCmpIR(Pred); |
11900 | } |
11901 | } |
11902 | |
11903 | // SSE scalar comparison intrinsics |
11904 | case X86::BI__builtin_ia32_cmpeqss: |
11905 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); |
11906 | case X86::BI__builtin_ia32_cmpltss: |
11907 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); |
11908 | case X86::BI__builtin_ia32_cmpless: |
11909 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); |
11910 | case X86::BI__builtin_ia32_cmpunordss: |
11911 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); |
11912 | case X86::BI__builtin_ia32_cmpneqss: |
11913 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); |
11914 | case X86::BI__builtin_ia32_cmpnltss: |
11915 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); |
11916 | case X86::BI__builtin_ia32_cmpnless: |
11917 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); |
11918 | case X86::BI__builtin_ia32_cmpordss: |
11919 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); |
11920 | case X86::BI__builtin_ia32_cmpeqsd: |
11921 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); |
11922 | case X86::BI__builtin_ia32_cmpltsd: |
11923 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); |
11924 | case X86::BI__builtin_ia32_cmplesd: |
11925 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); |
11926 | case X86::BI__builtin_ia32_cmpunordsd: |
11927 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); |
11928 | case X86::BI__builtin_ia32_cmpneqsd: |
11929 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); |
11930 | case X86::BI__builtin_ia32_cmpnltsd: |
11931 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); |
11932 | case X86::BI__builtin_ia32_cmpnlesd: |
11933 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); |
11934 | case X86::BI__builtin_ia32_cmpordsd: |
11935 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); |
11936 | |
11937 | // AVX512 bf16 intrinsics |
11938 | case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { |
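// The 128-bit form passes the mask directly to a masked intrinsic; the wider forms below call an unmasked intrinsic and apply the mask with a select. |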
11939 | Ops[2] = getMaskVecValue(*this, Ops[2], |
11940 | Ops[0]->getType()->getVectorNumElements()); |
11941 | Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; |
11942 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
11943 | } |
11944 | |
11945 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
11946 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { |
11947 | Intrinsic::ID IID; |
11948 | switch (BuiltinID) { |
11949 | default: llvm_unreachable("Unsupported intrinsic!"); |
11950 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
11951 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256; |
11952 | break; |
11953 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: |
11954 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512; |
11955 | break; |
11956 | } |
11957 | Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]); |
11958 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
11959 | } |
11960 | |
11961 | case X86::BI__emul: |
11962 | case X86::BI__emulu: { |
11963 | llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64); |
11964 | bool isSigned = (BuiltinID == X86::BI__emul); |
11965 | Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned); |
11966 | Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned); |
11967 | return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned); |
11968 | } |
11969 | case X86::BI__mulh: |
11970 | case X86::BI__umulh: |
11971 | case X86::BI_mul128: |
11972 | case X86::BI_umul128: { |
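// __mulh/__umulh return only the high 64 bits of the 128-bit product; _mul128/_umul128 also store the high bits through the third argument and return the low 64 bits. |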
11973 | llvm::Type *ResType = ConvertType(E->getType()); |
11974 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
11975 | |
11976 | bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128); |
11977 | Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned); |
11978 | Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned); |
11979 | |
11980 | Value *MulResult, *HigherBits; |
11981 | if (IsSigned) { |
11982 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
11983 | HigherBits = Builder.CreateAShr(MulResult, 64); |
11984 | } else { |
11985 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
11986 | HigherBits = Builder.CreateLShr(MulResult, 64); |
11987 | } |
11988 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
11989 | |
11990 | if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh) |
11991 | return HigherBits; |
11992 | |
11993 | Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2)); |
11994 | Builder.CreateStore(HigherBits, HighBitsAddress); |
11995 | return Builder.CreateIntCast(MulResult, ResType, IsSigned); |
11996 | } |
11997 | |
11998 | case X86::BI__faststorefence: { |
11999 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
12000 | llvm::SyncScope::System); |
12001 | } |
12002 | case X86::BI__shiftleft128: |
12003 | case X86::BI__shiftright128: { |
12004 | // FIXME: Once fshl/fshr no longer add an unneeded 'and' and 'cmov', do this: |
12005 | // llvm::Function *F = CGM.getIntrinsic( |
12006 | // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr, |
12007 | // Int64Ty); |
12008 | // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
12009 | // return Builder.CreateCall(F, Ops); |
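// For now, emulate with i128 arithmetic: pack {Ops[1], Ops[0]} into a 128-bit value, shift by Ops[2] & 63, and keep the high (left shift) or low (right shift) 64 bits. |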
12010 | llvm::Type *Int128Ty = Builder.getInt128Ty(); |
12011 | Value *HighPart128 = |
12012 | Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64); |
12013 | Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty); |
12014 | Value *Val = Builder.CreateOr(HighPart128, LowPart128); |
12015 | Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty), |
12016 | llvm::ConstantInt::get(Int128Ty, 0x3f)); |
12017 | Value *Res; |
12018 | if (BuiltinID == X86::BI__shiftleft128) |
12019 | Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64); |
12020 | else |
12021 | Res = Builder.CreateLShr(Val, Amt); |
12022 | return Builder.CreateTrunc(Res, Int64Ty); |
12023 | } |
12024 | case X86::BI_ReadWriteBarrier: |
12025 | case X86::BI_ReadBarrier: |
12026 | case X86::BI_WriteBarrier: { |
12027 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
12028 | llvm::SyncScope::SingleThread); |
12029 | } |
12030 | case X86::BI_BitScanForward: |
12031 | case X86::BI_BitScanForward64: |
12032 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E); |
12033 | case X86::BI_BitScanReverse: |
12034 | case X86::BI_BitScanReverse64: |
12035 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E); |
12036 | |
12037 | case X86::BI_InterlockedAnd64: |
12038 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E); |
12039 | case X86::BI_InterlockedExchange64: |
12040 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E); |
12041 | case X86::BI_InterlockedExchangeAdd64: |
12042 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E); |
12043 | case X86::BI_InterlockedExchangeSub64: |
12044 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E); |
12045 | case X86::BI_InterlockedOr64: |
12046 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E); |
12047 | case X86::BI_InterlockedXor64: |
12048 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E); |
12049 | case X86::BI_InterlockedDecrement64: |
12050 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E); |
12051 | case X86::BI_InterlockedIncrement64: |
12052 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E); |
12053 | case X86::BI_InterlockedCompareExchange128: { |
12054 | // InterlockedCompareExchange128 doesn't directly operate on 128-bit ints; |
12055 | // instead it takes pointers to 64-bit ints for Destination and |
12056 | // ComparandResult, and the exchange value is passed as two 64-bit ints (high & low). |
12057 | // The previous value is written to ComparandResult, and success is |
12058 | // returned. |
12059 | |
12060 | llvm::Type *Int128Ty = Builder.getInt128Ty(); |
12061 | llvm::Type *Int128PtrTy = Int128Ty->getPointerTo(); |
12062 | |
12063 | Value *Destination = |
12064 | Builder.CreateBitCast(Ops[0], Int128PtrTy); |
12065 | Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty); |
12066 | Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty); |
12067 | Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy), |
12068 | getContext().toCharUnitsFromBits(128)); |
12069 | |
12070 | Value *Exchange = Builder.CreateOr( |
12071 | Builder.CreateShl(ExchangeHigh128, 64, "", false, false), |
12072 | ExchangeLow128); |
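// Exchange now holds (ExchangeHigh << 64) | ExchangeLow as a single 128-bit value. |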
12073 | |
12074 | Value *Comparand = Builder.CreateLoad(ComparandResult); |
12075 | |
12076 | AtomicCmpXchgInst *CXI = |
12077 | Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
12078 | AtomicOrdering::SequentiallyConsistent, |
12079 | AtomicOrdering::SequentiallyConsistent); |
12080 | CXI->setVolatile(true); |
12081 | |
12082 | // Write the result back to the inout pointer. |
12083 | Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult); |
12084 | |
12085 | // Get the success boolean and zero extend it to i8. |
12086 | Value *Success = Builder.CreateExtractValue(CXI, 1); |
12087 | return Builder.CreateZExt(Success, ConvertType(E->getType())); |
12088 | } |
12089 | |
12090 | case X86::BI_AddressOfReturnAddress: { |
12091 | Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress); |
12092 | return Builder.CreateCall(F); |
12093 | } |
12094 | case X86::BI__stosb: { |
12095 | // We treat __stosb as a volatile memset - it may not generate a "rep stosb" |
12096 | // instruction, but it will create a memset that won't be optimized away. |
12097 | return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true); |
12098 | } |
12099 | case X86::BI__ud2: |
12100 | // llvm.trap makes a ud2a instruction on x86. |
12101 | return EmitTrapCall(Intrinsic::trap); |
12102 | case X86::BI__int2c: { |
12103 | // This syscall signals a driver assertion failure in x86 NT kernels. |
12104 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); |
12105 | llvm::InlineAsm *IA = |
12106 | llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*SideEffects=*/true); |
12107 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
12108 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
12109 | llvm::Attribute::NoReturn); |
12110 | llvm::CallInst *CI = Builder.CreateCall(IA); |
12111 | CI->setAttributes(NoReturnAttr); |
12112 | return CI; |
12113 | } |
12114 | case X86::BI__readfsbyte: |
12115 | case X86::BI__readfsword: |
12116 | case X86::BI__readfsdword: |
12117 | case X86::BI__readfsqword: { |
12118 | llvm::Type *IntTy = ConvertType(E->getType()); |
12119 | Value *Ptr = |
12120 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257)); |
12121 | LoadInst *Load = Builder.CreateAlignedLoad( |
12122 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
12123 | Load->setVolatile(true); |
12124 | return Load; |
12125 | } |
12126 | case X86::BI__readgsbyte: |
12127 | case X86::BI__readgsword: |
12128 | case X86::BI__readgsdword: |
12129 | case X86::BI__readgsqword: { |
12130 | llvm::Type *IntTy = ConvertType(E->getType()); |
12131 | Value *Ptr = |
12132 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256)); |
12133 | LoadInst *Load = Builder.CreateAlignedLoad( |
12134 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
12135 | Load->setVolatile(true); |
12136 | return Load; |
12137 | } |
12138 | case X86::BI__builtin_ia32_paddsb512: |
12139 | case X86::BI__builtin_ia32_paddsw512: |
12140 | case X86::BI__builtin_ia32_paddsb256: |
12141 | case X86::BI__builtin_ia32_paddsw256: |
12142 | case X86::BI__builtin_ia32_paddsb128: |
12143 | case X86::BI__builtin_ia32_paddsw128: |
12144 | return EmitX86AddSubSatExpr(*this, Ops, true, true); |
12145 | case X86::BI__builtin_ia32_paddusb512: |
12146 | case X86::BI__builtin_ia32_paddusw512: |
12147 | case X86::BI__builtin_ia32_paddusb256: |
12148 | case X86::BI__builtin_ia32_paddusw256: |
12149 | case X86::BI__builtin_ia32_paddusb128: |
12150 | case X86::BI__builtin_ia32_paddusw128: |
12151 | return EmitX86AddSubSatExpr(*this, Ops, false, true); |
12152 | case X86::BI__builtin_ia32_psubsb512: |
12153 | case X86::BI__builtin_ia32_psubsw512: |
12154 | case X86::BI__builtin_ia32_psubsb256: |
12155 | case X86::BI__builtin_ia32_psubsw256: |
12156 | case X86::BI__builtin_ia32_psubsb128: |
12157 | case X86::BI__builtin_ia32_psubsw128: |
12158 | return EmitX86AddSubSatExpr(*this, Ops, true, false); |
12159 | case X86::BI__builtin_ia32_psubusb512: |
12160 | case X86::BI__builtin_ia32_psubusw512: |
12161 | case X86::BI__builtin_ia32_psubusb256: |
12162 | case X86::BI__builtin_ia32_psubusw256: |
12163 | case X86::BI__builtin_ia32_psubusb128: |
12164 | case X86::BI__builtin_ia32_psubusw128: |
12165 | return EmitX86AddSubSatExpr(*this, Ops, false, false); |
12166 | } |
12167 | } |
12168 | |
12169 | Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, |
12170 | const CallExpr *E) { |
12171 | SmallVector<Value*, 4> Ops; |
12172 | |
12173 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
12174 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
12175 | |
12176 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
12177 | |
12178 | switch (BuiltinID) { |
12179 | default: return nullptr; |
12180 | |
12181 | // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we |
12182 | // call __builtin_readcyclecounter. |
12183 | case PPC::BI__builtin_ppc_get_timebase: |
12184 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter)); |
12185 | |
12186 | // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr |
12187 | case PPC::BI__builtin_altivec_lvx: |
12188 | case PPC::BI__builtin_altivec_lvxl: |
12189 | case PPC::BI__builtin_altivec_lvebx: |
12190 | case PPC::BI__builtin_altivec_lvehx: |
12191 | case PPC::BI__builtin_altivec_lvewx: |
12192 | case PPC::BI__builtin_altivec_lvsl: |
12193 | case PPC::BI__builtin_altivec_lvsr: |
12194 | case PPC::BI__builtin_vsx_lxvd2x: |
12195 | case PPC::BI__builtin_vsx_lxvw4x: |
12196 | case PPC::BI__builtin_vsx_lxvd2x_be: |
12197 | case PPC::BI__builtin_vsx_lxvw4x_be: |
12198 | case PPC::BI__builtin_vsx_lxvl: |
12199 | case PPC::BI__builtin_vsx_lxvll: |
12200 | { |
12201 | if (BuiltinID == PPC::BI__builtin_vsx_lxvl || |
12202 | BuiltinID == PPC::BI__builtin_vsx_lxvll) { |
12203 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); |
12204 | } else { |
12205 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
12206 | Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]); |
12207 | Ops.pop_back(); |
12208 | } |
12209 | |
12210 | switch (BuiltinID) { |
12211 | default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!")::llvm::llvm_unreachable_internal("Unsupported ld/lvsl/lvsr intrinsic!" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 12211); |
12212 | case PPC::BI__builtin_altivec_lvx: |
12213 | ID = Intrinsic::ppc_altivec_lvx; |
12214 | break; |
12215 | case PPC::BI__builtin_altivec_lvxl: |
12216 | ID = Intrinsic::ppc_altivec_lvxl; |
12217 | break; |
12218 | case PPC::BI__builtin_altivec_lvebx: |
12219 | ID = Intrinsic::ppc_altivec_lvebx; |
12220 | break; |
12221 | case PPC::BI__builtin_altivec_lvehx: |
12222 | ID = Intrinsic::ppc_altivec_lvehx; |
12223 | break; |
12224 | case PPC::BI__builtin_altivec_lvewx: |
12225 | ID = Intrinsic::ppc_altivec_lvewx; |
12226 | break; |
12227 | case PPC::BI__builtin_altivec_lvsl: |
12228 | ID = Intrinsic::ppc_altivec_lvsl; |
12229 | break; |
12230 | case PPC::BI__builtin_altivec_lvsr: |
12231 | ID = Intrinsic::ppc_altivec_lvsr; |
12232 | break; |
12233 | case PPC::BI__builtin_vsx_lxvd2x: |
12234 | ID = Intrinsic::ppc_vsx_lxvd2x; |
12235 | break; |
12236 | case PPC::BI__builtin_vsx_lxvw4x: |
12237 | ID = Intrinsic::ppc_vsx_lxvw4x; |
12238 | break; |
12239 | case PPC::BI__builtin_vsx_lxvd2x_be: |
12240 | ID = Intrinsic::ppc_vsx_lxvd2x_be; |
12241 | break; |
12242 | case PPC::BI__builtin_vsx_lxvw4x_be: |
12243 | ID = Intrinsic::ppc_vsx_lxvw4x_be; |
12244 | break; |
12245 | case PPC::BI__builtin_vsx_lxvl: |
12246 | ID = Intrinsic::ppc_vsx_lxvl; |
12247 | break; |
12248 | case PPC::BI__builtin_vsx_lxvll: |
12249 | ID = Intrinsic::ppc_vsx_lxvll; |
12250 | break; |
12251 | } |
12252 | llvm::Function *F = CGM.getIntrinsic(ID); |
12253 | return Builder.CreateCall(F, Ops, ""); |
12254 | } |
12255 | |
12256 | // vec_st, vec_xst_be |
12257 | case PPC::BI__builtin_altivec_stvx: |
12258 | case PPC::BI__builtin_altivec_stvxl: |
12259 | case PPC::BI__builtin_altivec_stvebx: |
12260 | case PPC::BI__builtin_altivec_stvehx: |
12261 | case PPC::BI__builtin_altivec_stvewx: |
12262 | case PPC::BI__builtin_vsx_stxvd2x: |
12263 | case PPC::BI__builtin_vsx_stxvw4x: |
12264 | case PPC::BI__builtin_vsx_stxvd2x_be: |
12265 | case PPC::BI__builtin_vsx_stxvw4x_be: |
12266 | case PPC::BI__builtin_vsx_stxvl: |
12267 | case PPC::BI__builtin_vsx_stxvll: |
12268 | { |
12269 | if (BuiltinID == PPC::BI__builtin_vsx_stxvl || |
12270 | BuiltinID == PPC::BI__builtin_vsx_stxvll) { |
12271 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
12272 | } else { |
12273 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
12274 | Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]); |
12275 | Ops.pop_back(); |
12276 | } |
12277 | |
12278 | switch (BuiltinID) { |
12279 | default: llvm_unreachable("Unsupported st intrinsic!")::llvm::llvm_unreachable_internal("Unsupported st intrinsic!" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 12279); |
12280 | case PPC::BI__builtin_altivec_stvx: |
12281 | ID = Intrinsic::ppc_altivec_stvx; |
12282 | break; |
12283 | case PPC::BI__builtin_altivec_stvxl: |
12284 | ID = Intrinsic::ppc_altivec_stvxl; |
12285 | break; |
12286 | case PPC::BI__builtin_altivec_stvebx: |
12287 | ID = Intrinsic::ppc_altivec_stvebx; |
12288 | break; |
12289 | case PPC::BI__builtin_altivec_stvehx: |
12290 | ID = Intrinsic::ppc_altivec_stvehx; |
12291 | break; |
12292 | case PPC::BI__builtin_altivec_stvewx: |
12293 | ID = Intrinsic::ppc_altivec_stvewx; |
12294 | break; |
12295 | case PPC::BI__builtin_vsx_stxvd2x: |
12296 | ID = Intrinsic::ppc_vsx_stxvd2x; |
12297 | break; |
12298 | case PPC::BI__builtin_vsx_stxvw4x: |
12299 | ID = Intrinsic::ppc_vsx_stxvw4x; |
12300 | break; |
12301 | case PPC::BI__builtin_vsx_stxvd2x_be: |
12302 | ID = Intrinsic::ppc_vsx_stxvd2x_be; |
12303 | break; |
12304 | case PPC::BI__builtin_vsx_stxvw4x_be: |
12305 | ID = Intrinsic::ppc_vsx_stxvw4x_be; |
12306 | break; |
12307 | case PPC::BI__builtin_vsx_stxvl: |
12308 | ID = Intrinsic::ppc_vsx_stxvl; |
12309 | break; |
12310 | case PPC::BI__builtin_vsx_stxvll: |
12311 | ID = Intrinsic::ppc_vsx_stxvll; |
12312 | break; |
12313 | } |
12314 | llvm::Function *F = CGM.getIntrinsic(ID); |
12315 | return Builder.CreateCall(F, Ops, ""); |
12316 | } |
12317 | // Square root |
12318 | case PPC::BI__builtin_vsx_xvsqrtsp: |
12319 | case PPC::BI__builtin_vsx_xvsqrtdp: { |
12320 | llvm::Type *ResultType = ConvertType(E->getType()); |
12321 | Value *X = EmitScalarExpr(E->getArg(0)); |
12322 | ID = Intrinsic::sqrt; |
12323 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
12324 | return Builder.CreateCall(F, X); |
12325 | } |
12326 | // Count leading zeros |
12327 | case PPC::BI__builtin_altivec_vclzb: |
12328 | case PPC::BI__builtin_altivec_vclzh: |
12329 | case PPC::BI__builtin_altivec_vclzw: |
12330 | case PPC::BI__builtin_altivec_vclzd: { |
12331 | llvm::Type *ResultType = ConvertType(E->getType()); |
12332 | Value *X = EmitScalarExpr(E->getArg(0)); |
12333 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
12334 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
12335 | return Builder.CreateCall(F, {X, Undef}); |
12336 | } |
12337 | case PPC::BI__builtin_altivec_vctzb: |
12338 | case PPC::BI__builtin_altivec_vctzh: |
12339 | case PPC::BI__builtin_altivec_vctzw: |
12340 | case PPC::BI__builtin_altivec_vctzd: { |
12341 | llvm::Type *ResultType = ConvertType(E->getType()); |
12342 | Value *X = EmitScalarExpr(E->getArg(0)); |
12343 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
12344 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
12345 | return Builder.CreateCall(F, {X, Undef}); |
12346 | } |
12347 | case PPC::BI__builtin_altivec_vpopcntb: |
12348 | case PPC::BI__builtin_altivec_vpopcnth: |
12349 | case PPC::BI__builtin_altivec_vpopcntw: |
12350 | case PPC::BI__builtin_altivec_vpopcntd: { |
12351 | llvm::Type *ResultType = ConvertType(E->getType()); |
12352 | Value *X = EmitScalarExpr(E->getArg(0)); |
12353 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
12354 | return Builder.CreateCall(F, X); |
12355 | } |
12356 | // Copy sign |
12357 | case PPC::BI__builtin_vsx_xvcpsgnsp: |
12358 | case PPC::BI__builtin_vsx_xvcpsgndp: { |
12359 | llvm::Type *ResultType = ConvertType(E->getType()); |
12360 | Value *X = EmitScalarExpr(E->getArg(0)); |
12361 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12362 | ID = Intrinsic::copysign; |
12363 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
12364 | return Builder.CreateCall(F, {X, Y}); |
12365 | } |
12366 | // Rounding/truncation |
12367 | case PPC::BI__builtin_vsx_xvrspip: |
12368 | case PPC::BI__builtin_vsx_xvrdpip: |
12369 | case PPC::BI__builtin_vsx_xvrdpim: |
12370 | case PPC::BI__builtin_vsx_xvrspim: |
12371 | case PPC::BI__builtin_vsx_xvrdpi: |
12372 | case PPC::BI__builtin_vsx_xvrspi: |
12373 | case PPC::BI__builtin_vsx_xvrdpic: |
12374 | case PPC::BI__builtin_vsx_xvrspic: |
12375 | case PPC::BI__builtin_vsx_xvrdpiz: |
12376 | case PPC::BI__builtin_vsx_xvrspiz: { |
12377 | llvm::Type *ResultType = ConvertType(E->getType()); |
12378 | Value *X = EmitScalarExpr(E->getArg(0)); |
12379 | if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim || |
12380 | BuiltinID == PPC::BI__builtin_vsx_xvrspim) |
12381 | ID = Intrinsic::floor; |
12382 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi || |
12383 | BuiltinID == PPC::BI__builtin_vsx_xvrspi) |
12384 | ID = Intrinsic::round; |
12385 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic || |
12386 | BuiltinID == PPC::BI__builtin_vsx_xvrspic) |
12387 | ID = Intrinsic::nearbyint; |
12388 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip || |
12389 | BuiltinID == PPC::BI__builtin_vsx_xvrspip) |
12390 | ID = Intrinsic::ceil; |
12391 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz || |
12392 | BuiltinID == PPC::BI__builtin_vsx_xvrspiz) |
12393 | ID = Intrinsic::trunc; |
12394 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
12395 | return Builder.CreateCall(F, X); |
12396 | } |
12397 | |
12398 | // Absolute value |
12399 | case PPC::BI__builtin_vsx_xvabsdp: |
12400 | case PPC::BI__builtin_vsx_xvabssp: { |
12401 | llvm::Type *ResultType = ConvertType(E->getType()); |
12402 | Value *X = EmitScalarExpr(E->getArg(0)); |
12403 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
12404 | return Builder.CreateCall(F, X); |
12405 | } |
12406 | |
12407 | // FMA variations |
12408 | case PPC::BI__builtin_vsx_xvmaddadp: |
12409 | case PPC::BI__builtin_vsx_xvmaddasp: |
12410 | case PPC::BI__builtin_vsx_xvnmaddadp: |
12411 | case PPC::BI__builtin_vsx_xvnmaddasp: |
12412 | case PPC::BI__builtin_vsx_xvmsubadp: |
12413 | case PPC::BI__builtin_vsx_xvmsubasp: |
12414 | case PPC::BI__builtin_vsx_xvnmsubadp: |
12415 | case PPC::BI__builtin_vsx_xvnmsubasp: { |
12416 | llvm::Type *ResultType = ConvertType(E->getType()); |
12417 | Value *X = EmitScalarExpr(E->getArg(0)); |
12418 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12419 | Value *Z = EmitScalarExpr(E->getArg(2)); |
12420 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); |
12421 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
12422 | switch (BuiltinID) { |
12423 | case PPC::BI__builtin_vsx_xvmaddadp: |
12424 | case PPC::BI__builtin_vsx_xvmaddasp: |
12425 | return Builder.CreateCall(F, {X, Y, Z}); |
12426 | case PPC::BI__builtin_vsx_xvnmaddadp: |
12427 | case PPC::BI__builtin_vsx_xvnmaddasp: |
12428 | return Builder.CreateFSub(Zero, |
12429 | Builder.CreateCall(F, {X, Y, Z}), "sub"); |
12430 | case PPC::BI__builtin_vsx_xvmsubadp: |
12431 | case PPC::BI__builtin_vsx_xvmsubasp: |
12432 | return Builder.CreateCall(F, |
12433 | {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); |
12434 | case PPC::BI__builtin_vsx_xvnmsubadp: |
12435 | case PPC::BI__builtin_vsx_xvnmsubasp: |
12436 | Value *FsubRes = |
12437 | Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); |
12438 | return Builder.CreateFSub(Zero, FsubRes, "sub"); |
12439 | } |
12440 | llvm_unreachable("Unknown FMA operation")::llvm::llvm_unreachable_internal("Unknown FMA operation", "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 12440); |
12441 | return nullptr; // Suppress no-return warning |
12442 | } |
12443 | |
12444 | case PPC::BI__builtin_vsx_insertword: { |
12445 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw); |
12446 | |
12447 | // Third argument is a compile time constant int. It must be clamped to |
12448 | // the range [0, 12]. |
12449 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
12450 | assert(ArgCI && |
12451 | "Third arg to xxinsertw intrinsic must be constant integer"); |
12452 | const int64_t MaxIndex = 12; |
12453 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
12454 | |
12455 | // The builtin semantics don't exactly match the xxinsertw instruction's |
12456 | // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the |
12457 | // word from the first argument, and inserts it in the second argument. The |
12458 | // instruction extracts the word from its second input register and inserts |
12459 | // it into its first input register, so swap the first and second arguments. |
12460 | std::swap(Ops[0], Ops[1]); |
12461 | |
12462 | // Need to cast the second argument from a vector of unsigned int to a |
12463 | // vector of long long. |
12464 | Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2)); |
12465 | |
12466 | if (getTarget().isLittleEndian()) { |
12467 | // Create a shuffle mask of (1, 0) |
12468 | Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1), |
12469 | ConstantInt::get(Int32Ty, 0) |
12470 | }; |
12471 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); |
12472 | |
12473 | // Reverse the double words in the vector we will extract from. |
12474 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); |
12475 | Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask); |
12476 | |
12477 | // Reverse the index. |
12478 | Index = MaxIndex - Index; |
12479 | } |
12480 | |
12481 | // Intrinsic expects the first arg to be a vector of int. |
12482 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); |
12483 | Ops[2] = ConstantInt::getSigned(Int32Ty, Index); |
12484 | return Builder.CreateCall(F, Ops); |
12485 | } |
12486 | |
12487 | case PPC::BI__builtin_vsx_extractuword: { |
12488 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw); |
12489 | |
12490 | // Intrinsic expects the first argument to be a vector of doublewords. |
12491 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); |
12492 | |
12493 | // The second argument is a compile time constant int that needs to |
12494 | // be clamped to the range [0, 12]. |
12495 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]); |
12496 | assert(ArgCI && |
12497 | "Second Arg to xxextractuw intrinsic must be a constant integer!"); |
12498 | const int64_t MaxIndex = 12; |
12499 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
12500 | |
12501 | if (getTarget().isLittleEndian()) { |
12502 | // Reverse the index. |
12503 | Index = MaxIndex - Index; |
12504 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); |
12505 | |
12506 | // Emit the call, then reverse the double words of the results vector. |
12507 | Value *Call = Builder.CreateCall(F, Ops); |
12508 | |
12509 | // Create a shuffle mask of (1, 0) |
12510 | Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1), |
12511 | ConstantInt::get(Int32Ty, 0) |
12512 | }; |
12513 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); |
12514 | |
12515 | Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask); |
12516 | return ShuffleCall; |
12517 | } else { |
12518 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); |
12519 | return Builder.CreateCall(F, Ops); |
12520 | } |
12521 | } |
12522 | |
12523 | case PPC::BI__builtin_vsx_xxpermdi: { |
12524 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
12525 | assert(ArgCI && "Third arg must be constant integer!")((ArgCI && "Third arg must be constant integer!") ? static_cast <void> (0) : __assert_fail ("ArgCI && \"Third arg must be constant integer!\"" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 12525, __PRETTY_FUNCTION__)); |
12526 | |
12527 | unsigned Index = ArgCI->getZExtValue(); |
12528 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); |
12529 | Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2)); |
12530 | |
12531 | // Account for endianness by treating this as just a shuffle. So we use the |
12532 | // same indices for both LE and BE in order to produce expected results in |
12533 | // both cases. |
12534 | unsigned ElemIdx0 = (Index & 2) >> 1; |
12535 | unsigned ElemIdx1 = 2 + (Index & 1); |
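// For example, Index = 3 yields ElemIdx0 = 1 and ElemIdx1 = 3, i.e. the second doubleword of Ops[0] followed by the second doubleword of Ops[1]. |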
12536 | |
12537 | Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0), |
12538 | ConstantInt::get(Int32Ty, ElemIdx1)}; |
12539 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); |
12540 | |
12541 | Value *ShuffleCall = |
12542 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask); |
12543 | QualType BIRetType = E->getType(); |
12544 | auto RetTy = ConvertType(BIRetType); |
12545 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
12546 | } |
12547 | |
12548 | case PPC::BI__builtin_vsx_xxsldwi: { |
12549 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
12550 | assert(ArgCI && "Third argument must be a compile time constant")((ArgCI && "Third argument must be a compile time constant" ) ? static_cast<void> (0) : __assert_fail ("ArgCI && \"Third argument must be a compile time constant\"" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 12550, __PRETTY_FUNCTION__)); |
12551 | unsigned Index = ArgCI->getZExtValue() & 0x3; |
12552 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); |
12553 | Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4)); |
12554 | |
12555 | // Create a shuffle mask |
12556 | unsigned ElemIdx0; |
12557 | unsigned ElemIdx1; |
12558 | unsigned ElemIdx2; |
12559 | unsigned ElemIdx3; |
12560 | if (getTarget().isLittleEndian()) { |
12561 | // Little endian element N comes from element 8+N-Index of the |
12562 | // concatenated wide vector (of course, using modulo arithmetic on |
12563 | // the total number of elements). |
12564 | ElemIdx0 = (8 - Index) % 8; |
12565 | ElemIdx1 = (9 - Index) % 8; |
12566 | ElemIdx2 = (10 - Index) % 8; |
12567 | ElemIdx3 = (11 - Index) % 8; |
12568 | } else { |
12569 | // Big endian ElemIdx<N> = Index + N |
12570 | ElemIdx0 = Index; |
12571 | ElemIdx1 = Index + 1; |
12572 | ElemIdx2 = Index + 2; |
12573 | ElemIdx3 = Index + 3; |
12574 | } |
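      | // For example, Index == 1 yields the little-endian mask {7, 0, 1, 2} and |
      | // the big-endian mask {1, 2, 3, 4}, indexing into the eight words of the |
      | // concatenated Ops[0]:Ops[1] vector. |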
12575 | |
12576 | Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0), |
12577 | ConstantInt::get(Int32Ty, ElemIdx1), |
12578 | ConstantInt::get(Int32Ty, ElemIdx2), |
12579 | ConstantInt::get(Int32Ty, ElemIdx3)}; |
12580 | |
12581 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); |
12582 | Value *ShuffleCall = |
12583 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask); |
12584 | QualType BIRetType = E->getType(); |
12585 | auto RetTy = ConvertType(BIRetType); |
12586 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
12587 | } |
12588 | |
12589 | case PPC::BI__builtin_pack_vector_int128: { |
12590 | bool isLittleEndian = getTarget().isLittleEndian(); |
12591 | Value *UndefValue = |
12592 | llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2)); |
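      | // Insert so that Ops[0] always becomes the most-significant doubleword of |
      | // the resulting i128 (element 1 on little-endian, element 0 on big-endian) |
      | // and Ops[1] the least-significant one. |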
12593 | Value *Res = Builder.CreateInsertElement( |
12594 | UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0)); |
12595 | Res = Builder.CreateInsertElement(Res, Ops[1], |
12596 | (uint64_t)(isLittleEndian ? 0 : 1)); |
12597 | return Builder.CreateBitCast(Res, ConvertType(E->getType())); |
12598 | } |
12599 | |
12600 | case PPC::BI__builtin_unpack_vector_int128: { |
12601 | ConstantInt *Index = cast<ConstantInt>(Ops[1]); |
12602 | Value *Unpacked = Builder.CreateBitCast( |
12603 | Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2)); |
12604 | |
12605 | if (getTarget().isLittleEndian()) |
12606 | Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue()); |
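      | // With the little-endian adjustment above, index 0 always extracts the |
      | // most-significant doubleword and index 1 the least-significant one, |
      | // matching the element order produced by __builtin_pack_vector_int128. |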
12607 | |
12608 | return Builder.CreateExtractElement(Unpacked, Index); |
12609 | } |
12610 | } |
12611 | } |
12612 | |
12613 | Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, |
12614 | const CallExpr *E) { |
12615 | switch (BuiltinID) { |
12616 | case AMDGPU::BI__builtin_amdgcn_div_scale: |
12617 | case AMDGPU::BI__builtin_amdgcn_div_scalef: { |
12618 | // Translate from the intrinsic's struct return to the builtin's out |
12619 | // argument. |
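      | // Roughly: llvm.amdgcn.div.scale returns a {result, flag} pair; the i1 flag |
      | // is zero-extended and stored through the pointer given as the fourth |
      | // builtin argument, and the scalar result is returned directly. |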
12620 | |
12621 | Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
12622 | |
12623 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
12624 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
12625 | llvm::Value *Z = EmitScalarExpr(E->getArg(2)); |
12626 | |
12627 | llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, |
12628 | X->getType()); |
12629 | |
12630 | llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); |
12631 | |
12632 | llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0); |
12633 | llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1); |
12634 | |
12635 | llvm::Type *RealFlagType |
12636 | = FlagOutPtr.getPointer()->getType()->getPointerElementType(); |
12637 | |
12638 | llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType); |
12639 | Builder.CreateStore(FlagExt, FlagOutPtr); |
12640 | return Result; |
12641 | } |
12642 | case AMDGPU::BI__builtin_amdgcn_div_fmas: |
12643 | case AMDGPU::BI__builtin_amdgcn_div_fmasf: { |
12644 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
12645 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
12646 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
12647 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
12648 | |
12649 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, |
12650 | Src0->getType()); |
12651 | llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); |
12652 | return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); |
12653 | } |
12654 | |
12655 | case AMDGPU::BI__builtin_amdgcn_ds_swizzle: |
12656 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle); |
12657 | case AMDGPU::BI__builtin_amdgcn_mov_dpp: |
12658 | case AMDGPU::BI__builtin_amdgcn_update_dpp: { |
12659 | llvm::SmallVector<llvm::Value *, 6> Args; |
12660 | for (unsigned I = 0; I != E->getNumArgs(); ++I) |
12661 | Args.push_back(EmitScalarExpr(E->getArg(I))); |
12662 | assert(Args.size() == 5 || Args.size() == 6); |
12663 | if (Args.size() == 5) |
12664 | Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType())); |
12665 | Function *F = |
12666 | CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType()); |
12667 | return Builder.CreateCall(F, Args); |
12668 | } |
12669 | case AMDGPU::BI__builtin_amdgcn_div_fixup: |
12670 | case AMDGPU::BI__builtin_amdgcn_div_fixupf: |
12671 | case AMDGPU::BI__builtin_amdgcn_div_fixuph: |
12672 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup); |
12673 | case AMDGPU::BI__builtin_amdgcn_trig_preop: |
12674 | case AMDGPU::BI__builtin_amdgcn_trig_preopf: |
12675 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop); |
12676 | case AMDGPU::BI__builtin_amdgcn_rcp: |
12677 | case AMDGPU::BI__builtin_amdgcn_rcpf: |
12678 | case AMDGPU::BI__builtin_amdgcn_rcph: |
12679 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); |
12680 | case AMDGPU::BI__builtin_amdgcn_rsq: |
12681 | case AMDGPU::BI__builtin_amdgcn_rsqf: |
12682 | case AMDGPU::BI__builtin_amdgcn_rsqh: |
12683 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); |
12684 | case AMDGPU::BI__builtin_amdgcn_rsq_clamp: |
12685 | case AMDGPU::BI__builtin_amdgcn_rsq_clampf: |
12686 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp); |
12687 | case AMDGPU::BI__builtin_amdgcn_sinf: |
12688 | case AMDGPU::BI__builtin_amdgcn_sinh: |
12689 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin); |
12690 | case AMDGPU::BI__builtin_amdgcn_cosf: |
12691 | case AMDGPU::BI__builtin_amdgcn_cosh: |
12692 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos); |
12693 | case AMDGPU::BI__builtin_amdgcn_log_clampf: |
12694 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp); |
12695 | case AMDGPU::BI__builtin_amdgcn_ldexp: |
12696 | case AMDGPU::BI__builtin_amdgcn_ldexpf: |
12697 | case AMDGPU::BI__builtin_amdgcn_ldexph: |
12698 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); |
12699 | case AMDGPU::BI__builtin_amdgcn_frexp_mant: |
12700 | case AMDGPU::BI__builtin_amdgcn_frexp_mantf: |
12701 | case AMDGPU::BI__builtin_amdgcn_frexp_manth: |
12702 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant); |
12703 | case AMDGPU::BI__builtin_amdgcn_frexp_exp: |
12704 | case AMDGPU::BI__builtin_amdgcn_frexp_expf: { |
12705 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
12706 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
12707 | { Builder.getInt32Ty(), Src0->getType() }); |
12708 | return Builder.CreateCall(F, Src0); |
12709 | } |
12710 | case AMDGPU::BI__builtin_amdgcn_frexp_exph: { |
12711 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
12712 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
12713 | { Builder.getInt16Ty(), Src0->getType() }); |
12714 | return Builder.CreateCall(F, Src0); |
12715 | } |
12716 | case AMDGPU::BI__builtin_amdgcn_fract: |
12717 | case AMDGPU::BI__builtin_amdgcn_fractf: |
12718 | case AMDGPU::BI__builtin_amdgcn_fracth: |
12719 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract); |
12720 | case AMDGPU::BI__builtin_amdgcn_lerp: |
12721 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp); |
12722 | case AMDGPU::BI__builtin_amdgcn_uicmp: |
12723 | case AMDGPU::BI__builtin_amdgcn_uicmpl: |
12724 | case AMDGPU::BI__builtin_amdgcn_sicmp: |
12725 | case AMDGPU::BI__builtin_amdgcn_sicmpl: |
12726 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp); |
12727 | case AMDGPU::BI__builtin_amdgcn_fcmp: |
12728 | case AMDGPU::BI__builtin_amdgcn_fcmpf: |
12729 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp); |
12730 | case AMDGPU::BI__builtin_amdgcn_class: |
12731 | case AMDGPU::BI__builtin_amdgcn_classf: |
12732 | case AMDGPU::BI__builtin_amdgcn_classh: |
12733 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class); |
12734 | case AMDGPU::BI__builtin_amdgcn_fmed3f: |
12735 | case AMDGPU::BI__builtin_amdgcn_fmed3h: |
12736 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3); |
12737 | case AMDGPU::BI__builtin_amdgcn_ds_append: |
12738 | case AMDGPU::BI__builtin_amdgcn_ds_consume: { |
12739 | Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ? |
12740 | Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume; |
12741 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
12742 | Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() }); |
12743 | return Builder.CreateCall(F, { Src0, Builder.getFalse() }); |
12744 | } |
12745 | case AMDGPU::BI__builtin_amdgcn_read_exec: { |
12746 | CallInst *CI = cast<CallInst>( |
12747 | EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec")); |
12748 | CI->setConvergent(); |
12749 | return CI; |
12750 | } |
12751 | case AMDGPU::BI__builtin_amdgcn_read_exec_lo: |
12752 | case AMDGPU::BI__builtin_amdgcn_read_exec_hi: { |
12753 | StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ? |
12754 | "exec_lo" : "exec_hi"; |
12755 | CallInst *CI = cast<CallInst>( |
12756 | EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName)); |
12757 | CI->setConvergent(); |
12758 | return CI; |
12759 | } |
12760 | // amdgcn workitem |
12761 | case AMDGPU::BI__builtin_amdgcn_workitem_id_x: |
12762 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024); |
12763 | case AMDGPU::BI__builtin_amdgcn_workitem_id_y: |
12764 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024); |
12765 | case AMDGPU::BI__builtin_amdgcn_workitem_id_z: |
12766 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024); |
12767 | |
12768 | // r600 intrinsics |
12769 | case AMDGPU::BI__builtin_r600_recipsqrt_ieee: |
12770 | case AMDGPU::BI__builtin_r600_recipsqrt_ieeef: |
12771 | return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee); |
12772 | case AMDGPU::BI__builtin_r600_read_tidig_x: |
12773 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024); |
12774 | case AMDGPU::BI__builtin_r600_read_tidig_y: |
12775 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024); |
12776 | case AMDGPU::BI__builtin_r600_read_tidig_z: |
12777 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024); |
12778 | default: |
12779 | return nullptr; |
12780 | } |
12781 | } |
12782 | |
12783 | /// Handle a SystemZ function in which the final argument is a pointer |
12784 | /// to an int that receives the post-instruction CC value. At the LLVM level |
12785 | /// this is represented as a function that returns a {result, cc} pair. |
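      | // For example, a builtin of the form __builtin_s390_vceqbs(a, b, &cc) maps |
      | // to an intrinsic returning a {vector, i32} pair: the i32 CC is stored |
      | // through the trailing pointer argument and the vector part is returned. |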
12786 | static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, |
12787 | unsigned IntrinsicID, |
12788 | const CallExpr *E) { |
12789 | unsigned NumArgs = E->getNumArgs() - 1; |
12790 | SmallVector<Value *, 8> Args(NumArgs); |
12791 | for (unsigned I = 0; I < NumArgs; ++I) |
12792 | Args[I] = CGF.EmitScalarExpr(E->getArg(I)); |
12793 | Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); |
12794 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID); |
12795 | Value *Call = CGF.Builder.CreateCall(F, Args); |
12796 | Value *CC = CGF.Builder.CreateExtractValue(Call, 1); |
12797 | CGF.Builder.CreateStore(CC, CCPtr); |
12798 | return CGF.Builder.CreateExtractValue(Call, 0); |
12799 | } |
12800 | |
12801 | Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, |
12802 | const CallExpr *E) { |
12803 | switch (BuiltinID) { |
12804 | case SystemZ::BI__builtin_tbegin: { |
12805 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
12806 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
12807 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); |
12808 | return Builder.CreateCall(F, {TDB, Control}); |
12809 | } |
12810 | case SystemZ::BI__builtin_tbegin_nofloat: { |
12811 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
12812 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
12813 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); |
12814 | return Builder.CreateCall(F, {TDB, Control}); |
12815 | } |
12816 | case SystemZ::BI__builtin_tbeginc: { |
12817 | Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); |
12818 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); |
12819 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); |
12820 | return Builder.CreateCall(F, {TDB, Control}); |
12821 | } |
12822 | case SystemZ::BI__builtin_tabort: { |
12823 | Value *Data = EmitScalarExpr(E->getArg(0)); |
12824 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort); |
12825 | return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort")); |
12826 | } |
12827 | case SystemZ::BI__builtin_non_tx_store: { |
12828 | Value *Address = EmitScalarExpr(E->getArg(0)); |
12829 | Value *Data = EmitScalarExpr(E->getArg(1)); |
12830 | Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); |
12831 | return Builder.CreateCall(F, {Data, Address}); |
12832 | } |
12833 | |
12834 | // Vector builtins. Note that most vector builtins are mapped automatically |
12835 | // to target-specific LLVM intrinsics. The ones handled specially here can |
12836 | // be represented via standard LLVM IR, which is preferable to enable common |
12837 | // LLVM optimizations. |
12838 | |
12839 | case SystemZ::BI__builtin_s390_vpopctb: |
12840 | case SystemZ::BI__builtin_s390_vpopcth: |
12841 | case SystemZ::BI__builtin_s390_vpopctf: |
12842 | case SystemZ::BI__builtin_s390_vpopctg: { |
12843 | llvm::Type *ResultType = ConvertType(E->getType()); |
12844 | Value *X = EmitScalarExpr(E->getArg(0)); |
12845 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
12846 | return Builder.CreateCall(F, X); |
12847 | } |
12848 | |
12849 | case SystemZ::BI__builtin_s390_vclzb: |
12850 | case SystemZ::BI__builtin_s390_vclzh: |
12851 | case SystemZ::BI__builtin_s390_vclzf: |
12852 | case SystemZ::BI__builtin_s390_vclzg: { |
12853 | llvm::Type *ResultType = ConvertType(E->getType()); |
12854 | Value *X = EmitScalarExpr(E->getArg(0)); |
12855 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
12856 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
12857 | return Builder.CreateCall(F, {X, Undef}); |
12858 | } |
12859 | |
12860 | case SystemZ::BI__builtin_s390_vctzb: |
12861 | case SystemZ::BI__builtin_s390_vctzh: |
12862 | case SystemZ::BI__builtin_s390_vctzf: |
12863 | case SystemZ::BI__builtin_s390_vctzg: { |
12864 | llvm::Type *ResultType = ConvertType(E->getType()); |
12865 | Value *X = EmitScalarExpr(E->getArg(0)); |
12866 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
12867 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
12868 | return Builder.CreateCall(F, {X, Undef}); |
12869 | } |
12870 | |
12871 | case SystemZ::BI__builtin_s390_vfsqsb: |
12872 | case SystemZ::BI__builtin_s390_vfsqdb: { |
12873 | llvm::Type *ResultType = ConvertType(E->getType()); |
12874 | Value *X = EmitScalarExpr(E->getArg(0)); |
12875 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
12876 | return Builder.CreateCall(F, X); |
12877 | } |
12878 | case SystemZ::BI__builtin_s390_vfmasb: |
12879 | case SystemZ::BI__builtin_s390_vfmadb: { |
12880 | llvm::Type *ResultType = ConvertType(E->getType()); |
12881 | Value *X = EmitScalarExpr(E->getArg(0)); |
12882 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12883 | Value *Z = EmitScalarExpr(E->getArg(2)); |
12884 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
12885 | return Builder.CreateCall(F, {X, Y, Z}); |
12886 | } |
12887 | case SystemZ::BI__builtin_s390_vfmssb: |
12888 | case SystemZ::BI__builtin_s390_vfmsdb: { |
12889 | llvm::Type *ResultType = ConvertType(E->getType()); |
12890 | Value *X = EmitScalarExpr(E->getArg(0)); |
12891 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12892 | Value *Z = EmitScalarExpr(E->getArg(2)); |
12893 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); |
12894 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
12895 | return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); |
12896 | } |
12897 | case SystemZ::BI__builtin_s390_vfnmasb: |
12898 | case SystemZ::BI__builtin_s390_vfnmadb: { |
12899 | llvm::Type *ResultType = ConvertType(E->getType()); |
12900 | Value *X = EmitScalarExpr(E->getArg(0)); |
12901 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12902 | Value *Z = EmitScalarExpr(E->getArg(2)); |
12903 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); |
12904 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
12905 | return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, Z}), "sub"); |
12906 | } |
12907 | case SystemZ::BI__builtin_s390_vfnmssb: |
12908 | case SystemZ::BI__builtin_s390_vfnmsdb: { |
12909 | llvm::Type *ResultType = ConvertType(E->getType()); |
12910 | Value *X = EmitScalarExpr(E->getArg(0)); |
12911 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12912 | Value *Z = EmitScalarExpr(E->getArg(2)); |
12913 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); |
12914 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
12915 | Value *NegZ = Builder.CreateFSub(Zero, Z, "sub"); |
12916 | return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, NegZ})); |
12917 | } |
12918 | case SystemZ::BI__builtin_s390_vflpsb: |
12919 | case SystemZ::BI__builtin_s390_vflpdb: { |
12920 | llvm::Type *ResultType = ConvertType(E->getType()); |
12921 | Value *X = EmitScalarExpr(E->getArg(0)); |
12922 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
12923 | return Builder.CreateCall(F, X); |
12924 | } |
12925 | case SystemZ::BI__builtin_s390_vflnsb: |
12926 | case SystemZ::BI__builtin_s390_vflndb: { |
12927 | llvm::Type *ResultType = ConvertType(E->getType()); |
12928 | Value *X = EmitScalarExpr(E->getArg(0)); |
12929 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); |
12930 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
12931 | return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub"); |
12932 | } |
12933 | case SystemZ::BI__builtin_s390_vfisb: |
12934 | case SystemZ::BI__builtin_s390_vfidb: { |
12935 | llvm::Type *ResultType = ConvertType(E->getType()); |
12936 | Value *X = EmitScalarExpr(E->getArg(0)); |
12937 | // Constant-fold the M4 and M5 mask arguments. |
12938 | llvm::APSInt M4, M5; |
12939 | bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext()); |
12940 | bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext()); |
12941 | assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?"); |
12942 | (void)IsConstM4; (void)IsConstM5; |
12943 | // Check whether this instance can be represented via an LLVM standard |
12944 | // intrinsic. We only support some combinations of M4 and M5. |
12945 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
12946 | switch (M4.getZExtValue()) { |
12947 | default: break; |
12948 | case 0: // IEEE-inexact exception allowed |
12949 | switch (M5.getZExtValue()) { |
12950 | default: break; |
12951 | case 0: ID = Intrinsic::rint; break; |
12952 | } |
12953 | break; |
12954 | case 4: // IEEE-inexact exception suppressed |
12955 | switch (M5.getZExtValue()) { |
12956 | default: break; |
12957 | case 0: ID = Intrinsic::nearbyint; break; |
12958 | case 1: ID = Intrinsic::round; break; |
12959 | case 5: ID = Intrinsic::trunc; break; |
12960 | case 6: ID = Intrinsic::ceil; break; |
12961 | case 7: ID = Intrinsic::floor; break; |
12962 | } |
12963 | break; |
12964 | } |
12965 | if (ID != Intrinsic::not_intrinsic) { |
12966 | Function *F = CGM.getIntrinsic(ID, ResultType); |
12967 | return Builder.CreateCall(F, X); |
12968 | } |
12969 | switch (BuiltinID) { |
12970 | case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; |
12971 | case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; |
12972 | default: llvm_unreachable("Unknown BuiltinID"); |
12973 | } |
12974 | Function *F = CGM.getIntrinsic(ID); |
12975 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
12976 | Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); |
12977 | return Builder.CreateCall(F, {X, M4Value, M5Value}); |
12978 | } |
12979 | case SystemZ::BI__builtin_s390_vfmaxsb: |
12980 | case SystemZ::BI__builtin_s390_vfmaxdb: { |
12981 | llvm::Type *ResultType = ConvertType(E->getType()); |
12982 | Value *X = EmitScalarExpr(E->getArg(0)); |
12983 | Value *Y = EmitScalarExpr(E->getArg(1)); |
12984 | // Constant-fold the M4 mask argument. |
12985 | llvm::APSInt M4; |
12986 | bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext()); |
12987 | assert(IsConstM4 && "Constant arg isn't actually constant?"); |
12988 | (void)IsConstM4; |
12989 | // Check whether this instance can be represented via an LLVM standard |
12990 | // intrinsic. We only support some values of M4. |
12991 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
12992 | switch (M4.getZExtValue()) { |
12993 | default: break; |
12994 | case 4: ID = Intrinsic::maxnum; break; |
12995 | } |
12996 | if (ID != Intrinsic::not_intrinsic) { |
12997 | Function *F = CGM.getIntrinsic(ID, ResultType); |
12998 | return Builder.CreateCall(F, {X, Y}); |
12999 | } |
13000 | switch (BuiltinID) { |
13001 | case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; |
13002 | case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; |
13003 | default: llvm_unreachable("Unknown BuiltinID"); |
13004 | } |
13005 | Function *F = CGM.getIntrinsic(ID); |
13006 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
13007 | return Builder.CreateCall(F, {X, Y, M4Value}); |
13008 | } |
13009 | case SystemZ::BI__builtin_s390_vfminsb: |
13010 | case SystemZ::BI__builtin_s390_vfmindb: { |
13011 | llvm::Type *ResultType = ConvertType(E->getType()); |
13012 | Value *X = EmitScalarExpr(E->getArg(0)); |
13013 | Value *Y = EmitScalarExpr(E->getArg(1)); |
13014 | // Constant-fold the M4 mask argument. |
13015 | llvm::APSInt M4; |
13016 | bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext()); |
13017 | assert(IsConstM4 && "Constant arg isn't actually constant?"); |
13018 | (void)IsConstM4; |
13019 | // Check whether this instance can be represented via an LLVM standard |
13020 | // intrinsic. We only support some values of M4. |
13021 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
13022 | switch (M4.getZExtValue()) { |
13023 | default: break; |
13024 | case 4: ID = Intrinsic::minnum; break; |
13025 | } |
13026 | if (ID != Intrinsic::not_intrinsic) { |
13027 | Function *F = CGM.getIntrinsic(ID, ResultType); |
13028 | return Builder.CreateCall(F, {X, Y}); |
13029 | } |
13030 | switch (BuiltinID) { |
13031 | case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; |
13032 | case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; |
13033 | default: llvm_unreachable("Unknown BuiltinID"); |
13034 | } |
13035 | Function *F = CGM.getIntrinsic(ID); |
13036 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
13037 | return Builder.CreateCall(F, {X, Y, M4Value}); |
13038 | } |
13039 | |
13040 | // Vector intrinsics that output the post-instruction CC value. |
13041 | |
13042 | #define INTRINSIC_WITH_CC(NAME) \ |
13043 | case SystemZ::BI__builtin_##NAME: \ |
13044 | return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) |
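      | // For example, INTRINSIC_WITH_CC(s390_vceqbs) expands to |
      | //   case SystemZ::BI__builtin_s390_vceqbs: |
      | //     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vceqbs, E); |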
13045 | |
13046 | INTRINSIC_WITH_CC(s390_vpkshs); |
13047 | INTRINSIC_WITH_CC(s390_vpksfs); |
13048 | INTRINSIC_WITH_CC(s390_vpksgs); |
13049 | |
13050 | INTRINSIC_WITH_CC(s390_vpklshs); |
13051 | INTRINSIC_WITH_CC(s390_vpklsfs); |
13052 | INTRINSIC_WITH_CC(s390_vpklsgs); |
13053 | |
13054 | INTRINSIC_WITH_CC(s390_vceqbs); |
13055 | INTRINSIC_WITH_CC(s390_vceqhs); |
13056 | INTRINSIC_WITH_CC(s390_vceqfs); |
13057 | INTRINSIC_WITH_CC(s390_vceqgs); |
13058 | |
13059 | INTRINSIC_WITH_CC(s390_vchbs); |
13060 | INTRINSIC_WITH_CC(s390_vchhs); |
13061 | INTRINSIC_WITH_CC(s390_vchfs); |
13062 | INTRINSIC_WITH_CC(s390_vchgs); |
13063 | |
13064 | INTRINSIC_WITH_CC(s390_vchlbs); |
13065 | INTRINSIC_WITH_CC(s390_vchlhs); |
13066 | INTRINSIC_WITH_CC(s390_vchlfs); |
13067 | INTRINSIC_WITH_CC(s390_vchlgs); |
13068 | |
13069 | INTRINSIC_WITH_CC(s390_vfaebs); |
13070 | INTRINSIC_WITH_CC(s390_vfaehs); |
13071 | INTRINSIC_WITH_CC(s390_vfaefs); |
13072 | |
13073 | INTRINSIC_WITH_CC(s390_vfaezbs); |
13074 | INTRINSIC_WITH_CC(s390_vfaezhs); |
13075 | INTRINSIC_WITH_CC(s390_vfaezfs); |
13076 | |
13077 | INTRINSIC_WITH_CC(s390_vfeebs); |
13078 | INTRINSIC_WITH_CC(s390_vfeehs); |
13079 | INTRINSIC_WITH_CC(s390_vfeefs); |
13080 | |
13081 | INTRINSIC_WITH_CC(s390_vfeezbs); |
13082 | INTRINSIC_WITH_CC(s390_vfeezhs); |
13083 | INTRINSIC_WITH_CC(s390_vfeezfs); |
13084 | |
13085 | INTRINSIC_WITH_CC(s390_vfenebs); |
13086 | INTRINSIC_WITH_CC(s390_vfenehs); |
13087 | INTRINSIC_WITH_CC(s390_vfenefs); |
13088 | |
13089 | INTRINSIC_WITH_CC(s390_vfenezbs); |
13090 | INTRINSIC_WITH_CC(s390_vfenezhs); |
13091 | INTRINSIC_WITH_CC(s390_vfenezfs); |
13092 | |
13093 | INTRINSIC_WITH_CC(s390_vistrbs); |
13094 | INTRINSIC_WITH_CC(s390_vistrhs); |
13095 | INTRINSIC_WITH_CC(s390_vistrfs); |
13096 | |
13097 | INTRINSIC_WITH_CC(s390_vstrcbs); |
13098 | INTRINSIC_WITH_CC(s390_vstrchs); |
13099 | INTRINSIC_WITH_CC(s390_vstrcfs); |
13100 | |
13101 | INTRINSIC_WITH_CC(s390_vstrczbs); |
13102 | INTRINSIC_WITH_CC(s390_vstrczhs); |
13103 | INTRINSIC_WITH_CC(s390_vstrczfs); |
13104 | |
13105 | INTRINSIC_WITH_CC(s390_vfcesbs); |
13106 | INTRINSIC_WITH_CC(s390_vfcedbs); |
13107 | INTRINSIC_WITH_CC(s390_vfchsbs); |
13108 | INTRINSIC_WITH_CC(s390_vfchdbs); |
13109 | INTRINSIC_WITH_CC(s390_vfchesbs); |
13110 | INTRINSIC_WITH_CC(s390_vfchedbs); |
13111 | |
13112 | INTRINSIC_WITH_CC(s390_vftcisb); |
13113 | INTRINSIC_WITH_CC(s390_vftcidb); |
13114 | |
13115 | #undef INTRINSIC_WITH_CC |
13116 | |
13117 | default: |
13118 | return nullptr; |
13119 | } |
13120 | } |
13121 | |
13122 | namespace { |
13123 | // Helper classes for mapping MMA builtins to particular LLVM intrinsic variants. |
13124 | struct NVPTXMmaLdstInfo { |
13125 | unsigned NumResults; // Number of elements to load/store |
13126 | // Intrinsic IDs for row/col variants. 0 if the particular layout is unsupported. |
13127 | unsigned IID_col; |
13128 | unsigned IID_row; |
13129 | }; |
13130 | |
13131 | #define MMA_INTR(geom_op_type, layout) \ |
13132 | Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride |
13133 | #define MMA_LDST(n, geom_op_type) \ |
13134 | { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) } |
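      | // For example, MMA_LDST(8, m16n16k16_load_a_f16) yields |
      | //   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride, |
      | //        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }. |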
13135 | |
13136 | static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) { |
13137 | switch (BuiltinID) { |
13138 | // FP MMA loads |
13139 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
13140 | return MMA_LDST(8, m16n16k16_load_a_f16); |
13141 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
13142 | return MMA_LDST(8, m16n16k16_load_b_f16); |
13143 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
13144 | return MMA_LDST(4, m16n16k16_load_c_f16); |
13145 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
13146 | return MMA_LDST(8, m16n16k16_load_c_f32); |
13147 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
13148 | return MMA_LDST(8, m32n8k16_load_a_f16); |
13149 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
13150 | return MMA_LDST(8, m32n8k16_load_b_f16); |
13151 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
13152 | return MMA_LDST(4, m32n8k16_load_c_f16); |
13153 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
13154 | return MMA_LDST(8, m32n8k16_load_c_f32); |
13155 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
13156 | return MMA_LDST(8, m8n32k16_load_a_f16); |
13157 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
13158 | return MMA_LDST(8, m8n32k16_load_b_f16); |
13159 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
13160 | return MMA_LDST(4, m8n32k16_load_c_f16); |
13161 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
13162 | return MMA_LDST(8, m8n32k16_load_c_f32); |
13163 | |
13164 | // Integer MMA loads |
13165 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
13166 | return MMA_LDST(2, m16n16k16_load_a_s8); |
13167 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
13168 | return MMA_LDST(2, m16n16k16_load_a_u8); |
13169 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
13170 | return MMA_LDST(2, m16n16k16_load_b_s8); |
13171 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
13172 | return MMA_LDST(2, m16n16k16_load_b_u8); |
13173 | case NVPTX::BI__imma_m16n16k16_ld_c: |
13174 | return MMA_LDST(8, m16n16k16_load_c_s32); |
13175 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
13176 | return MMA_LDST(4, m32n8k16_load_a_s8); |
13177 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
13178 | return MMA_LDST(4, m32n8k16_load_a_u8); |
13179 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
13180 | return MMA_LDST(1, m32n8k16_load_b_s8); |
13181 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
13182 | return MMA_LDST(1, m32n8k16_load_b_u8); |
13183 | case NVPTX::BI__imma_m32n8k16_ld_c: |
13184 | return MMA_LDST(8, m32n8k16_load_c_s32); |
13185 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
13186 | return MMA_LDST(1, m8n32k16_load_a_s8); |
13187 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
13188 | return MMA_LDST(1, m8n32k16_load_a_u8); |
13189 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
13190 | return MMA_LDST(4, m8n32k16_load_b_s8); |
13191 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
13192 | return MMA_LDST(4, m8n32k16_load_b_u8); |
13193 | case NVPTX::BI__imma_m8n32k16_ld_c: |
13194 | return MMA_LDST(8, m8n32k16_load_c_s32); |
13195 | |
13196 | // Sub-integer MMA loads. |
13197 | // Only row/col layout is supported by A/B fragments. |
13198 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
13199 | return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)}; |
13200 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
13201 | return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)}; |
13202 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
13203 | return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0}; |
13204 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
13205 | return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0}; |
13206 | case NVPTX::BI__imma_m8n8k32_ld_c: |
13207 | return MMA_LDST(2, m8n8k32_load_c_s32); |
13208 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
13209 | return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)}; |
13210 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
13211 | return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0}; |
13212 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
13213 | return MMA_LDST(2, m8n8k128_load_c_s32); |
13214 | |
13215 | // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike |
13216 | // PTX and LLVM IR where stores always use fragment D, NVCC builtins always |
13217 | // use fragment C for both loads and stores. |
13218 | // FP MMA stores. |
13219 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
13220 | return MMA_LDST(4, m16n16k16_store_d_f16); |
13221 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
13222 | return MMA_LDST(8, m16n16k16_store_d_f32); |
13223 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
13224 | return MMA_LDST(4, m32n8k16_store_d_f16); |
13225 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
13226 | return MMA_LDST(8, m32n8k16_store_d_f32); |
13227 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
13228 | return MMA_LDST(4, m8n32k16_store_d_f16); |
13229 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
13230 | return MMA_LDST(8, m8n32k16_store_d_f32); |
13231 | |
13232 | // Integer and sub-integer MMA stores. |
13233 | // Another naming quirk. Unlike other MMA builtins that use PTX types in the |
13234 | // name, integer loads/stores use LLVM's i32. |
13235 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
13236 | return MMA_LDST(8, m16n16k16_store_d_s32); |
13237 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
13238 | return MMA_LDST(8, m32n8k16_store_d_s32); |
13239 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
13240 | return MMA_LDST(8, m8n32k16_store_d_s32); |
13241 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
13242 | return MMA_LDST(2, m8n8k32_store_d_s32); |
13243 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
13244 | return MMA_LDST(2, m8n8k128_store_d_s32); |
13245 | |
13246 | default: |
13247 | llvm_unreachable("Unknown MMA builtin"); |
13248 | } |
13249 | } |
13250 | #undef MMA_LDST |
13251 | #undef MMA_INTR |
13252 | |
13253 | |
13254 | struct NVPTXMmaInfo { |
13255 | unsigned NumEltsA; |
13256 | unsigned NumEltsB; |
13257 | unsigned NumEltsC; |
13258 | unsigned NumEltsD; |
13259 | std::array<unsigned, 8> Variants; |
13260 | |
13261 | unsigned getMMAIntrinsic(int Layout, bool Satf) { |
13262 | unsigned Index = Layout * 2 + Satf; |
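      | // Variants is ordered {row_row, row_row_satf, row_col, row_col_satf, |
      | // col_row, col_row_satf, col_col, col_col_satf} (see MMA_VARIANTS below), |
      | // so e.g. Layout == 1 with Satf set picks the row_col _satfinite entry. |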
13263 | if (Index >= Variants.size()) |
13264 | return 0; |
13265 | return Variants[Index]; |
13266 | } |
13267 | }; |
13268 | |
13269 | // Returns the intrinsic that matches Layout and Satf for valid combinations of |
13270 | // Layout and Satf, or 0 otherwise. |
13271 | static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { |
13272 | // clang-format off |
13273 | #define MMA_VARIANTS(geom, type) {{ \ |
13274 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \ |
13275 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \ |
13276 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
13277 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
13278 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \ |
13279 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \ |
13280 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \ |
13281 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \ |
13282 | }} |
13283 | // Sub-integer MMA only supports row.col layout. |
13284 | #define MMA_VARIANTS_I4(geom, type) {{ \ |
13285 | 0, \ |
13286 | 0, \ |
13287 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
13288 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
13289 | 0, \ |
13290 | 0, \ |
13291 | 0, \ |
13292 | 0 \ |
13293 | }} |
13294 | // b1 MMA does not support .satfinite. |
13295 | #define MMA_VARIANTS_B1(geom, type) {{ \ |
13296 | 0, \ |
13297 | 0, \ |
13298 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
13299 | 0, \ |
13300 | 0, \ |
13301 | 0, \ |
13302 | 0, \ |
13303 | 0 \ |
13304 | }} |
13305 | // clang-format on |
13306 | switch (BuiltinID) { |
13307 | // FP MMA |
13308 | // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while the |
13309 | // NumElts fields of the return value are ordered as A,B,C,D. |
13310 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
13311 | return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)}; |
13312 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
13313 | return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)}; |
13314 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
13315 | return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)}; |
13316 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
13317 | return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)}; |
13318 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
13319 | return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)}; |
13320 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
13321 | return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)}; |
13322 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
13323 | return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)}; |
13324 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
13325 | return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)}; |
13326 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
13327 | return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)}; |
13328 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
13329 | return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)}; |
13330 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
13331 | return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)}; |
13332 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
13333 | return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)}; |
13334 | |
13335 | // Integer MMA |
13336 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
13337 | return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)}; |
13338 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
13339 | return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)}; |
13340 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
13341 | return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)}; |
13342 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
13343 | return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)}; |
13344 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
13345 | return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)}; |
13346 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
13347 | return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)}; |
13348 | |
13349 | // Sub-integer MMA |
13350 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
13351 | return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)}; |
13352 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
13353 | return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)}; |
13354 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
13355 | return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)}; |
13356 | default: |
13357 | llvm_unreachable("Unexpected builtin ID."); |
13358 | } |
13359 | #undef MMA_VARIANTS |
13360 | #undef MMA_VARIANTS_I4 |
13361 | #undef MMA_VARIANTS_B1 |
13362 | } |
13363 | |
13364 | } // namespace |
13365 | |
13366 | Value * |
13367 | CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { |
13368 | auto MakeLdg = [&](unsigned IntrinsicID) { |
13369 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13370 | clang::CharUnits Align = |
13371 | getNaturalPointeeTypeAlignment(E->getArg(0)->getType()); |
13372 | return Builder.CreateCall( |
13373 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), |
13374 | Ptr->getType()}), |
13375 | {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())}); |
13376 | }; |
13377 | auto MakeScopedAtomic = [&](unsigned IntrinsicID) { |
13378 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13379 | return Builder.CreateCall( |
13380 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), |
13381 | Ptr->getType()}), |
13382 | {Ptr, EmitScalarExpr(E->getArg(1))}); |
13383 | }; |
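      | // Both helpers overload the chosen intrinsic on the pointee and pointer |
      | // types, which is why a single case below can cover the _i, _l and _ll |
      | // (or _f and _d) variants of a builtin. |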
13384 | switch (BuiltinID) { |
13385 | case NVPTX::BI__nvvm_atom_add_gen_i: |
13386 | case NVPTX::BI__nvvm_atom_add_gen_l: |
13387 | case NVPTX::BI__nvvm_atom_add_gen_ll: |
13388 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E); |
13389 | |
13390 | case NVPTX::BI__nvvm_atom_sub_gen_i: |
13391 | case NVPTX::BI__nvvm_atom_sub_gen_l: |
13392 | case NVPTX::BI__nvvm_atom_sub_gen_ll: |
13393 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E); |
13394 | |
13395 | case NVPTX::BI__nvvm_atom_and_gen_i: |
13396 | case NVPTX::BI__nvvm_atom_and_gen_l: |
13397 | case NVPTX::BI__nvvm_atom_and_gen_ll: |
13398 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E); |
13399 | |
13400 | case NVPTX::BI__nvvm_atom_or_gen_i: |
13401 | case NVPTX::BI__nvvm_atom_or_gen_l: |
13402 | case NVPTX::BI__nvvm_atom_or_gen_ll: |
13403 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E); |
13404 | |
13405 | case NVPTX::BI__nvvm_atom_xor_gen_i: |
13406 | case NVPTX::BI__nvvm_atom_xor_gen_l: |
13407 | case NVPTX::BI__nvvm_atom_xor_gen_ll: |
13408 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E); |
13409 | |
13410 | case NVPTX::BI__nvvm_atom_xchg_gen_i: |
13411 | case NVPTX::BI__nvvm_atom_xchg_gen_l: |
13412 | case NVPTX::BI__nvvm_atom_xchg_gen_ll: |
13413 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E); |
13414 | |
13415 | case NVPTX::BI__nvvm_atom_max_gen_i: |
13416 | case NVPTX::BI__nvvm_atom_max_gen_l: |
13417 | case NVPTX::BI__nvvm_atom_max_gen_ll: |
13418 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E); |
13419 | |
13420 | case NVPTX::BI__nvvm_atom_max_gen_ui: |
13421 | case NVPTX::BI__nvvm_atom_max_gen_ul: |
13422 | case NVPTX::BI__nvvm_atom_max_gen_ull: |
13423 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E); |
13424 | |
13425 | case NVPTX::BI__nvvm_atom_min_gen_i: |
13426 | case NVPTX::BI__nvvm_atom_min_gen_l: |
13427 | case NVPTX::BI__nvvm_atom_min_gen_ll: |
13428 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E); |
13429 | |
13430 | case NVPTX::BI__nvvm_atom_min_gen_ui: |
13431 | case NVPTX::BI__nvvm_atom_min_gen_ul: |
13432 | case NVPTX::BI__nvvm_atom_min_gen_ull: |
13433 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E); |
13434 | |
13435 | case NVPTX::BI__nvvm_atom_cas_gen_i: |
13436 | case NVPTX::BI__nvvm_atom_cas_gen_l: |
13437 | case NVPTX::BI__nvvm_atom_cas_gen_ll: |
13438 | // __nvvm_atom_cas_gen_* should return the old value rather than the |
13439 | // success flag. |
13440 | return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false); |
13441 | |
13442 | case NVPTX::BI__nvvm_atom_add_gen_f: { |
13443 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13444 | Value *Val = EmitScalarExpr(E->getArg(1)); |
13445 | // atomicrmw only deals with integer arguments, so we need to use |
13446 | // LLVM's nvvm_atomic_load_add_f32 intrinsic for that. |
13447 | Function *FnALAF32 = |
13448 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType()); |
13449 | return Builder.CreateCall(FnALAF32, {Ptr, Val}); |
13450 | } |
13451 | |
13452 | case NVPTX::BI__nvvm_atom_add_gen_d: { |
13453 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13454 | Value *Val = EmitScalarExpr(E->getArg(1)); |
13455 | // atomicrmw only deals with integer arguments, so we need to use |
13456 | // LLVM's nvvm_atomic_load_add_f64 intrinsic. |
13457 | Function *FnALAF64 = |
13458 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f64, Ptr->getType()); |
13459 | return Builder.CreateCall(FnALAF64, {Ptr, Val}); |
13460 | } |
13461 | |
13462 | case NVPTX::BI__nvvm_atom_inc_gen_ui: { |
13463 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13464 | Value *Val = EmitScalarExpr(E->getArg(1)); |
13465 | Function *FnALI32 = |
13466 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); |
13467 | return Builder.CreateCall(FnALI32, {Ptr, Val}); |
13468 | } |
13469 | |
13470 | case NVPTX::BI__nvvm_atom_dec_gen_ui: { |
13471 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13472 | Value *Val = EmitScalarExpr(E->getArg(1)); |
13473 | Function *FnALD32 = |
13474 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); |
13475 | return Builder.CreateCall(FnALD32, {Ptr, Val}); |
13476 | } |
13477 | |
13478 | case NVPTX::BI__nvvm_ldg_c: |
13479 | case NVPTX::BI__nvvm_ldg_c2: |
13480 | case NVPTX::BI__nvvm_ldg_c4: |
13481 | case NVPTX::BI__nvvm_ldg_s: |
13482 | case NVPTX::BI__nvvm_ldg_s2: |
13483 | case NVPTX::BI__nvvm_ldg_s4: |
13484 | case NVPTX::BI__nvvm_ldg_i: |
13485 | case NVPTX::BI__nvvm_ldg_i2: |
13486 | case NVPTX::BI__nvvm_ldg_i4: |
13487 | case NVPTX::BI__nvvm_ldg_l: |
13488 | case NVPTX::BI__nvvm_ldg_ll: |
13489 | case NVPTX::BI__nvvm_ldg_ll2: |
13490 | case NVPTX::BI__nvvm_ldg_uc: |
13491 | case NVPTX::BI__nvvm_ldg_uc2: |
13492 | case NVPTX::BI__nvvm_ldg_uc4: |
13493 | case NVPTX::BI__nvvm_ldg_us: |
13494 | case NVPTX::BI__nvvm_ldg_us2: |
13495 | case NVPTX::BI__nvvm_ldg_us4: |
13496 | case NVPTX::BI__nvvm_ldg_ui: |
13497 | case NVPTX::BI__nvvm_ldg_ui2: |
13498 | case NVPTX::BI__nvvm_ldg_ui4: |
13499 | case NVPTX::BI__nvvm_ldg_ul: |
13500 | case NVPTX::BI__nvvm_ldg_ull: |
13501 | case NVPTX::BI__nvvm_ldg_ull2: |
13502 | // PTX Interoperability section 2.2: "For a vector with an even number of |
13503 | // elements, its alignment is set to number of elements times the alignment |
13504 | // of its member: n*alignof(t)." |
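      | // For example, __nvvm_ldg_ui4 reads through a pointer to a 4-element |
      | // unsigned int vector, whose natural pointee alignment is typically |
      | // 4 * alignof(unsigned int) = 16 bytes; MakeLdg forwards that value as the |
      | // intrinsic's explicit alignment operand. |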
13505 | return MakeLdg(Intrinsic::nvvm_ldg_global_i); |
13506 | case NVPTX::BI__nvvm_ldg_f: |
13507 | case NVPTX::BI__nvvm_ldg_f2: |
13508 | case NVPTX::BI__nvvm_ldg_f4: |
13509 | case NVPTX::BI__nvvm_ldg_d: |
13510 | case NVPTX::BI__nvvm_ldg_d2: |
13511 | return MakeLdg(Intrinsic::nvvm_ldg_global_f); |
13512 | |
13513 | case NVPTX::BI__nvvm_atom_cta_add_gen_i: |
13514 | case NVPTX::BI__nvvm_atom_cta_add_gen_l: |
13515 | case NVPTX::BI__nvvm_atom_cta_add_gen_ll: |
13516 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta); |
13517 | case NVPTX::BI__nvvm_atom_sys_add_gen_i: |
13518 | case NVPTX::BI__nvvm_atom_sys_add_gen_l: |
13519 | case NVPTX::BI__nvvm_atom_sys_add_gen_ll: |
13520 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys); |
13521 | case NVPTX::BI__nvvm_atom_cta_add_gen_f: |
13522 | case NVPTX::BI__nvvm_atom_cta_add_gen_d: |
13523 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta); |
13524 | case NVPTX::BI__nvvm_atom_sys_add_gen_f: |
13525 | case NVPTX::BI__nvvm_atom_sys_add_gen_d: |
13526 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys); |
13527 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_i: |
13528 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_l: |
13529 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll: |
13530 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta); |
13531 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_i: |
13532 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_l: |
13533 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll: |
13534 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys); |
13535 | case NVPTX::BI__nvvm_atom_cta_max_gen_i: |
13536 | case NVPTX::BI__nvvm_atom_cta_max_gen_ui: |
13537 | case NVPTX::BI__nvvm_atom_cta_max_gen_l: |
13538 | case NVPTX::BI__nvvm_atom_cta_max_gen_ul: |
13539 | case NVPTX::BI__nvvm_atom_cta_max_gen_ll: |
13540 | case NVPTX::BI__nvvm_atom_cta_max_gen_ull: |
13541 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta); |
13542 | case NVPTX::BI__nvvm_atom_sys_max_gen_i: |
13543 | case NVPTX::BI__nvvm_atom_sys_max_gen_ui: |
13544 | case NVPTX::BI__nvvm_atom_sys_max_gen_l: |
13545 | case NVPTX::BI__nvvm_atom_sys_max_gen_ul: |
13546 | case NVPTX::BI__nvvm_atom_sys_max_gen_ll: |
13547 | case NVPTX::BI__nvvm_atom_sys_max_gen_ull: |
13548 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys); |
13549 | case NVPTX::BI__nvvm_atom_cta_min_gen_i: |
13550 | case NVPTX::BI__nvvm_atom_cta_min_gen_ui: |
13551 | case NVPTX::BI__nvvm_atom_cta_min_gen_l: |
13552 | case NVPTX::BI__nvvm_atom_cta_min_gen_ul: |
13553 | case NVPTX::BI__nvvm_atom_cta_min_gen_ll: |
13554 | case NVPTX::BI__nvvm_atom_cta_min_gen_ull: |
13555 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta); |
13556 | case NVPTX::BI__nvvm_atom_sys_min_gen_i: |
13557 | case NVPTX::BI__nvvm_atom_sys_min_gen_ui: |
13558 | case NVPTX::BI__nvvm_atom_sys_min_gen_l: |
13559 | case NVPTX::BI__nvvm_atom_sys_min_gen_ul: |
13560 | case NVPTX::BI__nvvm_atom_sys_min_gen_ll: |
13561 | case NVPTX::BI__nvvm_atom_sys_min_gen_ull: |
13562 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys); |
13563 | case NVPTX::BI__nvvm_atom_cta_inc_gen_ui: |
13564 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta); |
13565 | case NVPTX::BI__nvvm_atom_cta_dec_gen_ui: |
13566 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta); |
13567 | case NVPTX::BI__nvvm_atom_sys_inc_gen_ui: |
13568 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys); |
13569 | case NVPTX::BI__nvvm_atom_sys_dec_gen_ui: |
13570 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys); |
13571 | case NVPTX::BI__nvvm_atom_cta_and_gen_i: |
13572 | case NVPTX::BI__nvvm_atom_cta_and_gen_l: |
13573 | case NVPTX::BI__nvvm_atom_cta_and_gen_ll: |
13574 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta); |
13575 | case NVPTX::BI__nvvm_atom_sys_and_gen_i: |
13576 | case NVPTX::BI__nvvm_atom_sys_and_gen_l: |
13577 | case NVPTX::BI__nvvm_atom_sys_and_gen_ll: |
13578 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys); |
13579 | case NVPTX::BI__nvvm_atom_cta_or_gen_i: |
13580 | case NVPTX::BI__nvvm_atom_cta_or_gen_l: |
13581 | case NVPTX::BI__nvvm_atom_cta_or_gen_ll: |
13582 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta); |
13583 | case NVPTX::BI__nvvm_atom_sys_or_gen_i: |
13584 | case NVPTX::BI__nvvm_atom_sys_or_gen_l: |
13585 | case NVPTX::BI__nvvm_atom_sys_or_gen_ll: |
13586 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys); |
13587 | case NVPTX::BI__nvvm_atom_cta_xor_gen_i: |
13588 | case NVPTX::BI__nvvm_atom_cta_xor_gen_l: |
13589 | case NVPTX::BI__nvvm_atom_cta_xor_gen_ll: |
13590 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta); |
13591 | case NVPTX::BI__nvvm_atom_sys_xor_gen_i: |
13592 | case NVPTX::BI__nvvm_atom_sys_xor_gen_l: |
13593 | case NVPTX::BI__nvvm_atom_sys_xor_gen_ll: |
13594 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys); |
13595 | case NVPTX::BI__nvvm_atom_cta_cas_gen_i: |
13596 | case NVPTX::BI__nvvm_atom_cta_cas_gen_l: |
13597 | case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: { |
13598 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13599 | return Builder.CreateCall( |
13600 | CGM.getIntrinsic( |
13601 | Intrinsic::nvvm_atomic_cas_gen_i_cta, |
13602 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), |
13603 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
13604 | } |
13605 | case NVPTX::BI__nvvm_atom_sys_cas_gen_i: |
13606 | case NVPTX::BI__nvvm_atom_sys_cas_gen_l: |
13607 | case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: { |
13608 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
13609 | return Builder.CreateCall( |
13610 | CGM.getIntrinsic( |
13611 | Intrinsic::nvvm_atomic_cas_gen_i_sys, |
13612 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), |
13613 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
13614 | } |
13615 | case NVPTX::BI__nvvm_match_all_sync_i32p: |
13616 | case NVPTX::BI__nvvm_match_all_sync_i64p: { |
13617 | Value *Mask = EmitScalarExpr(E->getArg(0)); |
13618 | Value *Val = EmitScalarExpr(E->getArg(1)); |
13619 | Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
13620 | Value *ResultPair = Builder.CreateCall( |
13621 | CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p |
13622 | ? Intrinsic::nvvm_match_all_sync_i32p |
13623 | : Intrinsic::nvvm_match_all_sync_i64p), |
13624 | {Mask, Val}); |
13625 | Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1), |
13626 | PredOutPtr.getElementType()); |
13627 | Builder.CreateStore(Pred, PredOutPtr); |
13628 | return Builder.CreateExtractValue(ResultPair, 0); |
13629 | } |
13630 | |
13631 | // FP MMA loads |
13632 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
13633 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
13634 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
13635 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
13636 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
13637 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
13638 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
13639 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
13640 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
13641 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
13642 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
13643 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
13644 | // Integer MMA loads. |
13645 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
13646 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
13647 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
13648 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
13649 | case NVPTX::BI__imma_m16n16k16_ld_c: |
13650 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
13651 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
13652 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
13653 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
13654 | case NVPTX::BI__imma_m32n8k16_ld_c: |
13655 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
13656 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
13657 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
13658 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
13659 | case NVPTX::BI__imma_m8n32k16_ld_c: |
13660 | // Sub-integer MMA loads. |
13661 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
13662 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
13663 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
13664 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
13665 | case NVPTX::BI__imma_m8n8k32_ld_c: |
13666 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
13667 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
13668 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
13669 | { |
13670 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
13671 | Value *Src = EmitScalarExpr(E->getArg(1)); |
13672 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
13673 | llvm::APSInt isColMajorArg; |
13674 | if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext())) |
13675 | return nullptr; |
13676 | bool isColMajor = isColMajorArg.getSExtValue(); |
13677 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
13678 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
13679 | if (IID == 0) |
13680 | return nullptr; |
13681 | |
13682 | Value *Result = |
13683 | Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm}); |
13684 | |
13685 | // Save returned values. |
13686 | assert(II.NumResults); |
13687 | if (II.NumResults == 1) { |
13688 | Builder.CreateAlignedStore(Result, Dst.getPointer(), |
13689 | CharUnits::fromQuantity(4)); |
13690 | } else { |
13691 | for (unsigned i = 0; i < II.NumResults; ++i) { |
13692 | Builder.CreateAlignedStore( |
13693 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), |
13694 | Dst.getElementType()), |
13695 | Builder.CreateGEP(Dst.getPointer(), |
13696 | llvm::ConstantInt::get(IntTy, i)), |
13697 | CharUnits::fromQuantity(4)); |
13698 | } |
13699 | } |
13700 | return Result; |
13701 | } |
13702 | |
13703 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
13704 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
13705 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
13706 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
13707 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
13708 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
13709 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
13710 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
13711 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
13712 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
13713 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: { |
13714 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
13715 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
13716 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
13717 | llvm::APSInt isColMajorArg; |
13718 | if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext())) |
13719 | return nullptr; |
13720 | bool isColMajor = isColMajorArg.getSExtValue(); |
13721 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
13722 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
13723 | if (IID == 0) |
13724 | return nullptr; |
13725 | Function *Intrinsic = |
13726 | CGM.getIntrinsic(IID, Dst->getType()); |
13727 | llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); |
13728 | SmallVector<Value *, 10> Values = {Dst}; |
13729 | for (unsigned i = 0; i < II.NumResults; ++i) { |
13730 | Value *V = Builder.CreateAlignedLoad( |
13731 | Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)), |
13732 | CharUnits::fromQuantity(4)); |
13733 | Values.push_back(Builder.CreateBitCast(V, ParamType)); |
13734 | } |
13735 | Values.push_back(Ldm); |
13736 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
13737 | return Result; |
13738 | } |
13739 | |
13740 | // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) --> |
13741 | // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf> |
13742 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
13743 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
13744 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
13745 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
13746 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
13747 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
13748 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
13749 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
13750 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
13751 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
13752 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
13753 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
13754 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
13755 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
13756 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
13757 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
13758 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
13759 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
13760 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
13761 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
13762 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: { |
13763 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
13764 | Address SrcA = EmitPointerWithAlignment(E->getArg(1)); |
13765 | Address SrcB = EmitPointerWithAlignment(E->getArg(2)); |
13766 | Address SrcC = EmitPointerWithAlignment(E->getArg(3)); |
13767 | llvm::APSInt LayoutArg; |
13768 | if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext())) |
13769 | return nullptr; |
13770 | int Layout = LayoutArg.getSExtValue(); |
13771 | if (Layout < 0 || Layout > 3) |
13772 | return nullptr; |
13773 | llvm::APSInt SatfArg; |
13774 | if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1) |
13775 | SatfArg = 0; // .b1 does not have a satf argument.
13776 | else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext())) |
13777 | return nullptr; |
13778 | bool Satf = SatfArg.getSExtValue(); |
13779 | NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID); |
13780 | unsigned IID = MI.getMMAIntrinsic(Layout, Satf); |
13781 | if (IID == 0) // Unsupported combination of Layout/Satf. |
13782 | return nullptr; |
13783 | |
13784 | SmallVector<Value *, 24> Values; |
13785 | Function *Intrinsic = CGM.getIntrinsic(IID); |
13786 | llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0); |
13787 | // Load A |
13788 | for (unsigned i = 0; i < MI.NumEltsA; ++i) { |
13789 | Value *V = Builder.CreateAlignedLoad( |
13790 | Builder.CreateGEP(SrcA.getPointer(), |
13791 | llvm::ConstantInt::get(IntTy, i)), |
13792 | CharUnits::fromQuantity(4)); |
13793 | Values.push_back(Builder.CreateBitCast(V, AType)); |
13794 | } |
13795 | // Load B |
13796 | llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA); |
13797 | for (unsigned i = 0; i < MI.NumEltsB; ++i) { |
13798 | Value *V = Builder.CreateAlignedLoad( |
13799 | Builder.CreateGEP(SrcB.getPointer(), |
13800 | llvm::ConstantInt::get(IntTy, i)), |
13801 | CharUnits::fromQuantity(4)); |
13802 | Values.push_back(Builder.CreateBitCast(V, BType)); |
13803 | } |
13804 | // Load C |
13805 | llvm::Type *CType = |
13806 | Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB); |
13807 | for (unsigned i = 0; i < MI.NumEltsC; ++i) { |
13808 | Value *V = Builder.CreateAlignedLoad( |
13809 | Builder.CreateGEP(SrcC.getPointer(), |
13810 | llvm::ConstantInt::get(IntTy, i)), |
13811 | CharUnits::fromQuantity(4)); |
13812 | Values.push_back(Builder.CreateBitCast(V, CType)); |
13813 | } |
13814 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
13815 | llvm::Type *DType = Dst.getElementType(); |
13816 | for (unsigned i = 0; i < MI.NumEltsD; ++i) |
13817 | Builder.CreateAlignedStore( |
13818 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType), |
13819 | Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)), |
13820 | CharUnits::fromQuantity(4)); |
13821 | return Result; |
13822 | } |
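// Illustrative use (a sketch, not code from this file): d, a, b, c point to
// per-thread fragment arrays, e.g.
//   __hmma_m16n16k16_mma_f32f32(d, a, b, c, /*layout=*/0, /*satf=*/0);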
13823 | default: |
13824 | return nullptr; |
13825 | } |
13826 | } |
13827 | |
13828 | Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, |
13829 | const CallExpr *E) { |
13830 | switch (BuiltinID) { |
13831 | case WebAssembly::BI__builtin_wasm_memory_size: { |
13832 | llvm::Type *ResultType = ConvertType(E->getType()); |
13833 | Value *I = EmitScalarExpr(E->getArg(0)); |
13834 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType); |
13835 | return Builder.CreateCall(Callee, I); |
13836 | } |
13837 | case WebAssembly::BI__builtin_wasm_memory_grow: { |
13838 | llvm::Type *ResultType = ConvertType(E->getType()); |
13839 | Value *Args[] = { |
13840 | EmitScalarExpr(E->getArg(0)), |
13841 | EmitScalarExpr(E->getArg(1)) |
13842 | }; |
13843 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType); |
13844 | return Builder.CreateCall(Callee, Args); |
13845 | } |
13846 | case WebAssembly::BI__builtin_wasm_memory_init: { |
13847 | llvm::APSInt SegConst; |
13848 | if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext())) |
13849 | llvm_unreachable("Constant arg isn't actually constant?")::llvm::llvm_unreachable_internal("Constant arg isn't actually constant?" , "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 13849); |
13850 | llvm::APSInt MemConst; |
13851 | if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext())) |
13852 | llvm_unreachable("Constant arg isn't actually constant?");
13853 | if (!MemConst.isNullValue()) |
13854 | ErrorUnsupported(E, "non-zero memory index"); |
13855 | Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst), |
13856 | llvm::ConstantInt::get(getLLVMContext(), MemConst), |
13857 | EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)), |
13858 | EmitScalarExpr(E->getArg(4))}; |
13859 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init); |
13860 | return Builder.CreateCall(Callee, Args); |
13861 | } |
13862 | case WebAssembly::BI__builtin_wasm_data_drop: { |
13863 | llvm::APSInt SegConst; |
13864 | if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext())) |
13865 | llvm_unreachable("Constant arg isn't actually constant?");
13866 | Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst); |
13867 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop); |
13868 | return Builder.CreateCall(Callee, {Arg}); |
13869 | } |
13870 | case WebAssembly::BI__builtin_wasm_throw: { |
13871 | Value *Tag = EmitScalarExpr(E->getArg(0)); |
13872 | Value *Obj = EmitScalarExpr(E->getArg(1)); |
13873 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw); |
13874 | return Builder.CreateCall(Callee, {Tag, Obj}); |
13875 | } |
13876 | case WebAssembly::BI__builtin_wasm_rethrow_in_catch: { |
13877 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch); |
13878 | return Builder.CreateCall(Callee); |
13879 | } |
13880 | case WebAssembly::BI__builtin_wasm_atomic_wait_i32: { |
13881 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
13882 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
13883 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
13884 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32); |
13885 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
13886 | } |
13887 | case WebAssembly::BI__builtin_wasm_atomic_wait_i64: { |
13888 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
13889 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
13890 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
13891 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64); |
13892 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
13893 | } |
13894 | case WebAssembly::BI__builtin_wasm_atomic_notify: { |
13895 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
13896 | Value *Count = EmitScalarExpr(E->getArg(1)); |
13897 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify); |
13898 | return Builder.CreateCall(Callee, {Addr, Count}); |
13899 | } |
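// Illustrative use of the atomic builtins above (a sketch; the exact
// source-level prototypes are not shown in this file):
//   int r = __builtin_wasm_atomic_wait_i32(&flag, /*expected=*/0, /*timeout=*/-1);
//   __builtin_wasm_atomic_notify(&flag, /*count=*/1);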
13900 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32: |
13901 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64: |
13902 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32: |
13903 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64: |
13904 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: |
13905 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: { |
13906 | Value *Src = EmitScalarExpr(E->getArg(0)); |
13907 | llvm::Type *ResT = ConvertType(E->getType()); |
13908 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed, |
13909 | {ResT, Src->getType()}); |
13910 | return Builder.CreateCall(Callee, {Src}); |
13911 | } |
13912 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32: |
13913 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64: |
13914 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32: |
13915 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64: |
13916 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: |
13917 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: { |
13918 | Value *Src = EmitScalarExpr(E->getArg(0)); |
13919 | llvm::Type *ResT = ConvertType(E->getType()); |
13920 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned, |
13921 | {ResT, Src->getType()}); |
13922 | return Builder.CreateCall(Callee, {Src}); |
13923 | } |
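// The saturating truncations above clamp out-of-range inputs rather than
// trapping; an illustrative (sketch) use:
//   int i = __builtin_wasm_trunc_saturate_s_i32_f32(f);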
13924 | case WebAssembly::BI__builtin_wasm_min_f32: |
13925 | case WebAssembly::BI__builtin_wasm_min_f64: |
13926 | case WebAssembly::BI__builtin_wasm_min_f32x4: |
13927 | case WebAssembly::BI__builtin_wasm_min_f64x2: { |
13928 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
13929 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
13930 | Function *Callee = CGM.getIntrinsic(Intrinsic::minimum, |
13931 | ConvertType(E->getType())); |
13932 | return Builder.CreateCall(Callee, {LHS, RHS}); |
13933 | } |
13934 | case WebAssembly::BI__builtin_wasm_max_f32: |
13935 | case WebAssembly::BI__builtin_wasm_max_f64: |
13936 | case WebAssembly::BI__builtin_wasm_max_f32x4: |
13937 | case WebAssembly::BI__builtin_wasm_max_f64x2: { |
13938 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
13939 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
13940 | Function *Callee = CGM.getIntrinsic(Intrinsic::maximum, |
13941 | ConvertType(E->getType())); |
13942 | return Builder.CreateCall(Callee, {LHS, RHS}); |
13943 | } |
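// Note that the min/max builtins above lower to llvm.minimum/llvm.maximum,
// the NaN-propagating variants, rather than llvm.minnum/llvm.maxnum.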
13944 | case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16: |
13945 | case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16: |
13946 | case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8: |
13947 | case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8: |
13948 | case WebAssembly::BI__builtin_wasm_extract_lane_i32x4: |
13949 | case WebAssembly::BI__builtin_wasm_extract_lane_i64x2: |
13950 | case WebAssembly::BI__builtin_wasm_extract_lane_f32x4: |
13951 | case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: { |
13952 | llvm::APSInt LaneConst; |
13953 | if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext())) |
13954 | llvm_unreachable("Constant arg isn't actually constant?");
13955 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
13956 | Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst); |
13957 | Value *Extract = Builder.CreateExtractElement(Vec, Lane); |
13958 | switch (BuiltinID) { |
13959 | case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16: |
13960 | case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8: |
13961 | return Builder.CreateSExt(Extract, ConvertType(E->getType())); |
13962 | case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16: |
13963 | case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8: |
13964 | return Builder.CreateZExt(Extract, ConvertType(E->getType())); |
13965 | case WebAssembly::BI__builtin_wasm_extract_lane_i32x4: |
13966 | case WebAssembly::BI__builtin_wasm_extract_lane_i64x2: |
13967 | case WebAssembly::BI__builtin_wasm_extract_lane_f32x4: |
13968 | case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: |
13969 | return Extract; |
13970 | default: |
13971 | llvm_unreachable("unexpected builtin ID")::llvm::llvm_unreachable_internal("unexpected builtin ID", "/build/llvm-toolchain-snapshot-9~svn362543/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 13971); |
13972 | } |
13973 | } |
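// Illustrative use (sketch): the lane index must be an integer constant, e.g.
//   int b = __builtin_wasm_extract_lane_s_i8x16(vec, 0);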
13974 | case WebAssembly::BI__builtin_wasm_replace_lane_i8x16: |
13975 | case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: |
13976 | case WebAssembly::BI__builtin_wasm_replace_lane_i32x4: |
13977 | case WebAssembly::BI__builtin_wasm_replace_lane_i64x2: |
13978 | case WebAssembly::BI__builtin_wasm_replace_lane_f32x4: |
13979 | case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: { |
13980 | llvm::APSInt LaneConst; |
13981 | if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext())) |
13982 | llvm_unreachable("Constant arg isn't actually constant?");
13983 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
13984 | Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst); |
13985 | Value *Val = EmitScalarExpr(E->getArg(2)); |
13986 | switch (BuiltinID) { |
13987 | case WebAssembly::BI__builtin_wasm_replace_lane_i8x16: |
13988 | case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: { |
13989 | llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType(); |
13990 | Value *Trunc = Builder.CreateTrunc(Val, ElemType); |
13991 | return Builder.CreateInsertElement(Vec, Trunc, Lane); |
13992 | } |
13993 | case WebAssembly::BI__builtin_wasm_replace_lane_i32x4: |
13994 | case WebAssembly::BI__builtin_wasm_replace_lane_i64x2: |
13995 | case WebAssembly::BI__builtin_wasm_replace_lane_f32x4: |
13996 | case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: |
13997 | return Builder.CreateInsertElement(Vec, Val, Lane); |
13998 | default: |
13999 | llvm_unreachable("unexpected builtin ID");
14000 | } |
14001 | } |
14002 | case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16: |
14003 | case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16: |
14004 | case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8: |
14005 | case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8: |
14006 | case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16: |
14007 | case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16: |
14008 | case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8: |
14009 | case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: { |
14010 | unsigned IntNo; |
14011 | switch (BuiltinID) { |
14012 | case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16: |
14013 | case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8: |
14014 | IntNo = Intrinsic::sadd_sat; |
14015 | break; |
14016 | case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16: |
14017 | case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8: |
14018 | IntNo = Intrinsic::uadd_sat; |
14019 | break; |
14020 | case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16: |
14021 | case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8: |
14022 | IntNo = Intrinsic::wasm_sub_saturate_signed; |
14023 | break; |
14024 | case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16: |
14025 | case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: |
14026 | IntNo = Intrinsic::wasm_sub_saturate_unsigned; |
14027 | break; |
14028 | default: |
14029 | llvm_unreachable("unexpected builtin ID");
14030 | } |
14031 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
14032 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
14033 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
14034 | return Builder.CreateCall(Callee, {LHS, RHS}); |
14035 | } |
14036 | case WebAssembly::BI__builtin_wasm_bitselect: { |
14037 | Value *V1 = EmitScalarExpr(E->getArg(0)); |
14038 | Value *V2 = EmitScalarExpr(E->getArg(1)); |
14039 | Value *C = EmitScalarExpr(E->getArg(2)); |
14040 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect, |
14041 | ConvertType(E->getType())); |
14042 | return Builder.CreateCall(Callee, {V1, V2, C}); |
14043 | } |
14044 | case WebAssembly::BI__builtin_wasm_any_true_i8x16: |
14045 | case WebAssembly::BI__builtin_wasm_any_true_i16x8: |
14046 | case WebAssembly::BI__builtin_wasm_any_true_i32x4: |
14047 | case WebAssembly::BI__builtin_wasm_any_true_i64x2: |
14048 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
14049 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
14050 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
14051 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: { |
14052 | unsigned IntNo; |
14053 | switch (BuiltinID) { |
14054 | case WebAssembly::BI__builtin_wasm_any_true_i8x16: |
14055 | case WebAssembly::BI__builtin_wasm_any_true_i16x8: |
14056 | case WebAssembly::BI__builtin_wasm_any_true_i32x4: |
14057 | case WebAssembly::BI__builtin_wasm_any_true_i64x2: |
14058 | IntNo = Intrinsic::wasm_anytrue; |
14059 | break; |
14060 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
14061 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
14062 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
14063 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: |
14064 | IntNo = Intrinsic::wasm_alltrue; |
14065 | break; |
14066 | default: |
14067 | llvm_unreachable("unexpected builtin ID");
14068 | } |
14069 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
14070 | Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType()); |
14071 | return Builder.CreateCall(Callee, {Vec}); |
14072 | } |
14073 | case WebAssembly::BI__builtin_wasm_abs_f32x4: |
14074 | case WebAssembly::BI__builtin_wasm_abs_f64x2: { |
14075 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
14076 | Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType()); |
14077 | return Builder.CreateCall(Callee, {Vec}); |
14078 | } |
14079 | case WebAssembly::BI__builtin_wasm_sqrt_f32x4: |
14080 | case WebAssembly::BI__builtin_wasm_sqrt_f64x2: { |
14081 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
14082 | Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType()); |
14083 | return Builder.CreateCall(Callee, {Vec}); |
14084 | } |
14085 | |
14086 | default: |
14087 | return nullptr; |
14088 | } |
14089 | } |
14090 | |
14091 | Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, |
14092 | const CallExpr *E) { |
14093 | SmallVector<llvm::Value *, 4> Ops; |
14094 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
14095 | |
14096 | auto MakeCircLd = [&](unsigned IntID, bool HasImm) { |
14097 | // The base pointer is passed by address, so it needs to be loaded. |
14098 | Address BP = EmitPointerWithAlignment(E->getArg(0)); |
14099 | BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy), |
14100 | BP.getAlignment()); |
14101 | llvm::Value *Base = Builder.CreateLoad(BP); |
14102 | // Operands are Base, Increment, Modifier, Start. |
14103 | if (HasImm) |
14104 | Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), |
14105 | EmitScalarExpr(E->getArg(3)) }; |
14106 | else |
14107 | Ops = { Base, EmitScalarExpr(E->getArg(1)), |
14108 | EmitScalarExpr(E->getArg(2)) }; |
14109 | |
14110 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); |
14111 | llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1); |
14112 | llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), |
14113 | NewBase->getType()->getPointerTo()); |
14114 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
14115 | // The intrinsic generates two results. The new value for the base pointer |
14116 | // needs to be stored. |
14117 | Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); |
14118 | return Builder.CreateExtractValue(Result, 0); |
14119 | }; |
14120 | |
14121 | auto MakeCircSt = [&](unsigned IntID, bool HasImm) { |
14122 | // The base pointer is passed by address, so it needs to be loaded. |
14123 | Address BP = EmitPointerWithAlignment(E->getArg(0)); |
14124 | BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy), |
14125 | BP.getAlignment()); |
14126 | llvm::Value *Base = Builder.CreateLoad(BP); |
14127 | // Operands are Base, Increment, Modifier, Value, Start. |
14128 | if (HasImm) |
14129 | Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), |
14130 | EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) }; |
14131 | else |
14132 | Ops = { Base, EmitScalarExpr(E->getArg(1)), |
14133 | EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) }; |
14134 | |
14135 | llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); |
14136 | llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), |
14137 | NewBase->getType()->getPointerTo()); |
14138 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
14139 | // The intrinsic generates one result, which is the new value for the base |
14140 | // pointer. It needs to be stored. |
14141 | return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); |
14142 | }; |
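// Illustrative use of the circular-addressing builtins handled below (a
// sketch; 'base', 'Mu' and 'start' are placeholder variables): the base
// pointer is passed by address and updated in place, e.g.
//   int v = __builtin_HEXAGON_L2_loadri_pci(&base, /*Inc=*/4, Mu, start);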
14143 | |
14144 | // Handle the conversion of bit-reverse load intrinsics to bitcode.
14145 | // The intrinsic call emitted here only reads from memory; the write to
14146 | // memory is handled by the store instruction generated below.
14147 | auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) { |
14148 | // The intrinsic returns two values: the loaded element and the new value of
14149 | // the base pointer. The new base pointer needs to be returned, while the
14150 | // loaded element is written back through the destination pointer argument.
14151 | llvm::Value *BaseAddress = |
14152 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
14153 | |
14154 | // Expressions like &(*pt++) will be incremented per evaluation. |
14155 | // EmitPointerWithAlignment and EmitScalarExpr evaluates the expression |
14156 | // per call. |
14157 | Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); |
14158 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy), |
14159 | DestAddr.getAlignment()); |
14160 | llvm::Value *DestAddress = DestAddr.getPointer(); |
14161 | |
14162 | // Operands are Base, Dest, Modifier. |
14163 | // The intrinsic format in LLVM IR is defined as |
14164 | // { ValueType, i8* } (i8*, i32). |
14165 | Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))}; |
14166 | |
14167 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); |
14168 | // The value needs to be stored as the variable is passed by reference. |
14169 | llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0); |
14170 | |
14171 | // The stored value needs to be truncated to fit the destination type.
14172 | // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
14173 | // to be handled with stores of the respective destination type.
14174 | DestVal = Builder.CreateTrunc(DestVal, DestTy); |
14175 | |
14176 | llvm::Value *DestForStore = |
14177 | Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo()); |
14178 | Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment()); |
14179 | // The updated value of the base pointer is returned. |
14180 | return Builder.CreateExtractValue(Result, 1); |
14181 | }; |
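// Illustrative use (sketch; 'ptr', 'val' and 'mod' are placeholder variables):
//   ptr = __builtin_brev_ldw(ptr, &val, mod);
// The loaded element is written through the second argument and the updated
// base pointer is returned.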
14182 | |
14183 | switch (BuiltinID) { |
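// vaddcarry/vsubcarry return a {vector, carry-predicate} pair; the carry
// predicate is read from and written back through the pointer in arg 2.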
14184 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry: |
14185 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: { |
14186 | Address Dest = EmitPointerWithAlignment(E->getArg(2)); |
14187 | unsigned Size; |
14188 | if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) { |
14189 | Size = 512; |
14190 | ID = Intrinsic::hexagon_V6_vaddcarry; |
14191 | } else { |
14192 | Size = 1024; |
14193 | ID = Intrinsic::hexagon_V6_vaddcarry_128B; |
14194 | } |
14195 | Dest = Builder.CreateBitCast(Dest, |
14196 | llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0)); |
14197 | LoadInst *QLd = Builder.CreateLoad(Dest); |
14198 | Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd }; |
14199 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14200 | llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1); |
14201 | llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)), |
14202 | Vprd->getType()->getPointerTo(0)); |
14203 | Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment()); |
14204 | return Builder.CreateExtractValue(Result, 0); |
14205 | } |
14206 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry: |
14207 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: { |
14208 | Address Dest = EmitPointerWithAlignment(E->getArg(2)); |
14209 | unsigned Size; |
14210 | if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) { |
14211 | Size = 512; |
14212 | ID = Intrinsic::hexagon_V6_vsubcarry; |
14213 | } else { |
14214 | Size = 1024; |
14215 | ID = Intrinsic::hexagon_V6_vsubcarry_128B; |
14216 | } |
14217 | Dest = Builder.CreateBitCast(Dest, |
14218 | llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0)); |
14219 | LoadInst *QLd = Builder.CreateLoad(Dest); |
14220 | Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd }; |
14221 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14222 | llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1); |
14223 | llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)), |
14224 | Vprd->getType()->getPointerTo(0)); |
14225 | Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment()); |
14226 | return Builder.CreateExtractValue(Result, 0); |
14227 | } |
14228 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci: |
14229 | return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true); |
14230 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci: |
14231 | return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true); |
14232 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci: |
14233 | return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true); |
14234 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci: |
14235 | return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true); |
14236 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci: |
14237 | return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true); |
14238 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci: |
14239 | return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true); |
14240 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr: |
14241 | return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false); |
14242 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr: |
14243 | return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false); |
14244 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr: |
14245 | return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false); |
14246 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr: |
14247 | return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false); |
14248 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr: |
14249 | return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false); |
14250 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr: |
14251 | return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false); |
14252 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci: |
14253 | return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true); |
14254 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci: |
14255 | return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true); |
14256 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci: |
14257 | return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true); |
14258 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci: |
14259 | return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true); |
14260 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci: |
14261 | return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true); |
14262 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr: |
14263 | return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false); |
14264 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr: |
14265 | return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false); |
14266 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr: |
14267 | return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false); |
14268 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr: |
14269 | return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false); |
14270 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr: |
14271 | return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false); |
14272 | case Hexagon::BI__builtin_brev_ldub: |
14273 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty); |
14274 | case Hexagon::BI__builtin_brev_ldb: |
14275 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty); |
14276 | case Hexagon::BI__builtin_brev_lduh: |
14277 | return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty); |
14278 | case Hexagon::BI__builtin_brev_ldh: |
14279 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty); |
14280 | case Hexagon::BI__builtin_brev_ldw: |
14281 | return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty); |
14282 | case Hexagon::BI__builtin_brev_ldd: |
14283 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty); |
14284 | default: |
14285 | break; |
14286 | } // switch |
14287 | |
14288 | return nullptr; |
14289 | } |