File: tools/clang/lib/CodeGen/CGBuiltin.cpp
Warning: line 3357, column 7: Undefined or garbage value returned to caller
1 | //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// | |||
2 | // | |||
3 | // The LLVM Compiler Infrastructure | |||
4 | // | |||
5 | // This file is distributed under the University of Illinois Open Source | |||
6 | // License. See LICENSE.TXT for details. | |||
7 | // | |||
8 | //===----------------------------------------------------------------------===// | |||
9 | // | |||
10 | // This contains code to emit Builtin calls as LLVM code. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "CGCXXABI.h" | |||
15 | #include "CGObjCRuntime.h" | |||
16 | #include "CGOpenCLRuntime.h" | |||
17 | #include "CGRecordLayout.h" | |||
18 | #include "CodeGenFunction.h" | |||
19 | #include "CodeGenModule.h" | |||
20 | #include "ConstantEmitter.h" | |||
21 | #include "TargetInfo.h" | |||
22 | #include "clang/AST/ASTContext.h" | |||
23 | #include "clang/AST/Decl.h" | |||
24 | #include "clang/Analysis/Analyses/OSLog.h" | |||
25 | #include "clang/Basic/TargetBuiltins.h" | |||
26 | #include "clang/Basic/TargetInfo.h" | |||
27 | #include "clang/CodeGen/CGFunctionInfo.h" | |||
28 | #include "llvm/ADT/StringExtras.h" | |||
29 | #include "llvm/IR/CallSite.h" | |||
30 | #include "llvm/IR/DataLayout.h" | |||
31 | #include "llvm/IR/InlineAsm.h" | |||
32 | #include "llvm/IR/Intrinsics.h" | |||
33 | #include "llvm/IR/MDBuilder.h" | |||
34 | #include "llvm/Support/ConvertUTF.h" | |||
35 | #include "llvm/Support/ScopedPrinter.h" | |||
36 | #include "llvm/Support/TargetParser.h" | |||
37 | #include <sstream> | |||
38 | ||||
39 | using namespace clang; | |||
40 | using namespace CodeGen; | |||
41 | using namespace llvm; | |||
42 | ||||
43 | static | |||
44 | int64_t clamp(int64_t Value, int64_t Low, int64_t High) { | |||
45 | return std::min(High, std::max(Low, Value)); | |||
46 | } | |||
47 | ||||
48 | /// getBuiltinLibFunction - Given a builtin id for a function like | |||
49 | /// "__builtin_fabsf", return a Function* for "fabsf". | |||
50 | llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, | |||
51 | unsigned BuiltinID) { | |||
52 | assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); | |||
53 | ||||
54 | // Get the name, skip over the __builtin_ prefix (if necessary). | |||
55 | StringRef Name; | |||
56 | GlobalDecl D(FD); | |||
57 | ||||
58 | // If the builtin has been declared explicitly with an assembler label, | |||
59 | // use the mangled name. This differs from the plain label on platforms | |||
60 | // that prefix labels. | |||
61 | if (FD->hasAttr<AsmLabelAttr>()) | |||
62 | Name = getMangledName(D); | |||
63 | else | |||
64 | Name = Context.BuiltinInfo.getName(BuiltinID) + 10; | |||
65 | ||||
66 | llvm::FunctionType *Ty = | |||
67 | cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); | |||
68 | ||||
69 | return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); | |||
70 | } | |||
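// Illustrative note (editorial, not in the original source): the '+ 10' above
// advances past the "__builtin_" prefix (10 characters), so e.g.
// "__builtin_fabsf" yields the library function name "fabsf".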
71 | ||||
72 | /// Emit the conversions required to turn the given value into an | |||
73 | /// integer of the given size. | |||
74 | static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, | |||
75 | QualType T, llvm::IntegerType *IntType) { | |||
76 | V = CGF.EmitToMemory(V, T); | |||
77 | ||||
78 | if (V->getType()->isPointerTy()) | |||
79 | return CGF.Builder.CreatePtrToInt(V, IntType); | |||
80 | ||||
81 | assert(V->getType() == IntType); | |||
82 | return V; | |||
83 | } | |||
84 | ||||
85 | static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, | |||
86 | QualType T, llvm::Type *ResultType) { | |||
87 | V = CGF.EmitFromMemory(V, T); | |||
88 | ||||
89 | if (ResultType->isPointerTy()) | |||
90 | return CGF.Builder.CreateIntToPtr(V, ResultType); | |||
91 | ||||
92 | assert(V->getType() == ResultType); | |||
93 | return V; | |||
94 | } | |||
95 | ||||
96 | /// Utility to insert an atomic instruction based on Intrinsic::ID | |||
97 | /// and the expression node. | |||
98 | static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF, | |||
99 | llvm::AtomicRMWInst::BinOp Kind, | |||
100 | const CallExpr *E) { | |||
101 | QualType T = E->getType(); | |||
102 | assert(E->getArg(0)->getType()->isPointerType()); | |||
103 | assert(CGF.getContext().hasSameUnqualifiedType(T, | |||
104 | E->getArg(0)->getType()->getPointeeType())); | |||
105 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); | |||
106 | ||||
107 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); | |||
108 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); | |||
109 | ||||
110 | llvm::IntegerType *IntType = | |||
111 | llvm::IntegerType::get(CGF.getLLVMContext(), | |||
112 | CGF.getContext().getTypeSize(T)); | |||
113 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); | |||
114 | ||||
115 | llvm::Value *Args[2]; | |||
116 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); | |||
117 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); | |||
118 | llvm::Type *ValueType = Args[1]->getType(); | |||
119 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); | |||
120 | ||||
121 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( | |||
122 | Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); | |||
123 | return EmitFromInt(CGF, Result, T, ValueType); | |||
124 | } | |||
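// Illustrative note (editorial, not in the original source): for a builtin
// call such as __sync_fetch_and_add(&x, 1) on a plain 'int', this helper is
// expected to emit IR roughly like
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
// and then return %old converted back to the source value type.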
125 | ||||
126 | static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { | |||
127 | Value *Val = CGF.EmitScalarExpr(E->getArg(0)); | |||
128 | Value *Address = CGF.EmitScalarExpr(E->getArg(1)); | |||
129 | ||||
130 | // Convert the type of the pointer to a pointer to the stored type. | |||
131 | Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); | |||
132 | Value *BC = CGF.Builder.CreateBitCast( | |||
133 | Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); | |||
134 | LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); | |||
135 | LV.setNontemporal(true); | |||
136 | CGF.EmitStoreOfScalar(Val, LV, false); | |||
137 | return nullptr; | |||
138 | } | |||
139 | ||||
140 | static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { | |||
141 | Value *Address = CGF.EmitScalarExpr(E->getArg(0)); | |||
142 | ||||
143 | LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); | |||
144 | LV.setNontemporal(true); | |||
145 | return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); | |||
146 | } | |||
147 | ||||
148 | static RValue EmitBinaryAtomic(CodeGenFunction &CGF, | |||
149 | llvm::AtomicRMWInst::BinOp Kind, | |||
150 | const CallExpr *E) { | |||
151 | return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); | |||
152 | } | |||
153 | ||||
154 | /// Utility to insert an atomic instruction based on Intrinsic::ID and | |||
155 | /// the expression node, where the return value is the result of the | |||
156 | /// operation. | |||
157 | static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, | |||
158 | llvm::AtomicRMWInst::BinOp Kind, | |||
159 | const CallExpr *E, | |||
160 | Instruction::BinaryOps Op, | |||
161 | bool Invert = false) { | |||
162 | QualType T = E->getType(); | |||
163 | assert(E->getArg(0)->getType()->isPointerType()); | |||
164 | assert(CGF.getContext().hasSameUnqualifiedType(T, | |||
165 | E->getArg(0)->getType()->getPointeeType())); | |||
166 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); | |||
167 | ||||
168 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); | |||
169 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); | |||
170 | ||||
171 | llvm::IntegerType *IntType = | |||
172 | llvm::IntegerType::get(CGF.getLLVMContext(), | |||
173 | CGF.getContext().getTypeSize(T)); | |||
174 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); | |||
175 | ||||
176 | llvm::Value *Args[2]; | |||
177 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); | |||
178 | llvm::Type *ValueType = Args[1]->getType(); | |||
179 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); | |||
180 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); | |||
181 | ||||
182 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( | |||
183 | Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); | |||
184 | Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); | |||
185 | if (Invert) | |||
186 | Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, | |||
187 | llvm::ConstantInt::get(IntType, -1)); | |||
188 | Result = EmitFromInt(CGF, Result, T, ValueType); | |||
189 | return RValue::get(Result); | |||
190 | } | |||
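// Illustrative note (editorial): this "post" variant returns the new value,
// so e.g. __sync_add_and_fetch(&x, v) should lower to an 'atomicrmw add'
// followed by an ordinary add of the old value and v; callers such as
// __sync_nand_and_fetch additionally pass Invert=true to flip the bits of
// the combined result.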
191 | ||||
192 | /// Utility to insert an atomic cmpxchg instruction. | |||
193 | /// | |||
194 | /// @param CGF The current codegen function. | |||
195 | /// @param E Builtin call expression to convert to cmpxchg. | |||
196 | /// arg0 - address to operate on | |||
197 | /// arg1 - value to compare with | |||
198 | /// arg2 - new value | |||
199 | /// @param ReturnBool Specifies whether to return success flag of | |||
200 | /// cmpxchg result or the old value. | |||
201 | /// | |||
202 | /// @returns result of cmpxchg, according to ReturnBool | |||
203 | static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, | |||
204 | bool ReturnBool) { | |||
205 | QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); | |||
206 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); | |||
207 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); | |||
208 | ||||
209 | llvm::IntegerType *IntType = llvm::IntegerType::get( | |||
210 | CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); | |||
211 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); | |||
212 | ||||
213 | Value *Args[3]; | |||
214 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); | |||
215 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); | |||
216 | llvm::Type *ValueType = Args[1]->getType(); | |||
217 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); | |||
218 | Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); | |||
219 | ||||
220 | Value *Pair = CGF.Builder.CreateAtomicCmpXchg( | |||
221 | Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, | |||
222 | llvm::AtomicOrdering::SequentiallyConsistent); | |||
223 | if (ReturnBool) | |||
224 | // Extract boolean success flag and zext it to int. | |||
225 | return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), | |||
226 | CGF.ConvertType(E->getType())); | |||
227 | else | |||
228 | // Extract old value and emit it using the same type as compare value. | |||
229 | return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, | |||
230 | ValueType); | |||
231 | } | |||
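// Illustrative note (editorial): for __sync_val_compare_and_swap(&x, old, new)
// on an 'int', this is expected to emit roughly
//   %pair = cmpxchg i32* %x, i32 %old, i32 %new seq_cst seq_cst
// and return element 0 of %pair (the previous value), while the
// __sync_bool_* form (ReturnBool) instead zero-extends the success flag in
// element 1.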
232 | ||||
233 | // Emit a simple mangled intrinsic that has 1 argument and a return type | |||
234 | // matching the argument type. | |||
235 | static Value *emitUnaryBuiltin(CodeGenFunction &CGF, | |||
236 | const CallExpr *E, | |||
237 | unsigned IntrinsicID) { | |||
238 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); | |||
239 | ||||
240 | Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); | |||
241 | return CGF.Builder.CreateCall(F, Src0); | |||
242 | } | |||
243 | ||||
244 | // Emit an intrinsic that has 2 operands of the same type as its result. | |||
245 | static Value *emitBinaryBuiltin(CodeGenFunction &CGF, | |||
246 | const CallExpr *E, | |||
247 | unsigned IntrinsicID) { | |||
248 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); | |||
249 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); | |||
250 | ||||
251 | Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); | |||
252 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); | |||
253 | } | |||
254 | ||||
255 | // Emit an intrinsic that has 3 operands of the same type as its result. | |||
256 | static Value *emitTernaryBuiltin(CodeGenFunction &CGF, | |||
257 | const CallExpr *E, | |||
258 | unsigned IntrinsicID) { | |||
259 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); | |||
260 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); | |||
261 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); | |||
262 | ||||
263 | Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); | |||
264 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); | |||
265 | } | |||
266 | ||||
267 | // Emit an intrinsic that has 1 float or double operand, and 1 integer. | |||
268 | static Value *emitFPIntBuiltin(CodeGenFunction &CGF, | |||
269 | const CallExpr *E, | |||
270 | unsigned IntrinsicID) { | |||
271 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); | |||
272 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); | |||
273 | ||||
274 | Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); | |||
275 | return CGF.Builder.CreateCall(F, {Src0, Src1}); | |||
276 | } | |||
277 | ||||
278 | /// EmitFAbs - Emit a call to @llvm.fabs(). | |||
279 | static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { | |||
280 | Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); | |||
281 | llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); | |||
282 | Call->setDoesNotAccessMemory(); | |||
283 | return Call; | |||
284 | } | |||
285 | ||||
286 | /// Emit the computation of the sign bit for a floating point value. Returns | |||
287 | /// the i1 sign bit value. | |||
288 | static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { | |||
289 | LLVMContext &C = CGF.CGM.getLLVMContext(); | |||
290 | ||||
291 | llvm::Type *Ty = V->getType(); | |||
292 | int Width = Ty->getPrimitiveSizeInBits(); | |||
293 | llvm::Type *IntTy = llvm::IntegerType::get(C, Width); | |||
294 | V = CGF.Builder.CreateBitCast(V, IntTy); | |||
295 | if (Ty->isPPC_FP128Ty()) { | |||
296 | // We want the sign bit of the higher-order double. The bitcast we just | |||
297 | // did works as if the double-double was stored to memory and then | |||
298 | // read as an i128. The "store" will put the higher-order double in the | |||
299 | // lower address in both little- and big-Endian modes, but the "load" | |||
300 | // will treat those bits as a different part of the i128: the low bits in | |||
301 | // little-Endian, the high bits in big-Endian. Therefore, on big-Endian | |||
302 | // we need to shift the high bits down to the low before truncating. | |||
303 | Width >>= 1; | |||
304 | if (CGF.getTarget().isBigEndian()) { | |||
305 | Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); | |||
306 | V = CGF.Builder.CreateLShr(V, ShiftCst); | |||
307 | } | |||
308 | // We are truncating value in order to extract the higher-order | |||
309 | // double, which we will be using to extract the sign from. | |||
310 | IntTy = llvm::IntegerType::get(C, Width); | |||
311 | V = CGF.Builder.CreateTrunc(V, IntTy); | |||
312 | } | |||
313 | Value *Zero = llvm::Constant::getNullValue(IntTy); | |||
314 | return CGF.Builder.CreateICmpSLT(V, Zero); | |||
315 | } | |||
316 | ||||
317 | static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, | |||
318 | const CallExpr *E, llvm::Constant *calleeValue) { | |||
319 | CGCallee callee = CGCallee::forDirect(calleeValue, FD); | |||
320 | return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); | |||
321 | } | |||
322 | ||||
323 | /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.* | |||
324 | /// depending on IntrinsicID. | |||
325 | /// | |||
326 | /// \arg CGF The current codegen function. | |||
327 | /// \arg IntrinsicID The ID for the Intrinsic we wish to generate. | |||
328 | /// \arg X The first argument to the llvm.*.with.overflow.*. | |||
329 | /// \arg Y The second argument to the llvm.*.with.overflow.*. | |||
330 | /// \arg Carry The carry returned by the llvm.*.with.overflow.*. | |||
331 | /// \returns The result (i.e. sum/product) returned by the intrinsic. | |||
332 | static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, | |||
333 | const llvm::Intrinsic::ID IntrinsicID, | |||
334 | llvm::Value *X, llvm::Value *Y, | |||
335 | llvm::Value *&Carry) { | |||
336 | // Make sure we have integers of the same width. | |||
337 | assert(X->getType() == Y->getType() && | |||
338 | "Arguments must be the same type. (Did you forget to make sure both " | |||
339 | "arguments have the same integer width?)"); | |||
340 | ||||
341 | llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); | |||
342 | llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); | |||
343 | Carry = CGF.Builder.CreateExtractValue(Tmp, 1); | |||
344 | return CGF.Builder.CreateExtractValue(Tmp, 0); | |||
345 | } | |||
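// Illustrative note (editorial): with IntrinsicID = Intrinsic::sadd_with_overflow
// and two i32 operands, the emitted IR is roughly
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
// where element 0 is the sum (the return value) and element 1 is the carry.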
346 | ||||
347 | static Value *emitRangedBuiltin(CodeGenFunction &CGF, | |||
348 | unsigned IntrinsicID, | |||
349 | int low, int high) { | |||
350 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); | |||
351 | llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); | |||
352 | Value *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); | |||
353 | llvm::Instruction *Call = CGF.Builder.CreateCall(F); | |||
354 | Call->setMetadata(llvm::LLVMContext::MD_range, RNode); | |||
355 | return Call; | |||
356 | } | |||
357 | ||||
358 | namespace { | |||
359 | struct WidthAndSignedness { | |||
360 | unsigned Width; | |||
361 | bool Signed; | |||
362 | }; | |||
363 | } | |||
364 | ||||
365 | static WidthAndSignedness | |||
366 | getIntegerWidthAndSignedness(const clang::ASTContext &context, | |||
367 | const clang::QualType Type) { | |||
368 | assert(Type->isIntegerType() && "Given type is not an integer."); | |||
369 | unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width; | |||
370 | bool Signed = Type->isSignedIntegerType(); | |||
371 | return {Width, Signed}; | |||
372 | } | |||
373 | ||||
374 | // Given one or more integer types, this function produces an integer type that | |||
375 | // encompasses them: any value in one of the given types could be expressed in | |||
376 | // the encompassing type. | |||
377 | static struct WidthAndSignedness | |||
378 | EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { | |||
379 | assert(Types.size() > 0 && "Empty list of types."); | |||
380 | ||||
381 | // If any of the given types is signed, we must return a signed type. | |||
382 | bool Signed = false; | |||
383 | for (const auto &Type : Types) { | |||
384 | Signed |= Type.Signed; | |||
385 | } | |||
386 | ||||
387 | // The encompassing type must have a width greater than or equal to the width | |||
388 | // of the specified types. Additionally, if the encompassing type is signed, | |||
389 | // its width must be strictly greater than the width of any unsigned types | |||
390 | // given. | |||
391 | unsigned Width = 0; | |||
392 | for (const auto &Type : Types) { | |||
393 | unsigned MinWidth = Type.Width + (Signed && !Type.Signed); | |||
394 | if (Width < MinWidth) { | |||
395 | Width = MinWidth; | |||
396 | } | |||
397 | } | |||
398 | ||||
399 | return {Width, Signed}; | |||
400 | } | |||
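// Illustrative note (editorial): for {signed 32-bit, unsigned 32-bit} the
// result is signed with Width 33 (the unsigned member needs one extra bit
// under a signed encompassing type); for {unsigned 16-bit, unsigned 32-bit}
// the result is simply unsigned with Width 32.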
401 | ||||
402 | Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { | |||
403 | llvm::Type *DestType = Int8PtrTy; | |||
404 | if (ArgValue->getType() != DestType) | |||
405 | ArgValue = | |||
406 | Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); | |||
407 | ||||
408 | Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; | |||
409 | return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); | |||
410 | } | |||
411 | ||||
412 | /// Checks if using the result of __builtin_object_size(p, @p From) in place of | |||
413 | /// __builtin_object_size(p, @p To) is correct | |||
414 | static bool areBOSTypesCompatible(int From, int To) { | |||
415 | // Note: Our __builtin_object_size implementation currently treats Type=0 and | |||
416 | // Type=2 identically. Encoding this implementation detail here may make | |||
417 | // improving __builtin_object_size difficult in the future, so it's omitted. | |||
418 | return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); | |||
419 | } | |||
420 | ||||
421 | static llvm::Value * | |||
422 | getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { | |||
423 | return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); | |||
424 | } | |||
425 | ||||
426 | llvm::Value * | |||
427 | CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, | |||
428 | llvm::IntegerType *ResType, | |||
429 | llvm::Value *EmittedE) { | |||
430 | uint64_t ObjectSize; | |||
431 | if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) | |||
432 | return emitBuiltinObjectSize(E, Type, ResType, EmittedE); | |||
433 | return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true); | |||
434 | } | |||
435 | ||||
436 | /// Returns a Value corresponding to the size of the given expression. | |||
437 | /// This Value may be either of the following: | |||
438 | /// - A llvm::Argument (if E is a param with the pass_object_size attribute on | |||
439 | /// it) | |||
440 | /// - A call to the @llvm.objectsize intrinsic | |||
441 | /// | |||
442 | /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null | |||
443 | /// and we wouldn't otherwise try to reference a pass_object_size parameter, | |||
444 | /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. | |||
445 | llvm::Value * | |||
446 | CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, | |||
447 | llvm::IntegerType *ResType, | |||
448 | llvm::Value *EmittedE) { | |||
449 | // We need to reference an argument if the pointer is a parameter with the | |||
450 | // pass_object_size attribute. | |||
451 | if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { | |||
452 | auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); | |||
453 | auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); | |||
454 | if (Param != nullptr && PS != nullptr && | |||
455 | areBOSTypesCompatible(PS->getType(), Type)) { | |||
456 | auto Iter = SizeArguments.find(Param); | |||
457 | assert(Iter != SizeArguments.end()); | |||
458 | ||||
459 | const ImplicitParamDecl *D = Iter->second; | |||
460 | auto DIter = LocalDeclMap.find(D); | |||
461 | assert(DIter != LocalDeclMap.end()); | |||
462 | ||||
463 | return EmitLoadOfScalar(DIter->second, /*volatile=*/false, | |||
464 | getContext().getSizeType(), E->getLocStart()); | |||
465 | } | |||
466 | } | |||
467 | ||||
468 | // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't | |||
469 | // evaluate E for side-effects. In either case, we shouldn't lower to | |||
470 | // @llvm.objectsize. | |||
471 | if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) | |||
472 | return getDefaultBuiltinObjectSizeResult(Type, ResType); | |||
473 | ||||
474 | Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); | |||
475 | assert(Ptr->getType()->isPointerTy() && | |||
476 | "Non-pointer passed to __builtin_object_size?"); | |||
477 | ||||
478 | Value *F = CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); | |||
479 | ||||
480 | // LLVM only supports 0 and 2, make sure that we pass along that as a boolean. | |||
481 | Value *Min = Builder.getInt1((Type & 2) != 0); | |||
482 | // For GCC compatibility, __builtin_object_size treats NULL as unknown size. | |||
483 | Value *NullIsUnknown = Builder.getTrue(); | |||
484 | return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown}); | |||
485 | } | |||
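// Illustrative note (editorial): absent a pass_object_size parameter,
// __builtin_object_size(p, 2) is expected to lower to roughly
//   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true)
// where the two i1 flags are the "minimum" mode bit (Type & 2) and the
// "NULL is unknown size" flag.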
486 | ||||
487 | namespace { | |||
488 | /// A struct to generically describe a bit test intrinsic. | |||
489 | struct BitTest { | |||
490 | enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; | |||
491 | enum InterlockingKind : uint8_t { | |||
492 | Unlocked, | |||
493 | Sequential, | |||
494 | Acquire, | |||
495 | Release, | |||
496 | NoFence | |||
497 | }; | |||
498 | ||||
499 | ActionKind Action; | |||
500 | InterlockingKind Interlocking; | |||
501 | bool Is64Bit; | |||
502 | ||||
503 | static BitTest decodeBitTestBuiltin(unsigned BuiltinID); | |||
504 | }; | |||
505 | } // namespace | |||
506 | ||||
507 | BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { | |||
508 | switch (BuiltinID) { | |||
509 | // Main portable variants. | |||
510 | case Builtin::BI_bittest: | |||
511 | return {TestOnly, Unlocked, false}; | |||
512 | case Builtin::BI_bittestandcomplement: | |||
513 | return {Complement, Unlocked, false}; | |||
514 | case Builtin::BI_bittestandreset: | |||
515 | return {Reset, Unlocked, false}; | |||
516 | case Builtin::BI_bittestandset: | |||
517 | return {Set, Unlocked, false}; | |||
518 | case Builtin::BI_interlockedbittestandreset: | |||
519 | return {Reset, Sequential, false}; | |||
520 | case Builtin::BI_interlockedbittestandset: | |||
521 | return {Set, Sequential, false}; | |||
522 | ||||
523 | // X86-specific 64-bit variants. | |||
524 | case Builtin::BI_bittest64: | |||
525 | return {TestOnly, Unlocked, true}; | |||
526 | case Builtin::BI_bittestandcomplement64: | |||
527 | return {Complement, Unlocked, true}; | |||
528 | case Builtin::BI_bittestandreset64: | |||
529 | return {Reset, Unlocked, true}; | |||
530 | case Builtin::BI_bittestandset64: | |||
531 | return {Set, Unlocked, true}; | |||
532 | case Builtin::BI_interlockedbittestandreset64: | |||
533 | return {Reset, Sequential, true}; | |||
534 | case Builtin::BI_interlockedbittestandset64: | |||
535 | return {Set, Sequential, true}; | |||
536 | ||||
537 | // ARM/AArch64-specific ordering variants. | |||
538 | case Builtin::BI_interlockedbittestandset_acq: | |||
539 | return {Set, Acquire, false}; | |||
540 | case Builtin::BI_interlockedbittestandset_rel: | |||
541 | return {Set, Release, false}; | |||
542 | case Builtin::BI_interlockedbittestandset_nf: | |||
543 | return {Set, NoFence, false}; | |||
544 | case Builtin::BI_interlockedbittestandreset_acq: | |||
545 | return {Reset, Acquire, false}; | |||
546 | case Builtin::BI_interlockedbittestandreset_rel: | |||
547 | return {Reset, Release, false}; | |||
548 | case Builtin::BI_interlockedbittestandreset_nf: | |||
549 | return {Reset, NoFence, false}; | |||
550 | } | |||
551 | llvm_unreachable("expected only bittest intrinsics")::llvm::llvm_unreachable_internal("expected only bittest intrinsics" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 551); | |||
552 | } | |||
553 | ||||
554 | static char bitActionToX86BTCode(BitTest::ActionKind A) { | |||
555 | switch (A) { | |||
556 | case BitTest::TestOnly: return '\0'; | |||
557 | case BitTest::Complement: return 'c'; | |||
558 | case BitTest::Reset: return 'r'; | |||
559 | case BitTest::Set: return 's'; | |||
560 | } | |||
561 | llvm_unreachable("invalid action")::llvm::llvm_unreachable_internal("invalid action", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 561); | |||
562 | } | |||
563 | ||||
564 | static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, | |||
565 | BitTest BT, | |||
566 | const CallExpr *E, Value *BitBase, | |||
567 | Value *BitPos) { | |||
568 | char Action = bitActionToX86BTCode(BT.Action); | |||
569 | char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; | |||
570 | ||||
571 | // Build the assembly. | |||
572 | SmallString<64> Asm; | |||
573 | raw_svector_ostream AsmOS(Asm); | |||
574 | if (BT.Interlocking != BitTest::Unlocked) | |||
575 | AsmOS << "lock "; | |||
576 | AsmOS << "bt"; | |||
577 | if (Action) | |||
578 | AsmOS << Action; | |||
579 | AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}"; | |||
580 | ||||
581 | // Build the constraints. FIXME: We should support immediates when possible. | |||
582 | std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}"; | |||
583 | llvm::IntegerType *IntType = llvm::IntegerType::get( | |||
584 | CGF.getLLVMContext(), | |||
585 | CGF.getContext().getTypeSize(E->getArg(1)->getType())); | |||
586 | llvm::Type *IntPtrType = IntType->getPointerTo(); | |||
587 | llvm::FunctionType *FTy = | |||
588 | llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false); | |||
589 | ||||
590 | llvm::InlineAsm *IA = | |||
591 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true); | |||
592 | return CGF.Builder.CreateCall(IA, {BitBase, BitPos}); | |||
593 | } | |||
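// Illustrative note (editorial): for _interlockedbittestandset on 32-bit
// operands this builds the asm string "lock btsl $2, ($1)\n\tsetc ${0:b}",
// i.e. a locked bit-test-and-set followed by setc to capture the old bit.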
594 | ||||
595 | static llvm::AtomicOrdering | |||
596 | getBitTestAtomicOrdering(BitTest::InterlockingKind I) { | |||
597 | switch (I) { | |||
598 | case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; | |||
599 | case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; | |||
600 | case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; | |||
601 | case BitTest::Release: return llvm::AtomicOrdering::Release; | |||
602 | case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; | |||
603 | } | |||
604 | llvm_unreachable("invalid interlocking")::llvm::llvm_unreachable_internal("invalid interlocking", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 604); | |||
605 | } | |||
606 | ||||
607 | /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of | |||
608 | /// bits and a bit position and read and optionally modify the bit at that | |||
609 | /// position. The position index can be arbitrarily large, i.e. it can be larger | |||
610 | /// than 31 or 63, so we need an indexed load in the general case. | |||
611 | static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, | |||
612 | unsigned BuiltinID, | |||
613 | const CallExpr *E) { | |||
614 | Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); | |||
615 | Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); | |||
616 | ||||
617 | BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); | |||
618 | ||||
619 | // X86 has special BT, BTC, BTR, and BTS instructions that handle the array | |||
620 | // indexing operation internally. Use them if possible. | |||
621 | llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch(); | |||
622 | if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) | |||
623 | return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); | |||
624 | ||||
625 | // Otherwise, use generic code to load one byte and test the bit. Use all but | |||
626 | // the bottom three bits as the array index, and the bottom three bits to form | |||
627 | // a mask. | |||
628 | // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0; | |||
629 | Value *ByteIndex = CGF.Builder.CreateAShr( | |||
630 | BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); | |||
631 | Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy); | |||
632 | Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8, | |||
633 | ByteIndex, "bittest.byteaddr"), | |||
634 | CharUnits::One()); | |||
635 | Value *PosLow = | |||
636 | CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty), | |||
637 | llvm::ConstantInt::get(CGF.Int8Ty, 0x7)); | |||
638 | ||||
639 | // The updating instructions will need a mask. | |||
640 | Value *Mask = nullptr; | |||
641 | if (BT.Action != BitTest::TestOnly) { | |||
642 | Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow, | |||
643 | "bittest.mask"); | |||
644 | } | |||
645 | ||||
646 | // Check the action and ordering of the interlocked intrinsics. | |||
647 | llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking); | |||
648 | ||||
649 | Value *OldByte = nullptr; | |||
650 | if (Ordering != llvm::AtomicOrdering::NotAtomic) { | |||
651 | // Emit a combined atomicrmw load/store operation for the interlocked | |||
652 | // intrinsics. | |||
653 | llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; | |||
654 | if (BT.Action == BitTest::Reset) { | |||
655 | Mask = CGF.Builder.CreateNot(Mask); | |||
656 | RMWOp = llvm::AtomicRMWInst::And; | |||
657 | } | |||
658 | OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask, | |||
659 | Ordering); | |||
660 | } else { | |||
661 | // Emit a plain load for the non-interlocked intrinsics. | |||
662 | OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte"); | |||
663 | Value *NewByte = nullptr; | |||
664 | switch (BT.Action) { | |||
665 | case BitTest::TestOnly: | |||
666 | // Don't store anything. | |||
667 | break; | |||
668 | case BitTest::Complement: | |||
669 | NewByte = CGF.Builder.CreateXor(OldByte, Mask); | |||
670 | break; | |||
671 | case BitTest::Reset: | |||
672 | NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask)); | |||
673 | break; | |||
674 | case BitTest::Set: | |||
675 | NewByte = CGF.Builder.CreateOr(OldByte, Mask); | |||
676 | break; | |||
677 | } | |||
678 | if (NewByte) | |||
679 | CGF.Builder.CreateStore(NewByte, ByteAddr); | |||
680 | } | |||
681 | ||||
682 | // However we loaded the old byte, either by plain load or atomicrmw, shift | |||
683 | // the bit into the low position and mask it to 0 or 1. | |||
684 | Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr"); | |||
685 | return CGF.Builder.CreateAnd( | |||
686 | ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"); | |||
687 | } | |||
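// Illustrative note (editorial): on a non-x86 target, _bittest(base, 35)
// follows the formula above: byte index 35 >> 3 = 4, bit position 35 & 7 = 3,
// so the generic path loads base[4], tests it against the mask 1 << 3, and
// returns that bit shifted down to bit 0.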
688 | ||||
689 | namespace { | |||
690 | enum class MSVCSetJmpKind { | |||
691 | _setjmpex, | |||
692 | _setjmp3, | |||
693 | _setjmp | |||
694 | }; | |||
695 | } | |||
696 | ||||
697 | /// MSVC handles setjmp a bit differently on different platforms. On every | |||
698 | /// architecture except 32-bit x86, the frame address is passed. On x86, extra | |||
699 | /// parameters can be passed as variadic arguments, but we always pass none. | |||
700 | static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, | |||
701 | const CallExpr *E) { | |||
702 | llvm::Value *Arg1 = nullptr; | |||
703 | llvm::Type *Arg1Ty = nullptr; | |||
704 | StringRef Name; | |||
705 | bool IsVarArg = false; | |||
706 | if (SJKind == MSVCSetJmpKind::_setjmp3) { | |||
707 | Name = "_setjmp3"; | |||
708 | Arg1Ty = CGF.Int32Ty; | |||
709 | Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0); | |||
710 | IsVarArg = true; | |||
711 | } else { | |||
712 | Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex"; | |||
713 | Arg1Ty = CGF.Int8PtrTy; | |||
714 | Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress), | |||
715 | llvm::ConstantInt::get(CGF.Int32Ty, 0)); | |||
716 | } | |||
717 | ||||
718 | // Mark the call site and declaration with ReturnsTwice. | |||
719 | llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; | |||
720 | llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( | |||
721 | CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, | |||
722 | llvm::Attribute::ReturnsTwice); | |||
723 | llvm::Constant *SetJmpFn = CGF.CGM.CreateRuntimeFunction( | |||
724 | llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name, | |||
725 | ReturnsTwiceAttr, /*Local=*/true); | |||
726 | ||||
727 | llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( | |||
728 | CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); | |||
729 | llvm::Value *Args[] = {Buf, Arg1}; | |||
730 | llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args); | |||
731 | CS.setAttributes(ReturnsTwiceAttr); | |||
732 | return RValue::get(CS.getInstruction()); | |||
733 | } | |||
734 | ||||
735 | // Many of the MSVC builtins are available on x64, ARM and AArch64; to avoid repeating code, | |||
736 | // we handle them here. | |||
737 | enum class CodeGenFunction::MSVCIntrin { | |||
738 | _BitScanForward, | |||
739 | _BitScanReverse, | |||
740 | _InterlockedAnd, | |||
741 | _InterlockedDecrement, | |||
742 | _InterlockedExchange, | |||
743 | _InterlockedExchangeAdd, | |||
744 | _InterlockedExchangeSub, | |||
745 | _InterlockedIncrement, | |||
746 | _InterlockedOr, | |||
747 | _InterlockedXor, | |||
748 | __fastfail, | |||
749 | }; | |||
750 | ||||
751 | Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, | |||
752 | const CallExpr *E) { | |||
753 | switch (BuiltinID) { | |||
754 | case MSVCIntrin::_BitScanForward: | |||
755 | case MSVCIntrin::_BitScanReverse: { | |||
756 | Value *ArgValue = EmitScalarExpr(E->getArg(1)); | |||
757 | ||||
758 | llvm::Type *ArgType = ArgValue->getType(); | |||
759 | llvm::Type *IndexType = | |||
760 | EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType(); | |||
761 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
762 | ||||
763 | Value *ArgZero = llvm::Constant::getNullValue(ArgType); | |||
764 | Value *ResZero = llvm::Constant::getNullValue(ResultType); | |||
765 | Value *ResOne = llvm::ConstantInt::get(ResultType, 1); | |||
766 | ||||
767 | BasicBlock *Begin = Builder.GetInsertBlock(); | |||
768 | BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); | |||
769 | Builder.SetInsertPoint(End); | |||
770 | PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); | |||
771 | ||||
772 | Builder.SetInsertPoint(Begin); | |||
773 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); | |||
774 | BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); | |||
775 | Builder.CreateCondBr(IsZero, End, NotZero); | |||
776 | Result->addIncoming(ResZero, Begin); | |||
777 | ||||
778 | Builder.SetInsertPoint(NotZero); | |||
779 | Address IndexAddress = EmitPointerWithAlignment(E->getArg(0)); | |||
780 | ||||
781 | if (BuiltinID == MSVCIntrin::_BitScanForward) { | |||
782 | Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); | |||
783 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); | |||
784 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); | |||
785 | Builder.CreateStore(ZeroCount, IndexAddress, false); | |||
786 | } else { | |||
787 | unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); | |||
788 | Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); | |||
789 | ||||
790 | Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); | |||
791 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); | |||
792 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); | |||
793 | Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); | |||
794 | Builder.CreateStore(Index, IndexAddress, false); | |||
795 | } | |||
796 | Builder.CreateBr(End); | |||
797 | Result->addIncoming(ResOne, NotZero); | |||
798 | ||||
799 | Builder.SetInsertPoint(End); | |||
800 | return Result; | |||
801 | } | |||
802 | case MSVCIntrin::_InterlockedAnd: | |||
803 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); | |||
804 | case MSVCIntrin::_InterlockedExchange: | |||
805 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); | |||
806 | case MSVCIntrin::_InterlockedExchangeAdd: | |||
807 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); | |||
808 | case MSVCIntrin::_InterlockedExchangeSub: | |||
809 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); | |||
810 | case MSVCIntrin::_InterlockedOr: | |||
811 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); | |||
812 | case MSVCIntrin::_InterlockedXor: | |||
813 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); | |||
814 | ||||
815 | case MSVCIntrin::_InterlockedDecrement: { | |||
816 | llvm::Type *IntTy = ConvertType(E->getType()); | |||
817 | AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( | |||
818 | AtomicRMWInst::Sub, | |||
819 | EmitScalarExpr(E->getArg(0)), | |||
820 | ConstantInt::get(IntTy, 1), | |||
821 | llvm::AtomicOrdering::SequentiallyConsistent); | |||
822 | return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)); | |||
823 | } | |||
824 | case MSVCIntrin::_InterlockedIncrement: { | |||
825 | llvm::Type *IntTy = ConvertType(E->getType()); | |||
826 | AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( | |||
827 | AtomicRMWInst::Add, | |||
828 | EmitScalarExpr(E->getArg(0)), | |||
829 | ConstantInt::get(IntTy, 1), | |||
830 | llvm::AtomicOrdering::SequentiallyConsistent); | |||
831 | return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)); | |||
832 | } | |||
833 | ||||
834 | case MSVCIntrin::__fastfail: { | |||
835 | // Request immediate process termination from the kernel. The instruction | |||
836 | // sequences to do this are documented on MSDN: | |||
837 | // https://msdn.microsoft.com/en-us/library/dn774154.aspx | |||
838 | llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); | |||
839 | StringRef Asm, Constraints; | |||
840 | switch (ISA) { | |||
841 | default: | |||
842 | ErrorUnsupported(E, "__fastfail call for this architecture"); | |||
843 | break; | |||
844 | case llvm::Triple::x86: | |||
845 | case llvm::Triple::x86_64: | |||
846 | Asm = "int $$0x29"; | |||
847 | Constraints = "{cx}"; | |||
848 | break; | |||
849 | case llvm::Triple::thumb: | |||
850 | Asm = "udf #251"; | |||
851 | Constraints = "{r0}"; | |||
852 | break; | |||
853 | } | |||
854 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); | |||
855 | llvm::InlineAsm *IA = | |||
856 | llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true); | |||
857 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( | |||
858 | getLLVMContext(), llvm::AttributeList::FunctionIndex, | |||
859 | llvm::Attribute::NoReturn); | |||
860 | CallSite CS = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); | |||
861 | CS.setAttributes(NoReturnAttr); | |||
862 | return CS.getInstruction(); | |||
863 | } | |||
864 | } | |||
865 | llvm_unreachable("Incorrect MSVC intrinsic!")::llvm::llvm_unreachable_internal("Incorrect MSVC intrinsic!" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 865); | |||
866 | } | |||
867 | ||||
868 | namespace { | |||
869 | // ARC cleanup for __builtin_os_log_format | |||
870 | struct CallObjCArcUse final : EHScopeStack::Cleanup { | |||
871 | CallObjCArcUse(llvm::Value *object) : object(object) {} | |||
872 | llvm::Value *object; | |||
873 | ||||
874 | void Emit(CodeGenFunction &CGF, Flags flags) override { | |||
875 | CGF.EmitARCIntrinsicUse(object); | |||
876 | } | |||
877 | }; | |||
878 | } | |||
879 | ||||
880 | Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, | |||
881 | BuiltinCheckKind Kind) { | |||
882 | assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) | |||
883 | && "Unsupported builtin check kind"); | |||
884 | ||||
885 | Value *ArgValue = EmitScalarExpr(E); | |||
886 | if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) | |||
887 | return ArgValue; | |||
888 | ||||
889 | SanitizerScope SanScope(this); | |||
890 | Value *Cond = Builder.CreateICmpNE( | |||
891 | ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); | |||
892 | EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), | |||
893 | SanitizerHandler::InvalidBuiltin, | |||
894 | {EmitCheckSourceLocation(E->getExprLoc()), | |||
895 | llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, | |||
896 | None); | |||
897 | return ArgValue; | |||
898 | } | |||
899 | ||||
900 | /// Get the argument type for arguments to os_log_helper. | |||
901 | static CanQualType getOSLogArgType(ASTContext &C, int Size) { | |||
902 | QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false); | |||
903 | return C.getCanonicalType(UnsignedTy); | |||
904 | } | |||
905 | ||||
906 | llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( | |||
907 | const analyze_os_log::OSLogBufferLayout &Layout, | |||
908 | CharUnits BufferAlignment) { | |||
909 | ASTContext &Ctx = getContext(); | |||
910 | ||||
911 | llvm::SmallString<64> Name; | |||
912 | { | |||
913 | raw_svector_ostream OS(Name); | |||
914 | OS << "__os_log_helper"; | |||
915 | OS << "_" << BufferAlignment.getQuantity(); | |||
916 | OS << "_" << int(Layout.getSummaryByte()); | |||
917 | OS << "_" << int(Layout.getNumArgsByte()); | |||
918 | for (const auto &Item : Layout.Items) | |||
919 | OS << "_" << int(Item.getSizeByte()) << "_" | |||
920 | << int(Item.getDescriptorByte()); | |||
921 | } | |||
922 | ||||
923 | if (llvm::Function *F = CGM.getModule().getFunction(Name)) | |||
924 | return F; | |||
925 | ||||
926 | llvm::SmallVector<ImplicitParamDecl, 4> Params; | |||
927 | Params.emplace_back(Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), | |||
928 | Ctx.VoidPtrTy, ImplicitParamDecl::Other); | |||
929 | ||||
930 | for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { | |||
931 | char Size = Layout.Items[I].getSizeByte(); | |||
932 | if (!Size) | |||
933 | continue; | |||
934 | ||||
935 | Params.emplace_back( | |||
936 | Ctx, nullptr, SourceLocation(), | |||
937 | &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), | |||
938 | getOSLogArgType(Ctx, Size), ImplicitParamDecl::Other); | |||
939 | } | |||
940 | ||||
941 | FunctionArgList Args; | |||
942 | for (auto &P : Params) | |||
943 | Args.push_back(&P); | |||
944 | ||||
945 | // The helper function has linkonce_odr linkage to enable the linker to merge | |||
946 | // identical functions. To ensure the merging always happens, 'noinline' is | |||
947 | // attached to the function when compiling with -Oz. | |||
948 | const CGFunctionInfo &FI = | |||
949 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args); | |||
950 | llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); | |||
951 | llvm::Function *Fn = llvm::Function::Create( | |||
952 | FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); | |||
953 | Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); | |||
954 | CGM.SetLLVMFunctionAttributes(nullptr, FI, Fn); | |||
955 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); | |||
956 | ||||
957 | // Attach 'noinline' at -Oz. | |||
958 | if (CGM.getCodeGenOpts().OptimizeSize == 2) | |||
959 | Fn->addFnAttr(llvm::Attribute::NoInline); | |||
960 | ||||
961 | auto NL = ApplyDebugLocation::CreateEmpty(*this); | |||
962 | IdentifierInfo *II = &Ctx.Idents.get(Name); | |||
963 | FunctionDecl *FD = FunctionDecl::Create( | |||
964 | Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, | |||
965 | Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false); | |||
966 | ||||
967 | StartFunction(FD, Ctx.VoidTy, Fn, FI, Args); | |||
968 | ||||
969 | // Create a scope with an artificial location for the body of this function. | |||
970 | auto AL = ApplyDebugLocation::CreateArtificial(*this); | |||
971 | ||||
972 | CharUnits Offset; | |||
973 | Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(&Params[0]), "buf"), | |||
974 | BufferAlignment); | |||
975 | Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), | |||
976 | Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); | |||
977 | Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), | |||
978 | Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); | |||
979 | ||||
980 | unsigned I = 1; | |||
981 | for (const auto &Item : Layout.Items) { | |||
982 | Builder.CreateStore( | |||
983 | Builder.getInt8(Item.getDescriptorByte()), | |||
984 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); | |||
985 | Builder.CreateStore( | |||
986 | Builder.getInt8(Item.getSizeByte()), | |||
987 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); | |||
988 | ||||
989 | CharUnits Size = Item.size(); | |||
990 | if (!Size.getQuantity()) | |||
991 | continue; | |||
992 | ||||
993 | Address Arg = GetAddrOfLocalVar(&Params[I]); | |||
994 | Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); | |||
995 | Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(), | |||
996 | "argDataCast"); | |||
997 | Builder.CreateStore(Builder.CreateLoad(Arg), Addr); | |||
998 | Offset += Size; | |||
999 | ++I; | |||
1000 | } | |||
1001 | ||||
1002 | FinishFunction(); | |||
1003 | ||||
1004 | return Fn; | |||
1005 | } | |||
1006 | ||||
1007 | RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { | |||
1008 | assert(E.getNumArgs() >= 2 && | |||
1009 | "__builtin_os_log_format takes at least 2 arguments"); | |||
1010 | ASTContext &Ctx = getContext(); | |||
1011 | analyze_os_log::OSLogBufferLayout Layout; | |||
1012 | analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); | |||
1013 | Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); | |||
1014 | llvm::SmallVector<llvm::Value *, 4> RetainableOperands; | |||
1015 | ||||
1016 | // Ignore argument 1, the format string. It is not currently used. | |||
1017 | CallArgList Args; | |||
1018 | Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); | |||
1019 | ||||
1020 | for (const auto &Item : Layout.Items) { | |||
1021 | int Size = Item.getSizeByte(); | |||
1022 | if (!Size) | |||
1023 | continue; | |||
1024 | ||||
1025 | llvm::Value *ArgVal; | |||
1026 | ||||
1027 | if (const Expr *TheExpr = Item.getExpr()) { | |||
1028 | ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false); | |||
1029 | ||||
1030 | // Check if this is a retainable type. | |||
1031 | if (TheExpr->getType()->isObjCRetainableType()) { | |||
1032 | assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar && | |||
1033 | "Only a scalar can be an ObjC retainable type"); | |||
1034 | // Check if the object is constant, if not, save it in | |||
1035 | // RetainableOperands. | |||
1036 | if (!isa<Constant>(ArgVal)) | |||
1037 | RetainableOperands.push_back(ArgVal); | |||
1038 | } | |||
1039 | } else { | |||
1040 | ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); | |||
1041 | } | |||
1042 | ||||
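| // Normalize the emitted value: reinterpret pointers/floats as an integer of | |||
| // the same bit width, then widen it to the declared os_log argument type. | |||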
1043 | unsigned ArgValSize = | |||
1044 | CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); | |||
1045 | llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), | |||
1046 | ArgValSize); | |||
1047 | ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); | |||
1048 | CanQualType ArgTy = getOSLogArgType(Ctx, Size); | |||
1049 | // If ArgVal has type x86_fp80, zero-extend ArgVal. | |||
1050 | ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); | |||
1051 | Args.add(RValue::get(ArgVal), ArgTy); | |||
1052 | } | |||
1053 | ||||
1054 | const CGFunctionInfo &FI = | |||
1055 | CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); | |||
1056 | llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( | |||
1057 | Layout, BufAddr.getAlignment()); | |||
1058 | EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); | |||
1059 | ||||
1060 | // Push a clang.arc.use cleanup for each object in RetainableOperands. The | |||
1061 | // cleanup will cause the use to appear after the final log call, keeping | |||
1062 | // the object valid while it’s held in the log buffer. Note that if there’s | |||
1063 | // a release cleanup on the object, it will already be active; since | |||
1064 | // cleanups are emitted in reverse order, the use will occur before the | |||
1065 | // object is released. | |||
1066 | if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount && | |||
1067 | CGM.getCodeGenOpts().OptimizationLevel != 0) | |||
1068 | for (llvm::Value *Object : RetainableOperands) | |||
1069 | pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object); | |||
1070 | ||||
1071 | return RValue::get(BufAddr.getPointer()); | |||
1072 | } | |||
1073 | ||||
1074 | /// Determine if a binop is a checked mixed-sign multiply we can specialize. | |||
1075 | static bool isSpecialMixedSignMultiply(unsigned BuiltinID, | |||
1076 | WidthAndSignedness Op1Info, | |||
1077 | WidthAndSignedness Op2Info, | |||
1078 | WidthAndSignedness ResultInfo) { | |||
1079 | return BuiltinID == Builtin::BI__builtin_mul_overflow && | |||
1080 | Op1Info.Width == Op2Info.Width && Op1Info.Width >= ResultInfo.Width && | |||
1081 | Op1Info.Signed != Op2Info.Signed; | |||
1082 | } | |||
1083 | ||||
1084 | /// Emit a checked mixed-sign multiply. This is a cheaper specialization of | |||
1085 | /// the generic checked-binop irgen. | |||
1086 | static RValue | |||
1087 | EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, | |||
1088 | WidthAndSignedness Op1Info, const clang::Expr *Op2, | |||
1089 | WidthAndSignedness Op2Info, | |||
1090 | const clang::Expr *ResultArg, QualType ResultQTy, | |||
1091 | WidthAndSignedness ResultInfo) { | |||
1092 | assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info, | |||
1093 | Op2Info, ResultInfo) && | |||
1094 | "Not a mixed-sign multiplication we can specialize"); | |||
1095 | ||||
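| // Strategy: multiply |signed operand| by the unsigned operand with a checked | |||
| // unsigned multiply, then restore the sign and range-check against the result | |||
| // type. E.g. (-3) * 5u into an 'int': 3 * 5 = 15, negated to -15; no overflow, | |||
| // since 15 <= INT_MAX + 1 (the magnitude bound for a negative result). | |||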
1096 | // Emit the signed and unsigned operands. | |||
1097 | const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; | |||
1098 | const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; | |||
1099 | llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); | |||
1100 | llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); | |||
1101 | ||||
1102 | llvm::Type *OpTy = Signed->getType(); | |||
1103 | llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); | |||
1104 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); | |||
1105 | llvm::Type *ResTy = ResultPtr.getElementType(); | |||
1106 | ||||
1107 | // Take the absolute value of the signed operand. | |||
1108 | llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); | |||
1109 | llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); | |||
1110 | llvm::Value *AbsSigned = | |||
1111 | CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); | |||
1112 | ||||
1113 | // Perform a checked unsigned multiplication. | |||
1114 | llvm::Value *UnsignedOverflow; | |||
1115 | llvm::Value *UnsignedResult = | |||
1116 | EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, | |||
1117 | Unsigned, UnsignedOverflow); | |||
1118 | ||||
1119 | llvm::Value *Overflow, *Result; | |||
1120 | if (ResultInfo.Signed) { | |||
1121 | // Signed overflow occurs if the result is greater than INT_MAX or less | |||
1122 | // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative). | |||
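| // (The extra IsNegative accounts for |INT_MIN| being INT_MAX + 1, so a | |||
| // negative result may have a magnitude one larger than a positive one.) | |||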
1123 | auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width) | |||
1124 | .zextOrSelf(Op1Info.Width); | |||
1125 | llvm::Value *MaxResult = | |||
1126 | CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), | |||
1127 | CGF.Builder.CreateZExt(IsNegative, OpTy)); | |||
1128 | llvm::Value *SignedOverflow = | |||
1129 | CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); | |||
1130 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); | |||
1131 | ||||
1132 | // Prepare the signed result (possibly by negating it). | |||
1133 | llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); | |||
1134 | llvm::Value *SignedResult = | |||
1135 | CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); | |||
1136 | Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); | |||
1137 | } else { | |||
1138 | // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX. | |||
1139 | llvm::Value *Underflow = CGF.Builder.CreateAnd( | |||
1140 | IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); | |||
1141 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); | |||
1142 | if (ResultInfo.Width < Op1Info.Width) { | |||
1143 | auto IntMax = | |||
1144 | llvm::APInt::getMaxValue(ResultInfo.Width).zext(Op1Info.Width); | |||
1145 | llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( | |||
1146 | UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); | |||
1147 | Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); | |||
1148 | } | |||
1149 | ||||
1150 | // Negate the product if it would be negative in infinite precision. | |||
1151 | Result = CGF.Builder.CreateSelect( | |||
1152 | IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); | |||
1153 | ||||
1154 | Result = CGF.Builder.CreateTrunc(Result, ResTy); | |||
1155 | } | |||
1156 | assert(Overflow && Result && "Missing overflow or result"); | |||
1157 | ||||
1158 | bool isVolatile = | |||
1159 | ResultArg->getType()->getPointeeType().isVolatileQualified(); | |||
1160 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, | |||
1161 | isVolatile); | |||
1162 | return RValue::get(Overflow); | |||
1163 | } | |||
1164 | ||||
1165 | static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType, | |||
1166 | Value *&RecordPtr, CharUnits Align, Value *Func, | |||
1167 | int Lvl) { | |||
1168 | const auto *RT = RType->getAs<RecordType>(); | |||
1169 | ASTContext &Context = CGF.getContext(); | |||
1170 | RecordDecl *RD = RT->getDecl()->getDefinition(); | |||
1171 | ASTContext &Ctx = RD->getASTContext(); | |||
1172 | const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD); | |||
1173 | std::string Pad = std::string(Lvl * 4, ' '); | |||
1174 | ||||
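| // Emit calls to the user-supplied printing callback (Func) that render the | |||
| // record as "Type {", one "type name : value" line per field (recursing into | |||
| // nested records), and a closing "}", accumulating the callback's integer | |||
| // results in Res. | |||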
1175 | Value *GString = | |||
1176 | CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n"); | |||
1177 | Value *Res = CGF.Builder.CreateCall(Func, {GString}); | |||
1178 | ||||
1179 | static llvm::DenseMap<QualType, const char *> Types; | |||
1180 | if (Types.empty()) { | |||
1181 | Types[Context.CharTy] = "%c"; | |||
1182 | Types[Context.BoolTy] = "%d"; | |||
1183 | Types[Context.SignedCharTy] = "%hhd"; | |||
1184 | Types[Context.UnsignedCharTy] = "%hhu"; | |||
1185 | Types[Context.IntTy] = "%d"; | |||
1186 | Types[Context.UnsignedIntTy] = "%u"; | |||
1187 | Types[Context.LongTy] = "%ld"; | |||
1188 | Types[Context.UnsignedLongTy] = "%lu"; | |||
1189 | Types[Context.LongLongTy] = "%lld"; | |||
1190 | Types[Context.UnsignedLongLongTy] = "%llu"; | |||
1191 | Types[Context.ShortTy] = "%hd"; | |||
1192 | Types[Context.UnsignedShortTy] = "%hu"; | |||
1193 | Types[Context.VoidPtrTy] = "%p"; | |||
1194 | Types[Context.FloatTy] = "%f"; | |||
1195 | Types[Context.DoubleTy] = "%f"; | |||
1196 | Types[Context.LongDoubleTy] = "%Lf"; | |||
1197 | Types[Context.getPointerType(Context.CharTy)] = "%s"; | |||
1198 | Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s"; | |||
1199 | } | |||
1200 | ||||
1201 | for (const auto *FD : RD->fields()) { | |||
1202 | uint64_t Off = RL.getFieldOffset(FD->getFieldIndex()); | |||
1203 | Off = Ctx.toCharUnitsFromBits(Off).getQuantity(); | |||
1204 | ||||
1205 | Value *FieldPtr = RecordPtr; | |||
1206 | if (RD->isUnion()) | |||
1207 | FieldPtr = CGF.Builder.CreatePointerCast( | |||
1208 | FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType()))); | |||
1209 | else | |||
1210 | FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr, | |||
1211 | FD->getFieldIndex()); | |||
1212 | ||||
1213 | GString = CGF.Builder.CreateGlobalStringPtr( | |||
1214 | llvm::Twine(Pad) | |||
1215 | .concat(FD->getType().getAsString()) | |||
1216 | .concat(llvm::Twine(' ')) | |||
1217 | .concat(FD->getNameAsString()) | |||
1218 | .concat(" : ") | |||
1219 | .str()); | |||
1220 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); | |||
1221 | Res = CGF.Builder.CreateAdd(Res, TmpRes); | |||
1222 | ||||
1223 | QualType CanonicalType = | |||
1224 | FD->getType().getUnqualifiedType().getCanonicalType(); | |||
1225 | ||||
1226 | // If the field is itself a record type, dump it recursively. | |||
1227 | if (CanonicalType->isRecordType()) { | |||
1228 | Value *TmpRes = | |||
1229 | dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1); | |||
1230 | Res = CGF.Builder.CreateAdd(TmpRes, Res); | |||
1231 | continue; | |||
1232 | } | |||
1233 | ||||
1234 | // Pick the best printf format for this field's type; unknown types fall back to "%p". | |||
1235 | llvm::Twine Format = Types.find(CanonicalType) == Types.end() | |||
1236 | ? Types[Context.VoidPtrTy] | |||
1237 | : Types[CanonicalType]; | |||
1238 | ||||
1239 | Address FieldAddress = Address(FieldPtr, Align); | |||
1240 | FieldPtr = CGF.Builder.CreateLoad(FieldAddress); | |||
1241 | ||||
1242 | // FIXME: Need to handle bitfields here. | |||
1243 | GString = CGF.Builder.CreateGlobalStringPtr( | |||
1244 | Format.concat(llvm::Twine('\n')).str()); | |||
1245 | TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr}); | |||
1246 | Res = CGF.Builder.CreateAdd(Res, TmpRes); | |||
1247 | } | |||
1248 | ||||
1249 | GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n"); | |||
1250 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); | |||
1251 | Res = CGF.Builder.CreateAdd(Res, TmpRes); | |||
1252 | return Res; | |||
1253 | } | |||
1254 | ||||
1255 | RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, | |||
1256 | unsigned BuiltinID, const CallExpr *E, | |||
1257 | ReturnValueSlot ReturnValue) { | |||
1258 | // See if we can constant fold this builtin. If so, don't emit it at all. | |||
1259 | Expr::EvalResult Result; | |||
1260 | if (E->EvaluateAsRValue(Result, CGM.getContext()) && | |||
1261 | !Result.hasSideEffects()) { | |||
1262 | if (Result.Val.isInt()) | |||
1263 | return RValue::get(llvm::ConstantInt::get(getLLVMContext(), | |||
1264 | Result.Val.getInt())); | |||
1265 | if (Result.Val.isFloat()) | |||
1266 | return RValue::get(llvm::ConstantFP::get(getLLVMContext(), | |||
1267 | Result.Val.getFloat())); | |||
1268 | } | |||
1269 | ||||
1270 | // There are LLVM math intrinsics/instructions corresponding to math library | |||
1271 | // functions, except that the LLVM op will never set errno while the math | |||
1272 | // library might. Also, math builtins have the same semantics as their math | |||
1273 | // library twins. Thus, we can transform math library and builtin calls to | |||
1274 | // their LLVM counterparts if the call is marked 'const' (known to never set errno). | |||
1275 | if (FD->hasAttr<ConstAttr>()) { | |||
1276 | switch (BuiltinID) { | |||
1277 | case Builtin::BIceil: | |||
1278 | case Builtin::BIceilf: | |||
1279 | case Builtin::BIceill: | |||
1280 | case Builtin::BI__builtin_ceil: | |||
1281 | case Builtin::BI__builtin_ceilf: | |||
1282 | case Builtin::BI__builtin_ceill: | |||
1283 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil)); | |||
1284 | ||||
1285 | case Builtin::BIcopysign: | |||
1286 | case Builtin::BIcopysignf: | |||
1287 | case Builtin::BIcopysignl: | |||
1288 | case Builtin::BI__builtin_copysign: | |||
1289 | case Builtin::BI__builtin_copysignf: | |||
1290 | case Builtin::BI__builtin_copysignl: | |||
1291 | case Builtin::BI__builtin_copysignf128: | |||
1292 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); | |||
1293 | ||||
1294 | case Builtin::BIcos: | |||
1295 | case Builtin::BIcosf: | |||
1296 | case Builtin::BIcosl: | |||
1297 | case Builtin::BI__builtin_cos: | |||
1298 | case Builtin::BI__builtin_cosf: | |||
1299 | case Builtin::BI__builtin_cosl: | |||
1300 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos)); | |||
1301 | ||||
1302 | case Builtin::BIexp: | |||
1303 | case Builtin::BIexpf: | |||
1304 | case Builtin::BIexpl: | |||
1305 | case Builtin::BI__builtin_exp: | |||
1306 | case Builtin::BI__builtin_expf: | |||
1307 | case Builtin::BI__builtin_expl: | |||
1308 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp)); | |||
1309 | ||||
1310 | case Builtin::BIexp2: | |||
1311 | case Builtin::BIexp2f: | |||
1312 | case Builtin::BIexp2l: | |||
1313 | case Builtin::BI__builtin_exp2: | |||
1314 | case Builtin::BI__builtin_exp2f: | |||
1315 | case Builtin::BI__builtin_exp2l: | |||
1316 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2)); | |||
1317 | ||||
1318 | case Builtin::BIfabs: | |||
1319 | case Builtin::BIfabsf: | |||
1320 | case Builtin::BIfabsl: | |||
1321 | case Builtin::BI__builtin_fabs: | |||
1322 | case Builtin::BI__builtin_fabsf: | |||
1323 | case Builtin::BI__builtin_fabsl: | |||
1324 | case Builtin::BI__builtin_fabsf128: | |||
1325 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); | |||
1326 | ||||
1327 | case Builtin::BIfloor: | |||
1328 | case Builtin::BIfloorf: | |||
1329 | case Builtin::BIfloorl: | |||
1330 | case Builtin::BI__builtin_floor: | |||
1331 | case Builtin::BI__builtin_floorf: | |||
1332 | case Builtin::BI__builtin_floorl: | |||
1333 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor)); | |||
1334 | ||||
1335 | case Builtin::BIfma: | |||
1336 | case Builtin::BIfmaf: | |||
1337 | case Builtin::BIfmal: | |||
1338 | case Builtin::BI__builtin_fma: | |||
1339 | case Builtin::BI__builtin_fmaf: | |||
1340 | case Builtin::BI__builtin_fmal: | |||
1341 | return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma)); | |||
1342 | ||||
1343 | case Builtin::BIfmax: | |||
1344 | case Builtin::BIfmaxf: | |||
1345 | case Builtin::BIfmaxl: | |||
1346 | case Builtin::BI__builtin_fmax: | |||
1347 | case Builtin::BI__builtin_fmaxf: | |||
1348 | case Builtin::BI__builtin_fmaxl: | |||
1349 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum)); | |||
1350 | ||||
1351 | case Builtin::BIfmin: | |||
1352 | case Builtin::BIfminf: | |||
1353 | case Builtin::BIfminl: | |||
1354 | case Builtin::BI__builtin_fmin: | |||
1355 | case Builtin::BI__builtin_fminf: | |||
1356 | case Builtin::BI__builtin_fminl: | |||
1357 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum)); | |||
1358 | ||||
1359 | // fmod() is a special case. It maps to the frem instruction rather than an | |||
1360 | // LLVM intrinsic. | |||
1361 | case Builtin::BIfmod: | |||
1362 | case Builtin::BIfmodf: | |||
1363 | case Builtin::BIfmodl: | |||
1364 | case Builtin::BI__builtin_fmod: | |||
1365 | case Builtin::BI__builtin_fmodf: | |||
1366 | case Builtin::BI__builtin_fmodl: { | |||
1367 | Value *Arg1 = EmitScalarExpr(E->getArg(0)); | |||
1368 | Value *Arg2 = EmitScalarExpr(E->getArg(1)); | |||
1369 | return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); | |||
1370 | } | |||
1371 | ||||
1372 | case Builtin::BIlog: | |||
1373 | case Builtin::BIlogf: | |||
1374 | case Builtin::BIlogl: | |||
1375 | case Builtin::BI__builtin_log: | |||
1376 | case Builtin::BI__builtin_logf: | |||
1377 | case Builtin::BI__builtin_logl: | |||
1378 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log)); | |||
1379 | ||||
1380 | case Builtin::BIlog10: | |||
1381 | case Builtin::BIlog10f: | |||
1382 | case Builtin::BIlog10l: | |||
1383 | case Builtin::BI__builtin_log10: | |||
1384 | case Builtin::BI__builtin_log10f: | |||
1385 | case Builtin::BI__builtin_log10l: | |||
1386 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10)); | |||
1387 | ||||
1388 | case Builtin::BIlog2: | |||
1389 | case Builtin::BIlog2f: | |||
1390 | case Builtin::BIlog2l: | |||
1391 | case Builtin::BI__builtin_log2: | |||
1392 | case Builtin::BI__builtin_log2f: | |||
1393 | case Builtin::BI__builtin_log2l: | |||
1394 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2)); | |||
1395 | ||||
1396 | case Builtin::BInearbyint: | |||
1397 | case Builtin::BInearbyintf: | |||
1398 | case Builtin::BInearbyintl: | |||
1399 | case Builtin::BI__builtin_nearbyint: | |||
1400 | case Builtin::BI__builtin_nearbyintf: | |||
1401 | case Builtin::BI__builtin_nearbyintl: | |||
1402 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint)); | |||
1403 | ||||
1404 | case Builtin::BIpow: | |||
1405 | case Builtin::BIpowf: | |||
1406 | case Builtin::BIpowl: | |||
1407 | case Builtin::BI__builtin_pow: | |||
1408 | case Builtin::BI__builtin_powf: | |||
1409 | case Builtin::BI__builtin_powl: | |||
1410 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow)); | |||
1411 | ||||
1412 | case Builtin::BIrint: | |||
1413 | case Builtin::BIrintf: | |||
1414 | case Builtin::BIrintl: | |||
1415 | case Builtin::BI__builtin_rint: | |||
1416 | case Builtin::BI__builtin_rintf: | |||
1417 | case Builtin::BI__builtin_rintl: | |||
1418 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint)); | |||
1419 | ||||
1420 | case Builtin::BIround: | |||
1421 | case Builtin::BIroundf: | |||
1422 | case Builtin::BIroundl: | |||
1423 | case Builtin::BI__builtin_round: | |||
1424 | case Builtin::BI__builtin_roundf: | |||
1425 | case Builtin::BI__builtin_roundl: | |||
1426 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round)); | |||
1427 | ||||
1428 | case Builtin::BIsin: | |||
1429 | case Builtin::BIsinf: | |||
1430 | case Builtin::BIsinl: | |||
1431 | case Builtin::BI__builtin_sin: | |||
1432 | case Builtin::BI__builtin_sinf: | |||
1433 | case Builtin::BI__builtin_sinl: | |||
1434 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin)); | |||
1435 | ||||
1436 | case Builtin::BIsqrt: | |||
1437 | case Builtin::BIsqrtf: | |||
1438 | case Builtin::BIsqrtl: | |||
1439 | case Builtin::BI__builtin_sqrt: | |||
1440 | case Builtin::BI__builtin_sqrtf: | |||
1441 | case Builtin::BI__builtin_sqrtl: | |||
1442 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt)); | |||
1443 | ||||
1444 | case Builtin::BItrunc: | |||
1445 | case Builtin::BItruncf: | |||
1446 | case Builtin::BItruncl: | |||
1447 | case Builtin::BI__builtin_trunc: | |||
1448 | case Builtin::BI__builtin_truncf: | |||
1449 | case Builtin::BI__builtin_truncl: | |||
1450 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc)); | |||
1451 | ||||
1452 | default: | |||
1453 | break; | |||
1454 | } | |||
1455 | } | |||
1456 | ||||
1457 | switch (BuiltinID) { | |||
1458 | default: break; | |||
1459 | case Builtin::BI__builtin___CFStringMakeConstantString: | |||
1460 | case Builtin::BI__builtin___NSStringMakeConstantString: | |||
1461 | return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); | |||
1462 | case Builtin::BI__builtin_stdarg_start: | |||
1463 | case Builtin::BI__builtin_va_start: | |||
1464 | case Builtin::BI__va_start: | |||
1465 | case Builtin::BI__builtin_va_end: | |||
1466 | return RValue::get( | |||
1467 | EmitVAStartEnd(BuiltinID == Builtin::BI__va_start | |||
1468 | ? EmitScalarExpr(E->getArg(0)) | |||
1469 | : EmitVAListRef(E->getArg(0)).getPointer(), | |||
1470 | BuiltinID != Builtin::BI__builtin_va_end)); | |||
1471 | case Builtin::BI__builtin_va_copy: { | |||
1472 | Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); | |||
1473 | Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); | |||
1474 | ||||
1475 | llvm::Type *Type = Int8PtrTy; | |||
1476 | ||||
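| // llvm.va_copy operates on raw i8* operands, so cast both va_list addresses. | |||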
1477 | DstPtr = Builder.CreateBitCast(DstPtr, Type); | |||
1478 | SrcPtr = Builder.CreateBitCast(SrcPtr, Type); | |||
1479 | return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), | |||
1480 | {DstPtr, SrcPtr})); | |||
1481 | } | |||
1482 | case Builtin::BI__builtin_abs: | |||
1483 | case Builtin::BI__builtin_labs: | |||
1484 | case Builtin::BI__builtin_llabs: { | |||
1485 | // X < 0 ? -X : X | |||
1486 | // The negation has 'nsw' because abs of INT_MIN is undefined. | |||
1487 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); | |||
1488 | Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg"); | |||
1489 | Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType()); | |||
1490 | Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond"); | |||
1491 | Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs"); | |||
1492 | return RValue::get(Result); | |||
1493 | } | |||
1494 | case Builtin::BI__builtin_conj: | |||
1495 | case Builtin::BI__builtin_conjf: | |||
1496 | case Builtin::BI__builtin_conjl: { | |||
1497 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); | |||
1498 | Value *Real = ComplexVal.first; | |||
1499 | Value *Imag = ComplexVal.second; | |||
1500 | Value *Zero = | |||
1501 | Imag->getType()->isFPOrFPVectorTy() | |||
1502 | ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType()) | |||
1503 | : llvm::Constant::getNullValue(Imag->getType()); | |||
1504 | ||||
1505 | Imag = Builder.CreateFSub(Zero, Imag, "sub"); | |||
1506 | return RValue::getComplex(std::make_pair(Real, Imag)); | |||
1507 | } | |||
1508 | case Builtin::BI__builtin_creal: | |||
1509 | case Builtin::BI__builtin_crealf: | |||
1510 | case Builtin::BI__builtin_creall: | |||
1511 | case Builtin::BIcreal: | |||
1512 | case Builtin::BIcrealf: | |||
1513 | case Builtin::BIcreall: { | |||
1514 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); | |||
1515 | return RValue::get(ComplexVal.first); | |||
1516 | } | |||
1517 | ||||
1518 | case Builtin::BI__builtin_dump_struct: { | |||
1519 | Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts()); | |||
1520 | CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment(); | |||
1521 | ||||
1522 | const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts(); | |||
1523 | QualType Arg0Type = Arg0->getType()->getPointeeType(); | |||
1524 | ||||
1525 | Value *RecordPtr = EmitScalarExpr(Arg0); | |||
1526 | Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, Func, 0); | |||
1527 | return RValue::get(Res); | |||
1528 | } | |||
1529 | ||||
1530 | case Builtin::BI__builtin_cimag: | |||
1531 | case Builtin::BI__builtin_cimagf: | |||
1532 | case Builtin::BI__builtin_cimagl: | |||
1533 | case Builtin::BIcimag: | |||
1534 | case Builtin::BIcimagf: | |||
1535 | case Builtin::BIcimagl: { | |||
1536 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); | |||
1537 | return RValue::get(ComplexVal.second); | |||
1538 | } | |||
1539 | ||||
1540 | case Builtin::BI__builtin_ctzs: | |||
1541 | case Builtin::BI__builtin_ctz: | |||
1542 | case Builtin::BI__builtin_ctzl: | |||
1543 | case Builtin::BI__builtin_ctzll: { | |||
1544 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); | |||
1545 | ||||
1546 | llvm::Type *ArgType = ArgValue->getType(); | |||
1547 | Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); | |||
1548 | ||||
1549 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
1550 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); | |||
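| // The second operand is the intrinsic's i1 'is_zero_undef' flag: true on | |||
| // targets whose count-zeros instruction leaves a zero input undefined. | |||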
1551 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); | |||
1552 | if (Result->getType() != ResultType) | |||
1553 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, | |||
1554 | "cast"); | |||
1555 | return RValue::get(Result); | |||
1556 | } | |||
1557 | case Builtin::BI__builtin_clzs: | |||
1558 | case Builtin::BI__builtin_clz: | |||
1559 | case Builtin::BI__builtin_clzl: | |||
1560 | case Builtin::BI__builtin_clzll: { | |||
1561 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); | |||
1562 | ||||
1563 | llvm::Type *ArgType = ArgValue->getType(); | |||
1564 | Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); | |||
1565 | ||||
1566 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
1567 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); | |||
1568 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); | |||
1569 | if (Result->getType() != ResultType) | |||
1570 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, | |||
1571 | "cast"); | |||
1572 | return RValue::get(Result); | |||
1573 | } | |||
1574 | case Builtin::BI__builtin_ffs: | |||
1575 | case Builtin::BI__builtin_ffsl: | |||
1576 | case Builtin::BI__builtin_ffsll: { | |||
1577 | // ffs(x) -> x ? cttz(x) + 1 : 0 | |||
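| // e.g. ffs(0) == 0 and ffs(8) == 4, since cttz(8) == 3. | |||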
1578 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); | |||
1579 | ||||
1580 | llvm::Type *ArgType = ArgValue->getType(); | |||
1581 | Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); | |||
1582 | ||||
1583 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
1584 | Value *Tmp = | |||
1585 | Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), | |||
1586 | llvm::ConstantInt::get(ArgType, 1)); | |||
1587 | Value *Zero = llvm::Constant::getNullValue(ArgType); | |||
1588 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); | |||
1589 | Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); | |||
1590 | if (Result->getType() != ResultType) | |||
1591 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, | |||
1592 | "cast"); | |||
1593 | return RValue::get(Result); | |||
1594 | } | |||
1595 | case Builtin::BI__builtin_parity: | |||
1596 | case Builtin::BI__builtin_parityl: | |||
1597 | case Builtin::BI__builtin_parityll: { | |||
1598 | // parity(x) -> ctpop(x) & 1 | |||
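| // e.g. parity(0b1011) == ctpop(0b1011) & 1 == 3 & 1 == 1. | |||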
1599 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); | |||
1600 | ||||
1601 | llvm::Type *ArgType = ArgValue->getType(); | |||
1602 | Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); | |||
1603 | ||||
1604 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
1605 | Value *Tmp = Builder.CreateCall(F, ArgValue); | |||
1606 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); | |||
1607 | if (Result->getType() != ResultType) | |||
1608 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, | |||
1609 | "cast"); | |||
1610 | return RValue::get(Result); | |||
1611 | } | |||
1612 | case Builtin::BI__popcnt16: | |||
1613 | case Builtin::BI__popcnt: | |||
1614 | case Builtin::BI__popcnt64: | |||
1615 | case Builtin::BI__builtin_popcount: | |||
1616 | case Builtin::BI__builtin_popcountl: | |||
1617 | case Builtin::BI__builtin_popcountll: { | |||
1618 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); | |||
1619 | ||||
1620 | llvm::Type *ArgType = ArgValue->getType(); | |||
1621 | Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); | |||
1622 | ||||
1623 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
1624 | Value *Result = Builder.CreateCall(F, ArgValue); | |||
1625 | if (Result->getType() != ResultType) | |||
1626 | Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, | |||
1627 | "cast"); | |||
1628 | return RValue::get(Result); | |||
1629 | } | |||
1630 | case Builtin::BI_rotr8: | |||
1631 | case Builtin::BI_rotr16: | |||
1632 | case Builtin::BI_rotr: | |||
1633 | case Builtin::BI_lrotr: | |||
1634 | case Builtin::BI_rotr64: { | |||
1635 | Value *Val = EmitScalarExpr(E->getArg(0)); | |||
1636 | Value *Shift = EmitScalarExpr(E->getArg(1)); | |||
1637 | ||||
1638 | llvm::Type *ArgType = Val->getType(); | |||
1639 | Shift = Builder.CreateIntCast(Shift, ArgType, false); | |||
1640 | unsigned ArgWidth = ArgType->getIntegerBitWidth(); | |||
1641 | Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1); | |||
1642 | ||||
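| // Masking the shift amounts with (width - 1) keeps both shifts in range, so a | |||
| // rotate by 0 or by the full width is well-defined: for 32-bit values, | |||
| // _rotr(0x80000001, 1) == 0xC0000000. (_rotl below mirrors this with the | |||
| // shift directions swapped.) | |||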
1643 | Value *RightShiftAmt = Builder.CreateAnd(Shift, Mask); | |||
1644 | Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt); | |||
1645 | Value *LeftShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask); | |||
1646 | Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt); | |||
1647 | Value *Result = Builder.CreateOr(LeftShifted, RightShifted); | |||
1648 | return RValue::get(Result); | |||
1649 | } | |||
1650 | case Builtin::BI_rotl8: | |||
1651 | case Builtin::BI_rotl16: | |||
1652 | case Builtin::BI_rotl: | |||
1653 | case Builtin::BI_lrotl: | |||
1654 | case Builtin::BI_rotl64: { | |||
1655 | Value *Val = EmitScalarExpr(E->getArg(0)); | |||
1656 | Value *Shift = EmitScalarExpr(E->getArg(1)); | |||
1657 | ||||
1658 | llvm::Type *ArgType = Val->getType(); | |||
1659 | Shift = Builder.CreateIntCast(Shift, ArgType, false); | |||
1660 | unsigned ArgWidth = ArgType->getIntegerBitWidth(); | |||
1661 | Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1); | |||
1662 | ||||
1663 | Value *LeftShiftAmt = Builder.CreateAnd(Shift, Mask); | |||
1664 | Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt); | |||
1665 | Value *RightShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask); | |||
1666 | Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt); | |||
1667 | Value *Result = Builder.CreateOr(LeftShifted, RightShifted); | |||
1668 | return RValue::get(Result); | |||
1669 | } | |||
1670 | case Builtin::BI__builtin_unpredictable: { | |||
1671 | // Always return the argument of __builtin_unpredictable. LLVM does not | |||
1672 | // handle this builtin. Metadata for this builtin should be added directly | |||
1673 | // to instructions such as branches or switches that use it. | |||
1674 | return RValue::get(EmitScalarExpr(E->getArg(0))); | |||
1675 | } | |||
1676 | case Builtin::BI__builtin_expect: { | |||
1677 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); | |||
1678 | llvm::Type *ArgType = ArgValue->getType(); | |||
1679 | ||||
1680 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); | |||
1681 | // Don't generate llvm.expect on -O0 as the backend won't use it for | |||
1682 | // anything. | |||
1683 | // Note, we still IRGen ExpectedValue because it could have side-effects. | |||
1684 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) | |||
1685 | return RValue::get(ArgValue); | |||
1686 | ||||
1687 | Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); | |||
1688 | Value *Result = | |||
1689 | Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); | |||
1690 | return RValue::get(Result); | |||
1691 | } | |||
1692 | case Builtin::BI__builtin_assume_aligned: { | |||
1693 | Value *PtrValue = EmitScalarExpr(E->getArg(0)); | |||
1694 | Value *OffsetValue = | |||
1695 | (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; | |||
1696 | ||||
1697 | Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); | |||
1698 | ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); | |||
1699 | unsigned Alignment = (unsigned) AlignmentCI->getZExtValue(); | |||
1700 | ||||
1701 | EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue); | |||
1702 | return RValue::get(PtrValue); | |||
1703 | } | |||
1704 | case Builtin::BI__assume: | |||
1705 | case Builtin::BI__builtin_assume: { | |||
1706 | if (E->getArg(0)->HasSideEffects(getContext())) | |||
1707 | return RValue::get(nullptr); | |||
1708 | ||||
1709 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); | |||
1710 | Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume); | |||
1711 | return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); | |||
1712 | } | |||
1713 | case Builtin::BI__builtin_bswap16: | |||
1714 | case Builtin::BI__builtin_bswap32: | |||
1715 | case Builtin::BI__builtin_bswap64: { | |||
1716 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); | |||
1717 | } | |||
1718 | case Builtin::BI__builtin_bitreverse8: | |||
1719 | case Builtin::BI__builtin_bitreverse16: | |||
1720 | case Builtin::BI__builtin_bitreverse32: | |||
1721 | case Builtin::BI__builtin_bitreverse64: { | |||
1722 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); | |||
1723 | } | |||
1724 | case Builtin::BI__builtin_object_size: { | |||
1725 | unsigned Type = | |||
1726 | E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); | |||
1727 | auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); | |||
1728 | ||||
1729 | // We pass this builtin onto the optimizer so that it can figure out the | |||
1730 | // object size in more complex cases. | |||
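| // The low bits of Type select the GCC semantics: bit 0 limits the answer to | |||
| // the closest enclosing subobject, and bit 1 makes the unknown-size result 0 | |||
| // instead of -1. | |||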
1731 | return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, | |||
1732 | /*EmittedE=*/nullptr)); | |||
1733 | } | |||
1734 | case Builtin::BI__builtin_prefetch: { | |||
1735 | Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); | |||
1736 | // FIXME: Technically these constants should be of type 'int', yes? | |||
1737 | RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : | |||
1738 | llvm::ConstantInt::get(Int32Ty, 0); | |||
1739 | Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : | |||
1740 | llvm::ConstantInt::get(Int32Ty, 3); | |||
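| // Defaults follow GCC: rw = 0 (prefetch for read) and locality = 3 (high | |||
| // temporal locality); the final operand selects a data rather than an | |||
| // instruction prefetch. | |||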
1741 | Value *Data = llvm::ConstantInt::get(Int32Ty, 1); | |||
1742 | Value *F = CGM.getIntrinsic(Intrinsic::prefetch); | |||
1743 | return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); | |||
1744 | } | |||
1745 | case Builtin::BI__builtin_readcyclecounter: { | |||
1746 | Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); | |||
1747 | return RValue::get(Builder.CreateCall(F)); | |||
1748 | } | |||
1749 | case Builtin::BI__builtin___clear_cache: { | |||
1750 | Value *Begin = EmitScalarExpr(E->getArg(0)); | |||
1751 | Value *End = EmitScalarExpr(E->getArg(1)); | |||
1752 | Value *F = CGM.getIntrinsic(Intrinsic::clear_cache); | |||
1753 | return RValue::get(Builder.CreateCall(F, {Begin, End})); | |||
1754 | } | |||
1755 | case Builtin::BI__builtin_trap: | |||
1756 | return RValue::get(EmitTrapCall(Intrinsic::trap)); | |||
1757 | case Builtin::BI__debugbreak: | |||
1758 | return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); | |||
1759 | case Builtin::BI__builtin_unreachable: { | |||
1760 | EmitUnreachable(E->getExprLoc()); | |||
1761 | ||||
1762 | // We do need to preserve an insertion point. | |||
1763 | EmitBlock(createBasicBlock("unreachable.cont")); | |||
1764 | ||||
1765 | return RValue::get(nullptr); | |||
1766 | } | |||
1767 | ||||
1768 | case Builtin::BI__builtin_powi: | |||
1769 | case Builtin::BI__builtin_powif: | |||
1770 | case Builtin::BI__builtin_powil: { | |||
1771 | Value *Base = EmitScalarExpr(E->getArg(0)); | |||
1772 | Value *Exponent = EmitScalarExpr(E->getArg(1)); | |||
1773 | llvm::Type *ArgType = Base->getType(); | |||
1774 | Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType); | |||
1775 | return RValue::get(Builder.CreateCall(F, {Base, Exponent})); | |||
1776 | } | |||
1777 | ||||
1778 | case Builtin::BI__builtin_isgreater: | |||
1779 | case Builtin::BI__builtin_isgreaterequal: | |||
1780 | case Builtin::BI__builtin_isless: | |||
1781 | case Builtin::BI__builtin_islessequal: | |||
1782 | case Builtin::BI__builtin_islessgreater: | |||
1783 | case Builtin::BI__builtin_isunordered: { | |||
1784 | // Ordered comparisons: we know the arguments to these are matching scalar | |||
1785 | // floating point values. | |||
1786 | Value *LHS = EmitScalarExpr(E->getArg(0)); | |||
1787 | Value *RHS = EmitScalarExpr(E->getArg(1)); | |||
1788 | ||||
1789 | switch (BuiltinID) { | |||
1790 | default: llvm_unreachable("Unknown ordered comparison"); | |||
1791 | case Builtin::BI__builtin_isgreater: | |||
1792 | LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); | |||
1793 | break; | |||
1794 | case Builtin::BI__builtin_isgreaterequal: | |||
1795 | LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); | |||
1796 | break; | |||
1797 | case Builtin::BI__builtin_isless: | |||
1798 | LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); | |||
1799 | break; | |||
1800 | case Builtin::BI__builtin_islessequal: | |||
1801 | LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); | |||
1802 | break; | |||
1803 | case Builtin::BI__builtin_islessgreater: | |||
1804 | LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); | |||
1805 | break; | |||
1806 | case Builtin::BI__builtin_isunordered: | |||
1807 | LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); | |||
1808 | break; | |||
1809 | } | |||
1810 | // ZExt bool to int type. | |||
1811 | return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); | |||
1812 | } | |||
1813 | case Builtin::BI__builtin_isnan: { | |||
1814 | Value *V = EmitScalarExpr(E->getArg(0)); | |||
1815 | V = Builder.CreateFCmpUNO(V, V, "cmp"); | |||
1816 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); | |||
1817 | } | |||
1818 | ||||
1819 | case Builtin::BIfinite: | |||
1820 | case Builtin::BI__finite: | |||
1821 | case Builtin::BIfinitef: | |||
1822 | case Builtin::BI__finitef: | |||
1823 | case Builtin::BIfinitel: | |||
1824 | case Builtin::BI__finitel: | |||
1825 | case Builtin::BI__builtin_isinf: | |||
1826 | case Builtin::BI__builtin_isfinite: { | |||
1827 | // isinf(x) --> fabs(x) == infinity | |||
1828 | // isfinite(x) --> fabs(x) != infinity | |||
1829 | // x != NaN via the ordered compare in either case. | |||
1830 | Value *V = EmitScalarExpr(E->getArg(0)); | |||
1831 | Value *Fabs = EmitFAbs(*this, V); | |||
1832 | Constant *Infinity = ConstantFP::getInfinity(V->getType()); | |||
1833 | CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) | |||
1834 | ? CmpInst::FCMP_OEQ | |||
1835 | : CmpInst::FCMP_ONE; | |||
1836 | Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); | |||
1837 | return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); | |||
1838 | } | |||
1839 | ||||
1840 | case Builtin::BI__builtin_isinf_sign: { | |||
1841 | // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 | |||
1842 | Value *Arg = EmitScalarExpr(E->getArg(0)); | |||
1843 | Value *AbsArg = EmitFAbs(*this, Arg); | |||
1844 | Value *IsInf = Builder.CreateFCmpOEQ( | |||
1845 | AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); | |||
1846 | Value *IsNeg = EmitSignBit(*this, Arg); | |||
1847 | ||||
1848 | llvm::Type *IntTy = ConvertType(E->getType()); | |||
1849 | Value *Zero = Constant::getNullValue(IntTy); | |||
1850 | Value *One = ConstantInt::get(IntTy, 1); | |||
1851 | Value *NegativeOne = ConstantInt::get(IntTy, -1); | |||
1852 | Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); | |||
1853 | Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); | |||
1854 | return RValue::get(Result); | |||
1855 | } | |||
1856 | ||||
1857 | case Builtin::BI__builtin_isnormal: { | |||
1858 | // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min | |||
1859 | Value *V = EmitScalarExpr(E->getArg(0)); | |||
1860 | Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); | |||
1861 | ||||
1862 | Value *Abs = EmitFAbs(*this, V); | |||
1863 | Value *IsLessThanInf = | |||
1864 | Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); | |||
1865 | APFloat Smallest = APFloat::getSmallestNormalized( | |||
1866 | getContext().getFloatTypeSemantics(E->getArg(0)->getType())); | |||
1867 | Value *IsNormal = | |||
1868 | Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), | |||
1869 | "isnormal"); | |||
1870 | V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); | |||
1871 | V = Builder.CreateAnd(V, IsNormal, "and"); | |||
1872 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); | |||
1873 | } | |||
1874 | ||||
1875 | case Builtin::BI__builtin_fpclassify: { | |||
1876 | Value *V = EmitScalarExpr(E->getArg(5)); | |||
1877 | llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); | |||
1878 | ||||
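| // Arguments 0-4 are the caller-provided FP_NAN, FP_INFINITE, FP_NORMAL, | |||
| // FP_SUBNORMAL and FP_ZERO values; argument 5 is the operand to classify. | |||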
1879 | // Create Result | |||
1880 | BasicBlock *Begin = Builder.GetInsertBlock(); | |||
1881 | BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); | |||
1882 | Builder.SetInsertPoint(End); | |||
1883 | PHINode *Result = | |||
1884 | Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, | |||
1885 | "fpclassify_result"); | |||
1886 | ||||
1887 | // if (V==0) return FP_ZERO | |||
1888 | Builder.SetInsertPoint(Begin); | |||
1889 | Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), | |||
1890 | "iszero"); | |||
1891 | Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); | |||
1892 | BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); | |||
1893 | Builder.CreateCondBr(IsZero, End, NotZero); | |||
1894 | Result->addIncoming(ZeroLiteral, Begin); | |||
1895 | ||||
1896 | // if (V != V) return FP_NAN | |||
1897 | Builder.SetInsertPoint(NotZero); | |||
1898 | Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); | |||
1899 | Value *NanLiteral = EmitScalarExpr(E->getArg(0)); | |||
1900 | BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); | |||
1901 | Builder.CreateCondBr(IsNan, End, NotNan); | |||
1902 | Result->addIncoming(NanLiteral, NotZero); | |||
1903 | ||||
1904 | // if (fabs(V) == infinity) return FP_INFINITY | |||
1905 | Builder.SetInsertPoint(NotNan); | |||
1906 | Value *VAbs = EmitFAbs(*this, V); | |||
1907 | Value *IsInf = | |||
1908 | Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), | |||
1909 | "isinf"); | |||
1910 | Value *InfLiteral = EmitScalarExpr(E->getArg(1)); | |||
1911 | BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); | |||
1912 | Builder.CreateCondBr(IsInf, End, NotInf); | |||
1913 | Result->addIncoming(InfLiteral, NotNan); | |||
1914 | ||||
1915 | // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL | |||
1916 | Builder.SetInsertPoint(NotInf); | |||
1917 | APFloat Smallest = APFloat::getSmallestNormalized( | |||
1918 | getContext().getFloatTypeSemantics(E->getArg(5)->getType())); | |||
1919 | Value *IsNormal = | |||
1920 | Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), | |||
1921 | "isnormal"); | |||
1922 | Value *NormalResult = | |||
1923 | Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), | |||
1924 | EmitScalarExpr(E->getArg(3))); | |||
1925 | Builder.CreateBr(End); | |||
1926 | Result->addIncoming(NormalResult, NotInf); | |||
1927 | ||||
1928 | // return Result | |||
1929 | Builder.SetInsertPoint(End); | |||
1930 | return RValue::get(Result); | |||
1931 | } | |||
1932 | ||||
1933 | case Builtin::BIalloca: | |||
1934 | case Builtin::BI_alloca: | |||
1935 | case Builtin::BI__builtin_alloca: { | |||
1936 | Value *Size = EmitScalarExpr(E->getArg(0)); | |||
1937 | const TargetInfo &TI = getContext().getTargetInfo(); | |||
1938 | // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__. | |||
1939 | unsigned SuitableAlignmentInBytes = | |||
1940 | CGM.getContext() | |||
1941 | .toCharUnitsFromBits(TI.getSuitableAlign()) | |||
1942 | .getQuantity(); | |||
1943 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); | |||
1944 | AI->setAlignment(SuitableAlignmentInBytes); | |||
1945 | return RValue::get(AI); | |||
1946 | } | |||
1947 | ||||
1948 | case Builtin::BI__builtin_alloca_with_align: { | |||
1949 | Value *Size = EmitScalarExpr(E->getArg(0)); | |||
1950 | Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); | |||
1951 | auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue); | |||
1952 | unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); | |||
1953 | unsigned AlignmentInBytes = | |||
1954 | CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity(); | |||
1955 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); | |||
1956 | AI->setAlignment(AlignmentInBytes); | |||
1957 | return RValue::get(AI); | |||
1958 | } | |||
1959 | ||||
1960 | case Builtin::BIbzero: | |||
1961 | case Builtin::BI__builtin_bzero: { | |||
1962 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
1963 | Value *SizeVal = EmitScalarExpr(E->getArg(1)); | |||
1964 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), | |||
1965 | E->getArg(0)->getExprLoc(), FD, 0); | |||
1966 | Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); | |||
1967 | return RValue::get(nullptr); | |||
1968 | } | |||
1969 | case Builtin::BImemcpy: | |||
1970 | case Builtin::BI__builtin_memcpy: { | |||
1971 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
1972 | Address Src = EmitPointerWithAlignment(E->getArg(1)); | |||
1973 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); | |||
1974 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), | |||
1975 | E->getArg(0)->getExprLoc(), FD, 0); | |||
1976 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), | |||
1977 | E->getArg(1)->getExprLoc(), FD, 1); | |||
1978 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); | |||
1979 | return RValue::get(Dest.getPointer()); | |||
1980 | } | |||
1981 | ||||
1982 | case Builtin::BI__builtin_char_memchr: | |||
1983 | BuiltinID = Builtin::BI__builtin_memchr; | |||
1984 | break; | |||
1985 | ||||
1986 | case Builtin::BI__builtin___memcpy_chk: { | |||
1987 | // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2. | |||
1988 | llvm::APSInt Size, DstSize; | |||
1989 | if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || | |||
1990 | !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) | |||
1991 | break; | |||
1992 | if (Size.ugt(DstSize)) | |||
1993 | break; | |||
1994 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
1995 | Address Src = EmitPointerWithAlignment(E->getArg(1)); | |||
1996 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); | |||
1997 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); | |||
1998 | return RValue::get(Dest.getPointer()); | |||
1999 | } | |||
2000 | ||||
2001 | case Builtin::BI__builtin_objc_memmove_collectable: { | |||
2002 | Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); | |||
2003 | Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); | |||
2004 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); | |||
2005 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, | |||
2006 | DestAddr, SrcAddr, SizeVal); | |||
2007 | return RValue::get(DestAddr.getPointer()); | |||
2008 | } | |||
2009 | ||||
2010 | case Builtin::BI__builtin___memmove_chk: { | |||
2011 | // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2. | |||
2012 | llvm::APSInt Size, DstSize; | |||
2013 | if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || | |||
2014 | !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) | |||
2015 | break; | |||
2016 | if (Size.ugt(DstSize)) | |||
2017 | break; | |||
2018 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
2019 | Address Src = EmitPointerWithAlignment(E->getArg(1)); | |||
2020 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); | |||
2021 | Builder.CreateMemMove(Dest, Src, SizeVal, false); | |||
2022 | return RValue::get(Dest.getPointer()); | |||
2023 | } | |||
2024 | ||||
2025 | case Builtin::BImemmove: | |||
2026 | case Builtin::BI__builtin_memmove: { | |||
2027 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
2028 | Address Src = EmitPointerWithAlignment(E->getArg(1)); | |||
2029 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); | |||
2030 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), | |||
2031 | E->getArg(0)->getExprLoc(), FD, 0); | |||
2032 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), | |||
2033 | E->getArg(1)->getExprLoc(), FD, 1); | |||
2034 | Builder.CreateMemMove(Dest, Src, SizeVal, false); | |||
2035 | return RValue::get(Dest.getPointer()); | |||
2036 | } | |||
2037 | case Builtin::BImemset: | |||
2038 | case Builtin::BI__builtin_memset: { | |||
2039 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
2040 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), | |||
2041 | Builder.getInt8Ty()); | |||
2042 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); | |||
2043 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), | |||
2044 | E->getArg(0)->getExprLoc(), FD, 0); | |||
2045 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); | |||
2046 | return RValue::get(Dest.getPointer()); | |||
2047 | } | |||
2048 | case Builtin::BI__builtin___memset_chk: { | |||
2049 | // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2. | |||
2050 | llvm::APSInt Size, DstSize; | |||
2051 | if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) || | |||
2052 | !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext())) | |||
2053 | break; | |||
2054 | if (Size.ugt(DstSize)) | |||
2055 | break; | |||
2056 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
2057 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), | |||
2058 | Builder.getInt8Ty()); | |||
2059 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); | |||
2060 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); | |||
2061 | return RValue::get(Dest.getPointer()); | |||
2062 | } | |||
2063 | case Builtin::BI__builtin_wmemcmp: { | |||
2064 | // The MSVC runtime library does not provide a definition of wmemcmp, so we | |||
2065 | // need an inline implementation. | |||
2066 | if (!getTarget().getTriple().isOSMSVCRT()) | |||
2067 | break; | |||
2068 | ||||
2069 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); | |||
2070 | ||||
2071 | Value *Dst = EmitScalarExpr(E->getArg(0)); | |||
2072 | Value *Src = EmitScalarExpr(E->getArg(1)); | |||
2073 | Value *Size = EmitScalarExpr(E->getArg(2)); | |||
2074 | ||||
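| // Expand to an element-by-element loop: return 1 as soon as *dst > *src, -1 | |||
| // as soon as *dst < *src, and 0 once Size elements compare equal (or Size was | |||
| // 0 to begin with). Comparisons are unsigned, since wchar_t is an unsigned | |||
| // 16-bit type on the MSVC targets that take this path. | |||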
2075 | BasicBlock *Entry = Builder.GetInsertBlock(); | |||
2076 | BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt"); | |||
2077 | BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt"); | |||
2078 | BasicBlock *Next = createBasicBlock("wmemcmp.next"); | |||
2079 | BasicBlock *Exit = createBasicBlock("wmemcmp.exit"); | |||
2080 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); | |||
2081 | Builder.CreateCondBr(SizeEq0, Exit, CmpGT); | |||
2082 | ||||
2083 | EmitBlock(CmpGT); | |||
2084 | PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2); | |||
2085 | DstPhi->addIncoming(Dst, Entry); | |||
2086 | PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2); | |||
2087 | SrcPhi->addIncoming(Src, Entry); | |||
2088 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); | |||
2089 | SizePhi->addIncoming(Size, Entry); | |||
2090 | CharUnits WCharAlign = | |||
2091 | getContext().getTypeAlignInChars(getContext().WCharTy); | |||
2092 | Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign); | |||
2093 | Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign); | |||
2094 | Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh); | |||
2095 | Builder.CreateCondBr(DstGtSrc, Exit, CmpLT); | |||
2096 | ||||
2097 | EmitBlock(CmpLT); | |||
2098 | Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh); | |||
2099 | Builder.CreateCondBr(DstLtSrc, Exit, Next); | |||
2100 | ||||
2101 | EmitBlock(Next); | |||
2102 | Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1); | |||
2103 | Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1); | |||
2104 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); | |||
2105 | Value *NextSizeEq0 = | |||
2106 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); | |||
2107 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT); | |||
2108 | DstPhi->addIncoming(NextDst, Next); | |||
2109 | SrcPhi->addIncoming(NextSrc, Next); | |||
2110 | SizePhi->addIncoming(NextSize, Next); | |||
2111 | ||||
2112 | EmitBlock(Exit); | |||
2113 | PHINode *Ret = Builder.CreatePHI(IntTy, 4); | |||
2114 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry); | |||
2115 | Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT); | |||
2116 | Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT); | |||
2117 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Next); | |||
2118 | return RValue::get(Ret); | |||
2119 | } | |||
2120 | case Builtin::BI__builtin_dwarf_cfa: { | |||
2121 | // The offset in bytes from the first argument to the CFA. | |||
2122 | // | |||
2123 | // Why on earth is this in the frontend? Is there any reason at | |||
2124 | // all that the backend can't reasonably determine this while | |||
2125 | // lowering llvm.eh.dwarf.cfa()? | |||
2126 | // | |||
2127 | // TODO: If there's a satisfactory reason, add a target hook for | |||
2128 | // this instead of hard-coding 0, which is correct for most targets. | |||
2129 | int32_t Offset = 0; | |||
2130 | ||||
2131 | Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); | |||
2132 | return RValue::get(Builder.CreateCall(F, | |||
2133 | llvm::ConstantInt::get(Int32Ty, Offset))); | |||
2134 | } | |||
2135 | case Builtin::BI__builtin_return_address: { | |||
2136 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), | |||
2137 | getContext().UnsignedIntTy); | |||
2138 | Value *F = CGM.getIntrinsic(Intrinsic::returnaddress); | |||
2139 | return RValue::get(Builder.CreateCall(F, Depth)); | |||
2140 | } | |||
2141 | case Builtin::BI_ReturnAddress: { | |||
2142 | Value *F = CGM.getIntrinsic(Intrinsic::returnaddress); | |||
2143 | return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); | |||
2144 | } | |||
2145 | case Builtin::BI__builtin_frame_address: { | |||
2146 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), | |||
2147 | getContext().UnsignedIntTy); | |||
2148 | Value *F = CGM.getIntrinsic(Intrinsic::frameaddress); | |||
2149 | return RValue::get(Builder.CreateCall(F, Depth)); | |||
2150 | } | |||
2151 | case Builtin::BI__builtin_extract_return_addr: { | |||
2152 | Value *Address = EmitScalarExpr(E->getArg(0)); | |||
2153 | Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); | |||
2154 | return RValue::get(Result); | |||
2155 | } | |||
2156 | case Builtin::BI__builtin_frob_return_addr: { | |||
2157 | Value *Address = EmitScalarExpr(E->getArg(0)); | |||
2158 | Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); | |||
2159 | return RValue::get(Result); | |||
2160 | } | |||
2161 | case Builtin::BI__builtin_dwarf_sp_column: { | |||
2162 | llvm::IntegerType *Ty | |||
2163 | = cast<llvm::IntegerType>(ConvertType(E->getType())); | |||
2164 | int Column = getTargetHooks().getDwarfEHStackPointer(CGM); | |||
2165 | if (Column == -1) { | |||
2166 | CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); | |||
2167 | return RValue::get(llvm::UndefValue::get(Ty)); | |||
2168 | } | |||
2169 | return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); | |||
2170 | } | |||
2171 | case Builtin::BI__builtin_init_dwarf_reg_size_table: { | |||
2172 | Value *Address = EmitScalarExpr(E->getArg(0)); | |||
2173 | if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) | |||
2174 | CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); | |||
2175 | return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); | |||
2176 | } | |||
2177 | case Builtin::BI__builtin_eh_return: { | |||
2178 | Value *Int = EmitScalarExpr(E->getArg(0)); | |||
2179 | Value *Ptr = EmitScalarExpr(E->getArg(1)); | |||
2180 | ||||
2181 | llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); | |||
2182 | assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && | |||
2183 | "LLVM's __builtin_eh_return only supports 32- and 64-bit variants"); | |||
2184 | Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32 | |||
2185 | ? Intrinsic::eh_return_i32 | |||
2186 | : Intrinsic::eh_return_i64); | |||
2187 | Builder.CreateCall(F, {Int, Ptr}); | |||
2188 | Builder.CreateUnreachable(); | |||
2189 | ||||
2190 | // We do need to preserve an insertion point. | |||
2191 | EmitBlock(createBasicBlock("builtin_eh_return.cont")); | |||
2192 | ||||
2193 | return RValue::get(nullptr); | |||
2194 | } | |||
2195 | case Builtin::BI__builtin_unwind_init: { | |||
2196 | Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); | |||
2197 | return RValue::get(Builder.CreateCall(F)); | |||
2198 | } | |||
2199 | case Builtin::BI__builtin_extend_pointer: { | |||
2200 | // Extends a pointer to the size of an _Unwind_Word, which is | |||
2201 | // uint64_t on all platforms. Generally this gets poked into a | |||
2202 | // register and eventually used as an address, so if the | |||
2203 | // addressing registers are wider than pointers and the platform | |||
2204 | // doesn't implicitly ignore high-order bits when doing | |||
2205 | // addressing, we need to make sure we zext / sext based on | |||
2206 | // the platform's expectations. | |||
2207 | // | |||
2208 | // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html | |||
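// Illustrative sketch (not part of the original source): on a 32-bit target
// that requests sign extension this emits roughly
//   %cast = ptrtoint i8* %p to i32
//   %ext  = sext i32 %cast to i64
// whereas a 64-bit target simply returns the ptrtoint result.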
2209 | ||||
2210 | // Cast the pointer to intptr_t. | |||
2211 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
2212 | Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); | |||
2213 | ||||
2214 | // If that's 64 bits, we're done. | |||
2215 | if (IntPtrTy->getBitWidth() == 64) | |||
2216 | return RValue::get(Result); | |||
2217 | ||||
2218 | // Otherwise, ask the target hooks what to do. | |||
2219 | if (getTargetHooks().extendPointerWithSExt()) | |||
2220 | return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); | |||
2221 | else | |||
2222 | return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); | |||
2223 | } | |||
2224 | case Builtin::BI__builtin_setjmp: { | |||
2225 | // Buffer is a void**. | |||
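// Assumed buffer layout for llvm.eh.sjlj.setjmp (per the LLVM LangRef): word 0
// holds the frame pointer (stored here), word 1 is filled in by the intrinsic
// with the resume address, and word 2 holds the stack pointer (stored below).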
2226 | Address Buf = EmitPointerWithAlignment(E->getArg(0)); | |||
2227 | ||||
2228 | // Store the frame pointer to the setjmp buffer. | |||
2229 | Value *FrameAddr = | |||
2230 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), | |||
2231 | ConstantInt::get(Int32Ty, 0)); | |||
2232 | Builder.CreateStore(FrameAddr, Buf); | |||
2233 | ||||
2234 | // Store the stack pointer to the setjmp buffer. | |||
2235 | Value *StackAddr = | |||
2236 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); | |||
2237 | Address StackSaveSlot = | |||
2238 | Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize()); | |||
2239 | Builder.CreateStore(StackAddr, StackSaveSlot); | |||
2240 | ||||
2241 | // Call LLVM's EH setjmp, which is lightweight. | |||
2242 | Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); | |||
2243 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); | |||
2244 | return RValue::get(Builder.CreateCall(F, Buf.getPointer())); | |||
2245 | } | |||
2246 | case Builtin::BI__builtin_longjmp: { | |||
2247 | Value *Buf = EmitScalarExpr(E->getArg(0)); | |||
2248 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); | |||
2249 | ||||
2250 | // Call LLVM's EH longjmp, which is lightweight. | |||
2251 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); | |||
2252 | ||||
2253 | // longjmp doesn't return; mark this as unreachable. | |||
2254 | Builder.CreateUnreachable(); | |||
2255 | ||||
2256 | // We do need to preserve an insertion point. | |||
2257 | EmitBlock(createBasicBlock("longjmp.cont")); | |||
2258 | ||||
2259 | return RValue::get(nullptr); | |||
2260 | } | |||
2261 | case Builtin::BI__sync_fetch_and_add: | |||
2262 | case Builtin::BI__sync_fetch_and_sub: | |||
2263 | case Builtin::BI__sync_fetch_and_or: | |||
2264 | case Builtin::BI__sync_fetch_and_and: | |||
2265 | case Builtin::BI__sync_fetch_and_xor: | |||
2266 | case Builtin::BI__sync_fetch_and_nand: | |||
2267 | case Builtin::BI__sync_add_and_fetch: | |||
2268 | case Builtin::BI__sync_sub_and_fetch: | |||
2269 | case Builtin::BI__sync_and_and_fetch: | |||
2270 | case Builtin::BI__sync_or_and_fetch: | |||
2271 | case Builtin::BI__sync_xor_and_fetch: | |||
2272 | case Builtin::BI__sync_nand_and_fetch: | |||
2273 | case Builtin::BI__sync_val_compare_and_swap: | |||
2274 | case Builtin::BI__sync_bool_compare_and_swap: | |||
2275 | case Builtin::BI__sync_lock_test_and_set: | |||
2276 | case Builtin::BI__sync_lock_release: | |||
2277 | case Builtin::BI__sync_swap: | |||
2278 | llvm_unreachable("Shouldn't make it through sema"); | |||
2279 | case Builtin::BI__sync_fetch_and_add_1: | |||
2280 | case Builtin::BI__sync_fetch_and_add_2: | |||
2281 | case Builtin::BI__sync_fetch_and_add_4: | |||
2282 | case Builtin::BI__sync_fetch_and_add_8: | |||
2283 | case Builtin::BI__sync_fetch_and_add_16: | |||
2284 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); | |||
2285 | case Builtin::BI__sync_fetch_and_sub_1: | |||
2286 | case Builtin::BI__sync_fetch_and_sub_2: | |||
2287 | case Builtin::BI__sync_fetch_and_sub_4: | |||
2288 | case Builtin::BI__sync_fetch_and_sub_8: | |||
2289 | case Builtin::BI__sync_fetch_and_sub_16: | |||
2290 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); | |||
2291 | case Builtin::BI__sync_fetch_and_or_1: | |||
2292 | case Builtin::BI__sync_fetch_and_or_2: | |||
2293 | case Builtin::BI__sync_fetch_and_or_4: | |||
2294 | case Builtin::BI__sync_fetch_and_or_8: | |||
2295 | case Builtin::BI__sync_fetch_and_or_16: | |||
2296 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); | |||
2297 | case Builtin::BI__sync_fetch_and_and_1: | |||
2298 | case Builtin::BI__sync_fetch_and_and_2: | |||
2299 | case Builtin::BI__sync_fetch_and_and_4: | |||
2300 | case Builtin::BI__sync_fetch_and_and_8: | |||
2301 | case Builtin::BI__sync_fetch_and_and_16: | |||
2302 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); | |||
2303 | case Builtin::BI__sync_fetch_and_xor_1: | |||
2304 | case Builtin::BI__sync_fetch_and_xor_2: | |||
2305 | case Builtin::BI__sync_fetch_and_xor_4: | |||
2306 | case Builtin::BI__sync_fetch_and_xor_8: | |||
2307 | case Builtin::BI__sync_fetch_and_xor_16: | |||
2308 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); | |||
2309 | case Builtin::BI__sync_fetch_and_nand_1: | |||
2310 | case Builtin::BI__sync_fetch_and_nand_2: | |||
2311 | case Builtin::BI__sync_fetch_and_nand_4: | |||
2312 | case Builtin::BI__sync_fetch_and_nand_8: | |||
2313 | case Builtin::BI__sync_fetch_and_nand_16: | |||
2314 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); | |||
2315 | ||||
2316 | // Clang extensions: not overloaded yet. | |||
2317 | case Builtin::BI__sync_fetch_and_min: | |||
2318 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); | |||
2319 | case Builtin::BI__sync_fetch_and_max: | |||
2320 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); | |||
2321 | case Builtin::BI__sync_fetch_and_umin: | |||
2322 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); | |||
2323 | case Builtin::BI__sync_fetch_and_umax: | |||
2324 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); | |||
2325 | ||||
2326 | case Builtin::BI__sync_add_and_fetch_1: | |||
2327 | case Builtin::BI__sync_add_and_fetch_2: | |||
2328 | case Builtin::BI__sync_add_and_fetch_4: | |||
2329 | case Builtin::BI__sync_add_and_fetch_8: | |||
2330 | case Builtin::BI__sync_add_and_fetch_16: | |||
2331 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, | |||
2332 | llvm::Instruction::Add); | |||
2333 | case Builtin::BI__sync_sub_and_fetch_1: | |||
2334 | case Builtin::BI__sync_sub_and_fetch_2: | |||
2335 | case Builtin::BI__sync_sub_and_fetch_4: | |||
2336 | case Builtin::BI__sync_sub_and_fetch_8: | |||
2337 | case Builtin::BI__sync_sub_and_fetch_16: | |||
2338 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, | |||
2339 | llvm::Instruction::Sub); | |||
2340 | case Builtin::BI__sync_and_and_fetch_1: | |||
2341 | case Builtin::BI__sync_and_and_fetch_2: | |||
2342 | case Builtin::BI__sync_and_and_fetch_4: | |||
2343 | case Builtin::BI__sync_and_and_fetch_8: | |||
2344 | case Builtin::BI__sync_and_and_fetch_16: | |||
2345 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, | |||
2346 | llvm::Instruction::And); | |||
2347 | case Builtin::BI__sync_or_and_fetch_1: | |||
2348 | case Builtin::BI__sync_or_and_fetch_2: | |||
2349 | case Builtin::BI__sync_or_and_fetch_4: | |||
2350 | case Builtin::BI__sync_or_and_fetch_8: | |||
2351 | case Builtin::BI__sync_or_and_fetch_16: | |||
2352 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, | |||
2353 | llvm::Instruction::Or); | |||
2354 | case Builtin::BI__sync_xor_and_fetch_1: | |||
2355 | case Builtin::BI__sync_xor_and_fetch_2: | |||
2356 | case Builtin::BI__sync_xor_and_fetch_4: | |||
2357 | case Builtin::BI__sync_xor_and_fetch_8: | |||
2358 | case Builtin::BI__sync_xor_and_fetch_16: | |||
2359 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, | |||
2360 | llvm::Instruction::Xor); | |||
2361 | case Builtin::BI__sync_nand_and_fetch_1: | |||
2362 | case Builtin::BI__sync_nand_and_fetch_2: | |||
2363 | case Builtin::BI__sync_nand_and_fetch_4: | |||
2364 | case Builtin::BI__sync_nand_and_fetch_8: | |||
2365 | case Builtin::BI__sync_nand_and_fetch_16: | |||
2366 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, | |||
2367 | llvm::Instruction::And, true); | |||
2368 | ||||
2369 | case Builtin::BI__sync_val_compare_and_swap_1: | |||
2370 | case Builtin::BI__sync_val_compare_and_swap_2: | |||
2371 | case Builtin::BI__sync_val_compare_and_swap_4: | |||
2372 | case Builtin::BI__sync_val_compare_and_swap_8: | |||
2373 | case Builtin::BI__sync_val_compare_and_swap_16: | |||
2374 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); | |||
2375 | ||||
2376 | case Builtin::BI__sync_bool_compare_and_swap_1: | |||
2377 | case Builtin::BI__sync_bool_compare_and_swap_2: | |||
2378 | case Builtin::BI__sync_bool_compare_and_swap_4: | |||
2379 | case Builtin::BI__sync_bool_compare_and_swap_8: | |||
2380 | case Builtin::BI__sync_bool_compare_and_swap_16: | |||
2381 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); | |||
2382 | ||||
2383 | case Builtin::BI__sync_swap_1: | |||
2384 | case Builtin::BI__sync_swap_2: | |||
2385 | case Builtin::BI__sync_swap_4: | |||
2386 | case Builtin::BI__sync_swap_8: | |||
2387 | case Builtin::BI__sync_swap_16: | |||
2388 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); | |||
2389 | ||||
2390 | case Builtin::BI__sync_lock_test_and_set_1: | |||
2391 | case Builtin::BI__sync_lock_test_and_set_2: | |||
2392 | case Builtin::BI__sync_lock_test_and_set_4: | |||
2393 | case Builtin::BI__sync_lock_test_and_set_8: | |||
2394 | case Builtin::BI__sync_lock_test_and_set_16: | |||
2395 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); | |||
2396 | ||||
2397 | case Builtin::BI__sync_lock_release_1: | |||
2398 | case Builtin::BI__sync_lock_release_2: | |||
2399 | case Builtin::BI__sync_lock_release_4: | |||
2400 | case Builtin::BI__sync_lock_release_8: | |||
2401 | case Builtin::BI__sync_lock_release_16: { | |||
2402 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
2403 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); | |||
2404 | CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); | |||
2405 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), | |||
2406 | StoreSize.getQuantity() * 8); | |||
2407 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); | |||
2408 | llvm::StoreInst *Store = | |||
2409 | Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, | |||
2410 | StoreSize); | |||
2411 | Store->setAtomic(llvm::AtomicOrdering::Release); | |||
2412 | return RValue::get(nullptr); | |||
2413 | } | |||
2414 | ||||
2415 | case Builtin::BI__sync_synchronize: { | |||
2416 | // We assume this is supposed to correspond to a C++0x-style | |||
2417 | // sequentially-consistent fence (i.e. this is only usable for | |||
2418 | // synchronization, not device I/O or anything like that). This intrinsic | |||
2419 | // is really badly designed in the sense that in theory, there isn't | |||
2420 | // any way to safely use it... but in practice, it mostly works | |||
2421 | // to use it with non-atomic loads and stores to get acquire/release | |||
2422 | // semantics. | |||
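// (Illustrative: the emitted IR for this builtin is a single "fence seq_cst"
// instruction.)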
2423 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); | |||
2424 | return RValue::get(nullptr); | |||
2425 | } | |||
2426 | ||||
2427 | case Builtin::BI__builtin_nontemporal_load: | |||
2428 | return RValue::get(EmitNontemporalLoad(*this, E)); | |||
2429 | case Builtin::BI__builtin_nontemporal_store: | |||
2430 | return RValue::get(EmitNontemporalStore(*this, E)); | |||
2431 | case Builtin::BI__c11_atomic_is_lock_free: | |||
2432 | case Builtin::BI__atomic_is_lock_free: { | |||
2433 | // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the | |||
2434 | // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since | |||
2435 | // _Atomic(T) is always properly-aligned. | |||
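// Illustrative call shape (assumed; exact types are target-dependent):
//   call zeroext i1 @__atomic_is_lock_free(i64 %size, i8* %ptr)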
2436 | const char *LibCallName = "__atomic_is_lock_free"; | |||
2437 | CallArgList Args; | |||
2438 | Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), | |||
2439 | getContext().getSizeType()); | |||
2440 | if (BuiltinID == Builtin::BI__atomic_is_lock_free) | |||
2441 | Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), | |||
2442 | getContext().VoidPtrTy); | |||
2443 | else | |||
2444 | Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), | |||
2445 | getContext().VoidPtrTy); | |||
2446 | const CGFunctionInfo &FuncInfo = | |||
2447 | CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); | |||
2448 | llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); | |||
2449 | llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); | |||
2450 | return EmitCall(FuncInfo, CGCallee::forDirect(Func), | |||
2451 | ReturnValueSlot(), Args); | |||
2452 | } | |||
2453 | ||||
2454 | case Builtin::BI__atomic_test_and_set: { | |||
2455 | // Look at the argument type to determine whether this is a volatile | |||
2456 | // operation. The parameter type is always volatile. | |||
2457 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); | |||
2458 | bool Volatile = | |||
2459 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); | |||
2460 | ||||
2461 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
2462 | unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); | |||
2463 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); | |||
2464 | Value *NewVal = Builder.getInt8(1); | |||
2465 | Value *Order = EmitScalarExpr(E->getArg(1)); | |||
2466 | if (isa<llvm::ConstantInt>(Order)) { | |||
2467 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); | |||
2468 | AtomicRMWInst *Result = nullptr; | |||
2469 | switch (ord) { | |||
2470 | case 0: // memory_order_relaxed | |||
2471 | default: // invalid order | |||
2472 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, | |||
2473 | llvm::AtomicOrdering::Monotonic); | |||
2474 | break; | |||
2475 | case 1: // memory_order_consume | |||
2476 | case 2: // memory_order_acquire | |||
2477 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, | |||
2478 | llvm::AtomicOrdering::Acquire); | |||
2479 | break; | |||
2480 | case 3: // memory_order_release | |||
2481 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, | |||
2482 | llvm::AtomicOrdering::Release); | |||
2483 | break; | |||
2484 | case 4: // memory_order_acq_rel | |||
2485 | ||||
2486 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, | |||
2487 | llvm::AtomicOrdering::AcquireRelease); | |||
2488 | break; | |||
2489 | case 5: // memory_order_seq_cst | |||
2490 | Result = Builder.CreateAtomicRMW( | |||
2491 | llvm::AtomicRMWInst::Xchg, Ptr, NewVal, | |||
2492 | llvm::AtomicOrdering::SequentiallyConsistent); | |||
2493 | break; | |||
2494 | } | |||
2495 | Result->setVolatile(Volatile); | |||
2496 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); | |||
2497 | } | |||
2498 | ||||
2499 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); | |||
2500 | ||||
2501 | llvm::BasicBlock *BBs[5] = { | |||
2502 | createBasicBlock("monotonic", CurFn), | |||
2503 | createBasicBlock("acquire", CurFn), | |||
2504 | createBasicBlock("release", CurFn), | |||
2505 | createBasicBlock("acqrel", CurFn), | |||
2506 | createBasicBlock("seqcst", CurFn) | |||
2507 | }; | |||
2508 | llvm::AtomicOrdering Orders[5] = { | |||
2509 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, | |||
2510 | llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, | |||
2511 | llvm::AtomicOrdering::SequentiallyConsistent}; | |||
2512 | ||||
2513 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); | |||
2514 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); | |||
2515 | ||||
2516 | Builder.SetInsertPoint(ContBB); | |||
2517 | PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); | |||
2518 | ||||
2519 | for (unsigned i = 0; i < 5; ++i) { | |||
2520 | Builder.SetInsertPoint(BBs[i]); | |||
2521 | AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, | |||
2522 | Ptr, NewVal, Orders[i]); | |||
2523 | RMW->setVolatile(Volatile); | |||
2524 | Result->addIncoming(RMW, BBs[i]); | |||
2525 | Builder.CreateBr(ContBB); | |||
2526 | } | |||
2527 | ||||
2528 | SI->addCase(Builder.getInt32(0), BBs[0]); | |||
2529 | SI->addCase(Builder.getInt32(1), BBs[1]); | |||
2530 | SI->addCase(Builder.getInt32(2), BBs[1]); | |||
2531 | SI->addCase(Builder.getInt32(3), BBs[2]); | |||
2532 | SI->addCase(Builder.getInt32(4), BBs[3]); | |||
2533 | SI->addCase(Builder.getInt32(5), BBs[4]); | |||
2534 | ||||
2535 | Builder.SetInsertPoint(ContBB); | |||
2536 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); | |||
2537 | } | |||
2538 | ||||
2539 | case Builtin::BI__atomic_clear: { | |||
2540 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); | |||
2541 | bool Volatile = | |||
2542 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); | |||
2543 | ||||
2544 | Address Ptr = EmitPointerWithAlignment(E->getArg(0)); | |||
2545 | unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); | |||
2546 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); | |||
2547 | Value *NewVal = Builder.getInt8(0); | |||
2548 | Value *Order = EmitScalarExpr(E->getArg(1)); | |||
2549 | if (isa<llvm::ConstantInt>(Order)) { | |||
2550 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); | |||
2551 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); | |||
2552 | switch (ord) { | |||
2553 | case 0: // memory_order_relaxed | |||
2554 | default: // invalid order | |||
2555 | Store->setOrdering(llvm::AtomicOrdering::Monotonic); | |||
2556 | break; | |||
2557 | case 3: // memory_order_release | |||
2558 | Store->setOrdering(llvm::AtomicOrdering::Release); | |||
2559 | break; | |||
2560 | case 5: // memory_order_seq_cst | |||
2561 | Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); | |||
2562 | break; | |||
2563 | } | |||
2564 | return RValue::get(nullptr); | |||
2565 | } | |||
2566 | ||||
2567 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); | |||
2568 | ||||
2569 | llvm::BasicBlock *BBs[3] = { | |||
2570 | createBasicBlock("monotonic", CurFn), | |||
2571 | createBasicBlock("release", CurFn), | |||
2572 | createBasicBlock("seqcst", CurFn) | |||
2573 | }; | |||
2574 | llvm::AtomicOrdering Orders[3] = { | |||
2575 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, | |||
2576 | llvm::AtomicOrdering::SequentiallyConsistent}; | |||
2577 | ||||
2578 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); | |||
2579 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); | |||
2580 | ||||
2581 | for (unsigned i = 0; i < 3; ++i) { | |||
2582 | Builder.SetInsertPoint(BBs[i]); | |||
2583 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); | |||
2584 | Store->setOrdering(Orders[i]); | |||
2585 | Builder.CreateBr(ContBB); | |||
2586 | } | |||
2587 | ||||
2588 | SI->addCase(Builder.getInt32(0), BBs[0]); | |||
2589 | SI->addCase(Builder.getInt32(3), BBs[1]); | |||
2590 | SI->addCase(Builder.getInt32(5), BBs[2]); | |||
2591 | ||||
2592 | Builder.SetInsertPoint(ContBB); | |||
2593 | return RValue::get(nullptr); | |||
2594 | } | |||
2595 | ||||
2596 | case Builtin::BI__atomic_thread_fence: | |||
2597 | case Builtin::BI__atomic_signal_fence: | |||
2598 | case Builtin::BI__c11_atomic_thread_fence: | |||
2599 | case Builtin::BI__c11_atomic_signal_fence: { | |||
2600 | llvm::SyncScope::ID SSID; | |||
2601 | if (BuiltinID == Builtin::BI__atomic_signal_fence || | |||
2602 | BuiltinID == Builtin::BI__c11_atomic_signal_fence) | |||
2603 | SSID = llvm::SyncScope::SingleThread; | |||
2604 | else | |||
2605 | SSID = llvm::SyncScope::System; | |||
2606 | Value *Order = EmitScalarExpr(E->getArg(0)); | |||
2607 | if (isa<llvm::ConstantInt>(Order)) { | |||
2608 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); | |||
2609 | switch (ord) { | |||
2610 | case 0: // memory_order_relaxed | |||
2611 | default: // invalid order | |||
2612 | break; | |||
2613 | case 1: // memory_order_consume | |||
2614 | case 2: // memory_order_acquire | |||
2615 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); | |||
2616 | break; | |||
2617 | case 3: // memory_order_release | |||
2618 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); | |||
2619 | break; | |||
2620 | case 4: // memory_order_acq_rel | |||
2621 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); | |||
2622 | break; | |||
2623 | case 5: // memory_order_seq_cst | |||
2624 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); | |||
2625 | break; | |||
2626 | } | |||
2627 | return RValue::get(nullptr); | |||
2628 | } | |||
2629 | ||||
2630 | llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; | |||
2631 | AcquireBB = createBasicBlock("acquire", CurFn); | |||
2632 | ReleaseBB = createBasicBlock("release", CurFn); | |||
2633 | AcqRelBB = createBasicBlock("acqrel", CurFn); | |||
2634 | SeqCstBB = createBasicBlock("seqcst", CurFn); | |||
2635 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); | |||
2636 | ||||
2637 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); | |||
2638 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); | |||
2639 | ||||
2640 | Builder.SetInsertPoint(AcquireBB); | |||
2641 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); | |||
2642 | Builder.CreateBr(ContBB); | |||
2643 | SI->addCase(Builder.getInt32(1), AcquireBB); | |||
2644 | SI->addCase(Builder.getInt32(2), AcquireBB); | |||
2645 | ||||
2646 | Builder.SetInsertPoint(ReleaseBB); | |||
2647 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); | |||
2648 | Builder.CreateBr(ContBB); | |||
2649 | SI->addCase(Builder.getInt32(3), ReleaseBB); | |||
2650 | ||||
2651 | Builder.SetInsertPoint(AcqRelBB); | |||
2652 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); | |||
2653 | Builder.CreateBr(ContBB); | |||
2654 | SI->addCase(Builder.getInt32(4), AcqRelBB); | |||
2655 | ||||
2656 | Builder.SetInsertPoint(SeqCstBB); | |||
2657 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); | |||
2658 | Builder.CreateBr(ContBB); | |||
2659 | SI->addCase(Builder.getInt32(5), SeqCstBB); | |||
2660 | ||||
2661 | Builder.SetInsertPoint(ContBB); | |||
2662 | return RValue::get(nullptr); | |||
2663 | } | |||
2664 | ||||
2665 | case Builtin::BI__builtin_signbit: | |||
2666 | case Builtin::BI__builtin_signbitf: | |||
2667 | case Builtin::BI__builtin_signbitl: { | |||
2668 | return RValue::get( | |||
2669 | Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), | |||
2670 | ConvertType(E->getType()))); | |||
2671 | } | |||
2672 | case Builtin::BI__annotation: { | |||
2673 | // Re-encode each wide string to UTF8 and make an MDString. | |||
2674 | SmallVector<Metadata *, 1> Strings; | |||
2675 | for (const Expr *Arg : E->arguments()) { | |||
2676 | const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); | |||
2677 | assert(Str->getCharByteWidth() == 2); | |||
2678 | StringRef WideBytes = Str->getBytes(); | |||
2679 | std::string StrUtf8; | |||
2680 | if (!convertUTF16ToUTF8String( | |||
2681 | makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) { | |||
2682 | CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); | |||
2683 | continue; | |||
2684 | } | |||
2685 | Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8)); | |||
2686 | } | |||
2687 | ||||
2688 | // Build an MDTuple of MDStrings and emit the intrinsic call. | |||
2689 | llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {}); | |||
2690 | MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings); | |||
2691 | Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple)); | |||
2692 | return RValue::getIgnored(); | |||
2693 | } | |||
2694 | case Builtin::BI__builtin_annotation: { | |||
2695 | llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); | |||
2696 | llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, | |||
2697 | AnnVal->getType()); | |||
2698 | ||||
2699 | // Get the annotation string, go through casts. Sema requires this to be a | |||
2700 | // non-wide string literal, potentially cast, so the cast<> is safe. | |||
2701 | const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); | |||
2702 | StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); | |||
2703 | return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc())); | |||
2704 | } | |||
2705 | case Builtin::BI__builtin_addcb: | |||
2706 | case Builtin::BI__builtin_addcs: | |||
2707 | case Builtin::BI__builtin_addc: | |||
2708 | case Builtin::BI__builtin_addcl: | |||
2709 | case Builtin::BI__builtin_addcll: | |||
2710 | case Builtin::BI__builtin_subcb: | |||
2711 | case Builtin::BI__builtin_subcs: | |||
2712 | case Builtin::BI__builtin_subc: | |||
2713 | case Builtin::BI__builtin_subcl: | |||
2714 | case Builtin::BI__builtin_subcll: { | |||
2715 | ||||
2716 | // We translate all of these builtins from expressions of the form: | |||
2717 | // int x = ..., y = ..., carryin = ..., carryout, result; | |||
2718 | // result = __builtin_addc(x, y, carryin, &carryout); | |||
2719 | // | |||
2720 | // to LLVM IR of the form: | |||
2721 | // | |||
2722 | // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y) | |||
2723 | // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0 | |||
2724 | // %carry1 = extractvalue {i32, i1} %tmp1, 1 | |||
2725 | // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1, | |||
2726 | // i32 %carryin) | |||
2727 | // %result = extractvalue {i32, i1} %tmp2, 0 | |||
2728 | // %carry2 = extractvalue {i32, i1} %tmp2, 1 | |||
2729 | // %tmp3 = or i1 %carry1, %carry2 | |||
2730 | // %tmp4 = zext i1 %tmp3 to i32 | |||
2731 | // store i32 %tmp4, i32* %carryout | |||
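// Typical use (illustrative, not from this file): chaining limbs of a
// multi-precision addition, e.g.
//   unsigned carry;
//   r0 = __builtin_addc(a0, b0, 0, &carry);
//   r1 = __builtin_addc(a1, b1, carry, &carry);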
2732 | ||||
2733 | // Scalarize our inputs. | |||
2734 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); | |||
2735 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); | |||
2736 | llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); | |||
2737 | Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); | |||
2738 | ||||
2739 | // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. | |||
2740 | llvm::Intrinsic::ID IntrinsicId; | |||
2741 | switch (BuiltinID) { | |||
2742 | default: llvm_unreachable("Unknown multiprecision builtin id."); | |||
2743 | case Builtin::BI__builtin_addcb: | |||
2744 | case Builtin::BI__builtin_addcs: | |||
2745 | case Builtin::BI__builtin_addc: | |||
2746 | case Builtin::BI__builtin_addcl: | |||
2747 | case Builtin::BI__builtin_addcll: | |||
2748 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; | |||
2749 | break; | |||
2750 | case Builtin::BI__builtin_subcb: | |||
2751 | case Builtin::BI__builtin_subcs: | |||
2752 | case Builtin::BI__builtin_subc: | |||
2753 | case Builtin::BI__builtin_subcl: | |||
2754 | case Builtin::BI__builtin_subcll: | |||
2755 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; | |||
2756 | break; | |||
2757 | } | |||
2758 | ||||
2759 | // Construct our resulting LLVM IR expression. | |||
2760 | llvm::Value *Carry1; | |||
2761 | llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, | |||
2762 | X, Y, Carry1); | |||
2763 | llvm::Value *Carry2; | |||
2764 | llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, | |||
2765 | Sum1, Carryin, Carry2); | |||
2766 | llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), | |||
2767 | X->getType()); | |||
2768 | Builder.CreateStore(CarryOut, CarryOutPtr); | |||
2769 | return RValue::get(Sum2); | |||
2770 | } | |||
2771 | ||||
2772 | case Builtin::BI__builtin_add_overflow: | |||
2773 | case Builtin::BI__builtin_sub_overflow: | |||
2774 | case Builtin::BI__builtin_mul_overflow: { | |||
2775 | const clang::Expr *LeftArg = E->getArg(0); | |||
2776 | const clang::Expr *RightArg = E->getArg(1); | |||
2777 | const clang::Expr *ResultArg = E->getArg(2); | |||
2778 | ||||
2779 | clang::QualType ResultQTy = | |||
2780 | ResultArg->getType()->castAs<PointerType>()->getPointeeType(); | |||
2781 | ||||
2782 | WidthAndSignedness LeftInfo = | |||
2783 | getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); | |||
2784 | WidthAndSignedness RightInfo = | |||
2785 | getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); | |||
2786 | WidthAndSignedness ResultInfo = | |||
2787 | getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy); | |||
2788 | ||||
2789 | // Handle mixed-sign multiplication as a special case, because adding | |||
2790 | // runtime or backend support for our generic irgen would be too expensive. | |||
2791 | if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo)) | |||
2792 | return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg, | |||
2793 | RightInfo, ResultArg, ResultQTy, | |||
2794 | ResultInfo); | |||
2795 | ||||
2796 | WidthAndSignedness EncompassingInfo = | |||
2797 | EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); | |||
2798 | ||||
2799 | llvm::Type *EncompassingLLVMTy = | |||
2800 | llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); | |||
2801 | ||||
2802 | llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy); | |||
2803 | ||||
2804 | llvm::Intrinsic::ID IntrinsicId; | |||
2805 | switch (BuiltinID) { | |||
2806 | default: | |||
2807 | llvm_unreachable("Unknown overflow builtin id."); | |||
2808 | case Builtin::BI__builtin_add_overflow: | |||
2809 | IntrinsicId = EncompassingInfo.Signed | |||
2810 | ? llvm::Intrinsic::sadd_with_overflow | |||
2811 | : llvm::Intrinsic::uadd_with_overflow; | |||
2812 | break; | |||
2813 | case Builtin::BI__builtin_sub_overflow: | |||
2814 | IntrinsicId = EncompassingInfo.Signed | |||
2815 | ? llvm::Intrinsic::ssub_with_overflow | |||
2816 | : llvm::Intrinsic::usub_with_overflow; | |||
2817 | break; | |||
2818 | case Builtin::BI__builtin_mul_overflow: | |||
2819 | IntrinsicId = EncompassingInfo.Signed | |||
2820 | ? llvm::Intrinsic::smul_with_overflow | |||
2821 | : llvm::Intrinsic::umul_with_overflow; | |||
2822 | break; | |||
2823 | } | |||
2824 | ||||
2825 | llvm::Value *Left = EmitScalarExpr(LeftArg); | |||
2826 | llvm::Value *Right = EmitScalarExpr(RightArg); | |||
2827 | Address ResultPtr = EmitPointerWithAlignment(ResultArg); | |||
2828 | ||||
2829 | // Extend each operand to the encompassing type. | |||
2830 | Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed); | |||
2831 | Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed); | |||
2832 | ||||
2833 | // Perform the operation on the extended values. | |||
2834 | llvm::Value *Overflow, *Result; | |||
2835 | Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow); | |||
2836 | ||||
2837 | if (EncompassingInfo.Width > ResultInfo.Width) { | |||
2838 | // The encompassing type is wider than the result type, so we need to | |||
2839 | // truncate it. | |||
2840 | llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy); | |||
2841 | ||||
2842 | // To see if the truncation caused an overflow, we will extend | |||
2843 | // the result and then compare it to the original result. | |||
2844 | llvm::Value *ResultTruncExt = Builder.CreateIntCast( | |||
2845 | ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed); | |||
2846 | llvm::Value *TruncationOverflow = | |||
2847 | Builder.CreateICmpNE(Result, ResultTruncExt); | |||
2848 | ||||
2849 | Overflow = Builder.CreateOr(Overflow, TruncationOverflow); | |||
2850 | Result = ResultTrunc; | |||
2851 | } | |||
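// Worked example (illustrative): __builtin_add_overflow with two int64_t
// operands and an int32_t* result computes the sum with
// @llvm.sadd.with.overflow.i64, truncates it to i32, extends the truncated
// value back to i64, and reports overflow if either the i64 addition
// overflowed or the round-trip changed the value.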
2852 | ||||
2853 | // Finally, store the result using the pointer. | |||
2854 | bool isVolatile = | |||
2855 | ResultArg->getType()->getPointeeType().isVolatileQualified(); | |||
2856 | Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); | |||
2857 | ||||
2858 | return RValue::get(Overflow); | |||
2859 | } | |||
2860 | ||||
2861 | case Builtin::BI__builtin_uadd_overflow: | |||
2862 | case Builtin::BI__builtin_uaddl_overflow: | |||
2863 | case Builtin::BI__builtin_uaddll_overflow: | |||
2864 | case Builtin::BI__builtin_usub_overflow: | |||
2865 | case Builtin::BI__builtin_usubl_overflow: | |||
2866 | case Builtin::BI__builtin_usubll_overflow: | |||
2867 | case Builtin::BI__builtin_umul_overflow: | |||
2868 | case Builtin::BI__builtin_umull_overflow: | |||
2869 | case Builtin::BI__builtin_umulll_overflow: | |||
2870 | case Builtin::BI__builtin_sadd_overflow: | |||
2871 | case Builtin::BI__builtin_saddl_overflow: | |||
2872 | case Builtin::BI__builtin_saddll_overflow: | |||
2873 | case Builtin::BI__builtin_ssub_overflow: | |||
2874 | case Builtin::BI__builtin_ssubl_overflow: | |||
2875 | case Builtin::BI__builtin_ssubll_overflow: | |||
2876 | case Builtin::BI__builtin_smul_overflow: | |||
2877 | case Builtin::BI__builtin_smull_overflow: | |||
2878 | case Builtin::BI__builtin_smulll_overflow: { | |||
2879 | ||||
2880 | // We translate all of these builtins directly to the relevant llvm IR node. | |||
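// For example (illustrative): __builtin_sadd_overflow(x, y, &r) becomes
//   %pair = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
// with the first element stored to %r and the i1 overflow bit returned.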
2881 | ||||
2882 | // Scalarize our inputs. | |||
2883 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); | |||
2884 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); | |||
2885 | Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); | |||
2886 | ||||
2887 | // Decide which of the overflow intrinsics we are lowering to: | |||
2888 | llvm::Intrinsic::ID IntrinsicId; | |||
2889 | switch (BuiltinID) { | |||
2890 | default: llvm_unreachable("Unknown overflow builtin id."); | |||
2891 | case Builtin::BI__builtin_uadd_overflow: | |||
2892 | case Builtin::BI__builtin_uaddl_overflow: | |||
2893 | case Builtin::BI__builtin_uaddll_overflow: | |||
2894 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; | |||
2895 | break; | |||
2896 | case Builtin::BI__builtin_usub_overflow: | |||
2897 | case Builtin::BI__builtin_usubl_overflow: | |||
2898 | case Builtin::BI__builtin_usubll_overflow: | |||
2899 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; | |||
2900 | break; | |||
2901 | case Builtin::BI__builtin_umul_overflow: | |||
2902 | case Builtin::BI__builtin_umull_overflow: | |||
2903 | case Builtin::BI__builtin_umulll_overflow: | |||
2904 | IntrinsicId = llvm::Intrinsic::umul_with_overflow; | |||
2905 | break; | |||
2906 | case Builtin::BI__builtin_sadd_overflow: | |||
2907 | case Builtin::BI__builtin_saddl_overflow: | |||
2908 | case Builtin::BI__builtin_saddll_overflow: | |||
2909 | IntrinsicId = llvm::Intrinsic::sadd_with_overflow; | |||
2910 | break; | |||
2911 | case Builtin::BI__builtin_ssub_overflow: | |||
2912 | case Builtin::BI__builtin_ssubl_overflow: | |||
2913 | case Builtin::BI__builtin_ssubll_overflow: | |||
2914 | IntrinsicId = llvm::Intrinsic::ssub_with_overflow; | |||
2915 | break; | |||
2916 | case Builtin::BI__builtin_smul_overflow: | |||
2917 | case Builtin::BI__builtin_smull_overflow: | |||
2918 | case Builtin::BI__builtin_smulll_overflow: | |||
2919 | IntrinsicId = llvm::Intrinsic::smul_with_overflow; | |||
2920 | break; | |||
2921 | } | |||
2922 | ||||
2923 | ||||
2924 | llvm::Value *Carry; | |||
2925 | llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); | |||
2926 | Builder.CreateStore(Sum, SumOutPtr); | |||
2927 | ||||
2928 | return RValue::get(Carry); | |||
2929 | } | |||
2930 | case Builtin::BI__builtin_addressof: | |||
2931 | return RValue::get(EmitLValue(E->getArg(0)).getPointer()); | |||
2932 | case Builtin::BI__builtin_operator_new: | |||
2933 | return EmitBuiltinNewDeleteCall( | |||
2934 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); | |||
2935 | case Builtin::BI__builtin_operator_delete: | |||
2936 | return EmitBuiltinNewDeleteCall( | |||
2937 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true); | |||
2938 | ||||
2939 | case Builtin::BI__noop: | |||
2940 | // __noop always evaluates to an integer literal zero. | |||
2941 | return RValue::get(ConstantInt::get(IntTy, 0)); | |||
2942 | case Builtin::BI__builtin_call_with_static_chain: { | |||
2943 | const CallExpr *Call = cast<CallExpr>(E->getArg(0)); | |||
2944 | const Expr *Chain = E->getArg(1); | |||
2945 | return EmitCall(Call->getCallee()->getType(), | |||
2946 | EmitCallee(Call->getCallee()), Call, ReturnValue, | |||
2947 | EmitScalarExpr(Chain)); | |||
2948 | } | |||
2949 | case Builtin::BI_InterlockedExchange8: | |||
2950 | case Builtin::BI_InterlockedExchange16: | |||
2951 | case Builtin::BI_InterlockedExchange: | |||
2952 | case Builtin::BI_InterlockedExchangePointer: | |||
2953 | return RValue::get( | |||
2954 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E)); | |||
2955 | case Builtin::BI_InterlockedCompareExchangePointer: { | |||
2956 | llvm::Type *RTy; | |||
2957 | llvm::IntegerType *IntType = | |||
2958 | IntegerType::get(getLLVMContext(), | |||
2959 | getContext().getTypeSize(E->getType())); | |||
2960 | llvm::Type *IntPtrType = IntType->getPointerTo(); | |||
2961 | ||||
2962 | llvm::Value *Destination = | |||
2963 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); | |||
2964 | ||||
2965 | llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); | |||
2966 | RTy = Exchange->getType(); | |||
2967 | Exchange = Builder.CreatePtrToInt(Exchange, IntType); | |||
2968 | ||||
2969 | llvm::Value *Comparand = | |||
2970 | Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); | |||
2971 | ||||
2972 | auto Result = | |||
2973 | Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, | |||
2974 | AtomicOrdering::SequentiallyConsistent, | |||
2975 | AtomicOrdering::SequentiallyConsistent); | |||
2976 | Result->setVolatile(true); | |||
2977 | ||||
2978 | return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, | |||
2979 | 0), | |||
2980 | RTy)); | |||
2981 | } | |||
2982 | case Builtin::BI_InterlockedCompareExchange8: | |||
2983 | case Builtin::BI_InterlockedCompareExchange16: | |||
2984 | case Builtin::BI_InterlockedCompareExchange: | |||
2985 | case Builtin::BI_InterlockedCompareExchange64: { | |||
2986 | AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg( | |||
2987 | EmitScalarExpr(E->getArg(0)), | |||
2988 | EmitScalarExpr(E->getArg(2)), | |||
2989 | EmitScalarExpr(E->getArg(1)), | |||
2990 | AtomicOrdering::SequentiallyConsistent, | |||
2991 | AtomicOrdering::SequentiallyConsistent); | |||
2992 | CXI->setVolatile(true); | |||
2993 | return RValue::get(Builder.CreateExtractValue(CXI, 0)); | |||
2994 | } | |||
2995 | case Builtin::BI_InterlockedIncrement16: | |||
2996 | case Builtin::BI_InterlockedIncrement: | |||
2997 | return RValue::get( | |||
2998 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E)); | |||
2999 | case Builtin::BI_InterlockedDecrement16: | |||
3000 | case Builtin::BI_InterlockedDecrement: | |||
3001 | return RValue::get( | |||
3002 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E)); | |||
3003 | case Builtin::BI_InterlockedAnd8: | |||
3004 | case Builtin::BI_InterlockedAnd16: | |||
3005 | case Builtin::BI_InterlockedAnd: | |||
3006 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E)); | |||
3007 | case Builtin::BI_InterlockedExchangeAdd8: | |||
3008 | case Builtin::BI_InterlockedExchangeAdd16: | |||
3009 | case Builtin::BI_InterlockedExchangeAdd: | |||
3010 | return RValue::get( | |||
3011 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E)); | |||
3012 | case Builtin::BI_InterlockedExchangeSub8: | |||
3013 | case Builtin::BI_InterlockedExchangeSub16: | |||
3014 | case Builtin::BI_InterlockedExchangeSub: | |||
3015 | return RValue::get( | |||
3016 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E)); | |||
3017 | case Builtin::BI_InterlockedOr8: | |||
3018 | case Builtin::BI_InterlockedOr16: | |||
3019 | case Builtin::BI_InterlockedOr: | |||
3020 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E)); | |||
3021 | case Builtin::BI_InterlockedXor8: | |||
3022 | case Builtin::BI_InterlockedXor16: | |||
3023 | case Builtin::BI_InterlockedXor: | |||
3024 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E)); | |||
3025 | ||||
3026 | case Builtin::BI_bittest64: | |||
3027 | case Builtin::BI_bittest: | |||
3028 | case Builtin::BI_bittestandcomplement64: | |||
3029 | case Builtin::BI_bittestandcomplement: | |||
3030 | case Builtin::BI_bittestandreset64: | |||
3031 | case Builtin::BI_bittestandreset: | |||
3032 | case Builtin::BI_bittestandset64: | |||
3033 | case Builtin::BI_bittestandset: | |||
3034 | case Builtin::BI_interlockedbittestandreset: | |||
3035 | case Builtin::BI_interlockedbittestandreset64: | |||
3036 | case Builtin::BI_interlockedbittestandset64: | |||
3037 | case Builtin::BI_interlockedbittestandset: | |||
3038 | case Builtin::BI_interlockedbittestandset_acq: | |||
3039 | case Builtin::BI_interlockedbittestandset_rel: | |||
3040 | case Builtin::BI_interlockedbittestandset_nf: | |||
3041 | case Builtin::BI_interlockedbittestandreset_acq: | |||
3042 | case Builtin::BI_interlockedbittestandreset_rel: | |||
3043 | case Builtin::BI_interlockedbittestandreset_nf: | |||
3044 | return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E)); | |||
3045 | ||||
3046 | case Builtin::BI__exception_code: | |||
3047 | case Builtin::BI_exception_code: | |||
3048 | return RValue::get(EmitSEHExceptionCode()); | |||
3049 | case Builtin::BI__exception_info: | |||
3050 | case Builtin::BI_exception_info: | |||
3051 | return RValue::get(EmitSEHExceptionInfo()); | |||
3052 | case Builtin::BI__abnormal_termination: | |||
3053 | case Builtin::BI_abnormal_termination: | |||
3054 | return RValue::get(EmitSEHAbnormalTermination()); | |||
3055 | case Builtin::BI_setjmpex: | |||
3056 | if (getTarget().getTriple().isOSMSVCRT()) | |||
3057 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); | |||
3058 | break; | |||
3059 | case Builtin::BI_setjmp: | |||
3060 | if (getTarget().getTriple().isOSMSVCRT()) { | |||
3061 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) | |||
3062 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E); | |||
3063 | else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64) | |||
3064 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); | |||
3065 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E); | |||
3066 | } | |||
3067 | break; | |||
3068 | ||||
3069 | case Builtin::BI__GetExceptionInfo: { | |||
3070 | if (llvm::GlobalVariable *GV = | |||
3071 | CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) | |||
3072 | return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); | |||
3073 | break; | |||
3074 | } | |||
3075 | ||||
3076 | case Builtin::BI__fastfail: | |||
3077 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E)); | |||
3078 | ||||
3079 | case Builtin::BI__builtin_coro_size: { | |||
3080 | auto & Context = getContext(); | |||
3081 | auto SizeTy = Context.getSizeType(); | |||
3082 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); | |||
3083 | Value *F = CGM.getIntrinsic(Intrinsic::coro_size, T); | |||
3084 | return RValue::get(Builder.CreateCall(F)); | |||
3085 | } | |||
3086 | ||||
3087 | case Builtin::BI__builtin_coro_id: | |||
3088 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_id); | |||
3089 | case Builtin::BI__builtin_coro_promise: | |||
3090 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise); | |||
3091 | case Builtin::BI__builtin_coro_resume: | |||
3092 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume); | |||
3093 | case Builtin::BI__builtin_coro_frame: | |||
3094 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame); | |||
3095 | case Builtin::BI__builtin_coro_noop: | |||
3096 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop); | |||
3097 | case Builtin::BI__builtin_coro_free: | |||
3098 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_free); | |||
3099 | case Builtin::BI__builtin_coro_destroy: | |||
3100 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy); | |||
3101 | case Builtin::BI__builtin_coro_done: | |||
3102 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_done); | |||
3103 | case Builtin::BI__builtin_coro_alloc: | |||
3104 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc); | |||
3105 | case Builtin::BI__builtin_coro_begin: | |||
3106 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin); | |||
3107 | case Builtin::BI__builtin_coro_end: | |||
3108 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_end); | |||
3109 | case Builtin::BI__builtin_coro_suspend: | |||
3110 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend); | |||
3111 | case Builtin::BI__builtin_coro_param: | |||
3112 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_param); | |||
3113 | ||||
3114 | // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions | |||
3115 | case Builtin::BIread_pipe: | |||
3116 | case Builtin::BIwrite_pipe: { | |||
3117 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), | |||
3118 | *Arg1 = EmitScalarExpr(E->getArg(1)); | |||
3119 | CGOpenCLRuntime OpenCLRT(CGM); | |||
3120 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); | |||
3121 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); | |||
3122 | ||||
3123 | // Type of the generic packet parameter. | |||
3124 | unsigned GenericAS = | |||
3125 | getContext().getTargetAddressSpace(LangAS::opencl_generic); | |||
3126 | llvm::Type *I8PTy = llvm::PointerType::get( | |||
3127 | llvm::Type::getInt8Ty(getLLVMContext()), GenericAS); | |||
3128 | ||||
3129 | // Testing which overloaded version we should generate the call for. | |||
3130 | if (2U == E->getNumArgs()) { | |||
3131 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" | |||
3132 | : "__write_pipe_2"; | |||
3133 | // Creating a generic function type to be able to call with any builtin or | |||
3134 | // user defined type. | |||
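// Assumed runtime entry point being targeted here (illustrative prototype):
//   int __read_pipe_2(pipe p, __generic void *ptr, int packet_size,
//                     int packet_align);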
3135 | llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; | |||
3136 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3137 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3138 | Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy); | |||
3139 | return RValue::get( | |||
3140 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3141 | {Arg0, BCast, PacketSize, PacketAlign})); | |||
3142 | } else { | |||
3143 | assert(4 == E->getNumArgs() && | |||
3144 | "Illegal number of parameters to pipe function"); | |||
3145 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" | |||
3146 | : "__write_pipe_4"; | |||
3147 | ||||
3148 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, | |||
3149 | Int32Ty, Int32Ty}; | |||
3150 | Value *Arg2 = EmitScalarExpr(E->getArg(2)), | |||
3151 | *Arg3 = EmitScalarExpr(E->getArg(3)); | |||
3152 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3153 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3154 | Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy); | |||
3155 | // We know the third argument is an integer type, but we may need to cast | |||
3156 | // it to i32. | |||
3157 | if (Arg2->getType() != Int32Ty) | |||
3158 | Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty); | |||
3159 | return RValue::get(Builder.CreateCall( | |||
3160 | CGM.CreateRuntimeFunction(FTy, Name), | |||
3161 | {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign})); | |||
3162 | } | |||
3163 | } | |||
3164 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write | |||
3165 | // functions | |||
3166 | case Builtin::BIreserve_read_pipe: | |||
3167 | case Builtin::BIreserve_write_pipe: | |||
3168 | case Builtin::BIwork_group_reserve_read_pipe: | |||
3169 | case Builtin::BIwork_group_reserve_write_pipe: | |||
3170 | case Builtin::BIsub_group_reserve_read_pipe: | |||
3171 | case Builtin::BIsub_group_reserve_write_pipe: { | |||
3172 | // Composing the mangled name for the function. | |||
3173 | const char *Name; | |||
3174 | if (BuiltinID == Builtin::BIreserve_read_pipe) | |||
3175 | Name = "__reserve_read_pipe"; | |||
3176 | else if (BuiltinID == Builtin::BIreserve_write_pipe) | |||
3177 | Name = "__reserve_write_pipe"; | |||
3178 | else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) | |||
3179 | Name = "__work_group_reserve_read_pipe"; | |||
3180 | else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) | |||
3181 | Name = "__work_group_reserve_write_pipe"; | |||
3182 | else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) | |||
3183 | Name = "__sub_group_reserve_read_pipe"; | |||
3184 | else | |||
3185 | Name = "__sub_group_reserve_write_pipe"; | |||
3186 | ||||
3187 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), | |||
3188 | *Arg1 = EmitScalarExpr(E->getArg(1)); | |||
3189 | llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy); | |||
3190 | CGOpenCLRuntime OpenCLRT(CGM); | |||
3191 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); | |||
3192 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); | |||
3193 | ||||
3194 | // Building the generic function prototype. | |||
3195 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; | |||
3196 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3197 | ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3198 | // We know the second argument is an integer type, but we may need to cast | |||
3199 | // it to i32. | |||
3200 | if (Arg1->getType() != Int32Ty) | |||
3201 | Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty); | |||
3202 | return RValue::get( | |||
3203 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3204 | {Arg0, Arg1, PacketSize, PacketAlign})); | |||
3205 | } | |||
3206 | // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write | |||
3207 | // functions | |||
3208 | case Builtin::BIcommit_read_pipe: | |||
3209 | case Builtin::BIcommit_write_pipe: | |||
3210 | case Builtin::BIwork_group_commit_read_pipe: | |||
3211 | case Builtin::BIwork_group_commit_write_pipe: | |||
3212 | case Builtin::BIsub_group_commit_read_pipe: | |||
3213 | case Builtin::BIsub_group_commit_write_pipe: { | |||
3214 | const char *Name; | |||
3215 | if (BuiltinID == Builtin::BIcommit_read_pipe) | |||
3216 | Name = "__commit_read_pipe"; | |||
3217 | else if (BuiltinID == Builtin::BIcommit_write_pipe) | |||
3218 | Name = "__commit_write_pipe"; | |||
3219 | else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) | |||
3220 | Name = "__work_group_commit_read_pipe"; | |||
3221 | else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) | |||
3222 | Name = "__work_group_commit_write_pipe"; | |||
3223 | else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) | |||
3224 | Name = "__sub_group_commit_read_pipe"; | |||
3225 | else | |||
3226 | Name = "__sub_group_commit_write_pipe"; | |||
3227 | ||||
3228 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), | |||
3229 | *Arg1 = EmitScalarExpr(E->getArg(1)); | |||
3230 | CGOpenCLRuntime OpenCLRT(CGM); | |||
3231 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); | |||
3232 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); | |||
3233 | ||||
3234 | // Building the generic function prototype. | |||
3235 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; | |||
3236 | llvm::FunctionType *FTy = | |||
3237 | llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), | |||
3238 | llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3239 | ||||
3240 | return RValue::get( | |||
3241 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3242 | {Arg0, Arg1, PacketSize, PacketAlign})); | |||
3243 | } | |||
3244 | // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions | |||
3245 | case Builtin::BIget_pipe_num_packets: | |||
3246 | case Builtin::BIget_pipe_max_packets: { | |||
3247 | const char *BaseName; | |||
3248 | const PipeType *PipeTy = E->getArg(0)->getType()->getAs<PipeType>(); | |||
3249 | if (BuiltinID == Builtin::BIget_pipe_num_packets) | |||
3250 | BaseName = "__get_pipe_num_packets"; | |||
3251 | else | |||
3252 | BaseName = "__get_pipe_max_packets"; | |||
3253 | auto Name = std::string(BaseName) + | |||
3254 | std::string(PipeTy->isReadOnly() ? "_ro" : "_wo"); | |||
3255 | ||||
3256 | // Building the generic function prototype. | |||
3257 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); | |||
3258 | CGOpenCLRuntime OpenCLRT(CGM); | |||
3259 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); | |||
3260 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); | |||
3261 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; | |||
3262 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3263 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3264 | ||||
3265 | return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3266 | {Arg0, PacketSize, PacketAlign})); | |||
3267 | } | |||
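| // For example, get_pipe_num_packets(p) on a read-only pipe lowers to a call | |||
| // of roughly i32 @__get_pipe_num_packets_ro(<pipe>, i32 <size>, i32 <align>); | |||
| // the "_wo" suffix chosen above is used for write-only pipes instead. | |||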
3268 | ||||
3269 | // OpenCL v2.0 s6.13.9 - Address space qualifier functions. | |||
3270 | case Builtin::BIto_global: | |||
3271 | case Builtin::BIto_local: | |||
3272 | case Builtin::BIto_private: { | |||
3273 | auto Arg0 = EmitScalarExpr(E->getArg(0)); | |||
3274 | auto NewArgT = llvm::PointerType::get(Int8Ty, | |||
3275 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); | |||
3276 | auto NewRetT = llvm::PointerType::get(Int8Ty, | |||
3277 | CGM.getContext().getTargetAddressSpace( | |||
3278 | E->getType()->getPointeeType().getAddressSpace())); | |||
3279 | auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); | |||
3280 | llvm::Value *NewArg; | |||
3281 | if (Arg0->getType()->getPointerAddressSpace() != | |||
3282 | NewArgT->getPointerAddressSpace()) | |||
3283 | NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT); | |||
3284 | else | |||
3285 | NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT); | |||
3286 | auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); | |||
3287 | auto NewCall = | |||
3288 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); | |||
3289 | return RValue::get(Builder.CreateBitOrPointerCast(NewCall, | |||
3290 | ConvertType(E->getType()))); | |||
3291 | } | |||
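| // In short: the operand is first moved into the generic address space (via an | |||
| // addrspacecast when its address space differs), passed to | |||
| // __to_global/__to_local/__to_private as an i8* in that address space, and the | |||
| // i8* result is cast back to the builtin's declared return pointer type. | |||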
3292 | ||||
3293 | // OpenCL v2.0, s6.13.17 - Enqueue kernel function. | |||
3294 | // It contains four different overload formats specified in Table 6.13.17.1. | |||
3295 | case Builtin::BIenqueue_kernel: { | |||
3296 | StringRef Name; // Generated function call name | |||
3297 | unsigned NumArgs = E->getNumArgs(); | |||
3298 | ||||
3299 | llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy); | |||
3300 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( | |||
3301 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); | |||
3302 | ||||
3303 | llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); | |||
3304 | llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); | |||
3305 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); | |||
3306 | llvm::Value *Range = NDRangeL.getAddress().getPointer(); | |||
3307 | llvm::Type *RangeTy = NDRangeL.getAddress().getType(); | |||
3308 | ||||
3309 | if (NumArgs == 4) { | |||
3310 | // The most basic form of the call with parameters: | |||
3311 | // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void) | |||
3312 | Name = "__enqueue_kernel_basic"; | |||
3313 | llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy, | |||
3314 | GenericVoidPtrTy}; | |||
3315 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3316 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3317 | ||||
3318 | auto Info = | |||
3319 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); | |||
3320 | llvm::Value *Kernel = | |||
3321 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); | |||
3322 | llvm::Value *Block = | |||
3323 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); | |||
3324 | ||||
3325 | AttrBuilder B; | |||
3326 | B.addAttribute(Attribute::ByVal); | |||
3327 | llvm::AttributeList ByValAttrSet = | |||
3328 | llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B); | |||
3329 | ||||
3330 | auto RTCall = | |||
3331 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet), | |||
3332 | {Queue, Flags, Range, Kernel, Block}); | |||
3333 | RTCall->setAttributes(ByValAttrSet); | |||
3334 | return RValue::get(RTCall); | |||
3335 | } | |||
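| // Shape of the runtime call emitted above (a sketch; parameter names are | |||
| // illustrative): | |||
| //   i32 @__enqueue_kernel_basic(queue_t %q, i32 %flags, ndrange_t* byval %r, | |||
| //                               i8 addrspace(4)* %invoke, i8 addrspace(4)* %block) | |||
| // The ByVal attribute set built above is attached at attribute index 3, i.e. | |||
| // the third parameter (the ndrange_t pointer), since index 0 is the return | |||
| // value and parameters start at index 1. | |||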
3336 | assert(NumArgs >= 5 && "Invalid enqueue_kernel signature")(static_cast <bool> (NumArgs >= 5 && "Invalid enqueue_kernel signature" ) ? void (0) : __assert_fail ("NumArgs >= 5 && \"Invalid enqueue_kernel signature\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3336, __extension__ __PRETTY_FUNCTION__)); | |||
3337 | ||||
3338 | // Create a temporary array to hold the sizes of local pointer arguments | |||
3339 | // for the block. \p First is the position of the first size argument. | |||
3340 | auto CreateArrayForSizeVar = [=](unsigned First) { | |||
3341 | auto *AT = llvm::ArrayType::get(SizeTy, NumArgs - First); | |||
3342 | auto *Arr = Builder.CreateAlloca(AT); | |||
3343 | llvm::Value *Ptr = nullptr; | |||
3344 | // Each of the following arguments specifies the size of the corresponding | |||
3345 | // argument passed to the enqueued block. | |||
3346 | auto *Zero = llvm::ConstantInt::get(IntTy, 0); | |||
3347 | for (unsigned I = First; I < NumArgs; ++I) { | |||
3348 | auto *Index = llvm::ConstantInt::get(IntTy, I - First); | |||
3349 | auto *GEP = Builder.CreateGEP(Arr, {Zero, Index}); | |||
3350 | if (I == First) | |||
3351 | Ptr = GEP; | |||
3352 | auto *V = | |||
3353 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); | |||
3354 | Builder.CreateAlignedStore( | |||
3355 | V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy)); | |||
3356 | } | |||
3357 | return Ptr; | |||
| ||||
3358 | }; | |||
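| // For example, enqueue_kernel(q, f, r, ^(local int *a, local float *b){...}, | |||
| //                             sz_a, sz_b) | |||
| // makes the lambda alloca a [2 x size_t] array, store the two trailing size | |||
| // arguments into it (zero-extended or truncated to size_t), and return a | |||
| // pointer to its first element. | |||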
3359 | ||||
3360 | // Could have events and/or varargs. | |||
3361 | if (E->getArg(3)->getType()->isBlockPointerType()) { | |||
3362 | // No events passed, but has variadic arguments. | |||
3363 | Name = "__enqueue_kernel_varargs"; | |||
3364 | auto Info = | |||
3365 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); | |||
3366 | llvm::Value *Kernel = | |||
3367 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); | |||
3368 | auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); | |||
3369 | auto *PtrToSizeArray = CreateArrayForSizeVar(4); | |||
3370 | ||||
3371 | // Create a vector of the arguments, as well as a constant value to | |||
3372 | // express to the runtime the number of variadic arguments. | |||
3373 | std::vector<llvm::Value *> Args = { | |||
3374 | Queue, Flags, Range, | |||
3375 | Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4), | |||
3376 | PtrToSizeArray}; | |||
3377 | std::vector<llvm::Type *> ArgTys = { | |||
3378 | QueueTy, IntTy, RangeTy, | |||
3379 | GenericVoidPtrTy, GenericVoidPtrTy, IntTy, | |||
3380 | PtrToSizeArray->getType()}; | |||
3381 | ||||
3382 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3383 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3384 | return RValue::get( | |||
3385 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3386 | llvm::ArrayRef<llvm::Value *>(Args))); | |||
3387 | } | |||
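| // The runtime entry point used here therefore has roughly the shape | |||
| //   i32 @__enqueue_kernel_varargs(queue_t, i32 flags, ndrange_t*, | |||
| //       i8 addrspace(4)* kernel, i8 addrspace(4)* block, | |||
| //       i32 nargs, size_t* arg_sizes) | |||
| // where nargs counts the trailing size arguments collected above. | |||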
3388 | // Any calls now have event arguments passed. | |||
3389 | if (NumArgs >= 7) { | |||
3390 | llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy); | |||
3391 | llvm::Type *EventPtrTy = EventTy->getPointerTo( | |||
3392 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); | |||
3393 | ||||
3394 | llvm::Value *NumEvents = | |||
3395 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty); | |||
3396 | llvm::Value *EventList = | |||
3397 | E->getArg(4)->getType()->isArrayType() | |||
3398 | ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() | |||
3399 | : EmitScalarExpr(E->getArg(4)); | |||
3400 | llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5)); | |||
3401 | // Convert to generic address space. | |||
3402 | EventList = Builder.CreatePointerCast(EventList, EventPtrTy); | |||
3403 | ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy); | |||
3404 | auto Info = | |||
3405 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6)); | |||
3406 | llvm::Value *Kernel = | |||
3407 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); | |||
3408 | llvm::Value *Block = | |||
3409 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); | |||
3410 | ||||
3411 | std::vector<llvm::Type *> ArgTys = { | |||
3412 | QueueTy, Int32Ty, RangeTy, Int32Ty, | |||
3413 | EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy}; | |||
3414 | ||||
3415 | std::vector<llvm::Value *> Args = {Queue, Flags, Range, NumEvents, | |||
3416 | EventList, ClkEvent, Kernel, Block}; | |||
3417 | ||||
3418 | if (NumArgs == 7) { | |||
3419 | // Has events but no variadics. | |||
3420 | Name = "__enqueue_kernel_basic_events"; | |||
3421 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3422 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3423 | return RValue::get( | |||
3424 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3425 | llvm::ArrayRef<llvm::Value *>(Args))); | |||
3426 | } | |||
3427 | // Has event info and variadics | |||
3428 | // Pass the number of variadics to the runtime function too. | |||
3429 | Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); | |||
3430 | ArgTys.push_back(Int32Ty); | |||
3431 | Name = "__enqueue_kernel_events_varargs"; | |||
3432 | ||||
3433 | auto *PtrToSizeArray = CreateArrayForSizeVar(7); | |||
3434 | Args.push_back(PtrToSizeArray); | |||
3435 | ArgTys.push_back(PtrToSizeArray->getType()); | |||
3436 | ||||
3437 | llvm::FunctionType *FTy = llvm::FunctionType::get( | |||
3438 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); | |||
3439 | return RValue::get( | |||
3440 | Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), | |||
3441 | llvm::ArrayRef<llvm::Value *>(Args))); | |||
3442 | } | |||
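| // As above, but with the event bookkeeping prepended; roughly: | |||
| //   i32 @__enqueue_kernel_events_varargs(queue_t, i32 flags, ndrange_t*, | |||
| //       i32 num_events, clk_event_t* wait_list, clk_event_t* ret_event, | |||
| //       i8 addrspace(4)* kernel, i8 addrspace(4)* block, i32 nargs, size_t*) | |||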
3443 | LLVM_FALLTHROUGH[[clang::fallthrough]]; | |||
3444 | } | |||
3445 | // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block | |||
3446 | // parameter. | |||
3447 | case Builtin::BIget_kernel_work_group_size: { | |||
3448 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( | |||
3449 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); | |||
3450 | auto Info = | |||
3451 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); | |||
3452 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); | |||
3453 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); | |||
3454 | return RValue::get(Builder.CreateCall( | |||
3455 | CGM.CreateRuntimeFunction( | |||
3456 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, | |||
3457 | false), | |||
3458 | "__get_kernel_work_group_size_impl"), | |||
3459 | {Kernel, Arg})); | |||
3460 | } | |||
3461 | case Builtin::BIget_kernel_preferred_work_group_size_multiple: { | |||
3462 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( | |||
3463 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); | |||
3464 | auto Info = | |||
3465 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); | |||
3466 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); | |||
3467 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); | |||
3468 | return RValue::get(Builder.CreateCall( | |||
3469 | CGM.CreateRuntimeFunction( | |||
3470 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, | |||
3471 | false), | |||
3472 | "__get_kernel_preferred_work_group_size_multiple_impl"), | |||
3473 | {Kernel, Arg})); | |||
3474 | } | |||
3475 | case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: | |||
3476 | case Builtin::BIget_kernel_sub_group_count_for_ndrange: { | |||
3477 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( | |||
3478 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); | |||
3479 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); | |||
3480 | llvm::Value *NDRange = NDRangeL.getAddress().getPointer(); | |||
3481 | auto Info = | |||
3482 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); | |||
3483 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); | |||
3484 | Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); | |||
3485 | const char *Name = | |||
3486 | BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange | |||
3487 | ? "__get_kernel_max_sub_group_size_for_ndrange_impl" | |||
3488 | : "__get_kernel_sub_group_count_for_ndrange_impl"; | |||
3489 | return RValue::get(Builder.CreateCall( | |||
3490 | CGM.CreateRuntimeFunction( | |||
3491 | llvm::FunctionType::get( | |||
3492 | IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, | |||
3493 | false), | |||
3494 | Name), | |||
3495 | {NDRange, Kernel, Block})); | |||
3496 | } | |||
3497 | ||||
3498 | case Builtin::BI__builtin_store_half: | |||
3499 | case Builtin::BI__builtin_store_halff: { | |||
3500 | Value *Val = EmitScalarExpr(E->getArg(0)); | |||
3501 | Address Address = EmitPointerWithAlignment(E->getArg(1)); | |||
3502 | Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy()); | |||
3503 | return RValue::get(Builder.CreateStore(HalfVal, Address)); | |||
3504 | } | |||
3505 | case Builtin::BI__builtin_load_half: { | |||
3506 | Address Address = EmitPointerWithAlignment(E->getArg(0)); | |||
3507 | Value *HalfVal = Builder.CreateLoad(Address); | |||
3508 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy())); | |||
3509 | } | |||
3510 | case Builtin::BI__builtin_load_halff: { | |||
3511 | Address Address = EmitPointerWithAlignment(E->getArg(0)); | |||
3512 | Value *HalfVal = Builder.CreateLoad(Address); | |||
3513 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy())); | |||
3514 | } | |||
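| // IR emitted for these helpers, in sketch form (p is the pointer argument): | |||
| //   __builtin_store_half:  fptrunc double %v to half;  store half, half* %p | |||
| //   __builtin_store_halff: fptrunc float %v to half;   store half, half* %p | |||
| //   __builtin_load_half:   load half, half* %p;        fpext to double | |||
| //   __builtin_load_halff:  load half, half* %p;        fpext to float | |||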
3515 | case Builtin::BIprintf: | |||
3516 | if (getTarget().getTriple().isNVPTX()) | |||
3517 | return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue); | |||
3518 | break; | |||
3519 | case Builtin::BI__builtin_canonicalize: | |||
3520 | case Builtin::BI__builtin_canonicalizef: | |||
3521 | case Builtin::BI__builtin_canonicalizel: | |||
3522 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize)); | |||
3523 | ||||
3524 | case Builtin::BI__builtin_thread_pointer: { | |||
3525 | if (!getContext().getTargetInfo().isTLSSupported()) | |||
3526 | CGM.ErrorUnsupported(E, "__builtin_thread_pointer"); | |||
3527 | // Fall through - it's already mapped to the intrinsic by GCCBuiltin. | |||
3528 | break; | |||
3529 | } | |||
3530 | case Builtin::BI__builtin_os_log_format: | |||
3531 | return emitBuiltinOSLogFormat(*E); | |||
3532 | ||||
3533 | case Builtin::BI__builtin_os_log_format_buffer_size: { | |||
3534 | analyze_os_log::OSLogBufferLayout Layout; | |||
3535 | analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout); | |||
3536 | return RValue::get(ConstantInt::get(ConvertType(E->getType()), | |||
3537 | Layout.size().getQuantity())); | |||
3538 | } | |||
3539 | ||||
3540 | case Builtin::BI__xray_customevent: { | |||
3541 | if (!ShouldXRayInstrumentFunction()) | |||
3542 | return RValue::getIgnored(); | |||
3543 | ||||
3544 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( | |||
3545 | XRayInstrKind::Custom)) | |||
3546 | return RValue::getIgnored(); | |||
3547 | ||||
3548 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) | |||
3549 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) | |||
3550 | return RValue::getIgnored(); | |||
3551 | ||||
3552 | Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent); | |||
3553 | auto FTy = F->getFunctionType(); | |||
3554 | auto Arg0 = E->getArg(0); | |||
3555 | auto Arg0Val = EmitScalarExpr(Arg0); | |||
3556 | auto Arg0Ty = Arg0->getType(); | |||
3557 | auto PTy0 = FTy->getParamType(0); | |||
3558 | if (PTy0 != Arg0Val->getType()) { | |||
3559 | if (Arg0Ty->isArrayType()) | |||
3560 | Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer(); | |||
3561 | else | |||
3562 | Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0); | |||
3563 | } | |||
3564 | auto Arg1 = EmitScalarExpr(E->getArg(1)); | |||
3565 | auto PTy1 = FTy->getParamType(1); | |||
3566 | if (PTy1 != Arg1->getType()) | |||
3567 | Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1); | |||
3568 | return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1})); | |||
3569 | } | |||
3570 | ||||
3571 | case Builtin::BI__xray_typedevent: { | |||
3572 | // TODO: There should be a way to always emit events even if the current | |||
3573 | // function is not instrumented. Losing events in a stream can cripple | |||
3574 | // a trace. | |||
3575 | if (!ShouldXRayInstrumentFunction()) | |||
3576 | return RValue::getIgnored(); | |||
3577 | ||||
3578 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( | |||
3579 | XRayInstrKind::Typed)) | |||
3580 | return RValue::getIgnored(); | |||
3581 | ||||
3582 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) | |||
3583 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) | |||
3584 | return RValue::getIgnored(); | |||
3585 | ||||
3586 | Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent); | |||
3587 | auto FTy = F->getFunctionType(); | |||
3588 | auto Arg0 = EmitScalarExpr(E->getArg(0)); | |||
3589 | auto PTy0 = FTy->getParamType(0); | |||
3590 | if (PTy0 != Arg0->getType()) | |||
3591 | Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0); | |||
3592 | auto Arg1 = E->getArg(1); | |||
3593 | auto Arg1Val = EmitScalarExpr(Arg1); | |||
3594 | auto Arg1Ty = Arg1->getType(); | |||
3595 | auto PTy1 = FTy->getParamType(1); | |||
3596 | if (PTy1 != Arg1Val->getType()) { | |||
3597 | if (Arg1Ty->isArrayType()) | |||
3598 | Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer(); | |||
3599 | else | |||
3600 | Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1); | |||
3601 | } | |||
3602 | auto Arg2 = EmitScalarExpr(E->getArg(2)); | |||
3603 | auto PTy2 = FTy->getParamType(2); | |||
3604 | if (PTy2 != Arg2->getType()) | |||
3605 | Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2); | |||
3606 | return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2})); | |||
3607 | } | |||
3608 | ||||
3609 | case Builtin::BI__builtin_ms_va_start: | |||
3610 | case Builtin::BI__builtin_ms_va_end: | |||
3611 | return RValue::get( | |||
3612 | EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), | |||
3613 | BuiltinID == Builtin::BI__builtin_ms_va_start)); | |||
3614 | ||||
3615 | case Builtin::BI__builtin_ms_va_copy: { | |||
3616 | // Lower this manually. We can't reliably determine whether or not any | |||
3617 | // given va_copy() is for a Win64 va_list from the calling convention | |||
3618 | // alone, because it's legal to do this from a System V ABI function. | |||
3619 | // With opaque pointer types, we won't have enough information in LLVM | |||
3620 | // IR to determine this from the argument types, either. Best to do it | |||
3621 | // now, while we have enough information. | |||
3622 | Address DestAddr = EmitMSVAListRef(E->getArg(0)); | |||
3623 | Address SrcAddr = EmitMSVAListRef(E->getArg(1)); | |||
3624 | ||||
3625 | llvm::Type *BPP = Int8PtrPtrTy; | |||
3626 | ||||
3627 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"), | |||
3628 | DestAddr.getAlignment()); | |||
3629 | SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"), | |||
3630 | SrcAddr.getAlignment()); | |||
3631 | ||||
3632 | Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val"); | |||
3633 | return RValue::get(Builder.CreateStore(ArgPtr, DestAddr)); | |||
3634 | } | |||
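| // Net effect: __builtin_ms_va_copy(dst, src) is emitted as a plain pointer | |||
| // copy: both va_lists are reinterpreted as i8**, and the current argument | |||
| // pointer loaded from src is stored into dst, regardless of the caller's own | |||
| // calling convention. | |||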
3635 | } | |||
3636 | ||||
3637 | // If this is an alias for a lib function (e.g. __builtin_sin), emit | |||
3638 | // the call using the normal call path, but using the unmangled | |||
3639 | // version of the function name. | |||
3640 | if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) | |||
3641 | return emitLibraryCall(*this, FD, E, | |||
3642 | CGM.getBuiltinLibFunction(FD, BuiltinID)); | |||
3643 | ||||
3644 | // If this is a predefined lib function (e.g. malloc), emit the call | |||
3645 | // using exactly the normal call path. | |||
3646 | if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) | |||
3647 | return emitLibraryCall(*this, FD, E, | |||
3648 | cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); | |||
3649 | ||||
3650 | // Check that a call to a target specific builtin has the correct target | |||
3651 | // features. | |||
3652 | // This check is done down here so that non-target-specific builtins are not | |||
3653 | // affected; however, if generic builtins ever start to require generic target | |||
3654 | // features, this can be moved up to the beginning of the function. | |||
3655 | checkTargetFeatures(E, FD); | |||
3656 | ||||
3657 | if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) | |||
3658 | LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth); | |||
3659 | ||||
3660 | // See if we have a target specific intrinsic. | |||
3661 | const char *Name = getContext().BuiltinInfo.getName(BuiltinID); | |||
3662 | Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; | |||
3663 | StringRef Prefix = | |||
3664 | llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); | |||
3665 | if (!Prefix.empty()) { | |||
3666 | IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name); | |||
3667 | // NOTE: we don't need to perform a compatibility flag check here since the | |||
3668 | // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the | |||
3669 | // MS builtins via ALL_MS_LANGUAGES, so they have already been filtered out. | |||
3670 | if (IntrinsicID == Intrinsic::not_intrinsic) | |||
3671 | IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); | |||
3672 | } | |||
3673 | ||||
3674 | if (IntrinsicID != Intrinsic::not_intrinsic) { | |||
3675 | SmallVector<Value*, 16> Args; | |||
3676 | ||||
3677 | // Find out if any arguments are required to be integer constant | |||
3678 | // expressions. | |||
3679 | unsigned ICEArguments = 0; | |||
3680 | ASTContext::GetBuiltinTypeError Error; | |||
3681 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); | |||
3682 | assert(Error == ASTContext::GE_None && "Should not codegen an error")(static_cast <bool> (Error == ASTContext::GE_None && "Should not codegen an error") ? void (0) : __assert_fail ("Error == ASTContext::GE_None && \"Should not codegen an error\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3682, __extension__ __PRETTY_FUNCTION__)); | |||
3683 | ||||
3684 | Function *F = CGM.getIntrinsic(IntrinsicID); | |||
3685 | llvm::FunctionType *FTy = F->getFunctionType(); | |||
3686 | ||||
3687 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { | |||
3688 | Value *ArgValue; | |||
3689 | // If this is a normal argument, just emit it as a scalar. | |||
3690 | if ((ICEArguments & (1 << i)) == 0) { | |||
3691 | ArgValue = EmitScalarExpr(E->getArg(i)); | |||
3692 | } else { | |||
3693 | // If this is required to be a constant, constant fold it so that we | |||
3694 | // know that the generated intrinsic gets a ConstantInt. | |||
3695 | llvm::APSInt Result; | |||
3696 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext()); | |||
3697 | assert(IsConst && "Constant arg isn't actually constant?")(static_cast <bool> (IsConst && "Constant arg isn't actually constant?" ) ? void (0) : __assert_fail ("IsConst && \"Constant arg isn't actually constant?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3697, __extension__ __PRETTY_FUNCTION__)); | |||
3698 | (void)IsConst; | |||
3699 | ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result); | |||
3700 | } | |||
3701 | ||||
3702 | // If the intrinsic arg type is different from the builtin arg type | |||
3703 | // we need to do a bit cast. | |||
3704 | llvm::Type *PTy = FTy->getParamType(i); | |||
3705 | if (PTy != ArgValue->getType()) { | |||
3706 | assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&(static_cast <bool> (PTy->canLosslesslyBitCastTo(FTy ->getParamType(i)) && "Must be able to losslessly bit cast to param" ) ? void (0) : __assert_fail ("PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && \"Must be able to losslessly bit cast to param\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3707, __extension__ __PRETTY_FUNCTION__)) | |||
3707 | "Must be able to losslessly bit cast to param")(static_cast <bool> (PTy->canLosslesslyBitCastTo(FTy ->getParamType(i)) && "Must be able to losslessly bit cast to param" ) ? void (0) : __assert_fail ("PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && \"Must be able to losslessly bit cast to param\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3707, __extension__ __PRETTY_FUNCTION__)); | |||
3708 | ArgValue = Builder.CreateBitCast(ArgValue, PTy); | |||
3709 | } | |||
3710 | ||||
3711 | Args.push_back(ArgValue); | |||
3712 | } | |||
3713 | ||||
3714 | Value *V = Builder.CreateCall(F, Args); | |||
3715 | QualType BuiltinRetType = E->getType(); | |||
3716 | ||||
3717 | llvm::Type *RetTy = VoidTy; | |||
3718 | if (!BuiltinRetType->isVoidType()) | |||
3719 | RetTy = ConvertType(BuiltinRetType); | |||
3720 | ||||
3721 | if (RetTy != V->getType()) { | |||
3722 | assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&(static_cast <bool> (V->getType()->canLosslesslyBitCastTo (RetTy) && "Must be able to losslessly bit cast result type" ) ? void (0) : __assert_fail ("V->getType()->canLosslesslyBitCastTo(RetTy) && \"Must be able to losslessly bit cast result type\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3723, __extension__ __PRETTY_FUNCTION__)) | |||
3723 | "Must be able to losslessly bit cast result type")(static_cast <bool> (V->getType()->canLosslesslyBitCastTo (RetTy) && "Must be able to losslessly bit cast result type" ) ? void (0) : __assert_fail ("V->getType()->canLosslesslyBitCastTo(RetTy) && \"Must be able to losslessly bit cast result type\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3723, __extension__ __PRETTY_FUNCTION__)); | |||
3724 | V = Builder.CreateBitCast(V, RetTy); | |||
3725 | } | |||
3726 | ||||
3727 | return RValue::get(V); | |||
3728 | } | |||
3729 | ||||
3730 | // See if we have a target specific builtin that needs to be lowered. | |||
3731 | if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E)) | |||
3732 | return RValue::get(V); | |||
3733 | ||||
3734 | ErrorUnsupported(E, "builtin function"); | |||
3735 | ||||
3736 | // Unknown builtin, for now just dump it out and return undef. | |||
3737 | return GetUndefRValue(E->getType()); | |||
3738 | } | |||
3739 | ||||
3740 | static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, | |||
3741 | unsigned BuiltinID, const CallExpr *E, | |||
3742 | llvm::Triple::ArchType Arch) { | |||
3743 | switch (Arch) { | |||
3744 | case llvm::Triple::arm: | |||
3745 | case llvm::Triple::armeb: | |||
3746 | case llvm::Triple::thumb: | |||
3747 | case llvm::Triple::thumbeb: | |||
3748 | return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch); | |||
3749 | case llvm::Triple::aarch64: | |||
3750 | case llvm::Triple::aarch64_be: | |||
3751 | return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); | |||
3752 | case llvm::Triple::x86: | |||
3753 | case llvm::Triple::x86_64: | |||
3754 | return CGF->EmitX86BuiltinExpr(BuiltinID, E); | |||
3755 | case llvm::Triple::ppc: | |||
3756 | case llvm::Triple::ppc64: | |||
3757 | case llvm::Triple::ppc64le: | |||
3758 | return CGF->EmitPPCBuiltinExpr(BuiltinID, E); | |||
3759 | case llvm::Triple::r600: | |||
3760 | case llvm::Triple::amdgcn: | |||
3761 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); | |||
3762 | case llvm::Triple::systemz: | |||
3763 | return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); | |||
3764 | case llvm::Triple::nvptx: | |||
3765 | case llvm::Triple::nvptx64: | |||
3766 | return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); | |||
3767 | case llvm::Triple::wasm32: | |||
3768 | case llvm::Triple::wasm64: | |||
3769 | return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); | |||
3770 | case llvm::Triple::hexagon: | |||
3771 | return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); | |||
3772 | default: | |||
3773 | return nullptr; | |||
3774 | } | |||
3775 | } | |||
3776 | ||||
3777 | Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, | |||
3778 | const CallExpr *E) { | |||
3779 | if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { | |||
3780 | assert(getContext().getAuxTargetInfo() && "Missing aux target info")(static_cast <bool> (getContext().getAuxTargetInfo() && "Missing aux target info") ? void (0) : __assert_fail ("getContext().getAuxTargetInfo() && \"Missing aux target info\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3780, __extension__ __PRETTY_FUNCTION__)); | |||
3781 | return EmitTargetArchBuiltinExpr( | |||
3782 | this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, | |||
3783 | getContext().getAuxTargetInfo()->getTriple().getArch()); | |||
3784 | } | |||
3785 | ||||
3786 | return EmitTargetArchBuiltinExpr(this, BuiltinID, E, | |||
3787 | getTarget().getTriple().getArch()); | |||
3788 | } | |||
3789 | ||||
3790 | static llvm::VectorType *GetNeonType(CodeGenFunction *CGF, | |||
3791 | NeonTypeFlags TypeFlags, | |||
3792 | bool HasLegalHalfType=true, | |||
3793 | bool V1Ty=false) { | |||
3794 | int IsQuad = TypeFlags.isQuad(); | |||
3795 | switch (TypeFlags.getEltType()) { | |||
3796 | case NeonTypeFlags::Int8: | |||
3797 | case NeonTypeFlags::Poly8: | |||
3798 | return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); | |||
3799 | case NeonTypeFlags::Int16: | |||
3800 | case NeonTypeFlags::Poly16: | |||
3801 | return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); | |||
3802 | case NeonTypeFlags::Float16: | |||
3803 | if (HasLegalHalfType) | |||
3804 | return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad)); | |||
3805 | else | |||
3806 | return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); | |||
3807 | case NeonTypeFlags::Int32: | |||
3808 | return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); | |||
3809 | case NeonTypeFlags::Int64: | |||
3810 | case NeonTypeFlags::Poly64: | |||
3811 | return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); | |||
3812 | case NeonTypeFlags::Poly128: | |||
3813 | // FIXME: i128 and f128 are not fully supported in Clang and LLVM; | |||
3814 | // much of the i128 and f128 API is still missing, | |||
3815 | // so we use v16i8 to represent poly128 and rely on pattern matching. | |||
3816 | return llvm::VectorType::get(CGF->Int8Ty, 16); | |||
3817 | case NeonTypeFlags::Float32: | |||
3818 | return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); | |||
3819 | case NeonTypeFlags::Float64: | |||
3820 | return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); | |||
3821 | } | |||
3822 | llvm_unreachable("Unknown vector element type!")::llvm::llvm_unreachable_internal("Unknown vector element type!" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3822); | |||
3823 | } | |||
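| // Example: Int8 with the quad bit clear yields <8 x i8>, with it set <16 x i8> | |||
| // (the 8 << IsQuad above); Float64 yields <1 x double> or <2 x double>. | |||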
3824 | ||||
3825 | static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF, | |||
3826 | NeonTypeFlags IntTypeFlags) { | |||
3827 | int IsQuad = IntTypeFlags.isQuad(); | |||
3828 | switch (IntTypeFlags.getEltType()) { | |||
3829 | case NeonTypeFlags::Int16: | |||
3830 | return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad)); | |||
3831 | case NeonTypeFlags::Int32: | |||
3832 | return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad)); | |||
3833 | case NeonTypeFlags::Int64: | |||
3834 | return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad)); | |||
3835 | default: | |||
3836 | llvm_unreachable("Type can't be converted to floating-point!")::llvm::llvm_unreachable_internal("Type can't be converted to floating-point!" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 3836); | |||
3837 | } | |||
3838 | } | |||
3839 | ||||
3840 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { | |||
3841 | unsigned nElts = V->getType()->getVectorNumElements(); | |||
3842 | Value* SV = llvm::ConstantVector::getSplat(nElts, C); | |||
3843 | return Builder.CreateShuffleVector(V, V, SV, "lane"); | |||
3844 | } | |||
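| // Example: splatting lane 2 of a 4-element vector builds the constant mask | |||
| // <i32 2, i32 2, i32 2, i32 2> and emits | |||
| //   shufflevector <4 x T> %V, <4 x T> %V, <4 x i32> <the splat mask> | |||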
3845 | ||||
3846 | Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, | |||
3847 | const char *name, | |||
3848 | unsigned shift, bool rightshift) { | |||
3849 | unsigned j = 0; | |||
3850 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); | |||
3851 | ai != ae; ++ai, ++j) | |||
3852 | if (shift > 0 && shift == j) | |||
3853 | Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); | |||
3854 | else | |||
3855 | Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); | |||
3856 | ||||
3857 | return Builder.CreateCall(F, Ops, name); | |||
3858 | } | |||
3859 | ||||
3860 | Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, | |||
3861 | bool neg) { | |||
3862 | int SV = cast<ConstantInt>(V)->getSExtValue(); | |||
3863 | return ConstantInt::get(Ty, neg ? -SV : SV); | |||
3864 | } | |||
3865 | ||||
3866 | // Right-shift a vector by a constant. | |||
3867 | Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift, | |||
3868 | llvm::Type *Ty, bool usgn, | |||
3869 | const char *name) { | |||
3870 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); | |||
3871 | ||||
3872 | int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); | |||
3873 | int EltSize = VTy->getScalarSizeInBits(); | |||
3874 | ||||
3875 | Vec = Builder.CreateBitCast(Vec, Ty); | |||
3876 | ||||
3877 | // lshr/ashr are undefined when the shift amount is equal to the vector | |||
3878 | // element size. | |||
3879 | if (ShiftAmt == EltSize) { | |||
3880 | if (usgn) { | |||
3881 | // Right-shifting an unsigned value by its size yields 0. | |||
3882 | return llvm::ConstantAggregateZero::get(VTy); | |||
3883 | } else { | |||
3884 | // Right-shifting a signed value by its size is equivalent | |||
3885 | // to a shift of size-1. | |||
3886 | --ShiftAmt; | |||
3887 | Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); | |||
3888 | } | |||
3889 | } | |||
3890 | ||||
3891 | Shift = EmitNeonShiftVector(Shift, Ty, false); | |||
3892 | if (usgn) | |||
3893 | return Builder.CreateLShr(Vec, Shift, name); | |||
3894 | else | |||
3895 | return Builder.CreateAShr(Vec, Shift, name); | |||
3896 | } | |||
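| // Example: a signed right shift of i32 lanes by 32 is rewritten above as an | |||
| // ashr by 31, which still smears the sign bit across the lane, while the | |||
| // unsigned case folds directly to a zero vector. | |||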
3897 | ||||
3898 | enum { | |||
3899 | AddRetType = (1 << 0), | |||
3900 | Add1ArgType = (1 << 1), | |||
3901 | Add2ArgTypes = (1 << 2), | |||
3902 | ||||
3903 | VectorizeRetType = (1 << 3), | |||
3904 | VectorizeArgTypes = (1 << 4), | |||
3905 | ||||
3906 | InventFloatType = (1 << 5), | |||
3907 | UnsignedAlts = (1 << 6), | |||
3908 | ||||
3909 | Use64BitVectors = (1 << 7), | |||
3910 | Use128BitVectors = (1 << 8), | |||
3911 | ||||
3912 | Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, | |||
3913 | VectorRet = AddRetType | VectorizeRetType, | |||
3914 | VectorRetGetArgs01 = | |||
3915 | AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, | |||
3916 | FpCmpzModifiers = | |||
3917 | AddRetType | VectorizeRetType | Add1ArgType | InventFloatType | |||
3918 | }; | |||
3919 | ||||
3920 | namespace { | |||
3921 | struct NeonIntrinsicInfo { | |||
3922 | const char *NameHint; | |||
3923 | unsigned BuiltinID; | |||
3924 | unsigned LLVMIntrinsic; | |||
3925 | unsigned AltLLVMIntrinsic; | |||
3926 | unsigned TypeModifier; | |||
3927 | ||||
3928 | bool operator<(unsigned RHSBuiltinID) const { | |||
3929 | return BuiltinID < RHSBuiltinID; | |||
3930 | } | |||
3931 | bool operator<(const NeonIntrinsicInfo &TE) const { | |||
3932 | return BuiltinID < TE.BuiltinID; | |||
3933 | } | |||
3934 | }; | |||
3935 | } // end anonymous namespace | |||
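| // The operator< overloads exist so the intrinsic tables below can be binary- | |||
| // searched by builtin ID (e.g. with std::lower_bound), which presumes the | |||
| // tables are kept sorted by BuiltinID. | |||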
3936 | ||||
3937 | #define NEONMAP0(NameBase) \ | |||
3938 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } | |||
3939 | ||||
3940 | #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ | |||
3941 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ | |||
3942 | Intrinsic::LLVMIntrinsic, 0, TypeModifier } | |||
3943 | ||||
3944 | #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ | |||
3945 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ | |||
3946 | Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ | |||
3947 | TypeModifier } | |||
3948 | ||||
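| // For reference, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the entry | |||
| //   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 } | |||
| // i.e. {NameHint, BuiltinID, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier}. | |||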
3949 | static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = { | |||
3950 | NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), | |||
3951 | NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), | |||
3952 | NEONMAP1(vabs_v, arm_neon_vabs, 0), | |||
3953 | NEONMAP1(vabsq_v, arm_neon_vabs, 0), | |||
3954 | NEONMAP0(vaddhn_v), | |||
3955 | NEONMAP1(vaesdq_v, arm_neon_aesd, 0), | |||
3956 | NEONMAP1(vaeseq_v, arm_neon_aese, 0), | |||
3957 | NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), | |||
3958 | NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), | |||
3959 | NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), | |||
3960 | NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), | |||
3961 | NEONMAP1(vcage_v, arm_neon_vacge, 0), | |||
3962 | NEONMAP1(vcageq_v, arm_neon_vacge, 0), | |||
3963 | NEONMAP1(vcagt_v, arm_neon_vacgt, 0), | |||
3964 | NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), | |||
3965 | NEONMAP1(vcale_v, arm_neon_vacge, 0), | |||
3966 | NEONMAP1(vcaleq_v, arm_neon_vacge, 0), | |||
3967 | NEONMAP1(vcalt_v, arm_neon_vacgt, 0), | |||
3968 | NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), | |||
3969 | NEONMAP0(vceqz_v), | |||
3970 | NEONMAP0(vceqzq_v), | |||
3971 | NEONMAP0(vcgez_v), | |||
3972 | NEONMAP0(vcgezq_v), | |||
3973 | NEONMAP0(vcgtz_v), | |||
3974 | NEONMAP0(vcgtzq_v), | |||
3975 | NEONMAP0(vclez_v), | |||
3976 | NEONMAP0(vclezq_v), | |||
3977 | NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), | |||
3978 | NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), | |||
3979 | NEONMAP0(vcltz_v), | |||
3980 | NEONMAP0(vcltzq_v), | |||
3981 | NEONMAP1(vclz_v, ctlz, Add1ArgType), | |||
3982 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), | |||
3983 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), | |||
3984 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), | |||
3985 | NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), | |||
3986 | NEONMAP0(vcvt_f16_v), | |||
3987 | NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), | |||
3988 | NEONMAP0(vcvt_f32_v), | |||
3989 | NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), | |||
3990 | NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), | |||
3991 | NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), | |||
3992 | NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), | |||
3993 | NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), | |||
3994 | NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), | |||
3995 | NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), | |||
3996 | NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), | |||
3997 | NEONMAP0(vcvt_s16_v), | |||
3998 | NEONMAP0(vcvt_s32_v), | |||
3999 | NEONMAP0(vcvt_s64_v), | |||
4000 | NEONMAP0(vcvt_u16_v), | |||
4001 | NEONMAP0(vcvt_u32_v), | |||
4002 | NEONMAP0(vcvt_u64_v), | |||
4003 | NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), | |||
4004 | NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), | |||
4005 | NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), | |||
4006 | NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), | |||
4007 | NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), | |||
4008 | NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), | |||
4009 | NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), | |||
4010 | NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), | |||
4011 | NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), | |||
4012 | NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), | |||
4013 | NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), | |||
4014 | NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), | |||
4015 | NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), | |||
4016 | NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), | |||
4017 | NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), | |||
4018 | NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), | |||
4019 | NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), | |||
4020 | NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), | |||
4021 | NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), | |||
4022 | NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), | |||
4023 | NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), | |||
4024 | NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), | |||
4025 | NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), | |||
4026 | NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), | |||
4027 | NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), | |||
4028 | NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), | |||
4029 | NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), | |||
4030 | NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), | |||
4031 | NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), | |||
4032 | NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), | |||
4033 | NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), | |||
4034 | NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), | |||
4035 | NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), | |||
4036 | NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), | |||
4037 | NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), | |||
4038 | NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), | |||
4039 | NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), | |||
4040 | NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), | |||
4041 | NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), | |||
4042 | NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), | |||
4043 | NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), | |||
4044 | NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), | |||
4045 | NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), | |||
4046 | NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), | |||
4047 | NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), | |||
4048 | NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), | |||
4049 | NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), | |||
4050 | NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), | |||
4051 | NEONMAP0(vcvtq_f16_v), | |||
4052 | NEONMAP0(vcvtq_f32_v), | |||
4053 | NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), | |||
4054 | NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), | |||
4055 | NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), | |||
4056 | NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), | |||
4057 | NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), | |||
4058 | NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), | |||
4059 | NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), | |||
4060 | NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), | |||
4061 | NEONMAP0(vcvtq_s16_v), | |||
4062 | NEONMAP0(vcvtq_s32_v), | |||
4063 | NEONMAP0(vcvtq_s64_v), | |||
4064 | NEONMAP0(vcvtq_u16_v), | |||
4065 | NEONMAP0(vcvtq_u32_v), | |||
4066 | NEONMAP0(vcvtq_u64_v), | |||
4067 | NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), | |||
4068 | NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), | |||
4069 | NEONMAP0(vext_v), | |||
4070 | NEONMAP0(vextq_v), | |||
4071 | NEONMAP0(vfma_v), | |||
4072 | NEONMAP0(vfmaq_v), | |||
4073 | NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), | |||
4074 | NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), | |||
4075 | NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), | |||
4076 | NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), | |||
4077 | NEONMAP0(vld1_dup_v), | |||
4078 | NEONMAP1(vld1_v, arm_neon_vld1, 0), | |||
4079 | NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), | |||
4080 | NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), | |||
4081 | NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), | |||
4082 | NEONMAP0(vld1q_dup_v), | |||
4083 | NEONMAP1(vld1q_v, arm_neon_vld1, 0), | |||
4084 | NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), | |||
4085 | NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), | |||
4086 | NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), | |||
4087 | NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), | |||
4088 | NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), | |||
4089 | NEONMAP1(vld2_v, arm_neon_vld2, 0), | |||
4090 | NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), | |||
4091 | NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), | |||
4092 | NEONMAP1(vld2q_v, arm_neon_vld2, 0), | |||
4093 | NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), | |||
4094 | NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), | |||
4095 | NEONMAP1(vld3_v, arm_neon_vld3, 0), | |||
4096 | NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), | |||
4097 | NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), | |||
4098 | NEONMAP1(vld3q_v, arm_neon_vld3, 0), | |||
4099 | NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), | |||
4100 | NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), | |||
4101 | NEONMAP1(vld4_v, arm_neon_vld4, 0), | |||
4102 | NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), | |||
4103 | NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), | |||
4104 | NEONMAP1(vld4q_v, arm_neon_vld4, 0), | |||
4105 | NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), | |||
4106 | NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), | |||
4107 | NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), | |||
4108 | NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), | |||
4109 | NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), | |||
4110 | NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), | |||
4111 | NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), | |||
4112 | NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), | |||
4113 | NEONMAP0(vmovl_v), | |||
4114 | NEONMAP0(vmovn_v), | |||
4115 | NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), | |||
4116 | NEONMAP0(vmull_v), | |||
4117 | NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), | |||
4118 | NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), | |||
4119 | NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), | |||
4120 | NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), | |||
4121 | NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), | |||
4122 | NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), | |||
4123 | NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), | |||
4124 | NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), | |||
4125 | NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), | |||
4126 | NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), | |||
4127 | NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), | |||
4128 | NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts), | |||
4129 | NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts), | |||
4130 | NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0), | |||
4131 | NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0), | |||
4132 | NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), | |||
4133 | NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), | |||
4134 | NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), | |||
4135 | NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), | |||
4136 | NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), | |||
4137 | NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), | |||
4138 | NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), | |||
4139 | NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), | |||
4140 | NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), | |||
4141 | NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), | |||
4142 | NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), | |||
4143 | NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), | |||
4144 | NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), | |||
4145 | NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), | |||
4146 | NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), | |||
4147 | NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), | |||
4148 | NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), | |||
4149 | NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts), | |||
4150 | NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts), | |||
4151 | NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), | |||
4152 | NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), | |||
4153 | NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), | |||
4154 | NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), | |||
4155 | NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), | |||
4156 | NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), | |||
4157 | NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), | |||
4158 | NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), | |||
4159 | NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), | |||
4160 | NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), | |||
4161 | NEONMAP0(vrndi_v), | |||
4162 | NEONMAP0(vrndiq_v), | |||
4163 | NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), | |||
4164 | NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), | |||
4165 | NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), | |||
4166 | NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), | |||
4167 | NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), | |||
4168 | NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), | |||
4169 | NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), | |||
4170 | NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), | |||
4171 | NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), | |||
4172 | NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), | |||
4173 | NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), | |||
4174 | NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), | |||
4175 | NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), | |||
4176 | NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), | |||
4177 | NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), | |||
4178 | NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), | |||
4179 | NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), | |||
4180 | NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), | |||
4181 | NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), | |||
4182 | NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), | |||
4183 | NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), | |||
4184 | NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), | |||
4185 | NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), | |||
4186 | NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), | |||
4187 | NEONMAP0(vshl_n_v), | |||
4188 | NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), | |||
4189 | NEONMAP0(vshll_n_v), | |||
4190 | NEONMAP0(vshlq_n_v), | |||
4191 | NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), | |||
4192 | NEONMAP0(vshr_n_v), | |||
4193 | NEONMAP0(vshrn_n_v), | |||
4194 | NEONMAP0(vshrq_n_v), | |||
4195 | NEONMAP1(vst1_v, arm_neon_vst1, 0), | |||
4196 | NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), | |||
4197 | NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), | |||
4198 | NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), | |||
4199 | NEONMAP1(vst1q_v, arm_neon_vst1, 0), | |||
4200 | NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), | |||
4201 | NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), | |||
4202 | NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), | |||
4203 | NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), | |||
4204 | NEONMAP1(vst2_v, arm_neon_vst2, 0), | |||
4205 | NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), | |||
4206 | NEONMAP1(vst2q_v, arm_neon_vst2, 0), | |||
4207 | NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), | |||
4208 | NEONMAP1(vst3_v, arm_neon_vst3, 0), | |||
4209 | NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), | |||
4210 | NEONMAP1(vst3q_v, arm_neon_vst3, 0), | |||
4211 | NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), | |||
4212 | NEONMAP1(vst4_v, arm_neon_vst4, 0), | |||
4213 | NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), | |||
4214 | NEONMAP1(vst4q_v, arm_neon_vst4, 0), | |||
4215 | NEONMAP0(vsubhn_v), | |||
4216 | NEONMAP0(vtrn_v), | |||
4217 | NEONMAP0(vtrnq_v), | |||
4218 | NEONMAP0(vtst_v), | |||
4219 | NEONMAP0(vtstq_v), | |||
4220 | NEONMAP0(vuzp_v), | |||
4221 | NEONMAP0(vuzpq_v), | |||
4222 | NEONMAP0(vzip_v), | |||
4223 | NEONMAP0(vzipq_v) | |||
4224 | }; | |||
4225 | ||||
4226 | static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = { | |||
4227 | NEONMAP1(vabs_v, aarch64_neon_abs, 0), | |||
4228 | NEONMAP1(vabsq_v, aarch64_neon_abs, 0), | |||
4229 | NEONMAP0(vaddhn_v), | |||
4230 | NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), | |||
4231 | NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), | |||
4232 | NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), | |||
4233 | NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), | |||
4234 | NEONMAP1(vcage_v, aarch64_neon_facge, 0), | |||
4235 | NEONMAP1(vcageq_v, aarch64_neon_facge, 0), | |||
4236 | NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), | |||
4237 | NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), | |||
4238 | NEONMAP1(vcale_v, aarch64_neon_facge, 0), | |||
4239 | NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), | |||
4240 | NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), | |||
4241 | NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), | |||
4242 | NEONMAP0(vceqz_v), | |||
4243 | NEONMAP0(vceqzq_v), | |||
4244 | NEONMAP0(vcgez_v), | |||
4245 | NEONMAP0(vcgezq_v), | |||
4246 | NEONMAP0(vcgtz_v), | |||
4247 | NEONMAP0(vcgtzq_v), | |||
4248 | NEONMAP0(vclez_v), | |||
4249 | NEONMAP0(vclezq_v), | |||
4250 | NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), | |||
4251 | NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), | |||
4252 | NEONMAP0(vcltz_v), | |||
4253 | NEONMAP0(vcltzq_v), | |||
4254 | NEONMAP1(vclz_v, ctlz, Add1ArgType), | |||
4255 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), | |||
4256 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), | |||
4257 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), | |||
4258 | NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), | |||
4259 | NEONMAP0(vcvt_f16_v), | |||
4260 | NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), | |||
4261 | NEONMAP0(vcvt_f32_v), | |||
4262 | NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), | |||
4263 | NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), | |||
4264 | NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), | |||
4265 | NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), | |||
4266 | NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), | |||
4267 | NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), | |||
4268 | NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), | |||
4269 | NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), | |||
4270 | NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), | |||
4271 | NEONMAP0(vcvtq_f16_v), | |||
4272 | NEONMAP0(vcvtq_f32_v), | |||
4273 | NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), | |||
4274 | NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), | |||
4275 | NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), | |||
4276 | NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), | |||
4277 | NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), | |||
4278 | NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), | |||
4279 | NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), | |||
4280 | NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), | |||
4281 | NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), | |||
4282 | NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), | |||
4283 | NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), | |||
4284 | NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), | |||
4285 | NEONMAP0(vext_v), | |||
4286 | NEONMAP0(vextq_v), | |||
4287 | NEONMAP0(vfma_v), | |||
4288 | NEONMAP0(vfmaq_v), | |||
4289 | NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), | |||
4290 | NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), | |||
4291 | NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), | |||
4292 | NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), | |||
4293 | NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), | |||
4294 | NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), | |||
4295 | NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), | |||
4296 | NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), | |||
4297 | NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), | |||
4298 | NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), | |||
4299 | NEONMAP0(vmovl_v), | |||
4300 | NEONMAP0(vmovn_v), | |||
4301 | NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), | |||
4302 | NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), | |||
4303 | NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), | |||
4304 | NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), | |||
4305 | NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), | |||
4306 | NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), | |||
4307 | NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), | |||
4308 | NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), | |||
4309 | NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), | |||
4310 | NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), | |||
4311 | NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), | |||
4312 | NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), | |||
4313 | NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), | |||
4314 | NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), | |||
4315 | NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), | |||
4316 | NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), | |||
4317 | NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), | |||
4318 | NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), | |||
4319 | NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), | |||
4320 | NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), | |||
4321 | NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), | |||
4322 | NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), | |||
4323 | NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), | |||
4324 | NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), | |||
4325 | NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), | |||
4326 | NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), | |||
4327 | NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), | |||
4328 | NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), | |||
4329 | NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), | |||
4330 | NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), | |||
4331 | NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), | |||
4332 | NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), | |||
4333 | NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), | |||
4334 | NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), | |||
4335 | NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), | |||
4336 | NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), | |||
4337 | NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), | |||
4338 | NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), | |||
4339 | NEONMAP0(vrndi_v), | |||
4340 | NEONMAP0(vrndiq_v), | |||
4341 | NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), | |||
4342 | NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), | |||
4343 | NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), | |||
4344 | NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), | |||
4345 | NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), | |||
4346 | NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), | |||
4347 | NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), | |||
4348 | NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), | |||
4349 | NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), | |||
4350 | NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), | |||
4351 | NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), | |||
4352 | NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), | |||
4353 | NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), | |||
4354 | NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), | |||
4355 | NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), | |||
4356 | NEONMAP0(vshl_n_v), | |||
4357 | NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), | |||
4358 | NEONMAP0(vshll_n_v), | |||
4359 | NEONMAP0(vshlq_n_v), | |||
4360 | NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), | |||
4361 | NEONMAP0(vshr_n_v), | |||
4362 | NEONMAP0(vshrn_n_v), | |||
4363 | NEONMAP0(vshrq_n_v), | |||
4364 | NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), | |||
4365 | NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), | |||
4366 | NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), | |||
4367 | NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), | |||
4368 | NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), | |||
4369 | NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), | |||
4370 | NEONMAP0(vsubhn_v), | |||
4371 | NEONMAP0(vtst_v), | |||
4372 | NEONMAP0(vtstq_v), | |||
4373 | }; | |||
4374 | ||||
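| // Scalar ("SISD") intrinsics, consumed by EmitCommonNeonSISDBuiltinExpr below: | |||
| // each scalar builtin is lowered through a vector intrinsic, with operands | |||
| // promoted into vector lanes as needed. | |||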
4375 | static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = { | |||
4376 | NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), | |||
4377 | NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), | |||
4378 | NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), | |||
4379 | NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), | |||
4380 | NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), | |||
4381 | NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), | |||
4382 | NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), | |||
4383 | NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), | |||
4384 | NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), | |||
4385 | NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), | |||
4386 | NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), | |||
4387 | NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), | |||
4388 | NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), | |||
4389 | NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), | |||
4390 | NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), | |||
4391 | NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), | |||
4392 | NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), | |||
4393 | NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), | |||
4394 | NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), | |||
4395 | NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), | |||
4396 | NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), | |||
4397 | NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), | |||
4398 | NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), | |||
4399 | NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), | |||
4400 | NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), | |||
4401 | NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), | |||
4402 | NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), | |||
4403 | NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), | |||
4404 | NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), | |||
4405 | NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), | |||
4406 | NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), | |||
4407 | NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), | |||
4408 | NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), | |||
4409 | NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), | |||
4410 | NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), | |||
4411 | NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), | |||
4412 | NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), | |||
4413 | NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), | |||
4414 | NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), | |||
4415 | NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), | |||
4416 | NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), | |||
4417 | NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), | |||
4418 | NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), | |||
4419 | NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), | |||
4420 | NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), | |||
4421 | NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), | |||
4422 | NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), | |||
4423 | NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), | |||
4424 | NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), | |||
4425 | NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), | |||
4426 | NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), | |||
4427 | NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), | |||
4428 | NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), | |||
4429 | NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), | |||
4430 | NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), | |||
4431 | NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), | |||
4432 | NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), | |||
4433 | NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), | |||
4434 | NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), | |||
4435 | NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), | |||
4436 | NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), | |||
4437 | NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), | |||
4438 | NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), | |||
4439 | NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), | |||
4440 | NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), | |||
4441 | NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), | |||
4442 | NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), | |||
4443 | NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), | |||
4444 | NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), | |||
4445 | NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), | |||
4446 | NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), | |||
4447 | NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), | |||
4448 | NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), | |||
4449 | NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), | |||
4450 | NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), | |||
4451 | NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), | |||
4452 | NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), | |||
4453 | NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), | |||
4454 | NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), | |||
4455 | NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), | |||
4456 | NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), | |||
4457 | NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), | |||
4458 | NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), | |||
4459 | NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), | |||
4460 | NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), | |||
4461 | NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), | |||
4462 | NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), | |||
4463 | NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), | |||
4464 | NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), | |||
4465 | NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), | |||
4466 | NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), | |||
4467 | NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), | |||
4468 | NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), | |||
4469 | NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), | |||
4470 | NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), | |||
4471 | NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), | |||
4472 | NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), | |||
4473 | NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), | |||
4474 | NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), | |||
4475 | NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), | |||
4476 | NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), | |||
4477 | NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), | |||
4478 | NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), | |||
4479 | NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), | |||
4480 | NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), | |||
4481 | NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), | |||
4482 | NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), | |||
4483 | NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), | |||
4484 | NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), | |||
4485 | NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), | |||
4486 | NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), | |||
4487 | NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), | |||
4488 | NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), | |||
4489 | NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), | |||
4490 | NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), | |||
4491 | NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), | |||
4492 | NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), | |||
4493 | NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), | |||
4494 | NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), | |||
4495 | NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), | |||
4496 | NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), | |||
4497 | NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), | |||
4498 | NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), | |||
4499 | NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), | |||
4500 | NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), | |||
4501 | NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), | |||
4502 | NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), | |||
4503 | NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), | |||
4504 | NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), | |||
4505 | NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), | |||
4506 | NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), | |||
4507 | NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), | |||
4508 | NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), | |||
4509 | NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), | |||
4510 | NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), | |||
4511 | NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), | |||
4512 | NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), | |||
4513 | NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), | |||
4514 | NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), | |||
4515 | NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), | |||
4516 | NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), | |||
4517 | NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), | |||
4518 | NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), | |||
4519 | NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), | |||
4520 | NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), | |||
4521 | NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), | |||
4522 | NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), | |||
4523 | NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), | |||
4524 | NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), | |||
4525 | NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), | |||
4526 | NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), | |||
4527 | NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), | |||
4528 | NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), | |||
4529 | NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), | |||
4530 | NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), | |||
4531 | NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), | |||
4532 | NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), | |||
4533 | NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), | |||
4534 | NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), | |||
4535 | NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), | |||
4536 | NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), | |||
4537 | NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), | |||
4538 | NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), | |||
4539 | NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), | |||
4540 | NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), | |||
4541 | NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), | |||
4542 | NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), | |||
4543 | NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), | |||
4544 | NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), | |||
4545 | NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), | |||
4546 | NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), | |||
4547 | NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), | |||
4548 | NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), | |||
4549 | NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), | |||
4550 | NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), | |||
4551 | NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), | |||
4552 | NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), | |||
4553 | NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), | |||
4554 | NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), | |||
4555 | NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), | |||
4556 | NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), | |||
4557 | NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), | |||
4558 | NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), | |||
4559 | NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), | |||
4560 | NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), | |||
4561 | NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), | |||
4562 | NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), | |||
4563 | NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), | |||
4564 | NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), | |||
4565 | NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), | |||
4566 | NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), | |||
4567 | NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), | |||
4568 | // FP16 scalar intrinsics go here. | |||
4569 | NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), | |||
4570 | NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), | |||
4571 | NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), | |||
4572 | NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), | |||
4573 | NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), | |||
4574 | NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), | |||
4575 | NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), | |||
4576 | NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), | |||
4577 | NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), | |||
4578 | NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), | |||
4579 | NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), | |||
4580 | NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), | |||
4581 | NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), | |||
4582 | NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), | |||
4583 | NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), | |||
4584 | NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), | |||
4585 | NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), | |||
4586 | NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), | |||
4587 | NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), | |||
4588 | NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), | |||
4589 | NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), | |||
4590 | NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), | |||
4591 | NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), | |||
4592 | NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), | |||
4593 | NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), | |||
4594 | NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), | |||
4595 | NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), | |||
4596 | NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), | |||
4597 | NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), | |||
4598 | NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), | |||
4599 | }; | |||
4600 | ||||
4601 | #undef NEONMAP0 | |||
4602 | #undef NEONMAP1 | |||
4603 | #undef NEONMAP2 | |||
4604 | ||||
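| // These flags cache the result of the debug-only std::is_sorted check in | |||
| // findNeonIntrinsicInMap, so each table is verified at most once (and only | |||
| // in asserts-enabled builds). | |||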
4605 | static bool NEONSIMDIntrinsicsProvenSorted = false; | |||
4606 | ||||
4607 | static bool AArch64SIMDIntrinsicsProvenSorted = false; | |||
4608 | static bool AArch64SISDIntrinsicsProvenSorted = false; | |||
4609 | ||||
4610 | ||||
4611 | static const NeonIntrinsicInfo * | |||
4612 | findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap, | |||
4613 | unsigned BuiltinID, bool &MapProvenSorted) { | |||
4614 | ||||
4615 | #ifndef NDEBUG | |||
4616 | if (!MapProvenSorted) { | |||
4617 | assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap))); | |||
4618 | MapProvenSorted = true; | |||
4619 | } | |||
4620 | #endif | |||
4621 | ||||
4622 | const NeonIntrinsicInfo *Builtin = | |||
4623 | std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID); | |||
4624 | ||||
4625 | if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) | |||
4626 | return Builtin; | |||
4627 | ||||
4628 | return nullptr; | |||
4629 | } | |||
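| // Illustrative sketch only (not verbatim from this file): callers typically | |||
| // look up the builtin and dispatch to the common lowering path, e.g. | |||
| //   if (const NeonIntrinsicInfo *Info = findNeonIntrinsicInMap( | |||
| //           AArch64SIMDIntrinsicMap, BuiltinID, AArch64SIMDIntrinsicsProvenSorted)) | |||
| //     return EmitCommonNeonBuiltinExpr(Info->BuiltinID, Info->LLVMIntrinsic, | |||
| //                                      Info->AltLLVMIntrinsic, Info->NameHint, | |||
| //                                      Info->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); | |||
| // falling back to hand-written lowering when the lookup returns nullptr. | |||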
4630 | ||||
4631 | Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, | |||
4632 | unsigned Modifier, | |||
4633 | llvm::Type *ArgType, | |||
4634 | const CallExpr *E) { | |||
4635 | int VectorSize = 0; | |||
4636 | if (Modifier & Use64BitVectors) | |||
4637 | VectorSize = 64; | |||
4638 | else if (Modifier & Use128BitVectors) | |||
4639 | VectorSize = 128; | |||
4640 | ||||
4641 | // Return type. | |||
4642 | SmallVector<llvm::Type *, 3> Tys; | |||
4643 | if (Modifier & AddRetType) { | |||
4644 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); | |||
4645 | if (Modifier & VectorizeRetType) | |||
4646 | Ty = llvm::VectorType::get( | |||
4647 | Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); | |||
4648 | ||||
4649 | Tys.push_back(Ty); | |||
4650 | } | |||
4651 | ||||
4652 | // Arguments. | |||
4653 | if (Modifier & VectorizeArgTypes) { | |||
4654 | int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; | |||
4655 | ArgType = llvm::VectorType::get(ArgType, Elts); | |||
4656 | } | |||
4657 | ||||
4658 | if (Modifier & (Add1ArgType | Add2ArgTypes)) | |||
4659 | Tys.push_back(ArgType); | |||
4660 | ||||
4661 | if (Modifier & Add2ArgTypes) | |||
4662 | Tys.push_back(ArgType); | |||
4663 | ||||
4664 | if (Modifier & InventFloatType) | |||
4665 | Tys.push_back(FloatTy); | |||
4666 | ||||
4667 | return CGM.getIntrinsic(IntrinsicID, Tys); | |||
4668 | } | |||
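| // Roughly: AddRetType | Add1ArgType overloads the intrinsic on the call's | |||
| // return type and on the supplied argument type; the Vectorize*/Use*BitVectors | |||
| // flags used in the tables above instead promote that argument (and/or the | |||
| // return) to a 64- or 128-bit vector first, so scalar builtins can reuse | |||
| // vector intrinsics. | |||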
4669 | ||||
4670 | static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF, | |||
4671 | const NeonIntrinsicInfo &SISDInfo, | |||
4672 | SmallVectorImpl<Value *> &Ops, | |||
4673 | const CallExpr *E) { | |||
4674 | unsigned BuiltinID = SISDInfo.BuiltinID; | |||
4675 | unsigned int Int = SISDInfo.LLVMIntrinsic; | |||
4676 | unsigned Modifier = SISDInfo.TypeModifier; | |||
4677 | const char *s = SISDInfo.NameHint; | |||
4678 | ||||
4679 | switch (BuiltinID) { | |||
4680 | case NEON::BI__builtin_neon_vcled_s64: | |||
4681 | case NEON::BI__builtin_neon_vcled_u64: | |||
4682 | case NEON::BI__builtin_neon_vcles_f32: | |||
4683 | case NEON::BI__builtin_neon_vcled_f64: | |||
4684 | case NEON::BI__builtin_neon_vcltd_s64: | |||
4685 | case NEON::BI__builtin_neon_vcltd_u64: | |||
4686 | case NEON::BI__builtin_neon_vclts_f32: | |||
4687 | case NEON::BI__builtin_neon_vcltd_f64: | |||
4688 | case NEON::BI__builtin_neon_vcales_f32: | |||
4689 | case NEON::BI__builtin_neon_vcaled_f64: | |||
4690 | case NEON::BI__builtin_neon_vcalts_f32: | |||
4691 | case NEON::BI__builtin_neon_vcaltd_f64: | |||
4692 | // Only one direction of comparison actually exists: cmle is really a cmge | |||
4693 | // with swapped operands. The table gives us the right intrinsic, but we | |||
4694 | // still need to do the swap. | |||
4695 | std::swap(Ops[0], Ops[1]); | |||
4696 | break; | |||
4697 | } | |||
4698 | ||||
4699 | assert(Int && "Generic code assumes a valid intrinsic"); | |||
4700 | ||||
4701 | // Determine the type(s) of this overloaded AArch64 intrinsic. | |||
4702 | const Expr *Arg = E->getArg(0); | |||
4703 | llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); | |||
4704 | Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E); | |||
4705 | ||||
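| // Scalar builtins are emitted through vector intrinsics: any operand that is | |||
| // narrower than the corresponding intrinsic parameter is inserted into lane 0 | |||
| // of an undef vector of the parameter's type below. | |||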
4706 | int j = 0; | |||
4707 | ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0); | |||
4708 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); | |||
4709 | ai != ae; ++ai, ++j) { | |||
4710 | llvm::Type *ArgTy = ai->getType(); | |||
4711 | if (Ops[j]->getType()->getPrimitiveSizeInBits() == | |||
4712 | ArgTy->getPrimitiveSizeInBits()) | |||
4713 | continue; | |||
4714 | ||||
4715 | assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); | |||
4716 | // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate | |||
4717 | // it before inserting. | |||
4718 | Ops[j] = | |||
4719 | CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType()); | |||
4720 | Ops[j] = | |||
4721 | CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0); | |||
4722 | } | |||
4723 | ||||
4724 | Value *Result = CGF.EmitNeonCall(F, Ops, s); | |||
4725 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); | |||
4726 | if (ResultType->getPrimitiveSizeInBits() < | |||
4727 | Result->getType()->getPrimitiveSizeInBits()) | |||
4728 | return CGF.Builder.CreateExtractElement(Result, C0); | |||
4729 | ||||
4730 | return CGF.Builder.CreateBitCast(Result, ResultType, s); | |||
4731 | } | |||
4732 | ||||
4733 | Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( | |||
4734 | unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, | |||
4735 | const char *NameHint, unsigned Modifier, const CallExpr *E, | |||
4736 | SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1, | |||
4737 | llvm::Triple::ArchType Arch) { | |||
4738 | // Get the last argument, which specifies the vector type. | |||
4739 | llvm::APSInt NeonTypeConst; | |||
4740 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); | |||
4741 | if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext())) | |||
4742 | return nullptr; | |||
4743 | ||||
4744 | // Determine the type of this overloaded NEON intrinsic. | |||
4745 | NeonTypeFlags Type(NeonTypeConst.getZExtValue()); | |||
4746 | bool Usgn = Type.isUnsigned(); | |||
4747 | bool Quad = Type.isQuad(); | |||
4748 | const bool HasLegalHalfType = getTarget().hasLegalHalfType(); | |||
4749 | ||||
4750 | llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType); | |||
4751 | llvm::Type *Ty = VTy; | |||
4752 | if (!Ty) | |||
4753 | return nullptr; | |||
4754 | ||||
4755 | auto getAlignmentValue32 = [&](Address addr) -> Value* { | |||
4756 | return Builder.getInt32(addr.getAlignment().getQuantity()); | |||
4757 | }; | |||
4758 | ||||
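| // For entries tagged UnsignedAlts, LLVMIntrinsic is the unsigned variant | |||
| // (e.g. uqadd for vqadd_v) and AltLLVMIntrinsic the signed one (sqadd); | |||
| // switch to the alternate when the element type is signed. | |||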
4759 | unsigned Int = LLVMIntrinsic; | |||
4760 | if ((Modifier & UnsignedAlts) && !Usgn) | |||
4761 | Int = AltLLVMIntrinsic; | |||
4762 | ||||
4763 | switch (BuiltinID) { | |||
4764 | default: break; | |||
4765 | case NEON::BI__builtin_neon_vabs_v: | |||
4766 | case NEON::BI__builtin_neon_vabsq_v: | |||
4767 | if (VTy->getElementType()->isFloatingPointTy()) | |||
4768 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs"); | |||
4769 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs"); | |||
4770 | case NEON::BI__builtin_neon_vaddhn_v: { | |||
4771 | llvm::VectorType *SrcTy = | |||
4772 | llvm::VectorType::getExtendedElementVectorType(VTy); | |||
4773 | ||||
4774 | // %sum = add <4 x i32> %lhs, %rhs | |||
4775 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); | |||
4776 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); | |||
4777 | Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn"); | |||
4778 | ||||
4779 | // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16> | |||
4780 | Constant *ShiftAmt = | |||
4781 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); | |||
4782 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn"); | |||
4783 | ||||
4784 | // %res = trunc <4 x i32> %high to <4 x i16> | |||
4785 | return Builder.CreateTrunc(Ops[0], VTy, "vaddhn"); | |||
4786 | } | |||
4787 | case NEON::BI__builtin_neon_vcale_v: | |||
4788 | case NEON::BI__builtin_neon_vcaleq_v: | |||
4789 | case NEON::BI__builtin_neon_vcalt_v: | |||
4790 | case NEON::BI__builtin_neon_vcaltq_v: | |||
4791 | std::swap(Ops[0], Ops[1]); | |||
4792 | LLVM_FALLTHROUGH; | |||
4793 | case NEON::BI__builtin_neon_vcage_v: | |||
4794 | case NEON::BI__builtin_neon_vcageq_v: | |||
4795 | case NEON::BI__builtin_neon_vcagt_v: | |||
4796 | case NEON::BI__builtin_neon_vcagtq_v: { | |||
4797 | llvm::Type *Ty; | |||
4798 | switch (VTy->getScalarSizeInBits()) { | |||
4799 | default: llvm_unreachable("unexpected type"); | |||
4800 | case 32: | |||
4801 | Ty = FloatTy; | |||
4802 | break; | |||
4803 | case 64: | |||
4804 | Ty = DoubleTy; | |||
4805 | break; | |||
4806 | case 16: | |||
4807 | Ty = HalfTy; | |||
4808 | break; | |||
4809 | } | |||
4810 | llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements()); | |||
4811 | llvm::Type *Tys[] = { VTy, VecFlt }; | |||
4812 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); | |||
4813 | return EmitNeonCall(F, Ops, NameHint); | |||
4814 | } | |||
4815 | case NEON::BI__builtin_neon_vceqz_v: | |||
4816 | case NEON::BI__builtin_neon_vceqzq_v: | |||
4817 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ, | |||
4818 | ICmpInst::ICMP_EQ, "vceqz"); | |||
4819 | case NEON::BI__builtin_neon_vcgez_v: | |||
4820 | case NEON::BI__builtin_neon_vcgezq_v: | |||
4821 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE, | |||
4822 | ICmpInst::ICMP_SGE, "vcgez"); | |||
4823 | case NEON::BI__builtin_neon_vclez_v: | |||
4824 | case NEON::BI__builtin_neon_vclezq_v: | |||
4825 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE, | |||
4826 | ICmpInst::ICMP_SLE, "vclez"); | |||
4827 | case NEON::BI__builtin_neon_vcgtz_v: | |||
4828 | case NEON::BI__builtin_neon_vcgtzq_v: | |||
4829 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT, | |||
4830 | ICmpInst::ICMP_SGT, "vcgtz"); | |||
4831 | case NEON::BI__builtin_neon_vcltz_v: | |||
4832 | case NEON::BI__builtin_neon_vcltzq_v: | |||
4833 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT, | |||
4834 | ICmpInst::ICMP_SLT, "vcltz"); | |||
4835 | case NEON::BI__builtin_neon_vclz_v: | |||
4836 | case NEON::BI__builtin_neon_vclzq_v: | |||
4837 | // We generate a target-independent intrinsic, which needs a second argument | |||
4838 | // specifying whether or not clz of zero is undefined; on ARM it isn't. | |||
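| // e.g. this becomes @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) on ARM, where | |||
| // clz of zero is well defined. | |||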
4839 | Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef())); | |||
4840 | break; | |||
4841 | case NEON::BI__builtin_neon_vcvt_f32_v: | |||
4842 | case NEON::BI__builtin_neon_vcvtq_f32_v: | |||
4843 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
4844 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), | |||
4845 | HasLegalHalfType); | |||
4846 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") | |||
4847 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); | |||
4848 | case NEON::BI__builtin_neon_vcvt_f16_v: | |||
4849 | case NEON::BI__builtin_neon_vcvtq_f16_v: | |||
4850 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
4851 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), | |||
4852 | HasLegalHalfType); | |||
4853 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") | |||
4854 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); | |||
4855 | case NEON::BI__builtin_neon_vcvt_n_f16_v: | |||
4856 | case NEON::BI__builtin_neon_vcvt_n_f32_v: | |||
4857 | case NEON::BI__builtin_neon_vcvt_n_f64_v: | |||
4858 | case NEON::BI__builtin_neon_vcvtq_n_f16_v: | |||
4859 | case NEON::BI__builtin_neon_vcvtq_n_f32_v: | |||
4860 | case NEON::BI__builtin_neon_vcvtq_n_f64_v: { | |||
4861 | llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty }; | |||
4862 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; | |||
4863 | Function *F = CGM.getIntrinsic(Int, Tys); | |||
4864 | return EmitNeonCall(F, Ops, "vcvt_n"); | |||
4865 | } | |||
4866 | case NEON::BI__builtin_neon_vcvt_n_s16_v: | |||
4867 | case NEON::BI__builtin_neon_vcvt_n_s32_v: | |||
4868 | case NEON::BI__builtin_neon_vcvt_n_u16_v: | |||
4869 | case NEON::BI__builtin_neon_vcvt_n_u32_v: | |||
4870 | case NEON::BI__builtin_neon_vcvt_n_s64_v: | |||
4871 | case NEON::BI__builtin_neon_vcvt_n_u64_v: | |||
4872 | case NEON::BI__builtin_neon_vcvtq_n_s16_v: | |||
4873 | case NEON::BI__builtin_neon_vcvtq_n_s32_v: | |||
4874 | case NEON::BI__builtin_neon_vcvtq_n_u16_v: | |||
4875 | case NEON::BI__builtin_neon_vcvtq_n_u32_v: | |||
4876 | case NEON::BI__builtin_neon_vcvtq_n_s64_v: | |||
4877 | case NEON::BI__builtin_neon_vcvtq_n_u64_v: { | |||
4878 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; | |||
4879 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); | |||
4880 | return EmitNeonCall(F, Ops, "vcvt_n"); | |||
4881 | } | |||
4882 | case NEON::BI__builtin_neon_vcvt_s32_v: | |||
4883 | case NEON::BI__builtin_neon_vcvt_u32_v: | |||
4884 | case NEON::BI__builtin_neon_vcvt_s64_v: | |||
4885 | case NEON::BI__builtin_neon_vcvt_u64_v: | |||
4886 | case NEON::BI__builtin_neon_vcvt_s16_v: | |||
4887 | case NEON::BI__builtin_neon_vcvt_u16_v: | |||
4888 | case NEON::BI__builtin_neon_vcvtq_s32_v: | |||
4889 | case NEON::BI__builtin_neon_vcvtq_u32_v: | |||
4890 | case NEON::BI__builtin_neon_vcvtq_s64_v: | |||
4891 | case NEON::BI__builtin_neon_vcvtq_u64_v: | |||
4892 | case NEON::BI__builtin_neon_vcvtq_s16_v: | |||
4893 | case NEON::BI__builtin_neon_vcvtq_u16_v: { | |||
4894 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); | |||
4895 | return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") | |||
4896 | : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); | |||
4897 | } | |||
4898 | case NEON::BI__builtin_neon_vcvta_s16_v: | |||
4899 | case NEON::BI__builtin_neon_vcvta_s32_v: | |||
4900 | case NEON::BI__builtin_neon_vcvta_s64_v: | |||
4901 | case NEON::BI__builtin_neon_vcvta_u16_v: | |||
4902 | case NEON::BI__builtin_neon_vcvta_u32_v: | |||
4903 | case NEON::BI__builtin_neon_vcvta_u64_v: | |||
4904 | case NEON::BI__builtin_neon_vcvtaq_s16_v: | |||
4905 | case NEON::BI__builtin_neon_vcvtaq_s32_v: | |||
4906 | case NEON::BI__builtin_neon_vcvtaq_s64_v: | |||
4907 | case NEON::BI__builtin_neon_vcvtaq_u16_v: | |||
4908 | case NEON::BI__builtin_neon_vcvtaq_u32_v: | |||
4909 | case NEON::BI__builtin_neon_vcvtaq_u64_v: | |||
4910 | case NEON::BI__builtin_neon_vcvtn_s16_v: | |||
4911 | case NEON::BI__builtin_neon_vcvtn_s32_v: | |||
4912 | case NEON::BI__builtin_neon_vcvtn_s64_v: | |||
4913 | case NEON::BI__builtin_neon_vcvtn_u16_v: | |||
4914 | case NEON::BI__builtin_neon_vcvtn_u32_v: | |||
4915 | case NEON::BI__builtin_neon_vcvtn_u64_v: | |||
4916 | case NEON::BI__builtin_neon_vcvtnq_s16_v: | |||
4917 | case NEON::BI__builtin_neon_vcvtnq_s32_v: | |||
4918 | case NEON::BI__builtin_neon_vcvtnq_s64_v: | |||
4919 | case NEON::BI__builtin_neon_vcvtnq_u16_v: | |||
4920 | case NEON::BI__builtin_neon_vcvtnq_u32_v: | |||
4921 | case NEON::BI__builtin_neon_vcvtnq_u64_v: | |||
4922 | case NEON::BI__builtin_neon_vcvtp_s16_v: | |||
4923 | case NEON::BI__builtin_neon_vcvtp_s32_v: | |||
4924 | case NEON::BI__builtin_neon_vcvtp_s64_v: | |||
4925 | case NEON::BI__builtin_neon_vcvtp_u16_v: | |||
4926 | case NEON::BI__builtin_neon_vcvtp_u32_v: | |||
4927 | case NEON::BI__builtin_neon_vcvtp_u64_v: | |||
4928 | case NEON::BI__builtin_neon_vcvtpq_s16_v: | |||
4929 | case NEON::BI__builtin_neon_vcvtpq_s32_v: | |||
4930 | case NEON::BI__builtin_neon_vcvtpq_s64_v: | |||
4931 | case NEON::BI__builtin_neon_vcvtpq_u16_v: | |||
4932 | case NEON::BI__builtin_neon_vcvtpq_u32_v: | |||
4933 | case NEON::BI__builtin_neon_vcvtpq_u64_v: | |||
4934 | case NEON::BI__builtin_neon_vcvtm_s16_v: | |||
4935 | case NEON::BI__builtin_neon_vcvtm_s32_v: | |||
4936 | case NEON::BI__builtin_neon_vcvtm_s64_v: | |||
4937 | case NEON::BI__builtin_neon_vcvtm_u16_v: | |||
4938 | case NEON::BI__builtin_neon_vcvtm_u32_v: | |||
4939 | case NEON::BI__builtin_neon_vcvtm_u64_v: | |||
4940 | case NEON::BI__builtin_neon_vcvtmq_s16_v: | |||
4941 | case NEON::BI__builtin_neon_vcvtmq_s32_v: | |||
4942 | case NEON::BI__builtin_neon_vcvtmq_s64_v: | |||
4943 | case NEON::BI__builtin_neon_vcvtmq_u16_v: | |||
4944 | case NEON::BI__builtin_neon_vcvtmq_u32_v: | |||
4945 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { | |||
4946 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; | |||
4947 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); | |||
4948 | } | |||
4949 | case NEON::BI__builtin_neon_vext_v: | |||
4950 | case NEON::BI__builtin_neon_vextq_v: { | |||
4951 | int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); | |||
4952 | SmallVector<uint32_t, 16> Indices; | |||
4953 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) | |||
4954 | Indices.push_back(i+CV); | |||
4955 | ||||
4956 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
4957 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
4958 | return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext"); | |||
4959 | } | |||
4960 | case NEON::BI__builtin_neon_vfma_v: | |||
4961 | case NEON::BI__builtin_neon_vfmaq_v: { | |||
4962 | Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); | |||
4963 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
4964 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
4965 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
4966 | ||||
4967 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. | |||
4968 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); | |||
4969 | } | |||
4970 | case NEON::BI__builtin_neon_vld1_v: | |||
4971 | case NEON::BI__builtin_neon_vld1q_v: { | |||
4972 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; | |||
4973 | Ops.push_back(getAlignmentValue32(PtrOp0)); | |||
4974 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1"); | |||
4975 | } | |||
4976 | case NEON::BI__builtin_neon_vld1_x2_v: | |||
4977 | case NEON::BI__builtin_neon_vld1q_x2_v: | |||
4978 | case NEON::BI__builtin_neon_vld1_x3_v: | |||
4979 | case NEON::BI__builtin_neon_vld1q_x3_v: | |||
4980 | case NEON::BI__builtin_neon_vld1_x4_v: | |||
4981 | case NEON::BI__builtin_neon_vld1q_x4_v: { | |||
4982 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType()); | |||
4983 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
4984 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
4985 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); | |||
4986 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN"); | |||
4987 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
4988 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
4989 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
4990 | } | |||
4991 | case NEON::BI__builtin_neon_vld2_v: | |||
4992 | case NEON::BI__builtin_neon_vld2q_v: | |||
4993 | case NEON::BI__builtin_neon_vld3_v: | |||
4994 | case NEON::BI__builtin_neon_vld3q_v: | |||
4995 | case NEON::BI__builtin_neon_vld4_v: | |||
4996 | case NEON::BI__builtin_neon_vld4q_v: | |||
4997 | case NEON::BI__builtin_neon_vld2_dup_v: | |||
4998 | case NEON::BI__builtin_neon_vld2q_dup_v: | |||
4999 | case NEON::BI__builtin_neon_vld3_dup_v: | |||
5000 | case NEON::BI__builtin_neon_vld3q_dup_v: | |||
5001 | case NEON::BI__builtin_neon_vld4_dup_v: | |||
5002 | case NEON::BI__builtin_neon_vld4q_dup_v: { | |||
5003 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; | |||
5004 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); | |||
5005 | Value *Align = getAlignmentValue32(PtrOp1); | |||
5006 | Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint); | |||
5007 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
5008 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
5009 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
5010 | } | |||
5011 | case NEON::BI__builtin_neon_vld1_dup_v: | |||
5012 | case NEON::BI__builtin_neon_vld1q_dup_v: { | |||
5013 | Value *V = UndefValue::get(Ty); | |||
5014 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); | |||
5015 | PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty); | |||
5016 | LoadInst *Ld = Builder.CreateLoad(PtrOp0); | |||
5017 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); | |||
5018 | Ops[0] = Builder.CreateInsertElement(V, Ld, CI); | |||
5019 | return EmitNeonSplat(Ops[0], CI); | |||
5020 | } | |||
5021 | case NEON::BI__builtin_neon_vld2_lane_v: | |||
5022 | case NEON::BI__builtin_neon_vld2q_lane_v: | |||
5023 | case NEON::BI__builtin_neon_vld3_lane_v: | |||
5024 | case NEON::BI__builtin_neon_vld3q_lane_v: | |||
5025 | case NEON::BI__builtin_neon_vld4_lane_v: | |||
5026 | case NEON::BI__builtin_neon_vld4q_lane_v: { | |||
5027 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; | |||
5028 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); | |||
5029 | for (unsigned I = 2; I < Ops.size() - 1; ++I) | |||
5030 | Ops[I] = Builder.CreateBitCast(Ops[I], Ty); | |||
5031 | Ops.push_back(getAlignmentValue32(PtrOp1)); | |||
5032 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint); | |||
5033 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
5034 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
5035 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
5036 | } | |||
5037 | case NEON::BI__builtin_neon_vmovl_v: { | |||
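| // Lengthening move: reinterpret the operand as the narrow element type, | |||
| // then sign- or zero-extend each lane to the wide result type. | |||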
5038 | llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy); | |||
5039 | Ops[0] = Builder.CreateBitCast(Ops[0], DTy); | |||
5040 | if (Usgn) | |||
5041 | return Builder.CreateZExt(Ops[0], Ty, "vmovl"); | |||
5042 | return Builder.CreateSExt(Ops[0], Ty, "vmovl"); | |||
5043 | } | |||
5044 | case NEON::BI__builtin_neon_vmovn_v: { | |||
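| // Narrowing move: reinterpret the operand as the wide element type, then | |||
| // truncate each lane to the narrow result type. | |||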
5045 | llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy); | |||
5046 | Ops[0] = Builder.CreateBitCast(Ops[0], QTy); | |||
5047 | return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); | |||
5048 | } | |||
5049 | case NEON::BI__builtin_neon_vmull_v: | |||
5050 | // FIXME: the integer vmull operations could be emitted in terms of pure | |||
5051 | // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of | |||
5052 | // hoisting the exts outside loops. Until global ISel comes along that can | |||
5053 | // see through such movement this leads to bad CodeGen. So we need an | |||
5054 | // intrinsic for now. | |||
5055 | Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; | |||
5056 | Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; | |||
5057 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); | |||
5058 | case NEON::BI__builtin_neon_vpadal_v: | |||
5059 | case NEON::BI__builtin_neon_vpadalq_v: { | |||
5060 | // The source operand type has twice as many elements of half the size. | |||
5061 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); | |||
5062 | llvm::Type *EltTy = | |||
5063 | llvm::IntegerType::get(getLLVMContext(), EltBits / 2); | |||
5064 | llvm::Type *NarrowTy = | |||
5065 | llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); | |||
5066 | llvm::Type *Tys[2] = { Ty, NarrowTy }; | |||
5067 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); | |||
5068 | } | |||
5069 | case NEON::BI__builtin_neon_vpaddl_v: | |||
5070 | case NEON::BI__builtin_neon_vpaddlq_v: { | |||
5071 | // The source operand type has twice as many elements of half the size. | |||
5072 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); | |||
5073 | llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); | |||
5074 | llvm::Type *NarrowTy = | |||
5075 | llvm::VectorType::get(EltTy, VTy->getNumElements() * 2); | |||
5076 | llvm::Type *Tys[2] = { Ty, NarrowTy }; | |||
5077 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); | |||
5078 | } | |||
5079 | case NEON::BI__builtin_neon_vqdmlal_v: | |||
5080 | case NEON::BI__builtin_neon_vqdmlsl_v: { | |||
5081 | SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end()); | |||
5082 | Ops[1] = | |||
5083 | EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal"); | |||
5084 | Ops.resize(2); | |||
5085 | return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint); | |||
5086 | } | |||
5087 | case NEON::BI__builtin_neon_vqshl_n_v: | |||
5088 | case NEON::BI__builtin_neon_vqshlq_n_v: | |||
5089 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", | |||
5090 | 1, false); | |||
5091 | case NEON::BI__builtin_neon_vqshlu_n_v: | |||
5092 | case NEON::BI__builtin_neon_vqshluq_n_v: | |||
5093 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", | |||
5094 | 1, false); | |||
5095 | case NEON::BI__builtin_neon_vrecpe_v: | |||
5096 | case NEON::BI__builtin_neon_vrecpeq_v: | |||
5097 | case NEON::BI__builtin_neon_vrsqrte_v: | |||
5098 | case NEON::BI__builtin_neon_vrsqrteq_v: | |||
5099 | Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; | |||
5100 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); | |||
5101 | case NEON::BI__builtin_neon_vrndi_v: | |||
5102 | case NEON::BI__builtin_neon_vrndiq_v: | |||
5103 | Int = Intrinsic::nearbyint; | |||
5104 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); | |||
5105 | case NEON::BI__builtin_neon_vrshr_n_v: | |||
5106 | case NEON::BI__builtin_neon_vrshrq_n_v: | |||
5107 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", | |||
5108 | 1, true); | |||
5109 | case NEON::BI__builtin_neon_vshl_n_v: | |||
5110 | case NEON::BI__builtin_neon_vshlq_n_v: | |||
5111 | Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); | |||
5112 | return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1], | |||
5113 | "vshl_n"); | |||
5114 | case NEON::BI__builtin_neon_vshll_n_v: { | |||
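| // Widening shift left by immediate: extend each lane from the narrow source | |||
| // type to the wide result type, then shift left by the splatted constant. | |||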
5115 | llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy); | |||
5116 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); | |||
5117 | if (Usgn) | |||
5118 | Ops[0] = Builder.CreateZExt(Ops[0], VTy); | |||
5119 | else | |||
5120 | Ops[0] = Builder.CreateSExt(Ops[0], VTy); | |||
5121 | Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false); | |||
5122 | return Builder.CreateShl(Ops[0], Ops[1], "vshll_n"); | |||
5123 | } | |||
5124 | case NEON::BI__builtin_neon_vshrn_n_v: { | |||
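| // Narrowing shift right by immediate: shift (logical for unsigned, | |||
| // arithmetic for signed) in the wide type, then truncate to the result. | |||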
5125 | llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy); | |||
5126 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); | |||
5127 | Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); | |||
5128 | if (Usgn) | |||
5129 | Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]); | |||
5130 | else | |||
5131 | Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]); | |||
5132 | return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n"); | |||
5133 | } | |||
5134 | case NEON::BI__builtin_neon_vshr_n_v: | |||
5135 | case NEON::BI__builtin_neon_vshrq_n_v: | |||
5136 | return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n"); | |||
5137 | case NEON::BI__builtin_neon_vst1_v: | |||
5138 | case NEON::BI__builtin_neon_vst1q_v: | |||
5139 | case NEON::BI__builtin_neon_vst2_v: | |||
5140 | case NEON::BI__builtin_neon_vst2q_v: | |||
5141 | case NEON::BI__builtin_neon_vst3_v: | |||
5142 | case NEON::BI__builtin_neon_vst3q_v: | |||
5143 | case NEON::BI__builtin_neon_vst4_v: | |||
5144 | case NEON::BI__builtin_neon_vst4q_v: | |||
5145 | case NEON::BI__builtin_neon_vst2_lane_v: | |||
5146 | case NEON::BI__builtin_neon_vst2q_lane_v: | |||
5147 | case NEON::BI__builtin_neon_vst3_lane_v: | |||
5148 | case NEON::BI__builtin_neon_vst3q_lane_v: | |||
5149 | case NEON::BI__builtin_neon_vst4_lane_v: | |||
5150 | case NEON::BI__builtin_neon_vst4q_lane_v: { | |||
5151 | llvm::Type *Tys[] = {Int8PtrTy, Ty}; | |||
5152 | Ops.push_back(getAlignmentValue32(PtrOp0)); | |||
5153 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, ""); | |||
5154 | } | |||
5155 | case NEON::BI__builtin_neon_vst1_x2_v: | |||
5156 | case NEON::BI__builtin_neon_vst1q_x2_v: | |||
5157 | case NEON::BI__builtin_neon_vst1_x3_v: | |||
5158 | case NEON::BI__builtin_neon_vst1q_x3_v: | |||
5159 | case NEON::BI__builtin_neon_vst1_x4_v: | |||
5160 | case NEON::BI__builtin_neon_vst1q_x4_v: { | |||
5161 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType()); | |||
5162 | // TODO: Currently in AArch32 mode the pointer operand comes first, whereas | |||
5163 | // in AArch64 it comes last. We may want to stick to one or the other. | |||
5164 | if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) { | |||
5165 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
5166 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); | |||
5167 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); | |||
5168 | } | |||
5169 | llvm::Type *Tys[2] = { PTy, VTy }; | |||
5170 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); | |||
5171 | } | |||
5172 | case NEON::BI__builtin_neon_vsubhn_v: { | |||
5173 | llvm::VectorType *SrcTy = | |||
5174 | llvm::VectorType::getExtendedElementVectorType(VTy); | |||
5175 | ||||
5176 | // %diff = sub <4 x i32> %lhs, %rhs | |||
5177 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); | |||
5178 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); | |||
5179 | Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn"); | |||
5180 | ||||
5181 | // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16> | |||
5182 | Constant *ShiftAmt = | |||
5183 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); | |||
5184 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn"); | |||
5185 | ||||
5186 | // %res = trunc <4 x i32> %high to <4 x i16> | |||
5187 | return Builder.CreateTrunc(Ops[0], VTy, "vsubhn"); | |||
5188 | } | |||
5189 | case NEON::BI__builtin_neon_vtrn_v: | |||
5190 | case NEON::BI__builtin_neon_vtrnq_v: { | |||
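| // vtrn returns a pair of vectors. Ops[0] is the address of that result; | |||
| // each transposed half is built with a shufflevector and stored to the | |||
| // corresponding element of the pair (vuzp and vzip below follow the same | |||
| // store-to-pair pattern). | |||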
5191 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); | |||
5192 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
5193 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
5194 | Value *SV = nullptr; | |||
5195 | ||||
5196 | for (unsigned vi = 0; vi != 2; ++vi) { | |||
5197 | SmallVector<uint32_t, 16> Indices; | |||
5198 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { | |||
5199 | Indices.push_back(i+vi); | |||
5200 | Indices.push_back(i+e+vi); | |||
5201 | } | |||
5202 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); | |||
5203 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); | |||
5204 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); | |||
5205 | } | |||
5206 | return SV; | |||
5207 | } | |||
5208 | case NEON::BI__builtin_neon_vtst_v: | |||
5209 | case NEON::BI__builtin_neon_vtstq_v: { | |||
5210 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
5211 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
5212 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); | |||
5213 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], | |||
5214 | ConstantAggregateZero::get(Ty)); | |||
5215 | return Builder.CreateSExt(Ops[0], Ty, "vtst"); | |||
5216 | } | |||
5217 | case NEON::BI__builtin_neon_vuzp_v: | |||
5218 | case NEON::BI__builtin_neon_vuzpq_v: { | |||
5219 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); | |||
5220 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
5221 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
5222 | Value *SV = nullptr; | |||
5223 | ||||
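| // Illustration: with a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}, the stores below | |||
| // write the even elements {a0,a2,b0,b2} and then the odd elements | |||
| // {a1,a3,b1,b3}, matching the VUZP unzip semantics. | |||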
5224 | for (unsigned vi = 0; vi != 2; ++vi) { | |||
5225 | SmallVector<uint32_t, 16> Indices; | |||
5226 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) | |||
5227 | Indices.push_back(2*i+vi); | |||
5228 | ||||
5229 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); | |||
5230 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); | |||
5231 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); | |||
5232 | } | |||
5233 | return SV; | |||
5234 | } | |||
5235 | case NEON::BI__builtin_neon_vzip_v: | |||
5236 | case NEON::BI__builtin_neon_vzipq_v: { | |||
5237 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); | |||
5238 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
5239 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
5240 | Value *SV = nullptr; | |||
5241 | ||||
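| // Illustration: with a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}, the stores below | |||
| // write {a0,b0,a1,b1} and then {a2,b2,a3,b3}, matching the VZIP interleave. | |||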
5242 | for (unsigned vi = 0; vi != 2; ++vi) { | |||
5243 | SmallVector<uint32_t, 16> Indices; | |||
5244 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { | |||
5245 | Indices.push_back((i + vi*e) >> 1); | |||
5246 | Indices.push_back(((i + vi*e) >> 1)+e); | |||
5247 | } | |||
5248 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); | |||
5249 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); | |||
5250 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); | |||
5251 | } | |||
5252 | return SV; | |||
5253 | } | |||
5254 | case NEON::BI__builtin_neon_vdot_v: | |||
5255 | case NEON::BI__builtin_neon_vdotq_v: { | |||
5256 | llvm::Type *InputTy = | |||
5257 | llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); | |||
5258 | llvm::Type *Tys[2] = { Ty, InputTy }; | |||
5259 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; | |||
5260 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot"); | |||
5261 | } | |||
5262 | } | |||
5263 | ||||
5264 | assert(Int && "Expected valid intrinsic number"); | |||
5265 | ||||
5266 | // Determine the type(s) of this overloaded AArch64 intrinsic. | |||
5267 | Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E); | |||
5268 | ||||
5269 | Value *Result = EmitNeonCall(F, Ops, NameHint); | |||
5270 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
5271 | // AArch64 intrinsic one-element vector type cast to | |||
5272 | // scalar type expected by the builtin | |||
5273 | return Builder.CreateBitCast(Result, ResultType, NameHint); | |||
5274 | } | |||
5275 | ||||
5276 | Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr( | |||
5277 | Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp, | |||
5278 | const CmpInst::Predicate Ip, const Twine &Name) { | |||
5279 | llvm::Type *OTy = Op->getType(); | |||
5280 | ||||
5281 | // FIXME: this is utterly horrific. We should not be looking at previous | |||
5282 | // codegen context to find out what needs doing. Unfortunately TableGen | |||
5283 | // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32 | |||
5284 | // (etc). | |||
5285 | if (BitCastInst *BI = dyn_cast<BitCastInst>(Op)) | |||
5286 | OTy = BI->getOperand(0)->getType(); | |||
5287 | ||||
5288 | Op = Builder.CreateBitCast(Op, OTy); | |||
5289 | if (OTy->getScalarType()->isFloatingPointTy()) { | |||
5290 | Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy)); | |||
5291 | } else { | |||
5292 | Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy)); | |||
5293 | } | |||
5294 | return Builder.CreateSExt(Op, Ty, Name); | |||
5295 | } | |||
5296 | ||||
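| // Adapts the AArch32-style vtbl/vtbx operands (one to four 64-bit tables) | |||
| // to the AArch64 TBL/TBX intrinsics, which take 128-bit tables: adjacent | |||
| // 64-bit tables are concatenated pairwise, and a leftover odd table is | |||
| // widened with zeros in its upper half. | |||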
5297 | static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops, | |||
5298 | Value *ExtOp, Value *IndexOp, | |||
5299 | llvm::Type *ResTy, unsigned IntID, | |||
5300 | const char *Name) { | |||
5301 | SmallVector<Value *, 2> TblOps; | |||
5302 | if (ExtOp) | |||
5303 | TblOps.push_back(ExtOp); | |||
5304 | ||||
5305 | // Build a vector containing sequential numbers like (0, 1, 2, ..., 15). | |||
5306 | SmallVector<uint32_t, 16> Indices; | |||
5307 | llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType()); | |||
5308 | for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { | |||
5309 | Indices.push_back(2*i); | |||
5310 | Indices.push_back(2*i+1); | |||
5311 | } | |||
5312 | ||||
5313 | int PairPos = 0, End = Ops.size() - 1; | |||
5314 | while (PairPos < End) { | |||
5315 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], | |||
5316 | Ops[PairPos+1], Indices, | |||
5317 | Name)); | |||
5318 | PairPos += 2; | |||
5319 | } | |||
5320 | ||||
5321 | // If there's an odd number of 64-bit lookup tables, fill the high 64 bits | |||
5322 | // of the last 128-bit lookup table with zeros. | |||
5323 | if (PairPos == End) { | |||
5324 | Value *ZeroTbl = ConstantAggregateZero::get(TblTy); | |||
5325 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], | |||
5326 | ZeroTbl, Indices, Name)); | |||
5327 | } | |||
5328 | ||||
5329 | Function *TblF; | |||
5330 | TblOps.push_back(IndexOp); | |||
5331 | TblF = CGF.CGM.getIntrinsic(IntID, ResTy); | |||
5332 | ||||
5333 | return CGF.EmitNeonCall(TblF, TblOps, Name); | |||
5334 | } | |||
5335 | ||||
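| // Maps the ARM hint builtins onto llvm.arm.hint; for example, | |||
| // __builtin_arm_wfi() (or __wfi()) becomes `call void @llvm.arm.hint(i32 3)`. | |||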
5336 | Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { | |||
5337 | unsigned Value; | |||
5338 | switch (BuiltinID) { | |||
5339 | default: | |||
5340 | return nullptr; | |||
5341 | case ARM::BI__builtin_arm_nop: | |||
5342 | Value = 0; | |||
5343 | break; | |||
5344 | case ARM::BI__builtin_arm_yield: | |||
5345 | case ARM::BI__yield: | |||
5346 | Value = 1; | |||
5347 | break; | |||
5348 | case ARM::BI__builtin_arm_wfe: | |||
5349 | case ARM::BI__wfe: | |||
5350 | Value = 2; | |||
5351 | break; | |||
5352 | case ARM::BI__builtin_arm_wfi: | |||
5353 | case ARM::BI__wfi: | |||
5354 | Value = 3; | |||
5355 | break; | |||
5356 | case ARM::BI__builtin_arm_sev: | |||
5357 | case ARM::BI__sev: | |||
5358 | Value = 4; | |||
5359 | break; | |||
5360 | case ARM::BI__builtin_arm_sevl: | |||
5361 | case ARM::BI__sevl: | |||
5362 | Value = 5; | |||
5363 | break; | |||
5364 | } | |||
5365 | ||||
5366 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint), | |||
5367 | llvm::ConstantInt::get(Int32Ty, Value)); | |||
5368 | } | |||
5369 | ||||
5370 | // Generates the IR for the read/write special register builtin. | |||
5371 | // ValueType is the type of the value that is to be written or read, | |||
5372 | // and RegisterType is the type of the register being written to or read from. | |||
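| // For example, a read such as __builtin_arm_rsr("regname") lowers to | |||
| // roughly `call i32 @llvm.read_register.i32(metadata !"regname")`, where | |||
| // "regname" is just an illustrative special-register string. | |||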
5373 | static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF, | |||
5374 | const CallExpr *E, | |||
5375 | llvm::Type *RegisterType, | |||
5376 | llvm::Type *ValueType, | |||
5377 | bool IsRead, | |||
5378 | StringRef SysReg = "") { | |||
5379 | // The read/write register intrinsics only support 32- and 64-bit operations. | |||
5380 | assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64)) | |||
5381 | && "Unsupported size for register."); | |||
5382 | ||||
5383 | CodeGen::CGBuilderTy &Builder = CGF.Builder; | |||
5384 | CodeGen::CodeGenModule &CGM = CGF.CGM; | |||
5385 | LLVMContext &Context = CGM.getLLVMContext(); | |||
5386 | ||||
5387 | if (SysReg.empty()) { | |||
5388 | const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); | |||
5389 | SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString(); | |||
5390 | } | |||
5391 | ||||
5392 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; | |||
5393 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); | |||
5394 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); | |||
5395 | ||||
5396 | llvm::Type *Types[] = { RegisterType }; | |||
5397 | ||||
5398 | bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); | |||
5399 | assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) | |||
5400 | && "Can't fit 64-bit value in 32-bit register"); | |||
5401 | ||||
5402 | if (IsRead) { | |||
5403 | llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); | |||
5404 | llvm::Value *Call = Builder.CreateCall(F, Metadata); | |||
5405 | ||||
5406 | if (MixedTypes) | |||
5407 | // Read into 64 bit register and then truncate result to 32 bit. | |||
5408 | return Builder.CreateTrunc(Call, ValueType); | |||
5409 | ||||
5410 | if (ValueType->isPointerTy()) | |||
5411 | // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*). | |||
5412 | return Builder.CreateIntToPtr(Call, ValueType); | |||
5413 | ||||
5414 | return Call; | |||
5415 | } | |||
5416 | ||||
5417 | llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); | |||
5418 | llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); | |||
5419 | if (MixedTypes) { | |||
5420 | // Extend 32 bit write value to 64 bit to pass to write. | |||
5421 | ArgValue = Builder.CreateZExt(ArgValue, RegisterType); | |||
5422 | return Builder.CreateCall(F, { Metadata, ArgValue }); | |||
5423 | } | |||
5424 | ||||
5425 | if (ValueType->isPointerTy()) { | |||
5426 | // Have a VoidPtrTy ArgValue but need to pass an i32/i64 to write_register. | |||
5427 | ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType); | |||
5428 | return Builder.CreateCall(F, { Metadata, ArgValue }); | |||
5429 | } | |||
5430 | ||||
5431 | return Builder.CreateCall(F, { Metadata, ArgValue }); | |||
5432 | } | |||
5433 | ||||
5434 | /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra | |||
5435 | /// argument that specifies the vector type. | |||
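| /// For example, overloaded builtins such as __builtin_neon_vld1_v carry a | |||
| /// trailing constant describing the vector type, whereas the fully typed | |||
| /// lane and SHA1 builtins listed below do not. | |||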
5436 | static bool HasExtraNeonArgument(unsigned BuiltinID) { | |||
5437 | switch (BuiltinID) { | |||
5438 | default: break; | |||
5439 | case NEON::BI__builtin_neon_vget_lane_i8: | |||
5440 | case NEON::BI__builtin_neon_vget_lane_i16: | |||
5441 | case NEON::BI__builtin_neon_vget_lane_i32: | |||
5442 | case NEON::BI__builtin_neon_vget_lane_i64: | |||
5443 | case NEON::BI__builtin_neon_vget_lane_f32: | |||
5444 | case NEON::BI__builtin_neon_vgetq_lane_i8: | |||
5445 | case NEON::BI__builtin_neon_vgetq_lane_i16: | |||
5446 | case NEON::BI__builtin_neon_vgetq_lane_i32: | |||
5447 | case NEON::BI__builtin_neon_vgetq_lane_i64: | |||
5448 | case NEON::BI__builtin_neon_vgetq_lane_f32: | |||
5449 | case NEON::BI__builtin_neon_vset_lane_i8: | |||
5450 | case NEON::BI__builtin_neon_vset_lane_i16: | |||
5451 | case NEON::BI__builtin_neon_vset_lane_i32: | |||
5452 | case NEON::BI__builtin_neon_vset_lane_i64: | |||
5453 | case NEON::BI__builtin_neon_vset_lane_f32: | |||
5454 | case NEON::BI__builtin_neon_vsetq_lane_i8: | |||
5455 | case NEON::BI__builtin_neon_vsetq_lane_i16: | |||
5456 | case NEON::BI__builtin_neon_vsetq_lane_i32: | |||
5457 | case NEON::BI__builtin_neon_vsetq_lane_i64: | |||
5458 | case NEON::BI__builtin_neon_vsetq_lane_f32: | |||
5459 | case NEON::BI__builtin_neon_vsha1h_u32: | |||
5460 | case NEON::BI__builtin_neon_vsha1cq_u32: | |||
5461 | case NEON::BI__builtin_neon_vsha1pq_u32: | |||
5462 | case NEON::BI__builtin_neon_vsha1mq_u32: | |||
5463 | case clang::ARM::BI_MoveToCoprocessor: | |||
5464 | case clang::ARM::BI_MoveToCoprocessor2: | |||
5465 | return false; | |||
5466 | } | |||
5467 | return true; | |||
5468 | } | |||
5469 | ||||
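| // For example, __iso_volatile_load32(p) becomes a plain volatile i32 load | |||
| // with alignment equal to the access size; the access is volatile but not | |||
| // atomic, so no ordering or fence is implied. | |||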
5470 | Value *CodeGenFunction::EmitISOVolatileLoad(const CallExpr *E) { | |||
5471 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
5472 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); | |||
5473 | CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy); | |||
5474 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), | |||
5475 | LoadSize.getQuantity() * 8); | |||
5476 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); | |||
5477 | llvm::LoadInst *Load = | |||
5478 | Builder.CreateAlignedLoad(Ptr, LoadSize); | |||
5479 | Load->setVolatile(true); | |||
5480 | return Load; | |||
5481 | } | |||
5482 | ||||
5483 | Value *CodeGenFunction::EmitISOVolatileStore(const CallExpr *E) { | |||
5484 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
5485 | Value *Value = EmitScalarExpr(E->getArg(1)); | |||
5486 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); | |||
5487 | CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); | |||
5488 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), | |||
5489 | StoreSize.getQuantity() * 8); | |||
5490 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); | |||
5491 | llvm::StoreInst *Store = | |||
5492 | Builder.CreateAlignedStore(Value, Ptr, | |||
5493 | StoreSize); | |||
5494 | Store->setVolatile(true); | |||
5495 | return Store; | |||
5496 | } | |||
5497 | ||||
5498 | Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, | |||
5499 | const CallExpr *E, | |||
5500 | llvm::Triple::ArchType Arch) { | |||
5501 | if (auto Hint = GetValueForARMHint(BuiltinID)) | |||
5502 | return Hint; | |||
5503 | ||||
5504 | if (BuiltinID == ARM::BI__emit) { | |||
5505 | bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb; | |||
5506 | llvm::FunctionType *FTy = | |||
5507 | llvm::FunctionType::get(VoidTy, /*Variadic=*/false); | |||
5508 | ||||
5509 | APSInt Value; | |||
5510 | if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext())) | |||
5511 | llvm_unreachable("Sema will ensure that the parameter is constant"); | |||
5512 | ||||
5513 | uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue(); | |||
5514 | ||||
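| // e.g. in Thumb mode __emit(0xbf00) emits the instruction word 0xbf00 (a | |||
| // Thumb NOP encoding) via a ".inst.n" inline-asm directive; the value is | |||
| // truncated to 16 bits for Thumb and 32 bits for ARM, per the zextOrTrunc above. | |||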
5515 | llvm::InlineAsm *Emit = | |||
5516 | IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "", | |||
5517 | /*SideEffects=*/true) | |||
5518 | : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", | |||
5519 | /*SideEffects=*/true); | |||
5520 | ||||
5521 | return Builder.CreateCall(Emit); | |||
5522 | } | |||
5523 | ||||
5524 | if (BuiltinID == ARM::BI__builtin_arm_dbg) { | |||
5525 | Value *Option = EmitScalarExpr(E->getArg(0)); | |||
5526 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option); | |||
5527 | } | |||
5528 | ||||
5529 | if (BuiltinID == ARM::BI__builtin_arm_prefetch) { | |||
5530 | Value *Address = EmitScalarExpr(E->getArg(0)); | |||
5531 | Value *RW = EmitScalarExpr(E->getArg(1)); | |||
5532 | Value *IsData = EmitScalarExpr(E->getArg(2)); | |||
5533 | ||||
5534 | // Locality is not supported on the ARM target. | |||
5535 | Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); | |||
5536 | ||||
5537 | Value *F = CGM.getIntrinsic(Intrinsic::prefetch); | |||
5538 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); | |||
5539 | } | |||
5540 | ||||
5541 | if (BuiltinID == ARM::BI__builtin_arm_rbit) { | |||
5542 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); | |||
5543 | return Builder.CreateCall( | |||
5544 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); | |||
5545 | } | |||
5546 | ||||
5547 | if (BuiltinID == ARM::BI__clear_cache) { | |||
5548 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); | |||
5549 | const FunctionDecl *FD = E->getDirectCallee(); | |||
5550 | Value *Ops[2]; | |||
5551 | for (unsigned i = 0; i < 2; i++) | |||
5552 | Ops[i] = EmitScalarExpr(E->getArg(i)); | |||
5553 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); | |||
5554 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); | |||
5555 | StringRef Name = FD->getName(); | |||
5556 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); | |||
5557 | } | |||
5558 | ||||
5559 | if (BuiltinID == ARM::BI__builtin_arm_mcrr || | |||
5560 | BuiltinID == ARM::BI__builtin_arm_mcrr2) { | |||
5561 | Function *F; | |||
5562 | ||||
5563 | switch (BuiltinID) { | |||
5564 | default: llvm_unreachable("unexpected builtin"); | |||
5565 | case ARM::BI__builtin_arm_mcrr: | |||
5566 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr); | |||
5567 | break; | |||
5568 | case ARM::BI__builtin_arm_mcrr2: | |||
5569 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); | |||
5570 | break; | |||
5571 | } | |||
5572 | ||||
5573 | // The MCRR{2} instruction has 5 operands, but | |||
5574 | // the intrinsic has only 4 because Rt and Rt2 | |||
5575 | // are represented as a single unsigned 64-bit | |||
5576 | // integer in the intrinsic definition, while | |||
5577 | // internally they are represented as two | |||
5578 | // 32-bit integers. | |||
5579 | ||||
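| // e.g. __builtin_arm_mcrr(coproc, opc1, value64, crm) is split below so | |||
| // that Rt receives the low 32 bits of value64 and Rt2 the high 32 bits. | |||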
5580 | Value *Coproc = EmitScalarExpr(E->getArg(0)); | |||
5581 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); | |||
5582 | Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); | |||
5583 | Value *CRm = EmitScalarExpr(E->getArg(3)); | |||
5584 | ||||
5585 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); | |||
5586 | Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty); | |||
5587 | Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1); | |||
5588 | Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty); | |||
5589 | ||||
5590 | return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm}); | |||
5591 | } | |||
5592 | ||||
5593 | if (BuiltinID == ARM::BI__builtin_arm_mrrc || | |||
5594 | BuiltinID == ARM::BI__builtin_arm_mrrc2) { | |||
5595 | Function *F; | |||
5596 | ||||
5597 | switch (BuiltinID) { | |||
5598 | default: llvm_unreachable("unexpected builtin"); | |||
5599 | case ARM::BI__builtin_arm_mrrc: | |||
5600 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc); | |||
5601 | break; | |||
5602 | case ARM::BI__builtin_arm_mrrc2: | |||
5603 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); | |||
5604 | break; | |||
5605 | } | |||
5606 | ||||
5607 | Value *Coproc = EmitScalarExpr(E->getArg(0)); | |||
5608 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); | |||
5609 | Value *CRm = EmitScalarExpr(E->getArg(2)); | |||
5610 | Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm}); | |||
5611 | ||||
5612 | // Returns an unsigned 64 bit integer, represented | |||
5613 | // as two 32 bit integers. | |||
5614 | ||||
5615 | Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1); | |||
5616 | Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0); | |||
5617 | Rt = Builder.CreateZExt(Rt, Int64Ty); | |||
5618 | Rt1 = Builder.CreateZExt(Rt1, Int64Ty); | |||
5619 | ||||
5620 | Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32); | |||
5621 | RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true); | |||
5622 | RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1); | |||
5623 | ||||
5624 | return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); | |||
5625 | } | |||
5626 | ||||
5627 | if (BuiltinID == ARM::BI__builtin_arm_ldrexd || | |||
5628 | ((BuiltinID == ARM::BI__builtin_arm_ldrex || | |||
5629 | BuiltinID == ARM::BI__builtin_arm_ldaex) && | |||
5630 | getContext().getTypeSize(E->getType()) == 64) || | |||
5631 | BuiltinID == ARM::BI__ldrexd) { | |||
5632 | Function *F; | |||
5633 | ||||
5634 | switch (BuiltinID) { | |||
5635 | default: llvm_unreachable("unexpected builtin"); | |||
5636 | case ARM::BI__builtin_arm_ldaex: | |||
5637 | F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); | |||
5638 | break; | |||
5639 | case ARM::BI__builtin_arm_ldrexd: | |||
5640 | case ARM::BI__builtin_arm_ldrex: | |||
5641 | case ARM::BI__ldrexd: | |||
5642 | F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); | |||
5643 | break; | |||
5644 | } | |||
5645 | ||||
5646 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); | |||
5647 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), | |||
5648 | "ldrexd"); | |||
5649 | ||||
5650 | Value *Val0 = Builder.CreateExtractValue(Val, 1); | |||
5651 | Value *Val1 = Builder.CreateExtractValue(Val, 0); | |||
5652 | Val0 = Builder.CreateZExt(Val0, Int64Ty); | |||
5653 | Val1 = Builder.CreateZExt(Val1, Int64Ty); | |||
5654 | ||||
5655 | Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); | |||
5656 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); | |||
5657 | Val = Builder.CreateOr(Val, Val1); | |||
5658 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); | |||
5659 | } | |||
5660 | ||||
5661 | if (BuiltinID == ARM::BI__builtin_arm_ldrex || | |||
5662 | BuiltinID == ARM::BI__builtin_arm_ldaex) { | |||
5663 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); | |||
5664 | ||||
5665 | QualType Ty = E->getType(); | |||
5666 | llvm::Type *RealResTy = ConvertType(Ty); | |||
5667 | llvm::Type *PtrTy = llvm::IntegerType::get( | |||
5668 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); | |||
5669 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); | |||
5670 | ||||
5671 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex | |||
5672 | ? Intrinsic::arm_ldaex | |||
5673 | : Intrinsic::arm_ldrex, | |||
5674 | PtrTy); | |||
5675 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex"); | |||
5676 | ||||
5677 | if (RealResTy->isPointerTy()) | |||
5678 | return Builder.CreateIntToPtr(Val, RealResTy); | |||
5679 | else { | |||
5680 | llvm::Type *IntResTy = llvm::IntegerType::get( | |||
5681 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); | |||
5682 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); | |||
5683 | return Builder.CreateBitCast(Val, RealResTy); | |||
5684 | } | |||
5685 | } | |||
5686 | ||||
5687 | if (BuiltinID == ARM::BI__builtin_arm_strexd || | |||
5688 | ((BuiltinID == ARM::BI__builtin_arm_stlex || | |||
5689 | BuiltinID == ARM::BI__builtin_arm_strex) && | |||
5690 | getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { | |||
5691 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex | |||
5692 | ? Intrinsic::arm_stlexd | |||
5693 | : Intrinsic::arm_strexd); | |||
5694 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty); | |||
5695 | ||||
5696 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); | |||
5697 | Value *Val = EmitScalarExpr(E->getArg(0)); | |||
5698 | Builder.CreateStore(Val, Tmp); | |||
5699 | ||||
5700 | Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy)); | |||
5701 | Val = Builder.CreateLoad(LdPtr); | |||
5702 | ||||
5703 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); | |||
5704 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); | |||
5705 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); | |||
5706 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); | |||
5707 | } | |||
5708 | ||||
5709 | if (BuiltinID == ARM::BI__builtin_arm_strex || | |||
5710 | BuiltinID == ARM::BI__builtin_arm_stlex) { | |||
5711 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); | |||
5712 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); | |||
5713 | ||||
5714 | QualType Ty = E->getArg(0)->getType(); | |||
5715 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), | |||
5716 | getContext().getTypeSize(Ty)); | |||
5717 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); | |||
5718 | ||||
5719 | if (StoreVal->getType()->isPointerTy()) | |||
5720 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty); | |||
5721 | else { | |||
5722 | llvm::Type *IntTy = llvm::IntegerType::get( | |||
5723 | getLLVMContext(), | |||
5724 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); | |||
5725 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); | |||
5726 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty); | |||
5727 | } | |||
5728 | ||||
5729 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex | |||
5730 | ? Intrinsic::arm_stlex | |||
5731 | : Intrinsic::arm_strex, | |||
5732 | StoreAddr->getType()); | |||
5733 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex"); | |||
5734 | } | |||
5735 | ||||
5736 | switch (BuiltinID) { | |||
5737 | case ARM::BI__iso_volatile_load8: | |||
5738 | case ARM::BI__iso_volatile_load16: | |||
5739 | case ARM::BI__iso_volatile_load32: | |||
5740 | case ARM::BI__iso_volatile_load64: | |||
5741 | return EmitISOVolatileLoad(E); | |||
5742 | case ARM::BI__iso_volatile_store8: | |||
5743 | case ARM::BI__iso_volatile_store16: | |||
5744 | case ARM::BI__iso_volatile_store32: | |||
5745 | case ARM::BI__iso_volatile_store64: | |||
5746 | return EmitISOVolatileStore(E); | |||
5747 | } | |||
5748 | ||||
5749 | if (BuiltinID == ARM::BI__builtin_arm_clrex) { | |||
5750 | Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); | |||
5751 | return Builder.CreateCall(F); | |||
5752 | } | |||
5753 | ||||
5754 | // CRC32 | |||
5755 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; | |||
5756 | switch (BuiltinID) { | |||
5757 | case ARM::BI__builtin_arm_crc32b: | |||
5758 | CRCIntrinsicID = Intrinsic::arm_crc32b; break; | |||
5759 | case ARM::BI__builtin_arm_crc32cb: | |||
5760 | CRCIntrinsicID = Intrinsic::arm_crc32cb; break; | |||
5761 | case ARM::BI__builtin_arm_crc32h: | |||
5762 | CRCIntrinsicID = Intrinsic::arm_crc32h; break; | |||
5763 | case ARM::BI__builtin_arm_crc32ch: | |||
5764 | CRCIntrinsicID = Intrinsic::arm_crc32ch; break; | |||
5765 | case ARM::BI__builtin_arm_crc32w: | |||
5766 | case ARM::BI__builtin_arm_crc32d: | |||
5767 | CRCIntrinsicID = Intrinsic::arm_crc32w; break; | |||
5768 | case ARM::BI__builtin_arm_crc32cw: | |||
5769 | case ARM::BI__builtin_arm_crc32cd: | |||
5770 | CRCIntrinsicID = Intrinsic::arm_crc32cw; break; | |||
5771 | } | |||
5772 | ||||
5773 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { | |||
5774 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); | |||
5775 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); | |||
5776 | ||||
5777 | // The crc32{c,}d intrinsics are implemented as two calls to the crc32{c,}w | |||
5778 | // intrinsics, hence we need different codegen for these cases. | |||
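| // i.e. (informally) crc32d(init, x) == crc32w(crc32w(init, lo32(x)), hi32(x)); | |||
| // Arg1a and Arg1b below are those two 32-bit halves. | |||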
5779 | if (BuiltinID == ARM::BI__builtin_arm_crc32d || | |||
5780 | BuiltinID == ARM::BI__builtin_arm_crc32cd) { | |||
5781 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); | |||
5782 | Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty); | |||
5783 | Value *Arg1b = Builder.CreateLShr(Arg1, C1); | |||
5784 | Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); | |||
5785 | ||||
5786 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); | |||
5787 | Value *Res = Builder.CreateCall(F, {Arg0, Arg1a}); | |||
5788 | return Builder.CreateCall(F, {Res, Arg1b}); | |||
5789 | } else { | |||
5790 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); | |||
5791 | ||||
5792 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); | |||
5793 | return Builder.CreateCall(F, {Arg0, Arg1}); | |||
5794 | } | |||
5795 | } | |||
5796 | ||||
5797 | if (BuiltinID == ARM::BI__builtin_arm_rsr || | |||
5798 | BuiltinID == ARM::BI__builtin_arm_rsr64 || | |||
5799 | BuiltinID == ARM::BI__builtin_arm_rsrp || | |||
5800 | BuiltinID == ARM::BI__builtin_arm_wsr || | |||
5801 | BuiltinID == ARM::BI__builtin_arm_wsr64 || | |||
5802 | BuiltinID == ARM::BI__builtin_arm_wsrp) { | |||
5803 | ||||
5804 | bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr || | |||
5805 | BuiltinID == ARM::BI__builtin_arm_rsr64 || | |||
5806 | BuiltinID == ARM::BI__builtin_arm_rsrp; | |||
5807 | ||||
5808 | bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp || | |||
5809 | BuiltinID == ARM::BI__builtin_arm_wsrp; | |||
5810 | ||||
5811 | bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 || | |||
5812 | BuiltinID == ARM::BI__builtin_arm_wsr64; | |||
5813 | ||||
5814 | llvm::Type *ValueType; | |||
5815 | llvm::Type *RegisterType; | |||
5816 | if (IsPointerBuiltin) { | |||
5817 | ValueType = VoidPtrTy; | |||
5818 | RegisterType = Int32Ty; | |||
5819 | } else if (Is64Bit) { | |||
5820 | ValueType = RegisterType = Int64Ty; | |||
5821 | } else { | |||
5822 | ValueType = RegisterType = Int32Ty; | |||
5823 | } | |||
5824 | ||||
5825 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead); | |||
5826 | } | |||
5827 | ||||
5828 | // Find out if any arguments are required to be integer constant | |||
5829 | // expressions. | |||
5830 | unsigned ICEArguments = 0; | |||
5831 | ASTContext::GetBuiltinTypeError Error; | |||
5832 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); | |||
5833 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); | |||
5834 | ||||
5835 | auto getAlignmentValue32 = [&](Address addr) -> Value* { | |||
5836 | return Builder.getInt32(addr.getAlignment().getQuantity()); | |||
5837 | }; | |||
5838 | ||||
5839 | Address PtrOp0 = Address::invalid(); | |||
5840 | Address PtrOp1 = Address::invalid(); | |||
5841 | SmallVector<Value*, 4> Ops; | |||
5842 | bool HasExtraArg = HasExtraNeonArgument(BuiltinID); | |||
5843 | unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); | |||
5844 | for (unsigned i = 0, e = NumArgs; i != e; i++) { | |||
5845 | if (i == 0) { | |||
5846 | switch (BuiltinID) { | |||
5847 | case NEON::BI__builtin_neon_vld1_v: | |||
5848 | case NEON::BI__builtin_neon_vld1q_v: | |||
5849 | case NEON::BI__builtin_neon_vld1q_lane_v: | |||
5850 | case NEON::BI__builtin_neon_vld1_lane_v: | |||
5851 | case NEON::BI__builtin_neon_vld1_dup_v: | |||
5852 | case NEON::BI__builtin_neon_vld1q_dup_v: | |||
5853 | case NEON::BI__builtin_neon_vst1_v: | |||
5854 | case NEON::BI__builtin_neon_vst1q_v: | |||
5855 | case NEON::BI__builtin_neon_vst1q_lane_v: | |||
5856 | case NEON::BI__builtin_neon_vst1_lane_v: | |||
5857 | case NEON::BI__builtin_neon_vst2_v: | |||
5858 | case NEON::BI__builtin_neon_vst2q_v: | |||
5859 | case NEON::BI__builtin_neon_vst2_lane_v: | |||
5860 | case NEON::BI__builtin_neon_vst2q_lane_v: | |||
5861 | case NEON::BI__builtin_neon_vst3_v: | |||
5862 | case NEON::BI__builtin_neon_vst3q_v: | |||
5863 | case NEON::BI__builtin_neon_vst3_lane_v: | |||
5864 | case NEON::BI__builtin_neon_vst3q_lane_v: | |||
5865 | case NEON::BI__builtin_neon_vst4_v: | |||
5866 | case NEON::BI__builtin_neon_vst4q_v: | |||
5867 | case NEON::BI__builtin_neon_vst4_lane_v: | |||
5868 | case NEON::BI__builtin_neon_vst4q_lane_v: | |||
5869 | // Get the alignment for the argument in addition to the value; | |||
5870 | // we'll use it later. | |||
5871 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); | |||
5872 | Ops.push_back(PtrOp0.getPointer()); | |||
5873 | continue; | |||
5874 | } | |||
5875 | } | |||
5876 | if (i == 1) { | |||
5877 | switch (BuiltinID) { | |||
5878 | case NEON::BI__builtin_neon_vld2_v: | |||
5879 | case NEON::BI__builtin_neon_vld2q_v: | |||
5880 | case NEON::BI__builtin_neon_vld3_v: | |||
5881 | case NEON::BI__builtin_neon_vld3q_v: | |||
5882 | case NEON::BI__builtin_neon_vld4_v: | |||
5883 | case NEON::BI__builtin_neon_vld4q_v: | |||
5884 | case NEON::BI__builtin_neon_vld2_lane_v: | |||
5885 | case NEON::BI__builtin_neon_vld2q_lane_v: | |||
5886 | case NEON::BI__builtin_neon_vld3_lane_v: | |||
5887 | case NEON::BI__builtin_neon_vld3q_lane_v: | |||
5888 | case NEON::BI__builtin_neon_vld4_lane_v: | |||
5889 | case NEON::BI__builtin_neon_vld4q_lane_v: | |||
5890 | case NEON::BI__builtin_neon_vld2_dup_v: | |||
5891 | case NEON::BI__builtin_neon_vld2q_dup_v: | |||
5892 | case NEON::BI__builtin_neon_vld3_dup_v: | |||
5893 | case NEON::BI__builtin_neon_vld3q_dup_v: | |||
5894 | case NEON::BI__builtin_neon_vld4_dup_v: | |||
5895 | case NEON::BI__builtin_neon_vld4q_dup_v: | |||
5896 | // Get the alignment for the argument in addition to the value; | |||
5897 | // we'll use it later. | |||
5898 | PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); | |||
5899 | Ops.push_back(PtrOp1.getPointer()); | |||
5900 | continue; | |||
5901 | } | |||
5902 | } | |||
5903 | ||||
5904 | if ((ICEArguments & (1 << i)) == 0) { | |||
5905 | Ops.push_back(EmitScalarExpr(E->getArg(i))); | |||
5906 | } else { | |||
5907 | // If this is required to be a constant, constant fold it so that we know | |||
5908 | // that the generated intrinsic gets a ConstantInt. | |||
5909 | llvm::APSInt Result; | |||
5910 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); | |||
5911 | assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst; | |||
5912 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); | |||
5913 | } | |||
5914 | } | |||
5915 | ||||
5916 | switch (BuiltinID) { | |||
5917 | default: break; | |||
5918 | ||||
5919 | case NEON::BI__builtin_neon_vget_lane_i8: | |||
5920 | case NEON::BI__builtin_neon_vget_lane_i16: | |||
5921 | case NEON::BI__builtin_neon_vget_lane_i32: | |||
5922 | case NEON::BI__builtin_neon_vget_lane_i64: | |||
5923 | case NEON::BI__builtin_neon_vget_lane_f32: | |||
5924 | case NEON::BI__builtin_neon_vgetq_lane_i8: | |||
5925 | case NEON::BI__builtin_neon_vgetq_lane_i16: | |||
5926 | case NEON::BI__builtin_neon_vgetq_lane_i32: | |||
5927 | case NEON::BI__builtin_neon_vgetq_lane_i64: | |||
5928 | case NEON::BI__builtin_neon_vgetq_lane_f32: | |||
5929 | return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane"); | |||
5930 | ||||
5931 | case NEON::BI__builtin_neon_vrndns_f32: { | |||
5932 | Value *Arg = EmitScalarExpr(E->getArg(0)); | |||
5933 | llvm::Type *Tys[] = {Arg->getType()}; | |||
5934 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys); | |||
5935 | return Builder.CreateCall(F, {Arg}, "vrndn"); } | |||
5936 | ||||
5937 | case NEON::BI__builtin_neon_vset_lane_i8: | |||
5938 | case NEON::BI__builtin_neon_vset_lane_i16: | |||
5939 | case NEON::BI__builtin_neon_vset_lane_i32: | |||
5940 | case NEON::BI__builtin_neon_vset_lane_i64: | |||
5941 | case NEON::BI__builtin_neon_vset_lane_f32: | |||
5942 | case NEON::BI__builtin_neon_vsetq_lane_i8: | |||
5943 | case NEON::BI__builtin_neon_vsetq_lane_i16: | |||
5944 | case NEON::BI__builtin_neon_vsetq_lane_i32: | |||
5945 | case NEON::BI__builtin_neon_vsetq_lane_i64: | |||
5946 | case NEON::BI__builtin_neon_vsetq_lane_f32: | |||
5947 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); | |||
5948 | ||||
5949 | case NEON::BI__builtin_neon_vsha1h_u32: | |||
5950 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops, | |||
5951 | "vsha1h"); | |||
5952 | case NEON::BI__builtin_neon_vsha1cq_u32: | |||
5953 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops, | |||
5954 | "vsha1h"); | |||
5955 | case NEON::BI__builtin_neon_vsha1pq_u32: | |||
5956 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops, | |||
5957 | "vsha1h"); | |||
5958 | case NEON::BI__builtin_neon_vsha1mq_u32: | |||
5959 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops, | |||
5960 | "vsha1h"); | |||
5961 | ||||
5962 | // The ARM _MoveToCoprocessor builtins put the input register value as | |||
5963 | // the first argument, but the LLVM intrinsic expects it as the third one. | |||
5964 | case ARM::BI_MoveToCoprocessor: | |||
5965 | case ARM::BI_MoveToCoprocessor2: { | |||
5966 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ? | |||
5967 | Intrinsic::arm_mcr : Intrinsic::arm_mcr2); | |||
5968 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0], | |||
5969 | Ops[3], Ops[4], Ops[5]}); | |||
5970 | } | |||
5971 | case ARM::BI_BitScanForward: | |||
5972 | case ARM::BI_BitScanForward64: | |||
5973 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E); | |||
5974 | case ARM::BI_BitScanReverse: | |||
5975 | case ARM::BI_BitScanReverse64: | |||
5976 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E); | |||
5977 | ||||
5978 | case ARM::BI_InterlockedAnd64: | |||
5979 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E); | |||
5980 | case ARM::BI_InterlockedExchange64: | |||
5981 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E); | |||
5982 | case ARM::BI_InterlockedExchangeAdd64: | |||
5983 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E); | |||
5984 | case ARM::BI_InterlockedExchangeSub64: | |||
5985 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E); | |||
5986 | case ARM::BI_InterlockedOr64: | |||
5987 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E); | |||
5988 | case ARM::BI_InterlockedXor64: | |||
5989 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E); | |||
5990 | case ARM::BI_InterlockedDecrement64: | |||
5991 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E); | |||
5992 | case ARM::BI_InterlockedIncrement64: | |||
5993 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E); | |||
5994 | } | |||
5995 | ||||
5996 | // Get the last argument, which specifies the vector type. | |||
5997 | assert(HasExtraArg); | |||
5998 | llvm::APSInt Result; | |||
5999 | const Expr *Arg = E->getArg(E->getNumArgs()-1); | |||
6000 | if (!Arg->isIntegerConstantExpr(Result, getContext())) | |||
6001 | return nullptr; | |||
6002 | ||||
6003 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || | |||
6004 | BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { | |||
6005 | // Determine the overloaded type of this builtin. | |||
6006 | llvm::Type *Ty; | |||
6007 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) | |||
6008 | Ty = FloatTy; | |||
6009 | else | |||
6010 | Ty = DoubleTy; | |||
6011 | ||||
6012 | // Determine whether this is an unsigned conversion or not. | |||
6013 | bool usgn = Result.getZExtValue() == 1; | |||
6014 | unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; | |||
6015 | ||||
6016 | // Call the appropriate intrinsic. | |||
6017 | Function *F = CGM.getIntrinsic(Int, Ty); | |||
6018 | return Builder.CreateCall(F, Ops, "vcvtr"); | |||
6019 | } | |||
6020 | ||||
6021 | // Determine the type of this overloaded NEON intrinsic. | |||
6022 | NeonTypeFlags Type(Result.getZExtValue()); | |||
6023 | bool usgn = Type.isUnsigned(); | |||
6024 | bool rightShift = false; | |||
6025 | ||||
6026 | llvm::VectorType *VTy = GetNeonType(this, Type, | |||
6027 | getTarget().hasLegalHalfType()); | |||
6028 | llvm::Type *Ty = VTy; | |||
6029 | if (!Ty) | |||
6030 | return nullptr; | |||
6031 | ||||
6032 | // Many NEON builtins have identical semantics and uses in ARM and | |||
6033 | // AArch64. Emit these in a single function. | |||
6034 | auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap); | |||
6035 | const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap( | |||
6036 | IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted); | |||
6037 | if (Builtin) | |||
6038 | return EmitCommonNeonBuiltinExpr( | |||
6039 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, | |||
6040 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); | |||
6041 | ||||
6042 | unsigned Int; | |||
6043 | switch (BuiltinID) { | |||
6044 | default: return nullptr; | |||
6045 | case NEON::BI__builtin_neon_vld1q_lane_v: | |||
6046 | // Handle 64-bit integer elements as a special case. Use shuffles of | |||
6047 | // one-element vectors to avoid poor code for i64 in the backend. | |||
6048 | if (VTy->getElementType()->isIntegerTy(64)) { | |||
6049 | // Extract the other lane. | |||
6050 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
6051 | uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); | |||
6052 | Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); | |||
6053 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); | |||
6054 | // Load the value as a one-element vector. | |||
6055 | Ty = llvm::VectorType::get(VTy->getElementType(), 1); | |||
6056 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; | |||
6057 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys); | |||
6058 | Value *Align = getAlignmentValue32(PtrOp0); | |||
6059 | Value *Ld = Builder.CreateCall(F, {Ops[0], Align}); | |||
6060 | // Combine them. | |||
6061 | uint32_t Indices[] = {1 - Lane, Lane}; | |||
6062 | SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices); | |||
6063 | return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane"); | |||
6064 | } | |||
6065 | LLVM_FALLTHROUGH; | |||
6066 | case NEON::BI__builtin_neon_vld1_lane_v: { | |||
6067 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
6068 | PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); | |||
6069 | Value *Ld = Builder.CreateLoad(PtrOp0); | |||
6070 | return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane"); | |||
6071 | } | |||
6072 | case NEON::BI__builtin_neon_vqrshrn_n_v: | |||
6073 | Int = | |||
6074 | usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; | |||
6075 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", | |||
6076 | 1, true); | |||
6077 | case NEON::BI__builtin_neon_vqrshrun_n_v: | |||
6078 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), | |||
6079 | Ops, "vqrshrun_n", 1, true); | |||
6080 | case NEON::BI__builtin_neon_vqshrn_n_v: | |||
6081 | Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; | |||
6082 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", | |||
6083 | 1, true); | |||
6084 | case NEON::BI__builtin_neon_vqshrun_n_v: | |||
6085 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), | |||
6086 | Ops, "vqshrun_n", 1, true); | |||
6087 | case NEON::BI__builtin_neon_vrecpe_v: | |||
6088 | case NEON::BI__builtin_neon_vrecpeq_v: | |||
6089 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), | |||
6090 | Ops, "vrecpe"); | |||
6091 | case NEON::BI__builtin_neon_vrshrn_n_v: | |||
6092 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), | |||
6093 | Ops, "vrshrn_n", 1, true); | |||
6094 | case NEON::BI__builtin_neon_vrsra_n_v: | |||
6095 | case NEON::BI__builtin_neon_vrsraq_n_v: | |||
6096 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
6097 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
6098 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); | |||
6099 | Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; | |||
6100 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]}); | |||
6101 | return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); | |||
6102 | case NEON::BI__builtin_neon_vsri_n_v: | |||
6103 | case NEON::BI__builtin_neon_vsriq_n_v: | |||
6104 | rightShift = true; | |||
6105 | LLVM_FALLTHROUGH; | |||
6106 | case NEON::BI__builtin_neon_vsli_n_v: | |||
6107 | case NEON::BI__builtin_neon_vsliq_n_v: | |||
6108 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); | |||
6109 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), | |||
6110 | Ops, "vsli_n"); | |||
6111 | case NEON::BI__builtin_neon_vsra_n_v: | |||
6112 | case NEON::BI__builtin_neon_vsraq_n_v: | |||
6113 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
6114 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); | |||
6115 | return Builder.CreateAdd(Ops[0], Ops[1]); | |||
6116 | case NEON::BI__builtin_neon_vst1q_lane_v: | |||
6117 | // Handle 64-bit integer elements as a special case. Use a shuffle to get | |||
6118 | // a one-element vector and avoid poor code for i64 in the backend. | |||
6119 | if (VTy->getElementType()->isIntegerTy(64)) { | |||
6120 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
6121 | Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2])); | |||
6122 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); | |||
6123 | Ops[2] = getAlignmentValue32(PtrOp0); | |||
6124 | llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; | |||
6125 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, | |||
6126 | Tys), Ops); | |||
6127 | } | |||
6128 | LLVM_FALLTHROUGH; | |||
6129 | case NEON::BI__builtin_neon_vst1_lane_v: { | |||
6130 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
6131 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); | |||
6132 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
6133 | auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty)); | |||
6134 | return St; | |||
6135 | } | |||
6136 | case NEON::BI__builtin_neon_vtbl1_v: | |||
6137 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), | |||
6138 | Ops, "vtbl1"); | |||
6139 | case NEON::BI__builtin_neon_vtbl2_v: | |||
6140 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), | |||
6141 | Ops, "vtbl2"); | |||
6142 | case NEON::BI__builtin_neon_vtbl3_v: | |||
6143 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), | |||
6144 | Ops, "vtbl3"); | |||
6145 | case NEON::BI__builtin_neon_vtbl4_v: | |||
6146 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), | |||
6147 | Ops, "vtbl4"); | |||
6148 | case NEON::BI__builtin_neon_vtbx1_v: | |||
6149 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), | |||
6150 | Ops, "vtbx1"); | |||
6151 | case NEON::BI__builtin_neon_vtbx2_v: | |||
6152 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), | |||
6153 | Ops, "vtbx2"); | |||
6154 | case NEON::BI__builtin_neon_vtbx3_v: | |||
6155 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), | |||
6156 | Ops, "vtbx3"); | |||
6157 | case NEON::BI__builtin_neon_vtbx4_v: | |||
6158 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), | |||
6159 | Ops, "vtbx4"); | |||
6160 | } | |||
6161 | } | |||
6162 | ||||
6163 | static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, | |||
6164 | const CallExpr *E, | |||
6165 | SmallVectorImpl<Value *> &Ops, | |||
6166 | llvm::Triple::ArchType Arch) { | |||
6167 | unsigned int Int = 0; | |||
6168 | const char *s = nullptr; | |||
6169 | ||||
6170 | switch (BuiltinID) { | |||
6171 | default: | |||
6172 | return nullptr; | |||
6173 | case NEON::BI__builtin_neon_vtbl1_v: | |||
6174 | case NEON::BI__builtin_neon_vqtbl1_v: | |||
6175 | case NEON::BI__builtin_neon_vqtbl1q_v: | |||
6176 | case NEON::BI__builtin_neon_vtbl2_v: | |||
6177 | case NEON::BI__builtin_neon_vqtbl2_v: | |||
6178 | case NEON::BI__builtin_neon_vqtbl2q_v: | |||
6179 | case NEON::BI__builtin_neon_vtbl3_v: | |||
6180 | case NEON::BI__builtin_neon_vqtbl3_v: | |||
6181 | case NEON::BI__builtin_neon_vqtbl3q_v: | |||
6182 | case NEON::BI__builtin_neon_vtbl4_v: | |||
6183 | case NEON::BI__builtin_neon_vqtbl4_v: | |||
6184 | case NEON::BI__builtin_neon_vqtbl4q_v: | |||
6185 | break; | |||
6186 | case NEON::BI__builtin_neon_vtbx1_v: | |||
6187 | case NEON::BI__builtin_neon_vqtbx1_v: | |||
6188 | case NEON::BI__builtin_neon_vqtbx1q_v: | |||
6189 | case NEON::BI__builtin_neon_vtbx2_v: | |||
6190 | case NEON::BI__builtin_neon_vqtbx2_v: | |||
6191 | case NEON::BI__builtin_neon_vqtbx2q_v: | |||
6192 | case NEON::BI__builtin_neon_vtbx3_v: | |||
6193 | case NEON::BI__builtin_neon_vqtbx3_v: | |||
6194 | case NEON::BI__builtin_neon_vqtbx3q_v: | |||
6195 | case NEON::BI__builtin_neon_vtbx4_v: | |||
6196 | case NEON::BI__builtin_neon_vqtbx4_v: | |||
6197 | case NEON::BI__builtin_neon_vqtbx4q_v: | |||
6198 | break; | |||
6199 | } | |||
6200 | ||||
6201 | assert(E->getNumArgs() >= 3); | |||
6202 | ||||
6203 | // Get the last argument, which specifies the vector type. | |||
6204 | llvm::APSInt Result; | |||
6205 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); | |||
6206 | if (!Arg->isIntegerConstantExpr(Result, CGF.getContext())) | |||
6207 | return nullptr; | |||
6208 | ||||
6209 | // Determine the type of this overloaded NEON intrinsic. | |||
6210 | NeonTypeFlags Type(Result.getZExtValue()); | |||
6211 | llvm::VectorType *Ty = GetNeonType(&CGF, Type); | |||
6212 | if (!Ty) | |||
6213 | return nullptr; | |||
6214 | ||||
6215 | CodeGen::CGBuilderTy &Builder = CGF.Builder; | |||
6216 | ||||
6217 | // AArch64 scalar builtins are not overloaded: they do not have an extra | |||
6218 | // argument that specifies the vector type, so each case must be handled here. | |||
6219 | switch (BuiltinID) { | |||
6220 | case NEON::BI__builtin_neon_vtbl1_v: { | |||
6221 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr, | |||
6222 | Ops[1], Ty, Intrinsic::aarch64_neon_tbl1, | |||
6223 | "vtbl1"); | |||
6224 | } | |||
6225 | case NEON::BI__builtin_neon_vtbl2_v: { | |||
6226 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr, | |||
6227 | Ops[2], Ty, Intrinsic::aarch64_neon_tbl1, | |||
6228 | "vtbl1"); | |||
6229 | } | |||
6230 | case NEON::BI__builtin_neon_vtbl3_v: { | |||
6231 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr, | |||
6232 | Ops[3], Ty, Intrinsic::aarch64_neon_tbl2, | |||
6233 | "vtbl2"); | |||
6234 | } | |||
6235 | case NEON::BI__builtin_neon_vtbl4_v: { | |||
6236 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr, | |||
6237 | Ops[4], Ty, Intrinsic::aarch64_neon_tbl2, | |||
6238 | "vtbl2"); | |||
6239 | } | |||
6240 | case NEON::BI__builtin_neon_vtbx1_v: { | |||
6241 | Value *TblRes = | |||
6242 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2], | |||
6243 | Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1"); | |||
6244 | ||||
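| // vtbx1 must leave a destination lane unchanged when its index is >= 8, | |||
| // while the zero-padded TBL above yields 0 for such lanes, so out-of-range | |||
| // indices are detected here and the original Ops[0] elements kept instead. | |||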
6245 | llvm::Constant *EightV = ConstantInt::get(Ty, 8); | |||
6246 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV); | |||
6247 | CmpRes = Builder.CreateSExt(CmpRes, Ty); | |||
6248 | ||||
6249 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); | |||
6250 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); | |||
6251 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); | |||
6252 | } | |||
6253 | case NEON::BI__builtin_neon_vtbx2_v: { | |||
6254 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0], | |||
6255 | Ops[3], Ty, Intrinsic::aarch64_neon_tbx1, | |||
6256 | "vtbx1"); | |||
6257 | } | |||
6258 | case NEON::BI__builtin_neon_vtbx3_v: { | |||
6259 | Value *TblRes = | |||
6260 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4], | |||
6261 | Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); | |||
6262 | ||||
6263 | llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); | |||
6264 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], | |||
6265 | TwentyFourV); | |||
6266 | CmpRes = Builder.CreateSExt(CmpRes, Ty); | |||
6267 | ||||
6268 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); | |||
6269 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); | |||
6270 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); | |||
6271 | } | |||
6272 | case NEON::BI__builtin_neon_vtbx4_v: { | |||
6273 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0], | |||
6274 | Ops[5], Ty, Intrinsic::aarch64_neon_tbx2, | |||
6275 | "vtbx2"); | |||
6276 | } | |||
6277 | case NEON::BI__builtin_neon_vqtbl1_v: | |||
6278 | case NEON::BI__builtin_neon_vqtbl1q_v: | |||
6279 | Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; | |||
6280 | case NEON::BI__builtin_neon_vqtbl2_v: | |||
6281 | case NEON::BI__builtin_neon_vqtbl2q_v: | |||
6282 | Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; | |||
6283 | case NEON::BI__builtin_neon_vqtbl3_v: | |||
6284 | case NEON::BI__builtin_neon_vqtbl3q_v: | |||
6285 | Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; | |||
6286 | case NEON::BI__builtin_neon_vqtbl4_v: | |||
6287 | case NEON::BI__builtin_neon_vqtbl4q_v: | |||
6288 | Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; | |||
6289 | case NEON::BI__builtin_neon_vqtbx1_v: | |||
6290 | case NEON::BI__builtin_neon_vqtbx1q_v: | |||
6291 | Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; | |||
6292 | case NEON::BI__builtin_neon_vqtbx2_v: | |||
6293 | case NEON::BI__builtin_neon_vqtbx2q_v: | |||
6294 | Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; | |||
6295 | case NEON::BI__builtin_neon_vqtbx3_v: | |||
6296 | case NEON::BI__builtin_neon_vqtbx3q_v: | |||
6297 | Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; | |||
6298 | case NEON::BI__builtin_neon_vqtbx4_v: | |||
6299 | case NEON::BI__builtin_neon_vqtbx4q_v: | |||
6300 | Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; | |||
6302 | } | |||
6303 | ||||
6304 | if (!Int) | |||
6305 | return nullptr; | |||
6306 | ||||
6307 | Function *F = CGF.CGM.getIntrinsic(Int, Ty); | |||
6308 | return CGF.EmitNeonCall(F, Ops, s); | |||
6309 | } | |||
6310 | ||||
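| // Wraps a scalar i16 into lane 0 of an undef <4 x i16> vector, e.g. 7 becomes | |||
| // <7, undef, undef, undef>, so a vector intrinsic can be applied to a scalar. | |||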
6311 | Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { | |||
6312 | llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4); | |||
6313 | Op = Builder.CreateBitCast(Op, Int16Ty); | |||
6314 | Value *V = UndefValue::get(VTy); | |||
6315 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); | |||
6316 | Op = Builder.CreateInsertElement(V, Op, CI); | |||
6317 | return Op; | |||
6318 | } | |||
6319 | ||||
6320 | Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, | |||
6321 | const CallExpr *E, | |||
6322 | llvm::Triple::ArchType Arch) { | |||
6323 | unsigned HintID = static_cast<unsigned>(-1); | |||
6324 | switch (BuiltinID) { | |||
6325 | default: break; | |||
6326 | case AArch64::BI__builtin_arm_nop: | |||
6327 | HintID = 0; | |||
6328 | break; | |||
6329 | case AArch64::BI__builtin_arm_yield: | |||
6330 | case AArch64::BI__yield: | |||
6331 | HintID = 1; | |||
6332 | break; | |||
6333 | case AArch64::BI__builtin_arm_wfe: | |||
6334 | case AArch64::BI__wfe: | |||
6335 | HintID = 2; | |||
6336 | break; | |||
6337 | case AArch64::BI__builtin_arm_wfi: | |||
6338 | case AArch64::BI__wfi: | |||
6339 | HintID = 3; | |||
6340 | break; | |||
6341 | case AArch64::BI__builtin_arm_sev: | |||
6342 | case AArch64::BI__sev: | |||
6343 | HintID = 4; | |||
6344 | break; | |||
6345 | case AArch64::BI__builtin_arm_sevl: | |||
6346 | case AArch64::BI__sevl: | |||
6347 | HintID = 5; | |||
6348 | break; | |||
6349 | } | |||
6350 | ||||
6351 | if (HintID != static_cast<unsigned>(-1)) { | |||
6352 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint); | |||
6353 | return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID)); | |||
6354 | } | |||
6355 | ||||
6356 | if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { | |||
6357 | Value *Address = EmitScalarExpr(E->getArg(0)); | |||
6358 | Value *RW = EmitScalarExpr(E->getArg(1)); | |||
6359 | Value *CacheLevel = EmitScalarExpr(E->getArg(2)); | |||
6360 | Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); | |||
6361 | Value *IsData = EmitScalarExpr(E->getArg(4)); | |||
6362 | ||||
6363 | Value *Locality = nullptr; | |||
6364 | if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) { | |||
6365 | // Temporal fetch: convert the ACLE cache level into an llvm.prefetch locality. | |||
6366 | Locality = llvm::ConstantInt::get(Int32Ty, | |||
6367 | -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3); | |||
6368 | } else { | |||
6369 | // Streaming fetch. | |||
6370 | Locality = llvm::ConstantInt::get(Int32Ty, 0); | |||
6371 | } | |||
6372 | ||||
6373 | // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify | |||
6374 | // PLDL3STRM or PLDL2STRM. | |||
6375 | Value *F = CGM.getIntrinsic(Intrinsic::prefetch); | |||
6376 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); | |||
6377 | } | |||
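For reference, the CacheLevel/RetentionPolicy handling above boils down to a small mapping onto llvm.prefetch's locality argument. A hedged sketch of just that mapping (the helper name is made up for illustration):

// __builtin_arm_prefetch cache level: 0 = L1, 1 = L2, 2 = L3.
// llvm.prefetch locality: 3 = keep closest to the core ... 0 = streaming.
static int prefetchLocality(int CacheLevel, int RetentionPolicy) {
  if (RetentionPolicy == 0)
    return 3 - CacheLevel; // temporal: L1 -> 3, L2 -> 2, L3 -> 1
  return 0;                // streaming fetch always maps to locality 0
}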
6378 | ||||
6379 | if (BuiltinID == AArch64::BI__builtin_arm_rbit) { | |||
6380 | assert((getContext().getTypeSize(E->getType()) == 32) && | |||
6381 | "rbit of unusual size!"); | |||
6382 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); | |||
6383 | return Builder.CreateCall( | |||
6384 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); | |||
6385 | } | |||
6386 | if (BuiltinID == AArch64::BI__builtin_arm_rbit64) { | |||
6387 | assert((getContext().getTypeSize(E->getType()) == 64) && | |||
6388 | "rbit of unusual size!"); | |||
6389 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); | |||
6390 | return Builder.CreateCall( | |||
6391 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); | |||
6392 | } | |||
6393 | ||||
6394 | if (BuiltinID == AArch64::BI__clear_cache) { | |||
6395 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); | |||
6396 | const FunctionDecl *FD = E->getDirectCallee(); | |||
6397 | Value *Ops[2]; | |||
6398 | for (unsigned i = 0; i < 2; i++) | |||
6399 | Ops[i] = EmitScalarExpr(E->getArg(i)); | |||
6400 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); | |||
6401 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); | |||
6402 | StringRef Name = FD->getName(); | |||
6403 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); | |||
6404 | } | |||
6405 | ||||
6406 | if ((BuiltinID == AArch64::BI__builtin_arm_ldrex || | |||
6407 | BuiltinID == AArch64::BI__builtin_arm_ldaex) && | |||
6408 | getContext().getTypeSize(E->getType()) == 128) { | |||
6409 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex | |||
6410 | ? Intrinsic::aarch64_ldaxp | |||
6411 | : Intrinsic::aarch64_ldxp); | |||
6412 | ||||
6413 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); | |||
6414 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), | |||
6415 | "ldxp"); | |||
6416 | ||||
6417 | Value *Val0 = Builder.CreateExtractValue(Val, 1); | |||
6418 | Value *Val1 = Builder.CreateExtractValue(Val, 0); | |||
6419 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); | |||
6420 | Val0 = Builder.CreateZExt(Val0, Int128Ty); | |||
6421 | Val1 = Builder.CreateZExt(Val1, Int128Ty); | |||
6422 | ||||
6423 | Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64); | |||
6424 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */); | |||
6425 | Val = Builder.CreateOr(Val, Val1); | |||
6426 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); | |||
6427 | } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex || | |||
6428 | BuiltinID == AArch64::BI__builtin_arm_ldaex) { | |||
6429 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); | |||
6430 | ||||
6431 | QualType Ty = E->getType(); | |||
6432 | llvm::Type *RealResTy = ConvertType(Ty); | |||
6433 | llvm::Type *PtrTy = llvm::IntegerType::get( | |||
6434 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); | |||
6435 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); | |||
6436 | ||||
6437 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex | |||
6438 | ? Intrinsic::aarch64_ldaxr | |||
6439 | : Intrinsic::aarch64_ldxr, | |||
6440 | PtrTy); | |||
6441 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr"); | |||
6442 | ||||
6443 | if (RealResTy->isPointerTy()) | |||
6444 | return Builder.CreateIntToPtr(Val, RealResTy); | |||
6445 | ||||
6446 | llvm::Type *IntResTy = llvm::IntegerType::get( | |||
6447 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); | |||
6448 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); | |||
6449 | return Builder.CreateBitCast(Val, RealResTy); | |||
6450 | } | |||
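The 128-bit path above reassembles the register pair returned by ldxp/ldaxp by zero-extending both halves to i128, shifting the half extracted at index 1 left by 64, and OR-ing in the other half. A standalone sketch of that arithmetic (using the compiler-specific unsigned __int128 type, an assumption made purely for illustration):

#include <cstdint>

// Combine the two 64-bit halves of an exclusive-pair load into one 128-bit
// value; 'Hi' plays the role of the value extracted at index 1 above.
static unsigned __int128 combineExclusivePair(uint64_t Hi, uint64_t Lo) {
  return (static_cast<unsigned __int128>(Hi) << 64) | Lo;
}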
6451 | ||||
6452 | if ((BuiltinID == AArch64::BI__builtin_arm_strex || | |||
6453 | BuiltinID == AArch64::BI__builtin_arm_stlex) && | |||
6454 | getContext().getTypeSize(E->getArg(0)->getType()) == 128) { | |||
6455 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex | |||
6456 | ? Intrinsic::aarch64_stlxp | |||
6457 | : Intrinsic::aarch64_stxp); | |||
6458 | llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty); | |||
6459 | ||||
6460 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); | |||
6461 | EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true); | |||
6462 | ||||
6463 | Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy)); | |||
6464 | llvm::Value *Val = Builder.CreateLoad(Tmp); | |||
6465 | ||||
6466 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); | |||
6467 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); | |||
6468 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), | |||
6469 | Int8PtrTy); | |||
6470 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); | |||
6471 | } | |||
6472 | ||||
6473 | if (BuiltinID == AArch64::BI__builtin_arm_strex || | |||
6474 | BuiltinID == AArch64::BI__builtin_arm_stlex) { | |||
6475 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); | |||
6476 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); | |||
6477 | ||||
6478 | QualType Ty = E->getArg(0)->getType(); | |||
6479 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), | |||
6480 | getContext().getTypeSize(Ty)); | |||
6481 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); | |||
6482 | ||||
6483 | if (StoreVal->getType()->isPointerTy()) | |||
6484 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty); | |||
6485 | else { | |||
6486 | llvm::Type *IntTy = llvm::IntegerType::get( | |||
6487 | getLLVMContext(), | |||
6488 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); | |||
6489 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); | |||
6490 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty); | |||
6491 | } | |||
6492 | ||||
6493 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex | |||
6494 | ? Intrinsic::aarch64_stlxr | |||
6495 | : Intrinsic::aarch64_stxr, | |||
6496 | StoreAddr->getType()); | |||
6497 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr"); | |||
6498 | } | |||
6499 | ||||
6500 | if (BuiltinID == AArch64::BI__builtin_arm_clrex) { | |||
6501 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); | |||
6502 | return Builder.CreateCall(F); | |||
6503 | } | |||
6504 | ||||
6505 | // CRC32 | |||
6506 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; | |||
6507 | switch (BuiltinID) { | |||
6508 | case AArch64::BI__builtin_arm_crc32b: | |||
6509 | CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; | |||
6510 | case AArch64::BI__builtin_arm_crc32cb: | |||
6511 | CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; | |||
6512 | case AArch64::BI__builtin_arm_crc32h: | |||
6513 | CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; | |||
6514 | case AArch64::BI__builtin_arm_crc32ch: | |||
6515 | CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; | |||
6516 | case AArch64::BI__builtin_arm_crc32w: | |||
6517 | CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; | |||
6518 | case AArch64::BI__builtin_arm_crc32cw: | |||
6519 | CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; | |||
6520 | case AArch64::BI__builtin_arm_crc32d: | |||
6521 | CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; | |||
6522 | case AArch64::BI__builtin_arm_crc32cd: | |||
6523 | CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; | |||
6524 | } | |||
6525 | ||||
6526 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { | |||
6527 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); | |||
6528 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); | |||
6529 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); | |||
6530 | ||||
6531 | llvm::Type *DataTy = F->getFunctionType()->getParamType(1); | |||
6532 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy); | |||
6533 | ||||
6534 | return Builder.CreateCall(F, {Arg0, Arg1}); | |||
6535 | } | |||
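Note that the 8- and 16-bit CRC builtins route their data operand through CreateZExtOrBitCast because the underlying intrinsics take a full i32 second parameter. A small usage sketch; the software fallback exists only to keep the example compilable off AArch64 and is not claimed to be bit-identical to the instruction:

#include <cstdint>

static uint32_t crc32Byte(uint32_t Crc, uint8_t Data) {
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
  // The uint8_t argument is zero-extended to the intrinsic's i32 parameter,
  // matching the CreateZExtOrBitCast above.
  return __builtin_arm_crc32b(Crc, Data);
#else
  // Plain reflected CRC-32 loop, shown only so the sketch is self-contained.
  Crc ^= Data;
  for (int I = 0; I < 8; ++I)
    Crc = (Crc >> 1) ^ (0xEDB88320u & (0u - (Crc & 1u)));
  return Crc;
#endif
}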
6536 | ||||
6537 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || | |||
6538 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || | |||
6539 | BuiltinID == AArch64::BI__builtin_arm_rsrp || | |||
6540 | BuiltinID == AArch64::BI__builtin_arm_wsr || | |||
6541 | BuiltinID == AArch64::BI__builtin_arm_wsr64 || | |||
6542 | BuiltinID == AArch64::BI__builtin_arm_wsrp) { | |||
6543 | ||||
6544 | bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr || | |||
6545 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || | |||
6546 | BuiltinID == AArch64::BI__builtin_arm_rsrp; | |||
6547 | ||||
6548 | bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp || | |||
6549 | BuiltinID == AArch64::BI__builtin_arm_wsrp; | |||
6550 | ||||
6551 | bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr && | |||
6552 | BuiltinID != AArch64::BI__builtin_arm_wsr; | |||
6553 | ||||
6554 | llvm::Type *ValueType; | |||
6555 | llvm::Type *RegisterType = Int64Ty; | |||
6556 | if (IsPointerBuiltin) { | |||
6557 | ValueType = VoidPtrTy; | |||
6558 | } else if (Is64Bit) { | |||
6559 | ValueType = Int64Ty; | |||
6560 | } else { | |||
6561 | ValueType = Int32Ty; | |||
6562 | } | |||
6563 | ||||
6564 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead); | |||
6565 | } | |||
6566 | ||||
6567 | // Find out if any arguments are required to be integer constant | |||
6568 | // expressions. | |||
6569 | unsigned ICEArguments = 0; | |||
6570 | ASTContext::GetBuiltinTypeError Error; | |||
6571 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); | |||
6572 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); | |||
6573 | ||||
6574 | llvm::SmallVector<Value*, 4> Ops; | |||
6575 | for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { | |||
6576 | if ((ICEArguments & (1 << i)) == 0) { | |||
6577 | Ops.push_back(EmitScalarExpr(E->getArg(i))); | |||
6578 | } else { | |||
6579 | // If this is required to be a constant, constant fold it so that we know | |||
6580 | // that the generated intrinsic gets a ConstantInt. | |||
6581 | llvm::APSInt Result; | |||
6582 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); | |||
6583 | assert(IsConst && "Constant arg isn't actually constant?"); | |||
6584 | (void)IsConst; | |||
6585 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); | |||
6586 | } | |||
6587 | } | |||
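The loop above relies on GetBuiltinType's convention that bit i of ICEArguments is set when argument i must be an integer constant expression; a trivial sketch of that test (the helper name is invented for illustration):

// True when argument I of the builtin must be an integer constant expression.
static bool argMustBeICE(unsigned ICEArguments, unsigned I) {
  return (ICEArguments & (1u << I)) != 0;
}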
6588 | ||||
6589 | auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap); | |||
6590 | const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap( | |||
6591 | SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); | |||
6592 | ||||
6593 | if (Builtin) { | |||
6594 | Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); | |||
6595 | Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E); | |||
6596 | assert(Result && "SISD intrinsic should have been handled"); | |||
6597 | return Result; | |||
6598 | } | |||
6599 | ||||
6600 | llvm::APSInt Result; | |||
6601 | const Expr *Arg = E->getArg(E->getNumArgs()-1); | |||
6602 | NeonTypeFlags Type(0); | |||
6603 | if (Arg->isIntegerConstantExpr(Result, getContext())) | |||
6604 | // Determine the type of this overloaded NEON intrinsic. | |||
6605 | Type = NeonTypeFlags(Result.getZExtValue()); | |||
6606 | ||||
6607 | bool usgn = Type.isUnsigned(); | |||
6608 | bool quad = Type.isQuad(); | |||
6609 | ||||
6610 | // Handle non-overloaded intrinsics first. | |||
6611 | switch (BuiltinID) { | |||
6612 | default: break; | |||
6613 | case NEON::BI__builtin_neon_vabsh_f16: | |||
6614 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6615 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs"); | |||
6616 | case NEON::BI__builtin_neon_vldrq_p128: { | |||
6617 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); | |||
6618 | llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0); | |||
6619 | Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy); | |||
6620 | return Builder.CreateAlignedLoad(Int128Ty, Ptr, | |||
6621 | CharUnits::fromQuantity(16)); | |||
6622 | } | |||
6623 | case NEON::BI__builtin_neon_vstrq_p128: { | |||
6624 | llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128); | |||
6625 | Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy); | |||
6626 | return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); | |||
6627 | } | |||
6628 | case NEON::BI__builtin_neon_vcvts_u32_f32: | |||
6629 | case NEON::BI__builtin_neon_vcvtd_u64_f64: | |||
6630 | usgn = true; | |||
6631 | LLVM_FALLTHROUGH; | |||
6632 | case NEON::BI__builtin_neon_vcvts_s32_f32: | |||
6633 | case NEON::BI__builtin_neon_vcvtd_s64_f64: { | |||
6634 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6635 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; | |||
6636 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; | |||
6637 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; | |||
6638 | Ops[0] = Builder.CreateBitCast(Ops[0], FTy); | |||
6639 | if (usgn) | |||
6640 | return Builder.CreateFPToUI(Ops[0], InTy); | |||
6641 | return Builder.CreateFPToSI(Ops[0], InTy); | |||
6642 | } | |||
6643 | case NEON::BI__builtin_neon_vcvts_f32_u32: | |||
6644 | case NEON::BI__builtin_neon_vcvtd_f64_u64: | |||
6645 | usgn = true; | |||
6646 | LLVM_FALLTHROUGH; | |||
6647 | case NEON::BI__builtin_neon_vcvts_f32_s32: | |||
6648 | case NEON::BI__builtin_neon_vcvtd_f64_s64: { | |||
6649 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6650 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; | |||
6651 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; | |||
6652 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; | |||
6653 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); | |||
6654 | if (usgn) | |||
6655 | return Builder.CreateUIToFP(Ops[0], FTy); | |||
6656 | return Builder.CreateSIToFP(Ops[0], FTy); | |||
6657 | } | |||
6658 | case NEON::BI__builtin_neon_vcvth_f16_u16: | |||
6659 | case NEON::BI__builtin_neon_vcvth_f16_u32: | |||
6660 | case NEON::BI__builtin_neon_vcvth_f16_u64: | |||
6661 | usgn = true; | |||
6662 | // FALL THROUGH | |||
6663 | case NEON::BI__builtin_neon_vcvth_f16_s16: | |||
6664 | case NEON::BI__builtin_neon_vcvth_f16_s32: | |||
6665 | case NEON::BI__builtin_neon_vcvth_f16_s64: { | |||
6666 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6667 | llvm::Type *FTy = HalfTy; | |||
6668 | llvm::Type *InTy; | |||
6669 | if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) | |||
6670 | InTy = Int64Ty; | |||
6671 | else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) | |||
6672 | InTy = Int32Ty; | |||
6673 | else | |||
6674 | InTy = Int16Ty; | |||
6675 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); | |||
6676 | if (usgn) | |||
6677 | return Builder.CreateUIToFP(Ops[0], FTy); | |||
6678 | return Builder.CreateSIToFP(Ops[0], FTy); | |||
6679 | } | |||
6680 | case NEON::BI__builtin_neon_vcvth_u16_f16: | |||
6681 | usgn = true; | |||
6682 | // FALL THROUGH | |||
6683 | case NEON::BI__builtin_neon_vcvth_s16_f16: { | |||
6684 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6685 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); | |||
6686 | if (usgn) | |||
6687 | return Builder.CreateFPToUI(Ops[0], Int16Ty); | |||
6688 | return Builder.CreateFPToSI(Ops[0], Int16Ty); | |||
6689 | } | |||
6690 | case NEON::BI__builtin_neon_vcvth_u32_f16: | |||
6691 | usgn = true; | |||
6692 | // FALL THROUGH | |||
6693 | case NEON::BI__builtin_neon_vcvth_s32_f16: { | |||
6694 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6695 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); | |||
6696 | if (usgn) | |||
6697 | return Builder.CreateFPToUI(Ops[0], Int32Ty); | |||
6698 | return Builder.CreateFPToSI(Ops[0], Int32Ty); | |||
6699 | } | |||
6700 | case NEON::BI__builtin_neon_vcvth_u64_f16: | |||
6701 | usgn = true; | |||
6702 | // FALL THROUGH | |||
6703 | case NEON::BI__builtin_neon_vcvth_s64_f16: { | |||
6704 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6705 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); | |||
6706 | if (usgn) | |||
6707 | return Builder.CreateFPToUI(Ops[0], Int64Ty); | |||
6708 | return Builder.CreateFPToSI(Ops[0], Int64Ty); | |||
6709 | } | |||
6710 | case NEON::BI__builtin_neon_vcvtah_u16_f16: | |||
6711 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: | |||
6712 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: | |||
6713 | case NEON::BI__builtin_neon_vcvtph_u16_f16: | |||
6714 | case NEON::BI__builtin_neon_vcvtah_s16_f16: | |||
6715 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: | |||
6716 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: | |||
6717 | case NEON::BI__builtin_neon_vcvtph_s16_f16: { | |||
6718 | unsigned Int; | |||
6719 | llvm::Type* InTy = Int32Ty; | |||
6720 | llvm::Type* FTy = HalfTy; | |||
6721 | llvm::Type *Tys[2] = {InTy, FTy}; | |||
6722 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6723 | switch (BuiltinID) { | |||
6724 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6725 | case NEON::BI__builtin_neon_vcvtah_u16_f16: | |||
6726 | Int = Intrinsic::aarch64_neon_fcvtau; break; | |||
6727 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: | |||
6728 | Int = Intrinsic::aarch64_neon_fcvtmu; break; | |||
6729 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: | |||
6730 | Int = Intrinsic::aarch64_neon_fcvtnu; break; | |||
6731 | case NEON::BI__builtin_neon_vcvtph_u16_f16: | |||
6732 | Int = Intrinsic::aarch64_neon_fcvtpu; break; | |||
6733 | case NEON::BI__builtin_neon_vcvtah_s16_f16: | |||
6734 | Int = Intrinsic::aarch64_neon_fcvtas; break; | |||
6735 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: | |||
6736 | Int = Intrinsic::aarch64_neon_fcvtms; break; | |||
6737 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: | |||
6738 | Int = Intrinsic::aarch64_neon_fcvtns; break; | |||
6739 | case NEON::BI__builtin_neon_vcvtph_s16_f16: | |||
6740 | Int = Intrinsic::aarch64_neon_fcvtps; break; | |||
6741 | } | |||
6742 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt"); | |||
6743 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
6744 | } | |||
6745 | case NEON::BI__builtin_neon_vcaleh_f16: | |||
6746 | case NEON::BI__builtin_neon_vcalth_f16: | |||
6747 | case NEON::BI__builtin_neon_vcageh_f16: | |||
6748 | case NEON::BI__builtin_neon_vcagth_f16: { | |||
6749 | unsigned Int; | |||
6750 | llvm::Type* InTy = Int32Ty; | |||
6751 | llvm::Type* FTy = HalfTy; | |||
6752 | llvm::Type *Tys[2] = {InTy, FTy}; | |||
6753 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6754 | switch (BuiltinID) { | |||
6755 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6756 | case NEON::BI__builtin_neon_vcageh_f16: | |||
6757 | Int = Intrinsic::aarch64_neon_facge; break; | |||
6758 | case NEON::BI__builtin_neon_vcagth_f16: | |||
6759 | Int = Intrinsic::aarch64_neon_facgt; break; | |||
6760 | case NEON::BI__builtin_neon_vcaleh_f16: | |||
6761 | Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break; | |||
6762 | case NEON::BI__builtin_neon_vcalth_f16: | |||
6763 | Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break; | |||
6764 | } | |||
6765 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg"); | |||
6766 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
6767 | } | |||
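The facge/facgt path above compares absolute values and produces an all-ones or all-zeros mask in a 32-bit intrinsic result, which is then truncated back to 16 bits. A scalar model of vcageh_f16 (written with float rather than __fp16 so it compiles anywhere; that substitution is an assumption for illustration):

#include <cmath>
#include <cstdint>

static uint16_t vcagehRef(float A, float B) {
  // Absolute compare: all-ones mask when |A| >= |B|, otherwise zero.
  return std::fabs(A) >= std::fabs(B) ? 0xFFFFu : 0x0000u;
}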
6768 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: | |||
6769 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: { | |||
6770 | unsigned Int; | |||
6771 | llvm::Type* InTy = Int32Ty; | |||
6772 | llvm::Type* FTy = HalfTy; | |||
6773 | llvm::Type *Tys[2] = {InTy, FTy}; | |||
6774 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6775 | switch (BuiltinID) { | |||
6776 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6777 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: | |||
6778 | Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; | |||
6779 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: | |||
6780 | Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; | |||
6781 | } | |||
6782 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); | |||
6783 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
6784 | } | |||
6785 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: | |||
6786 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: { | |||
6787 | unsigned Int; | |||
6788 | llvm::Type* FTy = HalfTy; | |||
6789 | llvm::Type* InTy = Int32Ty; | |||
6790 | llvm::Type *Tys[2] = {FTy, InTy}; | |||
6791 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6792 | switch (BuiltinID) { | |||
6793 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6794 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: | |||
6795 | Int = Intrinsic::aarch64_neon_vcvtfxs2fp; | |||
6796 | Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext"); | |||
6797 | break; | |||
6798 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: | |||
6799 | Int = Intrinsic::aarch64_neon_vcvtfxu2fp; | |||
6800 | Ops[0] = Builder.CreateZExt(Ops[0], InTy); | |||
6801 | break; | |||
6802 | } | |||
6803 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); | |||
6804 | } | |||
6805 | case NEON::BI__builtin_neon_vpaddd_s64: { | |||
6806 | llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2); | |||
6807 | Value *Vec = EmitScalarExpr(E->getArg(0)); | |||
6808 | // The vector is v2i64, so make sure it's bitcast to that. | |||
6809 | Vec = Builder.CreateBitCast(Vec, Ty, "v2i64"); | |||
6810 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); | |||
6811 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); | |||
6812 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); | |||
6813 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); | |||
6814 | // Pairwise addition of a v2i64 into a scalar i64. | |||
6815 | return Builder.CreateAdd(Op0, Op1, "vpaddd"); | |||
6816 | } | |||
6817 | case NEON::BI__builtin_neon_vpaddd_f64: { | |||
6818 | llvm::Type *Ty = | |||
6819 | llvm::VectorType::get(DoubleTy, 2); | |||
6820 | Value *Vec = EmitScalarExpr(E->getArg(0)); | |||
6821 | // The vector is v2f64, so make sure it's bitcast to that. | |||
6822 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f64"); | |||
6823 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); | |||
6824 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); | |||
6825 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); | |||
6826 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); | |||
6827 | // Pairwise addition of a v2f64 into a scalar f64. | |||
6828 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); | |||
6829 | } | |||
6830 | case NEON::BI__builtin_neon_vpadds_f32: { | |||
6831 | llvm::Type *Ty = | |||
6832 | llvm::VectorType::get(FloatTy, 2); | |||
6833 | Value *Vec = EmitScalarExpr(E->getArg(0)); | |||
6834 | // The vector is v2f32, so make sure it's bitcast to that. | |||
6835 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f32"); | |||
6836 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); | |||
6837 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); | |||
6838 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); | |||
6839 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); | |||
6840 | // Pairwise addition of a v2f32 into a scalar f32. | |||
6841 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); | |||
6842 | } | |||
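All three pairwise-add cases above follow the same shape: bitcast the argument to a two-element vector, extract both lanes, and add them into a scalar. A standalone model of the f32 variant (the name is illustrative):

static float vpaddsRef(const float V[2]) {
  // Pairwise addition of a two-lane vector into a scalar.
  return V[0] + V[1];
}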
6843 | case NEON::BI__builtin_neon_vceqzd_s64: | |||
6844 | case NEON::BI__builtin_neon_vceqzd_f64: | |||
6845 | case NEON::BI__builtin_neon_vceqzs_f32: | |||
6846 | case NEON::BI__builtin_neon_vceqzh_f16: | |||
6847 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6848 | return EmitAArch64CompareBuiltinExpr( | |||
6849 | Ops[0], ConvertType(E->getCallReturnType(getContext())), | |||
6850 | ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz"); | |||
6851 | case NEON::BI__builtin_neon_vcgezd_s64: | |||
6852 | case NEON::BI__builtin_neon_vcgezd_f64: | |||
6853 | case NEON::BI__builtin_neon_vcgezs_f32: | |||
6854 | case NEON::BI__builtin_neon_vcgezh_f16: | |||
6855 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6856 | return EmitAArch64CompareBuiltinExpr( | |||
6857 | Ops[0], ConvertType(E->getCallReturnType(getContext())), | |||
6858 | ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez"); | |||
6859 | case NEON::BI__builtin_neon_vclezd_s64: | |||
6860 | case NEON::BI__builtin_neon_vclezd_f64: | |||
6861 | case NEON::BI__builtin_neon_vclezs_f32: | |||
6862 | case NEON::BI__builtin_neon_vclezh_f16: | |||
6863 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6864 | return EmitAArch64CompareBuiltinExpr( | |||
6865 | Ops[0], ConvertType(E->getCallReturnType(getContext())), | |||
6866 | ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez"); | |||
6867 | case NEON::BI__builtin_neon_vcgtzd_s64: | |||
6868 | case NEON::BI__builtin_neon_vcgtzd_f64: | |||
6869 | case NEON::BI__builtin_neon_vcgtzs_f32: | |||
6870 | case NEON::BI__builtin_neon_vcgtzh_f16: | |||
6871 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6872 | return EmitAArch64CompareBuiltinExpr( | |||
6873 | Ops[0], ConvertType(E->getCallReturnType(getContext())), | |||
6874 | ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz"); | |||
6875 | case NEON::BI__builtin_neon_vcltzd_s64: | |||
6876 | case NEON::BI__builtin_neon_vcltzd_f64: | |||
6877 | case NEON::BI__builtin_neon_vcltzs_f32: | |||
6878 | case NEON::BI__builtin_neon_vcltzh_f16: | |||
6879 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6880 | return EmitAArch64CompareBuiltinExpr( | |||
6881 | Ops[0], ConvertType(E->getCallReturnType(getContext())), | |||
6882 | ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz"); | |||
6883 | ||||
6884 | case NEON::BI__builtin_neon_vceqzd_u64: { | |||
6885 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
6886 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); | |||
6887 | Ops[0] = | |||
6888 | Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty)); | |||
6889 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd"); | |||
6890 | } | |||
6891 | case NEON::BI__builtin_neon_vceqd_f64: | |||
6892 | case NEON::BI__builtin_neon_vcled_f64: | |||
6893 | case NEON::BI__builtin_neon_vcltd_f64: | |||
6894 | case NEON::BI__builtin_neon_vcged_f64: | |||
6895 | case NEON::BI__builtin_neon_vcgtd_f64: { | |||
6896 | llvm::CmpInst::Predicate P; | |||
6897 | switch (BuiltinID) { | |||
6898 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6899 | case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; | |||
6900 | case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; | |||
6901 | case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; | |||
6902 | case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; | |||
6903 | case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; | |||
6904 | } | |||
6905 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6906 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); | |||
6907 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); | |||
6908 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); | |||
6909 | return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd"); | |||
6910 | } | |||
6911 | case NEON::BI__builtin_neon_vceqs_f32: | |||
6912 | case NEON::BI__builtin_neon_vcles_f32: | |||
6913 | case NEON::BI__builtin_neon_vclts_f32: | |||
6914 | case NEON::BI__builtin_neon_vcges_f32: | |||
6915 | case NEON::BI__builtin_neon_vcgts_f32: { | |||
6916 | llvm::CmpInst::Predicate P; | |||
6917 | switch (BuiltinID) { | |||
6918 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6919 | case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; | |||
6920 | case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; | |||
6921 | case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; | |||
6922 | case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; | |||
6923 | case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; | |||
6924 | } | |||
6925 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6926 | Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); | |||
6927 | Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy); | |||
6928 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); | |||
6929 | return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd"); | |||
6930 | } | |||
6931 | case NEON::BI__builtin_neon_vceqh_f16: | |||
6932 | case NEON::BI__builtin_neon_vcleh_f16: | |||
6933 | case NEON::BI__builtin_neon_vclth_f16: | |||
6934 | case NEON::BI__builtin_neon_vcgeh_f16: | |||
6935 | case NEON::BI__builtin_neon_vcgth_f16: { | |||
6936 | llvm::CmpInst::Predicate P; | |||
6937 | switch (BuiltinID) { | |||
6938 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6939 | case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; | |||
6940 | case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; | |||
6941 | case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; | |||
6942 | case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; | |||
6943 | case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; | |||
6944 | } | |||
6945 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6946 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); | |||
6947 | Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy); | |||
6948 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); | |||
6949 | return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd"); | |||
6950 | } | |||
6951 | case NEON::BI__builtin_neon_vceqd_s64: | |||
6952 | case NEON::BI__builtin_neon_vceqd_u64: | |||
6953 | case NEON::BI__builtin_neon_vcgtd_s64: | |||
6954 | case NEON::BI__builtin_neon_vcgtd_u64: | |||
6955 | case NEON::BI__builtin_neon_vcltd_s64: | |||
6956 | case NEON::BI__builtin_neon_vcltd_u64: | |||
6957 | case NEON::BI__builtin_neon_vcged_u64: | |||
6958 | case NEON::BI__builtin_neon_vcged_s64: | |||
6959 | case NEON::BI__builtin_neon_vcled_u64: | |||
6960 | case NEON::BI__builtin_neon_vcled_s64: { | |||
6961 | llvm::CmpInst::Predicate P; | |||
6962 | switch (BuiltinID) { | |||
6963 | default: llvm_unreachable("missing builtin ID in switch!"); | |||
6964 | case NEON::BI__builtin_neon_vceqd_s64: | |||
6965 | case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break; | |||
6966 | case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break; | |||
6967 | case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break; | |||
6968 | case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break; | |||
6969 | case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break; | |||
6970 | case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break; | |||
6971 | case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break; | |||
6972 | case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break; | |||
6973 | case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break; | |||
6974 | } | |||
6975 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6976 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); | |||
6977 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); | |||
6978 | Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]); | |||
6979 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd"); | |||
6980 | } | |||
6981 | case NEON::BI__builtin_neon_vtstd_s64: | |||
6982 | case NEON::BI__builtin_neon_vtstd_u64: { | |||
6983 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
6984 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); | |||
6985 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); | |||
6986 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); | |||
6987 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], | |||
6988 | llvm::Constant::getNullValue(Int64Ty)); | |||
6989 | return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd"); | |||
6990 | } | |||
6991 | case NEON::BI__builtin_neon_vset_lane_i8: | |||
6992 | case NEON::BI__builtin_neon_vset_lane_i16: | |||
6993 | case NEON::BI__builtin_neon_vset_lane_i32: | |||
6994 | case NEON::BI__builtin_neon_vset_lane_i64: | |||
6995 | case NEON::BI__builtin_neon_vset_lane_f32: | |||
6996 | case NEON::BI__builtin_neon_vsetq_lane_i8: | |||
6997 | case NEON::BI__builtin_neon_vsetq_lane_i16: | |||
6998 | case NEON::BI__builtin_neon_vsetq_lane_i32: | |||
6999 | case NEON::BI__builtin_neon_vsetq_lane_i64: | |||
7000 | case NEON::BI__builtin_neon_vsetq_lane_f32: | |||
7001 | Ops.push_back(EmitScalarExpr(E->getArg(2))); | |||
7002 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); | |||
7003 | case NEON::BI__builtin_neon_vset_lane_f64: | |||
7004 | // The vector type needs a cast for the v1f64 variant. | |||
7005 | Ops[1] = Builder.CreateBitCast(Ops[1], | |||
7006 | llvm::VectorType::get(DoubleTy, 1)); | |||
7007 | Ops.push_back(EmitScalarExpr(E->getArg(2))); | |||
7008 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); | |||
7009 | case NEON::BI__builtin_neon_vsetq_lane_f64: | |||
7010 | // The vector type needs a cast for the v2f64 variant. | |||
7011 | Ops[1] = Builder.CreateBitCast(Ops[1], | |||
7012 | llvm::VectorType::get(DoubleTy, 2)); | |||
7013 | Ops.push_back(EmitScalarExpr(E->getArg(2))); | |||
7014 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); | |||
7015 | ||||
7016 | case NEON::BI__builtin_neon_vget_lane_i8: | |||
7017 | case NEON::BI__builtin_neon_vdupb_lane_i8: | |||
7018 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8)); | |||
7019 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7020 | "vget_lane"); | |||
7021 | case NEON::BI__builtin_neon_vgetq_lane_i8: | |||
7022 | case NEON::BI__builtin_neon_vdupb_laneq_i8: | |||
7023 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16)); | |||
7024 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7025 | "vgetq_lane"); | |||
7026 | case NEON::BI__builtin_neon_vget_lane_i16: | |||
7027 | case NEON::BI__builtin_neon_vduph_lane_i16: | |||
7028 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4)); | |||
7029 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7030 | "vget_lane"); | |||
7031 | case NEON::BI__builtin_neon_vgetq_lane_i16: | |||
7032 | case NEON::BI__builtin_neon_vduph_laneq_i16: | |||
7033 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8)); | |||
7034 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7035 | "vgetq_lane"); | |||
7036 | case NEON::BI__builtin_neon_vget_lane_i32: | |||
7037 | case NEON::BI__builtin_neon_vdups_lane_i32: | |||
7038 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2)); | |||
7039 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7040 | "vget_lane"); | |||
7041 | case NEON::BI__builtin_neon_vdups_lane_f32: | |||
7042 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
7043 | llvm::VectorType::get(FloatTy, 2)); | |||
7044 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7045 | "vdups_lane"); | |||
7046 | case NEON::BI__builtin_neon_vgetq_lane_i32: | |||
7047 | case NEON::BI__builtin_neon_vdups_laneq_i32: | |||
7048 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); | |||
7049 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7050 | "vgetq_lane"); | |||
7051 | case NEON::BI__builtin_neon_vget_lane_i64: | |||
7052 | case NEON::BI__builtin_neon_vdupd_lane_i64: | |||
7053 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1)); | |||
7054 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7055 | "vget_lane"); | |||
7056 | case NEON::BI__builtin_neon_vdupd_lane_f64: | |||
7057 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
7058 | llvm::VectorType::get(DoubleTy, 1)); | |||
7059 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7060 | "vdupd_lane"); | |||
7061 | case NEON::BI__builtin_neon_vgetq_lane_i64: | |||
7062 | case NEON::BI__builtin_neon_vdupd_laneq_i64: | |||
7063 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); | |||
7064 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7065 | "vgetq_lane"); | |||
7066 | case NEON::BI__builtin_neon_vget_lane_f32: | |||
7067 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
7068 | llvm::VectorType::get(FloatTy, 2)); | |||
7069 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7070 | "vget_lane"); | |||
7071 | case NEON::BI__builtin_neon_vget_lane_f64: | |||
7072 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
7073 | llvm::VectorType::get(DoubleTy, 1)); | |||
7074 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7075 | "vget_lane"); | |||
7076 | case NEON::BI__builtin_neon_vgetq_lane_f32: | |||
7077 | case NEON::BI__builtin_neon_vdups_laneq_f32: | |||
7078 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
7079 | llvm::VectorType::get(FloatTy, 4)); | |||
7080 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7081 | "vgetq_lane"); | |||
7082 | case NEON::BI__builtin_neon_vgetq_lane_f64: | |||
7083 | case NEON::BI__builtin_neon_vdupd_laneq_f64: | |||
7084 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
7085 | llvm::VectorType::get(DoubleTy, 2)); | |||
7086 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), | |||
7087 | "vgetq_lane"); | |||
7088 | case NEON::BI__builtin_neon_vaddh_f16: | |||
7089 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7090 | return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh"); | |||
7091 | case NEON::BI__builtin_neon_vsubh_f16: | |||
7092 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7093 | return Builder.CreateFSub(Ops[0], Ops[1], "vsubh"); | |||
7094 | case NEON::BI__builtin_neon_vmulh_f16: | |||
7095 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7096 | return Builder.CreateFMul(Ops[0], Ops[1], "vmulh"); | |||
7097 | case NEON::BI__builtin_neon_vdivh_f16: | |||
7098 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7099 | return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh"); | |||
7100 | case NEON::BI__builtin_neon_vfmah_f16: { | |||
7101 | Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy); | |||
7102 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. | |||
7103 | return Builder.CreateCall(F, | |||
7104 | {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]}); | |||
7105 | } | |||
7106 | case NEON::BI__builtin_neon_vfmsh_f16: { | |||
7107 | Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy); | |||
7108 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy); | |||
7109 | Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh"); | |||
7110 | // NEON intrinsic puts accumulator first, unlike the LLVM fma. | |||
7111 | return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]}); | |||
7112 | } | |||
7113 | case NEON::BI__builtin_neon_vaddd_s64: | |||
7114 | case NEON::BI__builtin_neon_vaddd_u64: | |||
7115 | return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); | |||
7116 | case NEON::BI__builtin_neon_vsubd_s64: | |||
7117 | case NEON::BI__builtin_neon_vsubd_u64: | |||
7118 | return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); | |||
7119 | case NEON::BI__builtin_neon_vqdmlalh_s16: | |||
7120 | case NEON::BI__builtin_neon_vqdmlslh_s16: { | |||
7121 | SmallVector<Value *, 2> ProductOps; | |||
7122 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); | |||
7123 | ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); | |||
7124 | llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); | |||
7125 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), | |||
7126 | ProductOps, "vqdmlXl"); | |||
7127 | Constant *CI = ConstantInt::get(SizeTy, 0); | |||
7128 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); | |||
7129 | ||||
7130 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 | |||
7131 | ? Intrinsic::aarch64_neon_sqadd | |||
7132 | : Intrinsic::aarch64_neon_sqsub; | |||
7133 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); | |||
7134 | } | |||
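vqdmlalh_s16/vqdmlslh_s16 have no scalar 16-bit intrinsic, so the code above widens the operands into <4 x i16> vectors, calls sqdmull, extracts lane 0, and then applies a saturating add or subtract to the accumulator. A scalar reference of the arithmetic being emitted (the add variant is shown; function names are illustrative):

#include <algorithm>
#include <cstdint>

static int32_t satToI32(int64_t V) {
  return static_cast<int32_t>(std::clamp<int64_t>(V, INT32_MIN, INT32_MAX));
}

// acc + saturate(2 * a * b): the sqdmull step followed by sqadd.
static int32_t vqdmlalhRef(int32_t Acc, int16_t A, int16_t B) {
  int32_t Prod = satToI32(2 * static_cast<int64_t>(A) * B);
  return satToI32(static_cast<int64_t>(Acc) + Prod);
}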
7135 | case NEON::BI__builtin_neon_vqshlud_n_s64: { | |||
7136 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7137 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); | |||
7138 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), | |||
7139 | Ops, "vqshlu_n"); | |||
7140 | } | |||
7141 | case NEON::BI__builtin_neon_vqshld_n_u64: | |||
7142 | case NEON::BI__builtin_neon_vqshld_n_s64: { | |||
7143 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 | |||
7144 | ? Intrinsic::aarch64_neon_uqshl | |||
7145 | : Intrinsic::aarch64_neon_sqshl; | |||
7146 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7147 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); | |||
7148 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); | |||
7149 | } | |||
7150 | case NEON::BI__builtin_neon_vrshrd_n_u64: | |||
7151 | case NEON::BI__builtin_neon_vrshrd_n_s64: { | |||
7152 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 | |||
7153 | ? Intrinsic::aarch64_neon_urshl | |||
7154 | : Intrinsic::aarch64_neon_srshl; | |||
7155 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7156 | int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); | |||
7157 | Ops[1] = ConstantInt::get(Int64Ty, -SV); | |||
7158 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n"); | |||
7159 | } | |||
7160 | case NEON::BI__builtin_neon_vrsrad_n_u64: | |||
7161 | case NEON::BI__builtin_neon_vrsrad_n_s64: { | |||
7162 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 | |||
7163 | ? Intrinsic::aarch64_neon_urshl | |||
7164 | : Intrinsic::aarch64_neon_srshl; | |||
7165 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); | |||
7166 | Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); | |||
7167 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), | |||
7168 | {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)}); | |||
7169 | return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty)); | |||
7170 | } | |||
7171 | case NEON::BI__builtin_neon_vshld_n_s64: | |||
7172 | case NEON::BI__builtin_neon_vshld_n_u64: { | |||
7173 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); | |||
7174 | return Builder.CreateShl( | |||
7175 | Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); | |||
7176 | } | |||
7177 | case NEON::BI__builtin_neon_vshrd_n_s64: { | |||
7178 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); | |||
7179 | return Builder.CreateAShr( | |||
7180 | Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), | |||
7181 | Amt->getZExtValue())), | |||
7182 | "shrd_n"); | |||
7183 | } | |||
7184 | case NEON::BI__builtin_neon_vshrd_n_u64: { | |||
7185 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); | |||
7186 | uint64_t ShiftAmt = Amt->getZExtValue(); | |||
7187 | // Right-shifting an unsigned value by its size yields 0. | |||
7188 | if (ShiftAmt == 64) | |||
7189 | return ConstantInt::get(Int64Ty, 0); | |||
7190 | return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt), | |||
7191 | "shrd_n"); | |||
7192 | } | |||
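The explicit ShiftAmt == 64 check above exists because the NEON builtin permits a shift count of 64, which must produce 0, while an LLVM lshr by the full bit width yields poison. A one-line reference model:

#include <cstdint>

static uint64_t vshrdNU64Ref(uint64_t V, unsigned N) {
  // Shift counts of 64 or more are defined to produce zero here.
  return N >= 64 ? 0 : V >> N;
}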
7193 | case NEON::BI__builtin_neon_vsrad_n_s64: { | |||
7194 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); | |||
7195 | Ops[1] = Builder.CreateAShr( | |||
7196 | Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), | |||
7197 | Amt->getZExtValue())), | |||
7198 | "shrd_n"); | |||
7199 | return Builder.CreateAdd(Ops[0], Ops[1]); | |||
7200 | } | |||
7201 | case NEON::BI__builtin_neon_vsrad_n_u64: { | |||
7202 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); | |||
7203 | uint64_t ShiftAmt = Amt->getZExtValue(); | |||
7204 | // Right-shifting an unsigned value by its size yields 0. | |||
7205 | // As Op + 0 = Op, return Ops[0] directly. | |||
7206 | if (ShiftAmt == 64) | |||
7207 | return Ops[0]; | |||
7208 | Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt), | |||
7209 | "shrd_n"); | |||
7210 | return Builder.CreateAdd(Ops[0], Ops[1]); | |||
7211 | } | |||
7212 | case NEON::BI__builtin_neon_vqdmlalh_lane_s16: | |||
7213 | case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: | |||
7214 | case NEON::BI__builtin_neon_vqdmlslh_lane_s16: | |||
7215 | case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { | |||
7216 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), | |||
7217 | "lane"); | |||
7218 | SmallVector<Value *, 2> ProductOps; | |||
7219 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); | |||
7220 | ProductOps.push_back(vectorWrapScalar16(Ops[2])); | |||
7221 | llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4); | |||
7222 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), | |||
7223 | ProductOps, "vqdmlXl"); | |||
7224 | Constant *CI = ConstantInt::get(SizeTy, 0); | |||
7225 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); | |||
7226 | Ops.pop_back(); | |||
7227 | ||||
7228 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || | |||
7229 | BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) | |||
7230 | ? Intrinsic::aarch64_neon_sqadd | |||
7231 | : Intrinsic::aarch64_neon_sqsub; | |||
7232 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl"); | |||
7233 | } | |||
7234 | case NEON::BI__builtin_neon_vqdmlals_s32: | |||
7235 | case NEON::BI__builtin_neon_vqdmlsls_s32: { | |||
7236 | SmallVector<Value *, 2> ProductOps; | |||
7237 | ProductOps.push_back(Ops[1]); | |||
7238 | ProductOps.push_back(EmitScalarExpr(E->getArg(2))); | |||
7239 | Ops[1] = | |||
7240 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), | |||
7241 | ProductOps, "vqdmlXl"); | |||
7242 | ||||
7243 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 | |||
7244 | ? Intrinsic::aarch64_neon_sqadd | |||
7245 | : Intrinsic::aarch64_neon_sqsub; | |||
7246 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); | |||
7247 | } | |||
7248 | case NEON::BI__builtin_neon_vqdmlals_lane_s32: | |||
7249 | case NEON::BI__builtin_neon_vqdmlals_laneq_s32: | |||
7250 | case NEON::BI__builtin_neon_vqdmlsls_lane_s32: | |||
7251 | case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { | |||
7252 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), | |||
7253 | "lane"); | |||
7254 | SmallVector<Value *, 2> ProductOps; | |||
7255 | ProductOps.push_back(Ops[1]); | |||
7256 | ProductOps.push_back(Ops[2]); | |||
7257 | Ops[1] = | |||
7258 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), | |||
7259 | ProductOps, "vqdmlXl"); | |||
7260 | Ops.pop_back(); | |||
7261 | ||||
7262 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || | |||
7263 | BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) | |||
7264 | ? Intrinsic::aarch64_neon_sqadd | |||
7265 | : Intrinsic::aarch64_neon_sqsub; | |||
7266 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl"); | |||
7267 | } | |||
7268 | } | |||
7269 | ||||
7270 | llvm::VectorType *VTy = GetNeonType(this, Type); | |||
7271 | llvm::Type *Ty = VTy; | |||
7272 | if (!Ty) | |||
7273 | return nullptr; | |||
7274 | ||||
7275 | // Not all intrinsics handled by the common case work for AArch64 yet, so only | |||
7276 | // defer to common code if it's been added to our special map. | |||
7277 | Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, | |||
7278 | AArch64SIMDIntrinsicsProvenSorted); | |||
7279 | ||||
7280 | if (Builtin) | |||
7281 | return EmitCommonNeonBuiltinExpr( | |||
7282 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, | |||
7283 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, | |||
7284 | /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); | |||
7285 | ||||
7286 | if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) | |||
7287 | return V; | |||
7288 | ||||
7289 | unsigned Int; | |||
7290 | switch (BuiltinID) { | |||
7291 | default: return nullptr; | |||
7292 | case NEON::BI__builtin_neon_vbsl_v: | |||
7293 | case NEON::BI__builtin_neon_vbslq_v: { | |||
7294 | llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); | |||
7295 | Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl"); | |||
7296 | Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl"); | |||
7297 | Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl"); | |||
7298 | ||||
7299 | Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl"); | |||
7300 | Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl"); | |||
7301 | Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl"); | |||
7302 | return Builder.CreateBitCast(Ops[0], Ty); | |||
7303 | } | |||
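vbsl is lowered above as a plain bitwise select rather than an intrinsic: keep the bits of the second operand where the mask bit is set and the bits of the third operand where it is clear. Per 64-bit chunk that is simply:

#include <cstdint>

static uint64_t bslRef(uint64_t Mask, uint64_t B, uint64_t C) {
  // (Mask & B) | (~Mask & C): the And / And-Not / Or sequence above.
  return (Mask & B) | (~Mask & C);
}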
7304 | case NEON::BI__builtin_neon_vfma_lane_v: | |||
7305 | case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types | |||
7306 | // The ARM builtins (and instructions) have the addend as the first | |||
7307 | // operand, but the 'fma' intrinsics have it last. Swap it around here. | |||
7308 | Value *Addend = Ops[0]; | |||
7309 | Value *Multiplicand = Ops[1]; | |||
7310 | Value *LaneSource = Ops[2]; | |||
7311 | Ops[0] = Multiplicand; | |||
7312 | Ops[1] = LaneSource; | |||
7313 | Ops[2] = Addend; | |||
7314 | ||||
7315 | // Now adjust things to handle the lane access. | |||
7316 | llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ? | |||
7317 | llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) : | |||
7318 | VTy; | |||
7319 | llvm::Constant *cst = cast<Constant>(Ops[3]); | |||
7320 | Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst); | |||
7321 | Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy); | |||
7322 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane"); | |||
7323 | ||||
7324 | Ops.pop_back(); | |||
7325 | Int = Intrinsic::fma; | |||
7326 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla"); | |||
7327 | } | |||
7328 | case NEON::BI__builtin_neon_vfma_laneq_v: { | |||
7329 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); | |||
7330 | // v1f64 fma should be mapped to Neon scalar f64 fma | |||
7331 | if (VTy && VTy->getElementType() == DoubleTy) { | |||
7332 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); | |||
7333 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); | |||
7334 | llvm::Type *VTy = GetNeonType(this, | |||
7335 | NeonTypeFlags(NeonTypeFlags::Float64, false, true)); | |||
7336 | Ops[2] = Builder.CreateBitCast(Ops[2], VTy); | |||
7337 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); | |||
7338 | Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy); | |||
7339 | Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); | |||
7340 | return Builder.CreateBitCast(Result, Ty); | |||
7341 | } | |||
7342 | Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); | |||
7343 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
7344 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
7345 | ||||
7346 | llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(), | |||
7347 | VTy->getNumElements() * 2); | |||
7348 | Ops[2] = Builder.CreateBitCast(Ops[2], STy); | |||
7349 | Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), | |||
7350 | cast<ConstantInt>(Ops[3])); | |||
7351 | Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane"); | |||
7352 | ||||
7353 | return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]}); | |||
7354 | } | |||
7355 | case NEON::BI__builtin_neon_vfmaq_laneq_v: { | |||
7356 | Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); | |||
7357 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
7358 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
7359 | ||||
7360 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
7361 | Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3])); | |||
7362 | return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]}); | |||
7363 | } | |||
7364 | case NEON::BI__builtin_neon_vfmah_lane_f16: | |||
7365 | case NEON::BI__builtin_neon_vfmas_lane_f32: | |||
7366 | case NEON::BI__builtin_neon_vfmah_laneq_f16: | |||
7367 | case NEON::BI__builtin_neon_vfmas_laneq_f32: | |||
7368 | case NEON::BI__builtin_neon_vfmad_lane_f64: | |||
7369 | case NEON::BI__builtin_neon_vfmad_laneq_f64: { | |||
7370 | Ops.push_back(EmitScalarExpr(E->getArg(3))); | |||
7371 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); | |||
7372 | Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); | |||
7373 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); | |||
7374 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); | |||
7375 | } | |||
7376 | case NEON::BI__builtin_neon_vmull_v: | |||
7377 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. | |||
7378 | Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; | |||
7379 | if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; | |||
7380 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); | |||
7381 | case NEON::BI__builtin_neon_vmax_v: | |||
7382 | case NEON::BI__builtin_neon_vmaxq_v: | |||
7383 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. | |||
7384 | Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; | |||
7385 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; | |||
7386 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); | |||
7387 | case NEON::BI__builtin_neon_vmaxh_f16: { | |||
7388 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7389 | Int = Intrinsic::aarch64_neon_fmax; | |||
7390 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax"); | |||
7391 | } | |||
7392 | case NEON::BI__builtin_neon_vmin_v: | |||
7393 | case NEON::BI__builtin_neon_vminq_v: | |||
7394 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. | |||
7395 | Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; | |||
7396 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; | |||
7397 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); | |||
7398 | case NEON::BI__builtin_neon_vminh_f16: { | |||
7399 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7400 | Int = Intrinsic::aarch64_neon_fmin; | |||
7401 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin"); | |||
7402 | } | |||
7403 | case NEON::BI__builtin_neon_vabd_v: | |||
7404 | case NEON::BI__builtin_neon_vabdq_v: | |||
7405 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. | |||
7406 | Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; | |||
7407 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; | |||
7408 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); | |||
7409 | case NEON::BI__builtin_neon_vpadal_v: | |||
7410 | case NEON::BI__builtin_neon_vpadalq_v: { | |||
7411 | unsigned ArgElts = VTy->getNumElements(); | |||
7412 | llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); | |||
7413 | unsigned BitWidth = EltTy->getBitWidth(); | |||
7414 | llvm::Type *ArgTy = llvm::VectorType::get( | |||
7415 | llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts); | |||
7416 | llvm::Type* Tys[2] = { VTy, ArgTy }; | |||
7417 | Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; | |||
7418 | SmallVector<llvm::Value*, 1> TmpOps; | |||
7419 | TmpOps.push_back(Ops[1]); | |||
7420 | Function *F = CGM.getIntrinsic(Int, Tys); | |||
7421 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal"); | |||
7422 | llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); | |||
7423 | return Builder.CreateAdd(tmp, addend); | |||
7424 | } | |||
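// Note on the case above: vpadal has no single intrinsic here; the pairwise
// add-long is emitted as [s|u]addlp on the second operand, and the accumulator
// is then combined with an ordinary vector add.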
7425 | case NEON::BI__builtin_neon_vpmin_v: | |||
7426 | case NEON::BI__builtin_neon_vpminq_v: | |||
7427 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. | |||
7428 | Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; | |||
7429 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; | |||
7430 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); | |||
7431 | case NEON::BI__builtin_neon_vpmax_v: | |||
7432 | case NEON::BI__builtin_neon_vpmaxq_v: | |||
7433 | // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. | |||
7434 | Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; | |||
7435 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; | |||
7436 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); | |||
7437 | case NEON::BI__builtin_neon_vminnm_v: | |||
7438 | case NEON::BI__builtin_neon_vminnmq_v: | |||
7439 | Int = Intrinsic::aarch64_neon_fminnm; | |||
7440 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); | |||
7441 | case NEON::BI__builtin_neon_vminnmh_f16: | |||
7442 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7443 | Int = Intrinsic::aarch64_neon_fminnm; | |||
7444 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm"); | |||
7445 | case NEON::BI__builtin_neon_vmaxnm_v: | |||
7446 | case NEON::BI__builtin_neon_vmaxnmq_v: | |||
7447 | Int = Intrinsic::aarch64_neon_fmaxnm; | |||
7448 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); | |||
7449 | case NEON::BI__builtin_neon_vmaxnmh_f16: | |||
7450 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7451 | Int = Intrinsic::aarch64_neon_fmaxnm; | |||
7452 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm"); | |||
7453 | case NEON::BI__builtin_neon_vrecpss_f32: { | |||
7454 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7455 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), | |||
7456 | Ops, "vrecps"); | |||
7457 | } | |||
7458 | case NEON::BI__builtin_neon_vrecpsd_f64: | |||
7459 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7460 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), | |||
7461 | Ops, "vrecps"); | |||
7462 | case NEON::BI__builtin_neon_vrecpsh_f16: | |||
7463 | Ops.push_back(EmitScalarExpr(E->getArg(1))); | |||
7464 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), | |||
7465 | Ops, "vrecps"); | |||
7466 | case NEON::BI__builtin_neon_vqshrun_n_v: | |||
7467 | Int = Intrinsic::aarch64_neon_sqshrun; | |||
7468 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); | |||
7469 | case NEON::BI__builtin_neon_vqrshrun_n_v: | |||
7470 | Int = Intrinsic::aarch64_neon_sqrshrun; | |||
7471 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); | |||
7472 | case NEON::BI__builtin_neon_vqshrn_n_v: | |||
7473 | Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; | |||
7474 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); | |||
7475 | case NEON::BI__builtin_neon_vrshrn_n_v: | |||
7476 | Int = Intrinsic::aarch64_neon_rshrn; | |||
7477 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); | |||
7478 | case NEON::BI__builtin_neon_vqrshrn_n_v: | |||
7479 | Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; | |||
7480 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); | |||
7481 | case NEON::BI__builtin_neon_vrndah_f16: { | |||
7482 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7483 | Int = Intrinsic::round; | |||
7484 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda"); | |||
7485 | } | |||
7486 | case NEON::BI__builtin_neon_vrnda_v: | |||
7487 | case NEON::BI__builtin_neon_vrndaq_v: { | |||
7488 | Int = Intrinsic::round; | |||
7489 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda"); | |||
7490 | } | |||
7491 | case NEON::BI__builtin_neon_vrndih_f16: { | |||
7492 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7493 | Int = Intrinsic::nearbyint; | |||
7494 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi"); | |||
7495 | } | |||
7496 | case NEON::BI__builtin_neon_vrndmh_f16: { | |||
7497 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7498 | Int = Intrinsic::floor; | |||
7499 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm"); | |||
7500 | } | |||
7501 | case NEON::BI__builtin_neon_vrndm_v: | |||
7502 | case NEON::BI__builtin_neon_vrndmq_v: { | |||
7503 | Int = Intrinsic::floor; | |||
7504 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm"); | |||
7505 | } | |||
7506 | case NEON::BI__builtin_neon_vrndnh_f16: { | |||
7507 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7508 | Int = Intrinsic::aarch64_neon_frintn; | |||
7509 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn"); | |||
7510 | } | |||
7511 | case NEON::BI__builtin_neon_vrndn_v: | |||
7512 | case NEON::BI__builtin_neon_vrndnq_v: { | |||
7513 | Int = Intrinsic::aarch64_neon_frintn; | |||
7514 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn"); | |||
7515 | } | |||
7516 | case NEON::BI__builtin_neon_vrndns_f32: { | |||
7517 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7518 | Int = Intrinsic::aarch64_neon_frintn; | |||
7519 | return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn"); | |||
7520 | } | |||
7521 | case NEON::BI__builtin_neon_vrndph_f16: { | |||
7522 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7523 | Int = Intrinsic::ceil; | |||
7524 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp"); | |||
7525 | } | |||
7526 | case NEON::BI__builtin_neon_vrndp_v: | |||
7527 | case NEON::BI__builtin_neon_vrndpq_v: { | |||
7528 | Int = Intrinsic::ceil; | |||
7529 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp"); | |||
7530 | } | |||
7531 | case NEON::BI__builtin_neon_vrndxh_f16: { | |||
7532 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7533 | Int = Intrinsic::rint; | |||
7534 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx"); | |||
7535 | } | |||
7536 | case NEON::BI__builtin_neon_vrndx_v: | |||
7537 | case NEON::BI__builtin_neon_vrndxq_v: { | |||
7538 | Int = Intrinsic::rint; | |||
7539 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx"); | |||
7540 | } | |||
7541 | case NEON::BI__builtin_neon_vrndh_f16: { | |||
7542 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7543 | Int = Intrinsic::trunc; | |||
7544 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz"); | |||
7545 | } | |||
7546 | case NEON::BI__builtin_neon_vrnd_v: | |||
7547 | case NEON::BI__builtin_neon_vrndq_v: { | |||
7548 | Int = Intrinsic::trunc; | |||
7549 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz"); | |||
7550 | } | |||
7551 | case NEON::BI__builtin_neon_vcvt_f64_v: | |||
7552 | case NEON::BI__builtin_neon_vcvtq_f64_v: | |||
7553 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
7554 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad)); | |||
7555 | return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") | |||
7556 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); | |||
7557 | case NEON::BI__builtin_neon_vcvt_f64_f32: { | |||
7558 | assert(Type.getEltType() == NeonTypeFlags::Float64 && quad && | |||
7559 | "unexpected vcvt_f64_f32 builtin"); | |||
7560 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false); | |||
7561 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); | |||
7562 | ||||
7563 | return Builder.CreateFPExt(Ops[0], Ty, "vcvt"); | |||
7564 | } | |||
7565 | case NEON::BI__builtin_neon_vcvt_f32_f64: { | |||
7566 | assert(Type.getEltType() == NeonTypeFlags::Float32 && | |||
7567 | "unexpected vcvt_f32_f64 builtin"); | |||
7568 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true); | |||
7569 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); | |||
7570 | ||||
7571 | return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt"); | |||
7572 | } | |||
7573 | case NEON::BI__builtin_neon_vcvt_s32_v: | |||
7574 | case NEON::BI__builtin_neon_vcvt_u32_v: | |||
7575 | case NEON::BI__builtin_neon_vcvt_s64_v: | |||
7576 | case NEON::BI__builtin_neon_vcvt_u64_v: | |||
7577 | case NEON::BI__builtin_neon_vcvt_s16_v: | |||
7578 | case NEON::BI__builtin_neon_vcvt_u16_v: | |||
7579 | case NEON::BI__builtin_neon_vcvtq_s32_v: | |||
7580 | case NEON::BI__builtin_neon_vcvtq_u32_v: | |||
7581 | case NEON::BI__builtin_neon_vcvtq_s64_v: | |||
7582 | case NEON::BI__builtin_neon_vcvtq_u64_v: | |||
7583 | case NEON::BI__builtin_neon_vcvtq_s16_v: | |||
7584 | case NEON::BI__builtin_neon_vcvtq_u16_v: { | |||
7585 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); | |||
7586 | if (usgn) | |||
7587 | return Builder.CreateFPToUI(Ops[0], Ty); | |||
7588 | return Builder.CreateFPToSI(Ops[0], Ty); | |||
7589 | } | |||
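// Note on the cases above: the plain (truncating) float-to-integer conversions
// map directly onto fptoui/fptosi; only the rounded variants that follow
// (vcvta/vcvtm/vcvtn/vcvtp) need dedicated AArch64 intrinsics.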
7590 | case NEON::BI__builtin_neon_vcvta_s16_v: | |||
7591 | case NEON::BI__builtin_neon_vcvta_u16_v: | |||
7592 | case NEON::BI__builtin_neon_vcvta_s32_v: | |||
7593 | case NEON::BI__builtin_neon_vcvtaq_s16_v: | |||
7594 | case NEON::BI__builtin_neon_vcvtaq_s32_v: | |||
7595 | case NEON::BI__builtin_neon_vcvta_u32_v: | |||
7596 | case NEON::BI__builtin_neon_vcvtaq_u16_v: | |||
7597 | case NEON::BI__builtin_neon_vcvtaq_u32_v: | |||
7598 | case NEON::BI__builtin_neon_vcvta_s64_v: | |||
7599 | case NEON::BI__builtin_neon_vcvtaq_s64_v: | |||
7600 | case NEON::BI__builtin_neon_vcvta_u64_v: | |||
7601 | case NEON::BI__builtin_neon_vcvtaq_u64_v: { | |||
7602 | Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; | |||
7603 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; | |||
7604 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta"); | |||
7605 | } | |||
7606 | case NEON::BI__builtin_neon_vcvtm_s16_v: | |||
7607 | case NEON::BI__builtin_neon_vcvtm_s32_v: | |||
7608 | case NEON::BI__builtin_neon_vcvtmq_s16_v: | |||
7609 | case NEON::BI__builtin_neon_vcvtmq_s32_v: | |||
7610 | case NEON::BI__builtin_neon_vcvtm_u16_v: | |||
7611 | case NEON::BI__builtin_neon_vcvtm_u32_v: | |||
7612 | case NEON::BI__builtin_neon_vcvtmq_u16_v: | |||
7613 | case NEON::BI__builtin_neon_vcvtmq_u32_v: | |||
7614 | case NEON::BI__builtin_neon_vcvtm_s64_v: | |||
7615 | case NEON::BI__builtin_neon_vcvtmq_s64_v: | |||
7616 | case NEON::BI__builtin_neon_vcvtm_u64_v: | |||
7617 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { | |||
7618 | Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; | |||
7619 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; | |||
7620 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm"); | |||
7621 | } | |||
7622 | case NEON::BI__builtin_neon_vcvtn_s16_v: | |||
7623 | case NEON::BI__builtin_neon_vcvtn_s32_v: | |||
7624 | case NEON::BI__builtin_neon_vcvtnq_s16_v: | |||
7625 | case NEON::BI__builtin_neon_vcvtnq_s32_v: | |||
7626 | case NEON::BI__builtin_neon_vcvtn_u16_v: | |||
7627 | case NEON::BI__builtin_neon_vcvtn_u32_v: | |||
7628 | case NEON::BI__builtin_neon_vcvtnq_u16_v: | |||
7629 | case NEON::BI__builtin_neon_vcvtnq_u32_v: | |||
7630 | case NEON::BI__builtin_neon_vcvtn_s64_v: | |||
7631 | case NEON::BI__builtin_neon_vcvtnq_s64_v: | |||
7632 | case NEON::BI__builtin_neon_vcvtn_u64_v: | |||
7633 | case NEON::BI__builtin_neon_vcvtnq_u64_v: { | |||
7634 | Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; | |||
7635 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; | |||
7636 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn"); | |||
7637 | } | |||
7638 | case NEON::BI__builtin_neon_vcvtp_s16_v: | |||
7639 | case NEON::BI__builtin_neon_vcvtp_s32_v: | |||
7640 | case NEON::BI__builtin_neon_vcvtpq_s16_v: | |||
7641 | case NEON::BI__builtin_neon_vcvtpq_s32_v: | |||
7642 | case NEON::BI__builtin_neon_vcvtp_u16_v: | |||
7643 | case NEON::BI__builtin_neon_vcvtp_u32_v: | |||
7644 | case NEON::BI__builtin_neon_vcvtpq_u16_v: | |||
7645 | case NEON::BI__builtin_neon_vcvtpq_u32_v: | |||
7646 | case NEON::BI__builtin_neon_vcvtp_s64_v: | |||
7647 | case NEON::BI__builtin_neon_vcvtpq_s64_v: | |||
7648 | case NEON::BI__builtin_neon_vcvtp_u64_v: | |||
7649 | case NEON::BI__builtin_neon_vcvtpq_u64_v: { | |||
7650 | Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; | |||
7651 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; | |||
7652 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp"); | |||
7653 | } | |||
7654 | case NEON::BI__builtin_neon_vmulx_v: | |||
7655 | case NEON::BI__builtin_neon_vmulxq_v: { | |||
7656 | Int = Intrinsic::aarch64_neon_fmulx; | |||
7657 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); | |||
7658 | } | |||
7659 | case NEON::BI__builtin_neon_vmulxh_lane_f16: | |||
7660 | case NEON::BI__builtin_neon_vmulxh_laneq_f16: { | |||
7661 | // vmulx_lane should be mapped to Neon scalar mulx after | |||
7662 | // extracting the scalar element | |||
7663 | Ops.push_back(EmitScalarExpr(E->getArg(2))); | |||
7664 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); | |||
7665 | Ops.pop_back(); | |||
7666 | Int = Intrinsic::aarch64_neon_fmulx; | |||
7667 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx"); | |||
7668 | } | |||
7669 | case NEON::BI__builtin_neon_vmul_lane_v: | |||
7670 | case NEON::BI__builtin_neon_vmul_laneq_v: { | |||
7671 | // v1f64 vmul_lane should be mapped to Neon scalar mul lane | |||
7672 | bool Quad = false; | |||
7673 | if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v) | |||
7674 | Quad = true; | |||
7675 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); | |||
7676 | llvm::Type *VTy = GetNeonType(this, | |||
7677 | NeonTypeFlags(NeonTypeFlags::Float64, false, Quad)); | |||
7678 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); | |||
7679 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); | |||
7680 | Value *Result = Builder.CreateFMul(Ops[0], Ops[1]); | |||
7681 | return Builder.CreateBitCast(Result, Ty); | |||
7682 | } | |||
7683 | case NEON::BI__builtin_neon_vnegd_s64: | |||
7684 | return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); | |||
7685 | case NEON::BI__builtin_neon_vnegh_f16: | |||
7686 | return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh"); | |||
7687 | case NEON::BI__builtin_neon_vpmaxnm_v: | |||
7688 | case NEON::BI__builtin_neon_vpmaxnmq_v: { | |||
7689 | Int = Intrinsic::aarch64_neon_fmaxnmp; | |||
7690 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); | |||
7691 | } | |||
7692 | case NEON::BI__builtin_neon_vpminnm_v: | |||
7693 | case NEON::BI__builtin_neon_vpminnmq_v: { | |||
7694 | Int = Intrinsic::aarch64_neon_fminnmp; | |||
7695 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); | |||
7696 | } | |||
7697 | case NEON::BI__builtin_neon_vsqrth_f16: { | |||
7698 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7699 | Int = Intrinsic::sqrt; | |||
7700 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt"); | |||
7701 | } | |||
7702 | case NEON::BI__builtin_neon_vsqrt_v: | |||
7703 | case NEON::BI__builtin_neon_vsqrtq_v: { | |||
7704 | Int = Intrinsic::sqrt; | |||
7705 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
7706 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt"); | |||
7707 | } | |||
7708 | case NEON::BI__builtin_neon_vrbit_v: | |||
7709 | case NEON::BI__builtin_neon_vrbitq_v: { | |||
7710 | Int = Intrinsic::aarch64_neon_rbit; | |||
7711 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit"); | |||
7712 | } | |||
7713 | case NEON::BI__builtin_neon_vaddv_u8: | |||
7714 | // FIXME: These are handled by the AArch64 scalar code. | |||
7715 | usgn = true; | |||
7716 | LLVM_FALLTHROUGH; | |||
7717 | case NEON::BI__builtin_neon_vaddv_s8: { | |||
7718 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; | |||
7719 | Ty = Int32Ty; | |||
7720 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
7721 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7722 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7723 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); | |||
7724 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7725 | } | |||
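// Note on the across-vector reductions that follow: the [su]addv/[su]maxv/
// [su]minv intrinsics are declared here with an i32 result, so the i8 and i16
// variants truncate that result back to the element width.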
7726 | case NEON::BI__builtin_neon_vaddv_u16: | |||
7727 | usgn = true; | |||
7728 | LLVM_FALLTHROUGH; | |||
7729 | case NEON::BI__builtin_neon_vaddv_s16: { | |||
7730 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; | |||
7731 | Ty = Int32Ty; | |||
7732 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
7733 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7734 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7735 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); | |||
7736 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7737 | } | |||
7738 | case NEON::BI__builtin_neon_vaddvq_u8: | |||
7739 | usgn = true; | |||
7740 | LLVM_FALLTHROUGH; | |||
7741 | case NEON::BI__builtin_neon_vaddvq_s8: { | |||
7742 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; | |||
7743 | Ty = Int32Ty; | |||
7744 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
7745 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7746 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7747 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); | |||
7748 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7749 | } | |||
7750 | case NEON::BI__builtin_neon_vaddvq_u16: | |||
7751 | usgn = true; | |||
7752 | LLVM_FALLTHROUGH; | |||
7753 | case NEON::BI__builtin_neon_vaddvq_s16: { | |||
7754 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; | |||
7755 | Ty = Int32Ty; | |||
7756 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
7757 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7758 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7759 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); | |||
7760 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7761 | } | |||
7762 | case NEON::BI__builtin_neon_vmaxv_u8: { | |||
7763 | Int = Intrinsic::aarch64_neon_umaxv; | |||
7764 | Ty = Int32Ty; | |||
7765 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
7766 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7767 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7768 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7769 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7770 | } | |||
7771 | case NEON::BI__builtin_neon_vmaxv_u16: { | |||
7772 | Int = Intrinsic::aarch64_neon_umaxv; | |||
7773 | Ty = Int32Ty; | |||
7774 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
7775 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7776 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7777 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7778 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7779 | } | |||
7780 | case NEON::BI__builtin_neon_vmaxvq_u8: { | |||
7781 | Int = Intrinsic::aarch64_neon_umaxv; | |||
7782 | Ty = Int32Ty; | |||
7783 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
7784 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7785 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7786 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7787 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7788 | } | |||
7789 | case NEON::BI__builtin_neon_vmaxvq_u16: { | |||
7790 | Int = Intrinsic::aarch64_neon_umaxv; | |||
7791 | Ty = Int32Ty; | |||
7792 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
7793 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7794 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7795 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7796 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7797 | } | |||
7798 | case NEON::BI__builtin_neon_vmaxv_s8: { | |||
7799 | Int = Intrinsic::aarch64_neon_smaxv; | |||
7800 | Ty = Int32Ty; | |||
7801 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
7802 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7803 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7804 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7805 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7806 | } | |||
7807 | case NEON::BI__builtin_neon_vmaxv_s16: { | |||
7808 | Int = Intrinsic::aarch64_neon_smaxv; | |||
7809 | Ty = Int32Ty; | |||
7810 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
7811 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7812 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7813 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7814 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7815 | } | |||
7816 | case NEON::BI__builtin_neon_vmaxvq_s8: { | |||
7817 | Int = Intrinsic::aarch64_neon_smaxv; | |||
7818 | Ty = Int32Ty; | |||
7819 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
7820 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7821 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7822 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7823 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7824 | } | |||
7825 | case NEON::BI__builtin_neon_vmaxvq_s16: { | |||
7826 | Int = Intrinsic::aarch64_neon_smaxv; | |||
7827 | Ty = Int32Ty; | |||
7828 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
7829 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7830 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7831 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7832 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7833 | } | |||
7834 | case NEON::BI__builtin_neon_vmaxv_f16: { | |||
7835 | Int = Intrinsic::aarch64_neon_fmaxv; | |||
7836 | Ty = HalfTy; | |||
7837 | VTy = llvm::VectorType::get(HalfTy, 4); | |||
7838 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7839 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7840 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7841 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7842 | } | |||
7843 | case NEON::BI__builtin_neon_vmaxvq_f16: { | |||
7844 | Int = Intrinsic::aarch64_neon_fmaxv; | |||
7845 | Ty = HalfTy; | |||
7846 | VTy = llvm::VectorType::get(HalfTy, 8); | |||
7847 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7848 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7849 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); | |||
7850 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7851 | } | |||
7852 | case NEON::BI__builtin_neon_vminv_u8: { | |||
7853 | Int = Intrinsic::aarch64_neon_uminv; | |||
7854 | Ty = Int32Ty; | |||
7855 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
7856 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7857 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7858 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7859 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7860 | } | |||
7861 | case NEON::BI__builtin_neon_vminv_u16: { | |||
7862 | Int = Intrinsic::aarch64_neon_uminv; | |||
7863 | Ty = Int32Ty; | |||
7864 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
7865 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7866 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7867 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7868 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7869 | } | |||
7870 | case NEON::BI__builtin_neon_vminvq_u8: { | |||
7871 | Int = Intrinsic::aarch64_neon_uminv; | |||
7872 | Ty = Int32Ty; | |||
7873 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
7874 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7875 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7876 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7877 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7878 | } | |||
7879 | case NEON::BI__builtin_neon_vminvq_u16: { | |||
7880 | Int = Intrinsic::aarch64_neon_uminv; | |||
7881 | Ty = Int32Ty; | |||
7882 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
7883 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7884 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7885 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7886 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7887 | } | |||
7888 | case NEON::BI__builtin_neon_vminv_s8: { | |||
7889 | Int = Intrinsic::aarch64_neon_sminv; | |||
7890 | Ty = Int32Ty; | |||
7891 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
7892 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7893 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7894 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7895 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7896 | } | |||
7897 | case NEON::BI__builtin_neon_vminv_s16: { | |||
7898 | Int = Intrinsic::aarch64_neon_sminv; | |||
7899 | Ty = Int32Ty; | |||
7900 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
7901 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7902 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7903 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7904 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7905 | } | |||
7906 | case NEON::BI__builtin_neon_vminvq_s8: { | |||
7907 | Int = Intrinsic::aarch64_neon_sminv; | |||
7908 | Ty = Int32Ty; | |||
7909 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
7910 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7911 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7912 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7913 | return Builder.CreateTrunc(Ops[0], Int8Ty); | |||
7914 | } | |||
7915 | case NEON::BI__builtin_neon_vminvq_s16: { | |||
7916 | Int = Intrinsic::aarch64_neon_sminv; | |||
7917 | Ty = Int32Ty; | |||
7918 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
7919 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7920 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7921 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7922 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7923 | } | |||
7924 | case NEON::BI__builtin_neon_vminv_f16: { | |||
7925 | Int = Intrinsic::aarch64_neon_fminv; | |||
7926 | Ty = HalfTy; | |||
7927 | VTy = llvm::VectorType::get(HalfTy, 4); | |||
7928 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7929 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7930 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7931 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7932 | } | |||
7933 | case NEON::BI__builtin_neon_vminvq_f16: { | |||
7934 | Int = Intrinsic::aarch64_neon_fminv; | |||
7935 | Ty = HalfTy; | |||
7936 | VTy = llvm::VectorType::get(HalfTy, 8); | |||
7937 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7938 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7939 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); | |||
7940 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7941 | } | |||
7942 | case NEON::BI__builtin_neon_vmaxnmv_f16: { | |||
7943 | Int = Intrinsic::aarch64_neon_fmaxnmv; | |||
7944 | Ty = HalfTy; | |||
7945 | VTy = llvm::VectorType::get(HalfTy, 4); | |||
7946 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7947 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7948 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); | |||
7949 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7950 | } | |||
7951 | case NEON::BI__builtin_neon_vmaxnmvq_f16: { | |||
7952 | Int = Intrinsic::aarch64_neon_fmaxnmv; | |||
7953 | Ty = HalfTy; | |||
7954 | VTy = llvm::VectorType::get(HalfTy, 8); | |||
7955 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7956 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7957 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); | |||
7958 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7959 | } | |||
7960 | case NEON::BI__builtin_neon_vminnmv_f16: { | |||
7961 | Int = Intrinsic::aarch64_neon_fminnmv; | |||
7962 | Ty = HalfTy; | |||
7963 | VTy = llvm::VectorType::get(HalfTy, 4); | |||
7964 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7965 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7966 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); | |||
7967 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7968 | } | |||
7969 | case NEON::BI__builtin_neon_vminnmvq_f16: { | |||
7970 | Int = Intrinsic::aarch64_neon_fminnmv; | |||
7971 | Ty = HalfTy; | |||
7972 | VTy = llvm::VectorType::get(HalfTy, 8); | |||
7973 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7974 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7975 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); | |||
7976 | return Builder.CreateTrunc(Ops[0], HalfTy); | |||
7977 | } | |||
7978 | case NEON::BI__builtin_neon_vmul_n_f64: { | |||
7979 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); | |||
7980 | Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); | |||
7981 | return Builder.CreateFMul(Ops[0], RHS); | |||
7982 | } | |||
7983 | case NEON::BI__builtin_neon_vaddlv_u8: { | |||
7984 | Int = Intrinsic::aarch64_neon_uaddlv; | |||
7985 | Ty = Int32Ty; | |||
7986 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
7987 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7988 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7989 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
7990 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
7991 | } | |||
7992 | case NEON::BI__builtin_neon_vaddlv_u16: { | |||
7993 | Int = Intrinsic::aarch64_neon_uaddlv; | |||
7994 | Ty = Int32Ty; | |||
7995 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
7996 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
7997 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
7998 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
7999 | } | |||
8000 | case NEON::BI__builtin_neon_vaddlvq_u8: { | |||
8001 | Int = Intrinsic::aarch64_neon_uaddlv; | |||
8002 | Ty = Int32Ty; | |||
8003 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
8004 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
8005 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
8006 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
8007 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
8008 | } | |||
8009 | case NEON::BI__builtin_neon_vaddlvq_u16: { | |||
8010 | Int = Intrinsic::aarch64_neon_uaddlv; | |||
8011 | Ty = Int32Ty; | |||
8012 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
8013 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
8014 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
8015 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
8016 | } | |||
8017 | case NEON::BI__builtin_neon_vaddlv_s8: { | |||
8018 | Int = Intrinsic::aarch64_neon_saddlv; | |||
8019 | Ty = Int32Ty; | |||
8020 | VTy = llvm::VectorType::get(Int8Ty, 8); | |||
8021 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
8022 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
8023 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
8024 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
8025 | } | |||
8026 | case NEON::BI__builtin_neon_vaddlv_s16: { | |||
8027 | Int = Intrinsic::aarch64_neon_saddlv; | |||
8028 | Ty = Int32Ty; | |||
8029 | VTy = llvm::VectorType::get(Int16Ty, 4); | |||
8030 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
8031 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
8032 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
8033 | } | |||
8034 | case NEON::BI__builtin_neon_vaddlvq_s8: { | |||
8035 | Int = Intrinsic::aarch64_neon_saddlv; | |||
8036 | Ty = Int32Ty; | |||
8037 | VTy = llvm::VectorType::get(Int8Ty, 16); | |||
8038 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
8039 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
8040 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
8041 | return Builder.CreateTrunc(Ops[0], Int16Ty); | |||
8042 | } | |||
8043 | case NEON::BI__builtin_neon_vaddlvq_s16: { | |||
8044 | Int = Intrinsic::aarch64_neon_saddlv; | |||
8045 | Ty = Int32Ty; | |||
8046 | VTy = llvm::VectorType::get(Int16Ty, 8); | |||
8047 | llvm::Type *Tys[2] = { Ty, VTy }; | |||
8048 | Ops.push_back(EmitScalarExpr(E->getArg(0))); | |||
8049 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); | |||
8050 | } | |||
8051 | case NEON::BI__builtin_neon_vsri_n_v: | |||
8052 | case NEON::BI__builtin_neon_vsriq_n_v: { | |||
8053 | Int = Intrinsic::aarch64_neon_vsri; | |||
8054 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); | |||
8055 | return EmitNeonCall(Intrin, Ops, "vsri_n"); | |||
8056 | } | |||
8057 | case NEON::BI__builtin_neon_vsli_n_v: | |||
8058 | case NEON::BI__builtin_neon_vsliq_n_v: { | |||
8059 | Int = Intrinsic::aarch64_neon_vsli; | |||
8060 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); | |||
8061 | return EmitNeonCall(Intrin, Ops, "vsli_n"); | |||
8062 | } | |||
8063 | case NEON::BI__builtin_neon_vsra_n_v: | |||
8064 | case NEON::BI__builtin_neon_vsraq_n_v: | |||
8065 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
8066 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); | |||
8067 | return Builder.CreateAdd(Ops[0], Ops[1]); | |||
8068 | case NEON::BI__builtin_neon_vrsra_n_v: | |||
8069 | case NEON::BI__builtin_neon_vrsraq_n_v: { | |||
8070 | Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; | |||
8071 | SmallVector<llvm::Value*,2> TmpOps; | |||
8072 | TmpOps.push_back(Ops[1]); | |||
8073 | TmpOps.push_back(Ops[2]); | |||
8074 | Function* F = CGM.getIntrinsic(Int, Ty); | |||
8075 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true); | |||
8076 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); | |||
8077 | return Builder.CreateAdd(Ops[0], tmp); | |||
8078 | } | |||
8079 | case NEON::BI__builtin_neon_vld1_v: | |||
8080 | case NEON::BI__builtin_neon_vld1q_v: { | |||
8081 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); | |||
8082 | auto Alignment = CharUnits::fromQuantity( | |||
8083 | BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16); | |||
8084 | return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment); | |||
8085 | } | |||
8086 | case NEON::BI__builtin_neon_vst1_v: | |||
8087 | case NEON::BI__builtin_neon_vst1q_v: | |||
8088 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); | |||
8089 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); | |||
8090 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8091 | case NEON::BI__builtin_neon_vld1_lane_v: | |||
8092 | case NEON::BI__builtin_neon_vld1q_lane_v: { | |||
8093 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8094 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); | |||
8095 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
8096 | auto Alignment = CharUnits::fromQuantity( | |||
8097 | BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16); | |||
8098 | Ops[0] = | |||
8099 | Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment); | |||
8100 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane"); | |||
8101 | } | |||
8102 | case NEON::BI__builtin_neon_vld1_dup_v: | |||
8103 | case NEON::BI__builtin_neon_vld1q_dup_v: { | |||
8104 | Value *V = UndefValue::get(Ty); | |||
8105 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); | |||
8106 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
8107 | auto Alignment = CharUnits::fromQuantity( | |||
8108 | BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16); | |||
8109 | Ops[0] = | |||
8110 | Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment); | |||
8111 | llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); | |||
8112 | Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); | |||
8113 | return EmitNeonSplat(Ops[0], CI); | |||
8114 | } | |||
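// Note on the case above: vld1_dup is an aligned scalar load of the first
// element, inserted into lane 0 of an undef vector and then splatted across
// all lanes via EmitNeonSplat.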
8115 | case NEON::BI__builtin_neon_vst1_lane_v: | |||
8116 | case NEON::BI__builtin_neon_vst1q_lane_v: | |||
8117 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8118 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); | |||
8119 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
8120 | return Builder.CreateDefaultAlignedStore(Ops[1], | |||
8121 | Builder.CreateBitCast(Ops[0], Ty)); | |||
8122 | case NEON::BI__builtin_neon_vld2_v: | |||
8123 | case NEON::BI__builtin_neon_vld2q_v: { | |||
8124 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); | |||
8125 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
8126 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
8127 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); | |||
8128 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); | |||
8129 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
8130 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8131 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8132 | } | |||
8133 | case NEON::BI__builtin_neon_vld3_v: | |||
8134 | case NEON::BI__builtin_neon_vld3q_v: { | |||
8135 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); | |||
8136 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
8137 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
8138 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); | |||
8139 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); | |||
8140 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
8141 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8142 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8143 | } | |||
8144 | case NEON::BI__builtin_neon_vld4_v: | |||
8145 | case NEON::BI__builtin_neon_vld4q_v: { | |||
8146 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); | |||
8147 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
8148 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
8149 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); | |||
8150 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); | |||
8151 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
8152 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8153 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8154 | } | |||
8155 | case NEON::BI__builtin_neon_vld2_dup_v: | |||
8156 | case NEON::BI__builtin_neon_vld2q_dup_v: { | |||
8157 | llvm::Type *PTy = | |||
8158 | llvm::PointerType::getUnqual(VTy->getElementType()); | |||
8159 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
8160 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
8161 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); | |||
8162 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); | |||
8163 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
8164 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8165 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8166 | } | |||
8167 | case NEON::BI__builtin_neon_vld3_dup_v: | |||
8168 | case NEON::BI__builtin_neon_vld3q_dup_v: { | |||
8169 | llvm::Type *PTy = | |||
8170 | llvm::PointerType::getUnqual(VTy->getElementType()); | |||
8171 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
8172 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
8173 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); | |||
8174 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); | |||
8175 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
8176 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8177 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8178 | } | |||
8179 | case NEON::BI__builtin_neon_vld4_dup_v: | |||
8180 | case NEON::BI__builtin_neon_vld4q_dup_v: { | |||
8181 | llvm::Type *PTy = | |||
8182 | llvm::PointerType::getUnqual(VTy->getElementType()); | |||
8183 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); | |||
8184 | llvm::Type *Tys[2] = { VTy, PTy }; | |||
8185 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); | |||
8186 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); | |||
8187 | Ops[0] = Builder.CreateBitCast(Ops[0], | |||
8188 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8189 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8190 | } | |||
8191 | case NEON::BI__builtin_neon_vld2_lane_v: | |||
8192 | case NEON::BI__builtin_neon_vld2q_lane_v: { | |||
8193 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; | |||
8194 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); | |||
8195 | Ops.push_back(Ops[1]); | |||
8196 | Ops.erase(Ops.begin()+1); | |||
8197 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8198 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
8199 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); | |||
8200 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane"); | |||
8201 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
8202 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
8203 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8204 | } | |||
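// Note on the case above: the push_back/erase pair rotates the source pointer
// to the end of Ops so that the slice passed to the call matches the
// ld2lane signature (existing vectors, i64 lane index, pointer); the
// ld3lane/ld4lane cases below do the same.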
8205 | case NEON::BI__builtin_neon_vld3_lane_v: | |||
8206 | case NEON::BI__builtin_neon_vld3q_lane_v: { | |||
8207 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; | |||
8208 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); | |||
8209 | Ops.push_back(Ops[1]); | |||
8210 | Ops.erase(Ops.begin()+1); | |||
8211 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8212 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
8213 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); | |||
8214 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); | |||
8215 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane"); | |||
8216 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
8217 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
8218 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8219 | } | |||
8220 | case NEON::BI__builtin_neon_vld4_lane_v: | |||
8221 | case NEON::BI__builtin_neon_vld4q_lane_v: { | |||
8222 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; | |||
8223 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); | |||
8224 | Ops.push_back(Ops[1]); | |||
8225 | Ops.erase(Ops.begin()+1); | |||
8226 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8227 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
8228 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); | |||
8229 | Ops[4] = Builder.CreateBitCast(Ops[4], Ty); | |||
8230 | Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty); | |||
8231 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane"); | |||
8232 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); | |||
8233 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); | |||
8234 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
8235 | } | |||
8236 | case NEON::BI__builtin_neon_vst2_v: | |||
8237 | case NEON::BI__builtin_neon_vst2q_v: { | |||
8238 | Ops.push_back(Ops[0]); | |||
8239 | Ops.erase(Ops.begin()); | |||
8240 | llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; | |||
8241 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), | |||
8242 | Ops, ""); | |||
8243 | } | |||
8244 | case NEON::BI__builtin_neon_vst2_lane_v: | |||
8245 | case NEON::BI__builtin_neon_vst2q_lane_v: { | |||
8246 | Ops.push_back(Ops[0]); | |||
8247 | Ops.erase(Ops.begin()); | |||
8248 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); | |||
8249 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; | |||
8250 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), | |||
8251 | Ops, ""); | |||
8252 | } | |||
8253 | case NEON::BI__builtin_neon_vst3_v: | |||
8254 | case NEON::BI__builtin_neon_vst3q_v: { | |||
8255 | Ops.push_back(Ops[0]); | |||
8256 | Ops.erase(Ops.begin()); | |||
8257 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; | |||
8258 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), | |||
8259 | Ops, ""); | |||
8260 | } | |||
8261 | case NEON::BI__builtin_neon_vst3_lane_v: | |||
8262 | case NEON::BI__builtin_neon_vst3q_lane_v: { | |||
8263 | Ops.push_back(Ops[0]); | |||
8264 | Ops.erase(Ops.begin()); | |||
8265 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); | |||
8266 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; | |||
8267 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), | |||
8268 | Ops, ""); | |||
8269 | } | |||
8270 | case NEON::BI__builtin_neon_vst4_v: | |||
8271 | case NEON::BI__builtin_neon_vst4q_v: { | |||
8272 | Ops.push_back(Ops[0]); | |||
8273 | Ops.erase(Ops.begin()); | |||
8274 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; | |||
8275 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), | |||
8276 | Ops, ""); | |||
8277 | } | |||
8278 | case NEON::BI__builtin_neon_vst4_lane_v: | |||
8279 | case NEON::BI__builtin_neon_vst4q_lane_v: { | |||
8280 | Ops.push_back(Ops[0]); | |||
8281 | Ops.erase(Ops.begin()); | |||
8282 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); | |||
8283 | llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; | |||
8284 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), | |||
8285 | Ops, ""); | |||
8286 | } | |||
8287 | case NEON::BI__builtin_neon_vtrn_v: | |||
8288 | case NEON::BI__builtin_neon_vtrnq_v: { | |||
8289 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); | |||
8290 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8291 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
8292 | Value *SV = nullptr; | |||
8293 | ||||
8294 | for (unsigned vi = 0; vi != 2; ++vi) { | |||
8295 | SmallVector<uint32_t, 16> Indices; | |||
8296 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { | |||
8297 | Indices.push_back(i+vi); | |||
8298 | Indices.push_back(i+e+vi); | |||
8299 | } | |||
8300 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); | |||
8301 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); | |||
8302 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); | |||
8303 | } | |||
8304 | return SV; | |||
8305 | } | |||
8306 | case NEON::BI__builtin_neon_vuzp_v: | |||
8307 | case NEON::BI__builtin_neon_vuzpq_v: { | |||
8308 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); | |||
8309 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8310 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
8311 | Value *SV = nullptr; | |||
8312 | ||||
8313 | for (unsigned vi = 0; vi != 2; ++vi) { | |||
8314 | SmallVector<uint32_t, 16> Indices; | |||
8315 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) | |||
8316 | Indices.push_back(2*i+vi); | |||
8317 | ||||
8318 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); | |||
8319 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); | |||
8320 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); | |||
8321 | } | |||
8322 | return SV; | |||
8323 | } | |||
8324 | case NEON::BI__builtin_neon_vzip_v: | |||
8325 | case NEON::BI__builtin_neon_vzipq_v: { | |||
8326 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); | |||
8327 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); | |||
8328 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); | |||
8329 | Value *SV = nullptr; | |||
8330 | ||||
8331 | for (unsigned vi = 0; vi != 2; ++vi) { | |||
8332 | SmallVector<uint32_t, 16> Indices; | |||
8333 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { | |||
8334 | Indices.push_back((i + vi*e) >> 1); | |||
8335 | Indices.push_back(((i + vi*e) >> 1)+e); | |||
8336 | } | |||
8337 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); | |||
8338 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); | |||
8339 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); | |||
8340 | } | |||
8341 | return SV; | |||
8342 | } | |||
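// Note on the vtrn/vuzp/vzip cases above: each builtin produces two result
// vectors; every loop iteration builds one of them with a shufflevector over
// both inputs and stores it through a GEP into the result pointer.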
8343 | case NEON::BI__builtin_neon_vqtbl1q_v: { | |||
8344 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), | |||
8345 | Ops, "vtbl1"); | |||
8346 | } | |||
8347 | case NEON::BI__builtin_neon_vqtbl2q_v: { | |||
8348 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), | |||
8349 | Ops, "vtbl2"); | |||
8350 | } | |||
8351 | case NEON::BI__builtin_neon_vqtbl3q_v: { | |||
8352 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), | |||
8353 | Ops, "vtbl3"); | |||
8354 | } | |||
8355 | case NEON::BI__builtin_neon_vqtbl4q_v: { | |||
8356 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), | |||
8357 | Ops, "vtbl4"); | |||
8358 | } | |||
8359 | case NEON::BI__builtin_neon_vqtbx1q_v: { | |||
8360 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), | |||
8361 | Ops, "vtbx1"); | |||
8362 | } | |||
8363 | case NEON::BI__builtin_neon_vqtbx2q_v: { | |||
8364 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), | |||
8365 | Ops, "vtbx2"); | |||
8366 | } | |||
8367 | case NEON::BI__builtin_neon_vqtbx3q_v: { | |||
8368 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), | |||
8369 | Ops, "vtbx3"); | |||
8370 | } | |||
8371 | case NEON::BI__builtin_neon_vqtbx4q_v: { | |||
8372 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), | |||
8373 | Ops, "vtbx4"); | |||
8374 | } | |||
8375 | case NEON::BI__builtin_neon_vsqadd_v: | |||
8376 | case NEON::BI__builtin_neon_vsqaddq_v: { | |||
8377 | Int = Intrinsic::aarch64_neon_usqadd; | |||
8378 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd"); | |||
8379 | } | |||
8380 | case NEON::BI__builtin_neon_vuqadd_v: | |||
8381 | case NEON::BI__builtin_neon_vuqaddq_v: { | |||
8382 | Int = Intrinsic::aarch64_neon_suqadd; | |||
8383 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd"); | |||
8384 | } | |||
8385 | case AArch64::BI__iso_volatile_load8: | |||
8386 | case AArch64::BI__iso_volatile_load16: | |||
8387 | case AArch64::BI__iso_volatile_load32: | |||
8388 | case AArch64::BI__iso_volatile_load64: | |||
8389 | return EmitISOVolatileLoad(E); | |||
8390 | case AArch64::BI__iso_volatile_store8: | |||
8391 | case AArch64::BI__iso_volatile_store16: | |||
8392 | case AArch64::BI__iso_volatile_store32: | |||
8393 | case AArch64::BI__iso_volatile_store64: | |||
8394 | return EmitISOVolatileStore(E); | |||
8395 | case AArch64::BI_BitScanForward: | |||
8396 | case AArch64::BI_BitScanForward64: | |||
8397 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E); | |||
8398 | case AArch64::BI_BitScanReverse: | |||
8399 | case AArch64::BI_BitScanReverse64: | |||
8400 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E); | |||
8401 | case AArch64::BI_InterlockedAnd64: | |||
8402 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E); | |||
8403 | case AArch64::BI_InterlockedExchange64: | |||
8404 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E); | |||
8405 | case AArch64::BI_InterlockedExchangeAdd64: | |||
8406 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E); | |||
8407 | case AArch64::BI_InterlockedExchangeSub64: | |||
8408 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E); | |||
8409 | case AArch64::BI_InterlockedOr64: | |||
8410 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E); | |||
8411 | case AArch64::BI_InterlockedXor64: | |||
8412 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E); | |||
8413 | case AArch64::BI_InterlockedDecrement64: | |||
8414 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E); | |||
8415 | case AArch64::BI_InterlockedIncrement64: | |||
8416 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E); | |||
8417 | } | |||
8418 | } | |||
8419 | ||||
8420 | llvm::Value *CodeGenFunction:: | |||
8421 | BuildVector(ArrayRef<llvm::Value*> Ops) { | |||
8422 | assert((Ops.size() & (Ops.size() - 1)) == 0 && | |||
8423 | "Not a power-of-two sized vector!"); | |||
8424 | bool AllConstants = true; | |||
8425 | for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) | |||
8426 | AllConstants &= isa<Constant>(Ops[i]); | |||
8427 | ||||
8428 | // If this is a constant vector, create a ConstantVector. | |||
8429 | if (AllConstants) { | |||
8430 | SmallVector<llvm::Constant*, 16> CstOps; | |||
8431 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
8432 | CstOps.push_back(cast<Constant>(Ops[i])); | |||
8433 | return llvm::ConstantVector::get(CstOps); | |||
8434 | } | |||
8435 | ||||
8436 | // Otherwise, insertelement the values to build the vector. | |||
8437 | Value *Result = | |||
8438 | llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size())); | |||
8439 | ||||
8440 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) | |||
8441 | Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i)); | |||
8442 | ||||
8443 | return Result; | |||
8444 | } | |||
8445 | ||||
8446 | // Convert the mask from an integer type to a vector of i1. | |||
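// For example, an i8 mask with value 0b00001011 bitcasts to
// <8 x i1> <1, 1, 0, 1, 0, 0, 0, 0> (bit i maps to lane i on little-endian
// x86), and a NumElts of 4 then keeps only lanes 0..3 of that vector.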
8447 | static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask, | |||
8448 | unsigned NumElts) { | |||
8449 | ||||
8450 | llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(), | |||
8451 | cast<IntegerType>(Mask->getType())->getBitWidth()); | |||
8452 | Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy); | |||
8453 | ||||
8454 | // If we have fewer than 8 elements, then the starting mask was an i8 and | |||
8455 | // we need to extract down to the right number of elements. | |||
8456 | if (NumElts < 8) { | |||
8457 | uint32_t Indices[4]; | |||
8458 | for (unsigned i = 0; i != NumElts; ++i) | |||
8459 | Indices[i] = i; | |||
8460 | MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec, | |||
8461 | makeArrayRef(Indices, NumElts), | |||
8462 | "extract"); | |||
8463 | } | |||
8464 | return MaskVec; | |||
8465 | } | |||
8466 | ||||
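// Emit a masked vector store: only lanes whose mask bit is set are written.
// For instance, the AVX-512VL intrinsic _mm_mask_storeu_epi32(p, 0b0101, v)
// should store lanes 0 and 2 of v and leave the rest of the memory untouched.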
8467 | static Value *EmitX86MaskedStore(CodeGenFunction &CGF, | |||
8468 | ArrayRef<Value *> Ops, | |||
8469 | unsigned Align) { | |||
8470 | // Cast the pointer to the right type. | |||
8471 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], | |||
8472 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8473 | ||||
8474 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], | |||
8475 | Ops[1]->getType()->getVectorNumElements()); | |||
8476 | ||||
8477 | return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec); | |||
8478 | } | |||
8479 | ||||
8480 | static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, | |||
8481 | ArrayRef<Value *> Ops, unsigned Align) { | |||
8482 | // Cast the pointer to the right type. | |||
8483 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], | |||
8484 | llvm::PointerType::getUnqual(Ops[1]->getType())); | |||
8485 | ||||
8486 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], | |||
8487 | Ops[1]->getType()->getVectorNumElements()); | |||
8488 | ||||
8489 | return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]); | |||
8490 | } | |||
8491 | ||||
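// Emit an expanding masked load: llvm.masked.expandload reads consecutive
// elements from memory and places them, in order, into the lanes whose mask
// bit is set; e.g. a 0b0101 mask yields <mem[0], passthru, mem[1], passthru>.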
8492 | static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, | |||
8493 | ArrayRef<Value *> Ops) { | |||
8494 | llvm::Type *ResultTy = Ops[1]->getType(); | |||
8495 | llvm::Type *PtrTy = ResultTy->getVectorElementType(); | |||
8496 | ||||
8497 | // Cast the pointer to the element type. | |||
8498 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], | |||
8499 | llvm::PointerType::getUnqual(PtrTy)); | |||
8500 | ||||
8501 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], | |||
8502 | ResultTy->getVectorNumElements()); | |||
8503 | ||||
8504 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, | |||
8505 | ResultTy); | |||
8506 | return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] }); | |||
8507 | } | |||
8508 | ||||
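// Emit a compressing masked store: only the enabled lanes are written, packed
// contiguously; e.g. storing <a, b, c, d> with mask 0b0101 writes {a, c} to
// the first two memory slots.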
8509 | static Value *EmitX86CompressStore(CodeGenFunction &CGF, | |||
8510 | ArrayRef<Value *> Ops) { | |||
8511 | llvm::Type *ResultTy = Ops[1]->getType(); | |||
8512 | llvm::Type *PtrTy = ResultTy->getVectorElementType(); | |||
8513 | ||||
8514 | // Cast the pointer to the element type. | |||
8515 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], | |||
8516 | llvm::PointerType::getUnqual(PtrTy)); | |||
8517 | ||||
8518 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], | |||
8519 | ResultTy->getVectorNumElements()); | |||
8520 | ||||
8521 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, | |||
8522 | ResultTy); | |||
8523 | return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec }); | |||
8524 | } | |||
8525 | ||||
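// Emit a bitwise operation on two k-masks as i1 vectors; e.g. the kandn
// family is (not LHS) & RHS. The result is bitcast back to an integer of at
// least 8 bits so it matches the __mmaskN type.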
8526 | static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, | |||
8527 | unsigned NumElts, ArrayRef<Value *> Ops, | |||
8528 | bool InvertLHS = false) { | |||
8529 | Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts); | |||
8530 | Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts); | |||
8531 | ||||
8532 | if (InvertLHS) | |||
8533 | LHS = CGF.Builder.CreateNot(LHS); | |||
8534 | ||||
8535 | return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS), | |||
8536 | CGF.Builder.getIntNTy(std::max(NumElts, 8U))); | |||
8537 | } | |||
8538 | ||||
8539 | static Value *EmitX86Select(CodeGenFunction &CGF, | |||
8540 | Value *Mask, Value *Op0, Value *Op1) { | |||
8541 | ||||
8542 | // If the mask is all ones, just return the first argument. | |||
8543 | if (const auto *C = dyn_cast<Constant>(Mask)) | |||
8544 | if (C->isAllOnesValue()) | |||
8545 | return Op0; | |||
8546 | ||||
8547 | Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements()); | |||
8548 | ||||
8549 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); | |||
8550 | } | |||
8551 | ||||
8552 | static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, | |||
8553 | Value *Mask, Value *Op0, Value *Op1) { | |||
8554 | // If the mask is all ones, just return the first argument. | |||
8555 | if (const auto *C = dyn_cast<Constant>(Mask)) | |||
8556 | if (C->isAllOnesValue()) | |||
8557 | return Op0; | |||
8558 | ||||
8559 | llvm::VectorType *MaskTy = | |||
8560 | llvm::VectorType::get(CGF.Builder.getInt1Ty(), | |||
8561 | Mask->getType()->getIntegerBitWidth()); | |||
8562 | Mask = CGF.Builder.CreateBitCast(Mask, MaskTy); | |||
8563 | Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0); | |||
8564 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); | |||
8565 | } | |||
8566 | ||||
8567 | static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp, | |||
8568 | unsigned NumElts, Value *MaskIn) { | |||
8569 | if (MaskIn) { | |||
8570 | const auto *C = dyn_cast<Constant>(MaskIn); | |||
8571 | if (!C || !C->isAllOnesValue()) | |||
8572 | Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts)); | |||
8573 | } | |||
8574 | ||||
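// Masks narrower than 8 bits are widened with false lanes taken from the
// zero vector so the result can be bitcast to an i8; e.g. for NumElts == 2
// the shuffle mask built below is {0, 1, 2, 3, 2, 3, 2, 3}.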
8575 | if (NumElts < 8) { | |||
8576 | uint32_t Indices[8]; | |||
8577 | for (unsigned i = 0; i != NumElts; ++i) | |||
8578 | Indices[i] = i; | |||
8579 | for (unsigned i = NumElts; i != 8; ++i) | |||
8580 | Indices[i] = i % NumElts + NumElts; | |||
8581 | Cmp = CGF.Builder.CreateShuffleVector( | |||
8582 | Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); | |||
8583 | } | |||
8584 | ||||
8585 | return CGF.Builder.CreateBitCast(Cmp, | |||
8586 | IntegerType::get(CGF.getLLVMContext(), | |||
8587 | std::max(NumElts, 8U))); | |||
8588 | } | |||
8589 | ||||
8590 | static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, | |||
8591 | bool Signed, ArrayRef<Value *> Ops) { | |||
8592 | assert((Ops.size() == 2 || Ops.size() == 4) && | |||
8593 | "Unexpected number of arguments"); | |||
8594 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
8595 | Value *Cmp; | |||
8596 | ||||
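// The 3-bit CC value mirrors the vpcmp immediate encoding:
// 0 = EQ, 1 = LT, 2 = LE, 3 = always-false, 4 = NE, 5 = GE (NLT),
// 6 = GT (NLE), 7 = always-true.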
8597 | if (CC == 3) { | |||
8598 | Cmp = Constant::getNullValue( | |||
8599 | llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts)); | |||
8600 | } else if (CC == 7) { | |||
8601 | Cmp = Constant::getAllOnesValue( | |||
8602 | llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts)); | |||
8603 | } else { | |||
8604 | ICmpInst::Predicate Pred; | |||
8605 | switch (CC) { | |||
8606 | default: llvm_unreachable("Unknown condition code"); | |||
8607 | case 0: Pred = ICmpInst::ICMP_EQ; break; | |||
8608 | case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; | |||
8609 | case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; | |||
8610 | case 4: Pred = ICmpInst::ICMP_NE; break; | |||
8611 | case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; | |||
8612 | case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; | |||
8613 | } | |||
8614 | Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); | |||
8615 | } | |||
8616 | ||||
8617 | Value *MaskIn = nullptr; | |||
8618 | if (Ops.size() == 4) | |||
8619 | MaskIn = Ops[3]; | |||
8620 | ||||
8621 | return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn); | |||
8622 | } | |||
8623 | ||||
8624 | static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { | |||
8625 | Value *Zero = Constant::getNullValue(In->getType()); | |||
8626 | return EmitX86MaskedCompare(CGF, 1, true, { In, Zero }); | |||
8627 | } | |||
8628 | ||||
8629 | static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) { | |||
8630 | ||||
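// Integer absolute value as select(x > 0, x, 0 - x). Note that, like the
// pabs* instructions this lowers from, INT_MIN maps to itself because the
// subtraction wraps.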
8631 | llvm::Type *Ty = Ops[0]->getType(); | |||
8632 | Value *Zero = llvm::Constant::getNullValue(Ty); | |||
8633 | Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]); | |||
8634 | Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero); | |||
8635 | Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub); | |||
8636 | return Res; | |||
8637 | } | |||
8638 | ||||
8639 | static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred, | |||
8640 | ArrayRef<Value *> Ops) { | |||
8641 | Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); | |||
8642 | Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]); | |||
8643 | ||||
8644 | assert(Ops.size() == 2); | |||
8645 | return Res; | |||
8646 | } | |||
8647 | ||||
8648 | // Lowers X86 FMA intrinsics to IR. | |||
8649 | static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops, | |||
8650 | unsigned BuiltinID, bool IsAddSub) { | |||
8651 | ||||
8652 | bool Subtract = false; | |||
8653 | Intrinsic::ID IID = Intrinsic::not_intrinsic; | |||
8654 | switch (BuiltinID) { | |||
8655 | default: break; | |||
8656 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: | |||
8657 | Subtract = true; | |||
8658 | LLVM_FALLTHROUGH; | |||
8659 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: | |||
8660 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: | |||
8661 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: | |||
8662 | IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; | |||
8663 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: | |||
8664 | Subtract = true; | |||
8665 | LLVM_FALLTHROUGH; | |||
8666 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: | |||
8667 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: | |||
8668 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: | |||
8669 | IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; | |||
8670 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: | |||
8671 | Subtract = true; | |||
8672 | LLVM_FALLTHROUGH; | |||
8673 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: | |||
8674 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: | |||
8675 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: | |||
8676 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512; | |||
8677 | break; | |||
8678 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: | |||
8679 | Subtract = true; | |||
8680 | LLVM_FALLTHROUGH; | |||
8681 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: | |||
8682 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: | |||
8683 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: | |||
8684 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512; | |||
8685 | break; | |||
8686 | } | |||
8687 | ||||
8688 | Value *A = Ops[0]; | |||
8689 | Value *B = Ops[1]; | |||
8690 | Value *C = Ops[2]; | |||
8691 | ||||
8692 | if (Subtract) | |||
8693 | C = CGF.Builder.CreateFNeg(C); | |||
8694 | ||||
8695 | Value *Res; | |||
8696 | ||||
8697 | // Use the target intrinsic only when a rounding mode other than _MM_FROUND_CUR_DIRECTION (4) is requested; otherwise emit a plain llvm.fma. | |||
8698 | if (IID != Intrinsic::not_intrinsic && | |||
8699 | cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) { | |||
8700 | Function *Intr = CGF.CGM.getIntrinsic(IID); | |||
8701 | Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() }); | |||
8702 | } else { | |||
8703 | llvm::Type *Ty = A->getType(); | |||
8704 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty); | |||
8705 | Res = CGF.Builder.CreateCall(FMA, {A, B, C} ); | |||
8706 | ||||
8707 | if (IsAddSub) { | |||
8708 | // For the add/sub forms, even result lanes subtract C: compute a second FMA with -C and blend it into the even lanes. | |||
8709 | unsigned NumElts = Ty->getVectorNumElements(); | |||
8710 | SmallVector<uint32_t, 16> Indices(NumElts); | |||
8711 | for (unsigned i = 0; i != NumElts; ++i) | |||
8712 | Indices[i] = i + (i % 2) * NumElts; | |||
8713 | ||||
8714 | Value *NegC = CGF.Builder.CreateFNeg(C); | |||
8715 | Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} ); | |||
8716 | Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices); | |||
8717 | } | |||
8718 | } | |||
8719 | ||||
8720 | // Handle any required masking. | |||
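// _mask variants blend disabled lanes from the first source (Ops[0]), _maskz
// variants zero them, and _mask3 variants blend from the accumulator
// (Ops[2]); in all three cases the write mask itself is Ops[3].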
8721 | Value *MaskFalseVal = nullptr; | |||
8722 | switch (BuiltinID) { | |||
8723 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: | |||
8724 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: | |||
8725 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: | |||
8726 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: | |||
8727 | MaskFalseVal = Ops[0]; | |||
8728 | break; | |||
8729 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: | |||
8730 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: | |||
8731 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: | |||
8732 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: | |||
8733 | MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); | |||
8734 | break; | |||
8735 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: | |||
8736 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: | |||
8737 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: | |||
8738 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: | |||
8739 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: | |||
8740 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: | |||
8741 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: | |||
8742 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: | |||
8743 | MaskFalseVal = Ops[2]; | |||
8744 | break; | |||
8745 | } | |||
8746 | ||||
8747 | if (MaskFalseVal) | |||
8748 | return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal); | |||
8749 | ||||
8750 | return Res; | |||
8751 | } | |||
8752 | ||||
8753 | static Value * | |||
8754 | EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops, | |||
8755 | Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0, | |||
8756 | bool NegAcc = false) { | |||
8757 | unsigned Rnd = 4; | |||
8758 | if (Ops.size() > 4) | |||
8759 | Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); | |||
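// Rnd == 4 corresponds to _MM_FROUND_CUR_DIRECTION, i.e. no explicit
// rounding override was requested.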
8760 | ||||
8761 | if (NegAcc) | |||
8762 | Ops[2] = CGF.Builder.CreateFNeg(Ops[2]); | |||
8763 | ||||
8764 | Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0); | |||
8765 | Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0); | |||
8766 | Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0); | |||
8767 | Value *Res; | |||
8768 | if (Rnd != 4) { | |||
8769 | Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ? | |||
8770 | Intrinsic::x86_avx512_vfmadd_f32 : | |||
8771 | Intrinsic::x86_avx512_vfmadd_f64; | |||
8772 | Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), | |||
8773 | {Ops[0], Ops[1], Ops[2], Ops[4]}); | |||
8774 | } else { | |||
8775 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType()); | |||
8776 | Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3)); | |||
8777 | } | |||
8778 | // If we have more than 3 arguments, we need to do masking. | |||
8779 | if (Ops.size() > 3) { | |||
8780 | Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) | |||
8781 | : Ops[PTIdx]; | |||
8782 | ||||
8783 | // If we negated the accumulator and it is also the PassThru value, we need | |||
8784 | // to bypass the negate. Conveniently, Upper should be the same thing in this | |||
8785 | // case. | |||
8786 | if (NegAcc && PTIdx == 2) | |||
8787 | PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0); | |||
8788 | ||||
8789 | Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru); | |||
8790 | } | |||
8791 | return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0); | |||
8792 | } | |||
8793 | ||||
8794 | static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned, | |||
8795 | ArrayRef<Value *> Ops) { | |||
8796 | llvm::Type *Ty = Ops[0]->getType(); | |||
8797 | // Arguments have a vXi32 type so cast to vXi64. | |||
8798 | Ty = llvm::VectorType::get(CGF.Int64Ty, | |||
8799 | Ty->getPrimitiveSizeInBits() / 64); | |||
8800 | Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty); | |||
8801 | Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty); | |||
8802 | ||||
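// pmuldq/pmuludq multiply only the low 32 bits of each 64-bit lane, so
// sign-extend (signed) or zero-extend (unsigned) those 32 bits within the
// vXi64 view and then emit an ordinary 64-bit multiply.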
8803 | if (IsSigned) { | |||
8804 | // Shift left then arithmetic shift right. | |||
8805 | Constant *ShiftAmt = ConstantInt::get(Ty, 32); | |||
8806 | LHS = CGF.Builder.CreateShl(LHS, ShiftAmt); | |||
8807 | LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt); | |||
8808 | RHS = CGF.Builder.CreateShl(RHS, ShiftAmt); | |||
8809 | RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt); | |||
8810 | } else { | |||
8811 | // Clear the upper bits. | |||
8812 | Constant *Mask = ConstantInt::get(Ty, 0xffffffff); | |||
8813 | LHS = CGF.Builder.CreateAnd(LHS, Mask); | |||
8814 | RHS = CGF.Builder.CreateAnd(RHS, Mask); | |||
8815 | } | |||
8816 | ||||
8817 | return CGF.Builder.CreateMul(LHS, RHS); | |||
8818 | } | |||
8819 | ||||
8820 | // Emit a masked pternlog intrinsic. This only exists because the header has to | |||
8821 | // use a macro and we aren't able to pass the input argument to a pternlog | |||
8822 | // builtin and a select builtin without evaluating it twice. | |||
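// The immediate (Ops[3]) is an 8-bit truth table indexed by the bits of the
// three sources; e.g. an immediate of 0xCA should compute the bitwise select
// A ? B : C.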
8823 | static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask, | |||
8824 | ArrayRef<Value *> Ops) { | |||
8825 | llvm::Type *Ty = Ops[0]->getType(); | |||
8826 | ||||
8827 | unsigned VecWidth = Ty->getPrimitiveSizeInBits(); | |||
8828 | unsigned EltWidth = Ty->getScalarSizeInBits(); | |||
8829 | Intrinsic::ID IID; | |||
8830 | if (VecWidth == 128 && EltWidth == 32) | |||
8831 | IID = Intrinsic::x86_avx512_pternlog_d_128; | |||
8832 | else if (VecWidth == 256 && EltWidth == 32) | |||
8833 | IID = Intrinsic::x86_avx512_pternlog_d_256; | |||
8834 | else if (VecWidth == 512 && EltWidth == 32) | |||
8835 | IID = Intrinsic::x86_avx512_pternlog_d_512; | |||
8836 | else if (VecWidth == 128 && EltWidth == 64) | |||
8837 | IID = Intrinsic::x86_avx512_pternlog_q_128; | |||
8838 | else if (VecWidth == 256 && EltWidth == 64) | |||
8839 | IID = Intrinsic::x86_avx512_pternlog_q_256; | |||
8840 | else if (VecWidth == 512 && EltWidth == 64) | |||
8841 | IID = Intrinsic::x86_avx512_pternlog_q_512; | |||
8842 | else | |||
8843 | llvm_unreachable("Unexpected intrinsic")::llvm::llvm_unreachable_internal("Unexpected intrinsic", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 8843); | |||
8844 | ||||
8845 | Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), | |||
8846 | Ops.drop_back()); | |||
8847 | Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; | |||
8848 | return EmitX86Select(CGF, Ops[4], Ternlog, PassThru); | |||
8849 | } | |||
8850 | ||||
8851 | static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, | |||
8852 | llvm::Type *DstTy) { | |||
8853 | unsigned NumberOfElements = DstTy->getVectorNumElements(); | |||
8854 | Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); | |||
8855 | return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); | |||
8856 | } | |||
8857 | ||||
8858 | Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { | |||
8859 | const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); | |||
8860 | StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); | |||
8861 | return EmitX86CpuIs(CPUStr); | |||
8862 | } | |||
8863 | ||||
8864 | Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { | |||
8865 | ||||
8866 | llvm::Type *Int32Ty = Builder.getInt32Ty(); | |||
8867 | ||||
8868 | // Matching the struct layout from the compiler-rt/libgcc structure that is | |||
8869 | // filled in: | |||
8870 | // unsigned int __cpu_vendor; | |||
8871 | // unsigned int __cpu_type; | |||
8872 | // unsigned int __cpu_subtype; | |||
8873 | // unsigned int __cpu_features[1]; | |||
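// Vendor names (e.g. "intel", "amd") are matched against __cpu_vendor
// (Index 0), CPU family names against __cpu_type (Index 1), and
// micro-architecture names against __cpu_subtype (Index 2).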
8874 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, | |||
8875 | llvm::ArrayType::get(Int32Ty, 1)); | |||
8876 | ||||
8877 | // Grab the global __cpu_model. | |||
8878 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); | |||
8879 | ||||
8880 | // Calculate the index needed to access the correct field based on the | |||
8881 | // range. Also adjust the expected value. | |||
8882 | unsigned Index; | |||
8883 | unsigned Value; | |||
8884 | std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) | |||
8885 | #define X86_VENDOR(ENUM, STRING) \ | |||
8886 | .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)}) | |||
8887 | #define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \ | |||
8888 | .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) | |||
8889 | #define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \ | |||
8890 | .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) | |||
8891 | #define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \ | |||
8892 | .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) | |||
8893 | #include "llvm/Support/X86TargetParser.def" | |||
8894 | .Default({0, 0}); | |||
8895 | assert(Value != 0 && "Invalid CPUStr passed to CpuIs"); | |||
8896 | ||||
8897 | // Grab the appropriate field from __cpu_model. | |||
8898 | llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), | |||
8899 | ConstantInt::get(Int32Ty, Index)}; | |||
8900 | llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs); | |||
8901 | CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4)); | |||
8902 | ||||
8903 | // Check the value of the field against the requested value. | |||
8904 | return Builder.CreateICmpEQ(CpuValue, | |||
8905 | llvm::ConstantInt::get(Int32Ty, Value)); | |||
8906 | } | |||
8907 | ||||
8908 | Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { | |||
8909 | const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); | |||
8910 | StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); | |||
8911 | return EmitX86CpuSupports(FeatureStr); | |||
8912 | } | |||
8913 | ||||
8914 | uint32_t | |||
8915 | CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) { | |||
8916 | // Processor features and mapping to processor feature value. | |||
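// Each feature string maps to a bit position taken from X86TargetParser.def;
// e.g. passing {"sse4.2", "avx"} ORs the two corresponding bits into the
// returned mask.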
8917 | uint32_t FeaturesMask = 0; | |||
8918 | for (const StringRef &FeatureStr : FeatureStrs) { | |||
8919 | unsigned Feature = | |||
8920 | StringSwitch<unsigned>(FeatureStr) | |||
8921 | #define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL) | |||
8922 | #include "llvm/Support/X86TargetParser.def" | |||
8923 | ; | |||
8924 | FeaturesMask |= (1U << Feature); | |||
8925 | } | |||
8926 | return FeaturesMask; | |||
8927 | } | |||
8928 | ||||
8929 | Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { | |||
8930 | return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs)); | |||
8931 | } | |||
8932 | ||||
8933 | llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint32_t FeaturesMask) { | |||
8934 | // Matching the struct layout from the compiler-rt/libgcc structure that is | |||
8935 | // filled in: | |||
8936 | // unsigned int __cpu_vendor; | |||
8937 | // unsigned int __cpu_type; | |||
8938 | // unsigned int __cpu_subtype; | |||
8939 | // unsigned int __cpu_features[1]; | |||
8940 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, | |||
8941 | llvm::ArrayType::get(Int32Ty, 1)); | |||
8942 | ||||
8943 | // Grab the global __cpu_model. | |||
8944 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); | |||
8945 | ||||
8946 | // Grab the first (0th) element from the field __cpu_features off of the | |||
8947 | // global in the struct STy. | |||
8948 | Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 3), | |||
8949 | ConstantInt::get(Int32Ty, 0)}; | |||
8950 | Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs); | |||
8951 | Value *Features = | |||
8952 | Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4)); | |||
8953 | ||||
8954 | // Check the value of the bit corresponding to the feature requested. | |||
8955 | Value *Bitset = Builder.CreateAnd( | |||
8956 | Features, llvm::ConstantInt::get(Int32Ty, FeaturesMask)); | |||
8957 | return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0)); | |||
8958 | } | |||
8959 | ||||
8960 | Value *CodeGenFunction::EmitX86CpuInit() { | |||
8961 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, | |||
8962 | /*Variadic*/ false); | |||
8963 | llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init"); | |||
8964 | return Builder.CreateCall(Func); | |||
8965 | } | |||
8966 | ||||
8967 | Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, | |||
8968 | const CallExpr *E) { | |||
8969 | if (BuiltinID == X86::BI__builtin_cpu_is) | |||
8970 | return EmitX86CpuIs(E); | |||
8971 | if (BuiltinID == X86::BI__builtin_cpu_supports) | |||
8972 | return EmitX86CpuSupports(E); | |||
8973 | if (BuiltinID == X86::BI__builtin_cpu_init) | |||
8974 | return EmitX86CpuInit(); | |||
8975 | ||||
8976 | SmallVector<Value*, 4> Ops; | |||
8977 | ||||
8978 | // Find out if any arguments are required to be integer constant expressions. | |||
8979 | unsigned ICEArguments = 0; | |||
8980 | ASTContext::GetBuiltinTypeError Error; | |||
8981 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); | |||
8982 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); | |||
8983 | ||||
8984 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { | |||
8985 | // If this is a normal argument, just emit it as a scalar. | |||
8986 | if ((ICEArguments & (1 << i)) == 0) { | |||
8987 | Ops.push_back(EmitScalarExpr(E->getArg(i))); | |||
8988 | continue; | |||
8989 | } | |||
8990 | ||||
8991 | // If this is required to be a constant, constant fold it so that we know | |||
8992 | // that the generated intrinsic gets a ConstantInt. | |||
8993 | llvm::APSInt Result; | |||
8994 | bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext()); | |||
8995 | assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst; | |||
8996 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result)); | |||
8997 | } | |||
8998 | ||||
8999 | // These exist so that the builtin that takes an immediate can be bounds | |||
9000 | // checked by clang to avoid passing bad immediates to the backend. Since | |||
9001 | // AVX has a larger immediate than SSE, we would need separate builtins to | |||
9002 | // do the different bounds checking. Rather than create a clang-specific | |||
9003 | // SSE-only builtin, this implements eight separate builtins to match the | |||
9004 | // gcc implementation. | |||
9005 | auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { | |||
9006 | Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm)); | |||
9007 | llvm::Function *F = CGM.getIntrinsic(ID); | |||
9008 | return Builder.CreateCall(F, Ops); | |||
9009 | }; | |||
9010 | ||||
9011 | // For the vector forms of FP comparisons, translate the builtins directly to | |||
9012 | // IR. | |||
9013 | // TODO: The builtins could be removed if the SSE header files used vector | |||
9014 | // extension comparisons directly (vector ordered/unordered may need | |||
9015 | // additional support via __builtin_isnan()). | |||
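// For example, an ordered less-than builtin becomes: fcmp olt on the two
// vectors, sext of the i1 result to the matching integer vector, then a
// bitcast back to the FP vector type, yielding all-ones/all-zeros lanes.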
9016 | auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) { | |||
9017 | Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); | |||
9018 | llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); | |||
9019 | llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy); | |||
9020 | Value *Sext = Builder.CreateSExt(Cmp, IntVecTy); | |||
9021 | return Builder.CreateBitCast(Sext, FPVecTy); | |||
9022 | }; | |||
9023 | ||||
9024 | switch (BuiltinID) { | |||
9025 | default: return nullptr; | |||
9026 | case X86::BI_mm_prefetch: { | |||
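// The hint operand packs the read/write flag in bit 2 and the locality in
// bits 0-1; e.g. _MM_HINT_T0 (3) decodes to RW = 0, Locality = 3.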
9027 | Value *Address = Ops[0]; | |||
9028 | ConstantInt *C = cast<ConstantInt>(Ops[1]); | |||
9029 | Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1); | |||
9030 | Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3); | |||
9031 | Value *Data = ConstantInt::get(Int32Ty, 1); | |||
9032 | Value *F = CGM.getIntrinsic(Intrinsic::prefetch); | |||
9033 | return Builder.CreateCall(F, {Address, RW, Locality, Data}); | |||
9034 | } | |||
9035 | case X86::BI_mm_clflush: { | |||
9036 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush), | |||
9037 | Ops[0]); | |||
9038 | } | |||
9039 | case X86::BI_mm_lfence: { | |||
9040 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence)); | |||
9041 | } | |||
9042 | case X86::BI_mm_mfence: { | |||
9043 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence)); | |||
9044 | } | |||
9045 | case X86::BI_mm_sfence: { | |||
9046 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence)); | |||
9047 | } | |||
9048 | case X86::BI_mm_pause: { | |||
9049 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause)); | |||
9050 | } | |||
9051 | case X86::BI__rdtsc: { | |||
9052 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc)); | |||
9053 | } | |||
9054 | case X86::BI__builtin_ia32_undef128: | |||
9055 | case X86::BI__builtin_ia32_undef256: | |||
9056 | case X86::BI__builtin_ia32_undef512: | |||
9057 | // The x86 definition of "undef" is not the same as the LLVM definition | |||
9058 | // (PR32176). We leave optimizing away an unnecessary zero constant to the | |||
9059 | // IR optimizer and backend. | |||
9060 | // TODO: If we had a "freeze" IR instruction to generate a fixed undef | |||
9061 | // value, we should use that here instead of a zero. | |||
9062 | return llvm::Constant::getNullValue(ConvertType(E->getType())); | |||
9063 | case X86::BI__builtin_ia32_vec_init_v8qi: | |||
9064 | case X86::BI__builtin_ia32_vec_init_v4hi: | |||
9065 | case X86::BI__builtin_ia32_vec_init_v2si: | |||
9066 | return Builder.CreateBitCast(BuildVector(Ops), | |||
9067 | llvm::Type::getX86_MMXTy(getLLVMContext())); | |||
9068 | case X86::BI__builtin_ia32_vec_ext_v2si: | |||
9069 | case X86::BI__builtin_ia32_vec_ext_v16qi: | |||
9070 | case X86::BI__builtin_ia32_vec_ext_v8hi: | |||
9071 | case X86::BI__builtin_ia32_vec_ext_v4si: | |||
9072 | case X86::BI__builtin_ia32_vec_ext_v4sf: | |||
9073 | case X86::BI__builtin_ia32_vec_ext_v2di: | |||
9074 | case X86::BI__builtin_ia32_vec_ext_v32qi: | |||
9075 | case X86::BI__builtin_ia32_vec_ext_v16hi: | |||
9076 | case X86::BI__builtin_ia32_vec_ext_v8si: | |||
9077 | case X86::BI__builtin_ia32_vec_ext_v4di: { | |||
9078 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9079 | uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue(); | |||
9080 | Index &= NumElts - 1; | |||
9081 | // These builtins exist so we can ensure the index is an ICE and in range. | |||
9082 | // Otherwise we could just do this in the header file. | |||
9083 | return Builder.CreateExtractElement(Ops[0], Index); | |||
9084 | } | |||
9085 | case X86::BI__builtin_ia32_vec_set_v16qi: | |||
9086 | case X86::BI__builtin_ia32_vec_set_v8hi: | |||
9087 | case X86::BI__builtin_ia32_vec_set_v4si: | |||
9088 | case X86::BI__builtin_ia32_vec_set_v2di: | |||
9089 | case X86::BI__builtin_ia32_vec_set_v32qi: | |||
9090 | case X86::BI__builtin_ia32_vec_set_v16hi: | |||
9091 | case X86::BI__builtin_ia32_vec_set_v8si: | |||
9092 | case X86::BI__builtin_ia32_vec_set_v4di: { | |||
9093 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9094 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); | |||
9095 | Index &= NumElts - 1; | |||
9096 | // These builtins exist so we can ensure the index is an ICE and in range. | |||
9097 | // Otherwise we could just do this in the header file. | |||
9098 | return Builder.CreateInsertElement(Ops[0], Ops[1], Index); | |||
9099 | } | |||
9100 | case X86::BI_mm_setcsr: | |||
9101 | case X86::BI__builtin_ia32_ldmxcsr: { | |||
9102 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); | |||
9103 | Builder.CreateStore(Ops[0], Tmp); | |||
9104 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), | |||
9105 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); | |||
9106 | } | |||
9107 | case X86::BI_mm_getcsr: | |||
9108 | case X86::BI__builtin_ia32_stmxcsr: { | |||
9109 | Address Tmp = CreateMemTemp(E->getType()); | |||
9110 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), | |||
9111 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); | |||
9112 | return Builder.CreateLoad(Tmp, "stmxcsr"); | |||
9113 | } | |||
9114 | case X86::BI__builtin_ia32_xsave: | |||
9115 | case X86::BI__builtin_ia32_xsave64: | |||
9116 | case X86::BI__builtin_ia32_xrstor: | |||
9117 | case X86::BI__builtin_ia32_xrstor64: | |||
9118 | case X86::BI__builtin_ia32_xsaveopt: | |||
9119 | case X86::BI__builtin_ia32_xsaveopt64: | |||
9120 | case X86::BI__builtin_ia32_xrstors: | |||
9121 | case X86::BI__builtin_ia32_xrstors64: | |||
9122 | case X86::BI__builtin_ia32_xsavec: | |||
9123 | case X86::BI__builtin_ia32_xsavec64: | |||
9124 | case X86::BI__builtin_ia32_xsaves: | |||
9125 | case X86::BI__builtin_ia32_xsaves64: { | |||
9126 | Intrinsic::ID ID; | |||
9127 | #define INTRINSIC_X86_XSAVE_ID(NAME) \ | |||
9128 | case X86::BI__builtin_ia32_##NAME: \ | |||
9129 | ID = Intrinsic::x86_##NAME; \ | |||
9130 | break | |||
9131 | switch (BuiltinID) { | |||
9132 | default: llvm_unreachable("Unsupported intrinsic!"); | |||
9133 | INTRINSIC_X86_XSAVE_ID(xsave); | |||
9134 | INTRINSIC_X86_XSAVE_ID(xsave64); | |||
9135 | INTRINSIC_X86_XSAVE_ID(xrstor); | |||
9136 | INTRINSIC_X86_XSAVE_ID(xrstor64); | |||
9137 | INTRINSIC_X86_XSAVE_ID(xsaveopt); | |||
9138 | INTRINSIC_X86_XSAVE_ID(xsaveopt64); | |||
9139 | INTRINSIC_X86_XSAVE_ID(xrstors); | |||
9140 | INTRINSIC_X86_XSAVE_ID(xrstors64); | |||
9141 | INTRINSIC_X86_XSAVE_ID(xsavec); | |||
9142 | INTRINSIC_X86_XSAVE_ID(xsavec64); | |||
9143 | INTRINSIC_X86_XSAVE_ID(xsaves); | |||
9144 | INTRINSIC_X86_XSAVE_ID(xsaves64); | |||
9145 | } | |||
9146 | #undef INTRINSIC_X86_XSAVE_ID | |||
9147 | Value *Mhi = Builder.CreateTrunc( | |||
9148 | Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); | |||
9149 | Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty); | |||
9150 | Ops[1] = Mhi; | |||
9151 | Ops.push_back(Mlo); | |||
9152 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); | |||
9153 | } | |||
9154 | case X86::BI__builtin_ia32_storedqudi128_mask: | |||
9155 | case X86::BI__builtin_ia32_storedqusi128_mask: | |||
9156 | case X86::BI__builtin_ia32_storedquhi128_mask: | |||
9157 | case X86::BI__builtin_ia32_storedquqi128_mask: | |||
9158 | case X86::BI__builtin_ia32_storeupd128_mask: | |||
9159 | case X86::BI__builtin_ia32_storeups128_mask: | |||
9160 | case X86::BI__builtin_ia32_storedqudi256_mask: | |||
9161 | case X86::BI__builtin_ia32_storedqusi256_mask: | |||
9162 | case X86::BI__builtin_ia32_storedquhi256_mask: | |||
9163 | case X86::BI__builtin_ia32_storedquqi256_mask: | |||
9164 | case X86::BI__builtin_ia32_storeupd256_mask: | |||
9165 | case X86::BI__builtin_ia32_storeups256_mask: | |||
9166 | case X86::BI__builtin_ia32_storedqudi512_mask: | |||
9167 | case X86::BI__builtin_ia32_storedqusi512_mask: | |||
9168 | case X86::BI__builtin_ia32_storedquhi512_mask: | |||
9169 | case X86::BI__builtin_ia32_storedquqi512_mask: | |||
9170 | case X86::BI__builtin_ia32_storeupd512_mask: | |||
9171 | case X86::BI__builtin_ia32_storeups512_mask: | |||
9172 | return EmitX86MaskedStore(*this, Ops, 1); | |||
9173 | ||||
9174 | case X86::BI__builtin_ia32_storess128_mask: | |||
9175 | case X86::BI__builtin_ia32_storesd128_mask: { | |||
9176 | return EmitX86MaskedStore(*this, Ops, 1); | |||
9177 | } | |||
9178 | case X86::BI__builtin_ia32_vpopcntb_128: | |||
9179 | case X86::BI__builtin_ia32_vpopcntd_128: | |||
9180 | case X86::BI__builtin_ia32_vpopcntq_128: | |||
9181 | case X86::BI__builtin_ia32_vpopcntw_128: | |||
9182 | case X86::BI__builtin_ia32_vpopcntb_256: | |||
9183 | case X86::BI__builtin_ia32_vpopcntd_256: | |||
9184 | case X86::BI__builtin_ia32_vpopcntq_256: | |||
9185 | case X86::BI__builtin_ia32_vpopcntw_256: | |||
9186 | case X86::BI__builtin_ia32_vpopcntb_512: | |||
9187 | case X86::BI__builtin_ia32_vpopcntd_512: | |||
9188 | case X86::BI__builtin_ia32_vpopcntq_512: | |||
9189 | case X86::BI__builtin_ia32_vpopcntw_512: { | |||
9190 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
9191 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); | |||
9192 | return Builder.CreateCall(F, Ops); | |||
9193 | } | |||
9194 | case X86::BI__builtin_ia32_cvtmask2b128: | |||
9195 | case X86::BI__builtin_ia32_cvtmask2b256: | |||
9196 | case X86::BI__builtin_ia32_cvtmask2b512: | |||
9197 | case X86::BI__builtin_ia32_cvtmask2w128: | |||
9198 | case X86::BI__builtin_ia32_cvtmask2w256: | |||
9199 | case X86::BI__builtin_ia32_cvtmask2w512: | |||
9200 | case X86::BI__builtin_ia32_cvtmask2d128: | |||
9201 | case X86::BI__builtin_ia32_cvtmask2d256: | |||
9202 | case X86::BI__builtin_ia32_cvtmask2d512: | |||
9203 | case X86::BI__builtin_ia32_cvtmask2q128: | |||
9204 | case X86::BI__builtin_ia32_cvtmask2q256: | |||
9205 | case X86::BI__builtin_ia32_cvtmask2q512: | |||
9206 | return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); | |||
9207 | ||||
9208 | case X86::BI__builtin_ia32_cvtb2mask128: | |||
9209 | case X86::BI__builtin_ia32_cvtb2mask256: | |||
9210 | case X86::BI__builtin_ia32_cvtb2mask512: | |||
9211 | case X86::BI__builtin_ia32_cvtw2mask128: | |||
9212 | case X86::BI__builtin_ia32_cvtw2mask256: | |||
9213 | case X86::BI__builtin_ia32_cvtw2mask512: | |||
9214 | case X86::BI__builtin_ia32_cvtd2mask128: | |||
9215 | case X86::BI__builtin_ia32_cvtd2mask256: | |||
9216 | case X86::BI__builtin_ia32_cvtd2mask512: | |||
9217 | case X86::BI__builtin_ia32_cvtq2mask128: | |||
9218 | case X86::BI__builtin_ia32_cvtq2mask256: | |||
9219 | case X86::BI__builtin_ia32_cvtq2mask512: | |||
9220 | return EmitX86ConvertToMask(*this, Ops[0]); | |||
9221 | ||||
9222 | case X86::BI__builtin_ia32_vfmaddss3: | |||
9223 | case X86::BI__builtin_ia32_vfmaddsd3: | |||
9224 | case X86::BI__builtin_ia32_vfmaddss3_mask: | |||
9225 | case X86::BI__builtin_ia32_vfmaddsd3_mask: | |||
9226 | return EmitScalarFMAExpr(*this, Ops, Ops[0]); | |||
9227 | case X86::BI__builtin_ia32_vfmaddss: | |||
9228 | case X86::BI__builtin_ia32_vfmaddsd: | |||
9229 | return EmitScalarFMAExpr(*this, Ops, | |||
9230 | Constant::getNullValue(Ops[0]->getType())); | |||
9231 | case X86::BI__builtin_ia32_vfmaddss3_maskz: | |||
9232 | case X86::BI__builtin_ia32_vfmaddsd3_maskz: | |||
9233 | return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true); | |||
9234 | case X86::BI__builtin_ia32_vfmaddss3_mask3: | |||
9235 | case X86::BI__builtin_ia32_vfmaddsd3_mask3: | |||
9236 | return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2); | |||
9237 | case X86::BI__builtin_ia32_vfmsubss3_mask3: | |||
9238 | case X86::BI__builtin_ia32_vfmsubsd3_mask3: | |||
9239 | return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2, | |||
9240 | /*NegAcc*/true); | |||
9241 | case X86::BI__builtin_ia32_vfmaddps: | |||
9242 | case X86::BI__builtin_ia32_vfmaddpd: | |||
9243 | case X86::BI__builtin_ia32_vfmaddps256: | |||
9244 | case X86::BI__builtin_ia32_vfmaddpd256: | |||
9245 | case X86::BI__builtin_ia32_vfmaddps512_mask: | |||
9246 | case X86::BI__builtin_ia32_vfmaddps512_maskz: | |||
9247 | case X86::BI__builtin_ia32_vfmaddps512_mask3: | |||
9248 | case X86::BI__builtin_ia32_vfmsubps512_mask3: | |||
9249 | case X86::BI__builtin_ia32_vfmaddpd512_mask: | |||
9250 | case X86::BI__builtin_ia32_vfmaddpd512_maskz: | |||
9251 | case X86::BI__builtin_ia32_vfmaddpd512_mask3: | |||
9252 | case X86::BI__builtin_ia32_vfmsubpd512_mask3: | |||
9253 | return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false); | |||
9254 | case X86::BI__builtin_ia32_vfmaddsubps: | |||
9255 | case X86::BI__builtin_ia32_vfmaddsubpd: | |||
9256 | case X86::BI__builtin_ia32_vfmaddsubps256: | |||
9257 | case X86::BI__builtin_ia32_vfmaddsubpd256: | |||
9258 | case X86::BI__builtin_ia32_vfmaddsubps512_mask: | |||
9259 | case X86::BI__builtin_ia32_vfmaddsubps512_maskz: | |||
9260 | case X86::BI__builtin_ia32_vfmaddsubps512_mask3: | |||
9261 | case X86::BI__builtin_ia32_vfmsubaddps512_mask3: | |||
9262 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask: | |||
9263 | case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: | |||
9264 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: | |||
9265 | case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: | |||
9266 | return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true); | |||
9267 | ||||
9268 | case X86::BI__builtin_ia32_movdqa32store128_mask: | |||
9269 | case X86::BI__builtin_ia32_movdqa64store128_mask: | |||
9270 | case X86::BI__builtin_ia32_storeaps128_mask: | |||
9271 | case X86::BI__builtin_ia32_storeapd128_mask: | |||
9272 | case X86::BI__builtin_ia32_movdqa32store256_mask: | |||
9273 | case X86::BI__builtin_ia32_movdqa64store256_mask: | |||
9274 | case X86::BI__builtin_ia32_storeaps256_mask: | |||
9275 | case X86::BI__builtin_ia32_storeapd256_mask: | |||
9276 | case X86::BI__builtin_ia32_movdqa32store512_mask: | |||
9277 | case X86::BI__builtin_ia32_movdqa64store512_mask: | |||
9278 | case X86::BI__builtin_ia32_storeaps512_mask: | |||
9279 | case X86::BI__builtin_ia32_storeapd512_mask: { | |||
9280 | unsigned Align = | |||
9281 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity(); | |||
9282 | return EmitX86MaskedStore(*this, Ops, Align); | |||
9283 | } | |||
9284 | case X86::BI__builtin_ia32_loadups128_mask: | |||
9285 | case X86::BI__builtin_ia32_loadups256_mask: | |||
9286 | case X86::BI__builtin_ia32_loadups512_mask: | |||
9287 | case X86::BI__builtin_ia32_loadupd128_mask: | |||
9288 | case X86::BI__builtin_ia32_loadupd256_mask: | |||
9289 | case X86::BI__builtin_ia32_loadupd512_mask: | |||
9290 | case X86::BI__builtin_ia32_loaddquqi128_mask: | |||
9291 | case X86::BI__builtin_ia32_loaddquqi256_mask: | |||
9292 | case X86::BI__builtin_ia32_loaddquqi512_mask: | |||
9293 | case X86::BI__builtin_ia32_loaddquhi128_mask: | |||
9294 | case X86::BI__builtin_ia32_loaddquhi256_mask: | |||
9295 | case X86::BI__builtin_ia32_loaddquhi512_mask: | |||
9296 | case X86::BI__builtin_ia32_loaddqusi128_mask: | |||
9297 | case X86::BI__builtin_ia32_loaddqusi256_mask: | |||
9298 | case X86::BI__builtin_ia32_loaddqusi512_mask: | |||
9299 | case X86::BI__builtin_ia32_loaddqudi128_mask: | |||
9300 | case X86::BI__builtin_ia32_loaddqudi256_mask: | |||
9301 | case X86::BI__builtin_ia32_loaddqudi512_mask: | |||
9302 | return EmitX86MaskedLoad(*this, Ops, 1); | |||
9303 | ||||
9304 | case X86::BI__builtin_ia32_loadss128_mask: | |||
9305 | case X86::BI__builtin_ia32_loadsd128_mask: | |||
9306 | return EmitX86MaskedLoad(*this, Ops, 1); | |||
9307 | ||||
9308 | case X86::BI__builtin_ia32_loadaps128_mask: | |||
9309 | case X86::BI__builtin_ia32_loadaps256_mask: | |||
9310 | case X86::BI__builtin_ia32_loadaps512_mask: | |||
9311 | case X86::BI__builtin_ia32_loadapd128_mask: | |||
9312 | case X86::BI__builtin_ia32_loadapd256_mask: | |||
9313 | case X86::BI__builtin_ia32_loadapd512_mask: | |||
9314 | case X86::BI__builtin_ia32_movdqa32load128_mask: | |||
9315 | case X86::BI__builtin_ia32_movdqa32load256_mask: | |||
9316 | case X86::BI__builtin_ia32_movdqa32load512_mask: | |||
9317 | case X86::BI__builtin_ia32_movdqa64load128_mask: | |||
9318 | case X86::BI__builtin_ia32_movdqa64load256_mask: | |||
9319 | case X86::BI__builtin_ia32_movdqa64load512_mask: { | |||
9320 | unsigned Align = | |||
9321 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity(); | |||
9322 | return EmitX86MaskedLoad(*this, Ops, Align); | |||
9323 | } | |||
9324 | ||||
9325 | case X86::BI__builtin_ia32_expandloaddf128_mask: | |||
9326 | case X86::BI__builtin_ia32_expandloaddf256_mask: | |||
9327 | case X86::BI__builtin_ia32_expandloaddf512_mask: | |||
9328 | case X86::BI__builtin_ia32_expandloadsf128_mask: | |||
9329 | case X86::BI__builtin_ia32_expandloadsf256_mask: | |||
9330 | case X86::BI__builtin_ia32_expandloadsf512_mask: | |||
9331 | case X86::BI__builtin_ia32_expandloaddi128_mask: | |||
9332 | case X86::BI__builtin_ia32_expandloaddi256_mask: | |||
9333 | case X86::BI__builtin_ia32_expandloaddi512_mask: | |||
9334 | case X86::BI__builtin_ia32_expandloadsi128_mask: | |||
9335 | case X86::BI__builtin_ia32_expandloadsi256_mask: | |||
9336 | case X86::BI__builtin_ia32_expandloadsi512_mask: | |||
9337 | case X86::BI__builtin_ia32_expandloadhi128_mask: | |||
9338 | case X86::BI__builtin_ia32_expandloadhi256_mask: | |||
9339 | case X86::BI__builtin_ia32_expandloadhi512_mask: | |||
9340 | case X86::BI__builtin_ia32_expandloadqi128_mask: | |||
9341 | case X86::BI__builtin_ia32_expandloadqi256_mask: | |||
9342 | case X86::BI__builtin_ia32_expandloadqi512_mask: | |||
9343 | return EmitX86ExpandLoad(*this, Ops); | |||
9344 | ||||
9345 | case X86::BI__builtin_ia32_compressstoredf128_mask: | |||
9346 | case X86::BI__builtin_ia32_compressstoredf256_mask: | |||
9347 | case X86::BI__builtin_ia32_compressstoredf512_mask: | |||
9348 | case X86::BI__builtin_ia32_compressstoresf128_mask: | |||
9349 | case X86::BI__builtin_ia32_compressstoresf256_mask: | |||
9350 | case X86::BI__builtin_ia32_compressstoresf512_mask: | |||
9351 | case X86::BI__builtin_ia32_compressstoredi128_mask: | |||
9352 | case X86::BI__builtin_ia32_compressstoredi256_mask: | |||
9353 | case X86::BI__builtin_ia32_compressstoredi512_mask: | |||
9354 | case X86::BI__builtin_ia32_compressstoresi128_mask: | |||
9355 | case X86::BI__builtin_ia32_compressstoresi256_mask: | |||
9356 | case X86::BI__builtin_ia32_compressstoresi512_mask: | |||
9357 | case X86::BI__builtin_ia32_compressstorehi128_mask: | |||
9358 | case X86::BI__builtin_ia32_compressstorehi256_mask: | |||
9359 | case X86::BI__builtin_ia32_compressstorehi512_mask: | |||
9360 | case X86::BI__builtin_ia32_compressstoreqi128_mask: | |||
9361 | case X86::BI__builtin_ia32_compressstoreqi256_mask: | |||
9362 | case X86::BI__builtin_ia32_compressstoreqi512_mask: | |||
9363 | return EmitX86CompressStore(*this, Ops); | |||
9364 | ||||
9365 | case X86::BI__builtin_ia32_storehps: | |||
9366 | case X86::BI__builtin_ia32_storelps: { | |||
9367 | llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty); | |||
9368 | llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); | |||
9369 | ||||
9370 | // Cast the value to v2i64. | |||
9371 | Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); | |||
9372 | ||||
9373 | // Extract element 0 (storelps) or 1 (storehps). | |||
9374 | unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1; | |||
9375 | Ops[1] = Builder.CreateExtractElement(Ops[1], Index, "extract"); | |||
9376 | ||||
9377 | // Cast the pointer to i64* and store. | |||
9378 | Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); | |||
9379 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); | |||
9380 | } | |||
9381 | case X86::BI__builtin_ia32_vextractf128_pd256: | |||
9382 | case X86::BI__builtin_ia32_vextractf128_ps256: | |||
9383 | case X86::BI__builtin_ia32_vextractf128_si256: | |||
9384 | case X86::BI__builtin_ia32_extract128i256: | |||
9385 | case X86::BI__builtin_ia32_extractf64x4_mask: | |||
9386 | case X86::BI__builtin_ia32_extractf32x4_mask: | |||
9387 | case X86::BI__builtin_ia32_extracti64x4_mask: | |||
9388 | case X86::BI__builtin_ia32_extracti32x4_mask: | |||
9389 | case X86::BI__builtin_ia32_extractf32x8_mask: | |||
9390 | case X86::BI__builtin_ia32_extracti32x8_mask: | |||
9391 | case X86::BI__builtin_ia32_extractf32x4_256_mask: | |||
9392 | case X86::BI__builtin_ia32_extracti32x4_256_mask: | |||
9393 | case X86::BI__builtin_ia32_extractf64x2_256_mask: | |||
9394 | case X86::BI__builtin_ia32_extracti64x2_256_mask: | |||
9395 | case X86::BI__builtin_ia32_extractf64x2_512_mask: | |||
9396 | case X86::BI__builtin_ia32_extracti64x2_512_mask: { | |||
9397 | llvm::Type *DstTy = ConvertType(E->getType()); | |||
9398 | unsigned NumElts = DstTy->getVectorNumElements(); | |||
9399 | unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9400 | unsigned SubVectors = SrcNumElts / NumElts; | |||
9401 | unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue(); | |||
9402 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); | |||
9403 | Index &= SubVectors - 1; // Remove any extra bits. | |||
9404 | Index *= NumElts; | |||
9405 | ||||
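// E.g. _mm512_extractf64x4_pd(v, 1) has NumElts == 4 and SubVectors == 2,
// so the shuffle below reads source lanes 4..7.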
9406 | uint32_t Indices[16]; | |||
9407 | for (unsigned i = 0; i != NumElts; ++i) | |||
9408 | Indices[i] = i + Index; | |||
9409 | ||||
9410 | Value *Res = Builder.CreateShuffleVector(Ops[0], | |||
9411 | UndefValue::get(Ops[0]->getType()), | |||
9412 | makeArrayRef(Indices, NumElts), | |||
9413 | "extract"); | |||
9414 | ||||
9415 | if (Ops.size() == 4) | |||
9416 | Res = EmitX86Select(*this, Ops[3], Res, Ops[2]); | |||
9417 | ||||
9418 | return Res; | |||
9419 | } | |||
9420 | case X86::BI__builtin_ia32_vinsertf128_pd256: | |||
9421 | case X86::BI__builtin_ia32_vinsertf128_ps256: | |||
9422 | case X86::BI__builtin_ia32_vinsertf128_si256: | |||
9423 | case X86::BI__builtin_ia32_insert128i256: | |||
9424 | case X86::BI__builtin_ia32_insertf64x4: | |||
9425 | case X86::BI__builtin_ia32_insertf32x4: | |||
9426 | case X86::BI__builtin_ia32_inserti64x4: | |||
9427 | case X86::BI__builtin_ia32_inserti32x4: | |||
9428 | case X86::BI__builtin_ia32_insertf32x8: | |||
9429 | case X86::BI__builtin_ia32_inserti32x8: | |||
9430 | case X86::BI__builtin_ia32_insertf32x4_256: | |||
9431 | case X86::BI__builtin_ia32_inserti32x4_256: | |||
9432 | case X86::BI__builtin_ia32_insertf64x2_256: | |||
9433 | case X86::BI__builtin_ia32_inserti64x2_256: | |||
9434 | case X86::BI__builtin_ia32_insertf64x2_512: | |||
9435 | case X86::BI__builtin_ia32_inserti64x2_512: { | |||
9436 | unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9437 | unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements(); | |||
9438 | unsigned SubVectors = DstNumElts / SrcNumElts; | |||
9439 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); | |||
9440 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); | |||
9441 | Index &= SubVectors - 1; // Remove any extra bits. | |||
9442 | Index *= SrcNumElts; | |||
9443 | ||||
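// E.g. _mm512_insertf64x4(a, b, 1) first widens the 4-lane b to 8 lanes,
// then the second shuffle replaces lanes 4..7 of a with it.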
9444 | uint32_t Indices[16]; | |||
9445 | for (unsigned i = 0; i != DstNumElts; ++i) | |||
9446 | Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; | |||
9447 | ||||
9448 | Value *Op1 = Builder.CreateShuffleVector(Ops[1], | |||
9449 | UndefValue::get(Ops[1]->getType()), | |||
9450 | makeArrayRef(Indices, DstNumElts), | |||
9451 | "widen"); | |||
9452 | ||||
9453 | for (unsigned i = 0; i != DstNumElts; ++i) { | |||
9454 | if (i >= Index && i < (Index + SrcNumElts)) | |||
9455 | Indices[i] = (i - Index) + DstNumElts; | |||
9456 | else | |||
9457 | Indices[i] = i; | |||
9458 | } | |||
9459 | ||||
9460 | return Builder.CreateShuffleVector(Ops[0], Op1, | |||
9461 | makeArrayRef(Indices, DstNumElts), | |||
9462 | "insert"); | |||
9463 | } | |||
9464 | case X86::BI__builtin_ia32_pmovqd512_mask: | |||
9465 | case X86::BI__builtin_ia32_pmovwb512_mask: { | |||
9466 | Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType()); | |||
9467 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); | |||
9468 | } | |||
9469 | case X86::BI__builtin_ia32_pmovdb512_mask: | |||
9470 | case X86::BI__builtin_ia32_pmovdw512_mask: | |||
9471 | case X86::BI__builtin_ia32_pmovqw512_mask: { | |||
9472 | if (const auto *C = dyn_cast<Constant>(Ops[2])) | |||
9473 | if (C->isAllOnesValue()) | |||
9474 | return Builder.CreateTrunc(Ops[0], Ops[1]->getType()); | |||
9475 | ||||
9476 | Intrinsic::ID IID; | |||
9477 | switch (BuiltinID) { | |||
9478 | default: llvm_unreachable("Unsupported intrinsic!"); | |||
9479 | case X86::BI__builtin_ia32_pmovdb512_mask: | |||
9480 | IID = Intrinsic::x86_avx512_mask_pmov_db_512; | |||
9481 | break; | |||
9482 | case X86::BI__builtin_ia32_pmovdw512_mask: | |||
9483 | IID = Intrinsic::x86_avx512_mask_pmov_dw_512; | |||
9484 | break; | |||
9485 | case X86::BI__builtin_ia32_pmovqw512_mask: | |||
9486 | IID = Intrinsic::x86_avx512_mask_pmov_qw_512; | |||
9487 | break; | |||
9488 | } | |||
9489 | ||||
9490 | Function *Intr = CGM.getIntrinsic(IID); | |||
9491 | return Builder.CreateCall(Intr, Ops); | |||
9492 | } | |||
9493 | case X86::BI__builtin_ia32_pblendw128: | |||
9494 | case X86::BI__builtin_ia32_blendpd: | |||
9495 | case X86::BI__builtin_ia32_blendps: | |||
9496 | case X86::BI__builtin_ia32_blendpd256: | |||
9497 | case X86::BI__builtin_ia32_blendps256: | |||
9498 | case X86::BI__builtin_ia32_pblendw256: | |||
9499 | case X86::BI__builtin_ia32_pblendd128: | |||
9500 | case X86::BI__builtin_ia32_pblendd256: { | |||
9501 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9502 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); | |||
9503 | ||||
9504 | uint32_t Indices[16]; | |||
9505 | // If there are more than 8 elements, the immediate is used twice so make | |||
9506 | // sure we handle that. | |||
9507 | for (unsigned i = 0; i != NumElts; ++i) | |||
9508 | Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i; | |||
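// Worked example: a 4-element blend with Imm = 0b0101 yields
// Indices = {4, 1, 6, 3}, so elements 0 and 2 are taken from Ops[1] and
// elements 1 and 3 from Ops[0].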
9509 | ||||
9510 | return Builder.CreateShuffleVector(Ops[0], Ops[1], | |||
9511 | makeArrayRef(Indices, NumElts), | |||
9512 | "blend"); | |||
9513 | } | |||
9514 | case X86::BI__builtin_ia32_pshuflw: | |||
9515 | case X86::BI__builtin_ia32_pshuflw256: | |||
9516 | case X86::BI__builtin_ia32_pshuflw512: { | |||
9517 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); | |||
9518 | llvm::Type *Ty = Ops[0]->getType(); | |||
9519 | unsigned NumElts = Ty->getVectorNumElements(); | |||
9520 | ||||
9521 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around. | |||
9522 | Imm = (Imm & 0xff) * 0x01010101; | |||
9523 | ||||
9524 | uint32_t Indices[32]; | |||
9525 | for (unsigned l = 0; l != NumElts; l += 8) { | |||
9526 | for (unsigned i = 0; i != 4; ++i) { | |||
9527 | Indices[l + i] = l + (Imm & 3); | |||
9528 | Imm >>= 2; | |||
9529 | } | |||
9530 | for (unsigned i = 4; i != 8; ++i) | |||
9531 | Indices[l + i] = l + i; | |||
9532 | } | |||
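// Worked example: a 128-bit pshuflw with Imm = 0x1B produces
// Indices = {3, 2, 1, 0, 4, 5, 6, 7}: the low four words are reversed and
// the high four words pass through unchanged; wider types repeat this
// pattern per 128-bit lane thanks to the splatted immediate.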
9533 | ||||
9534 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), | |||
9535 | makeArrayRef(Indices, NumElts), | |||
9536 | "pshuflw"); | |||
9537 | } | |||
9538 | case X86::BI__builtin_ia32_pshufhw: | |||
9539 | case X86::BI__builtin_ia32_pshufhw256: | |||
9540 | case X86::BI__builtin_ia32_pshufhw512: { | |||
9541 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); | |||
9542 | llvm::Type *Ty = Ops[0]->getType(); | |||
9543 | unsigned NumElts = Ty->getVectorNumElements(); | |||
9544 | ||||
9545 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around. | |||
9546 | Imm = (Imm & 0xff) * 0x01010101; | |||
9547 | ||||
9548 | uint32_t Indices[32]; | |||
9549 | for (unsigned l = 0; l != NumElts; l += 8) { | |||
9550 | for (unsigned i = 0; i != 4; ++i) | |||
9551 | Indices[l + i] = l + i; | |||
9552 | for (unsigned i = 4; i != 8; ++i) { | |||
9553 | Indices[l + i] = l + 4 + (Imm & 3); | |||
9554 | Imm >>= 2; | |||
9555 | } | |||
9556 | } | |||
9557 | ||||
9558 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), | |||
9559 | makeArrayRef(Indices, NumElts), | |||
9560 | "pshufhw"); | |||
9561 | } | |||
9562 | case X86::BI__builtin_ia32_pshufd: | |||
9563 | case X86::BI__builtin_ia32_pshufd256: | |||
9564 | case X86::BI__builtin_ia32_pshufd512: | |||
9565 | case X86::BI__builtin_ia32_vpermilpd: | |||
9566 | case X86::BI__builtin_ia32_vpermilps: | |||
9567 | case X86::BI__builtin_ia32_vpermilpd256: | |||
9568 | case X86::BI__builtin_ia32_vpermilps256: | |||
9569 | case X86::BI__builtin_ia32_vpermilpd512: | |||
9570 | case X86::BI__builtin_ia32_vpermilps512: { | |||
9571 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); | |||
9572 | llvm::Type *Ty = Ops[0]->getType(); | |||
9573 | unsigned NumElts = Ty->getVectorNumElements(); | |||
9574 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; | |||
9575 | unsigned NumLaneElts = NumElts / NumLanes; | |||
9576 | ||||
9577 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around. | |||
9578 | Imm = (Imm & 0xff) * 0x01010101; | |||
9579 | ||||
9580 | uint32_t Indices[16]; | |||
9581 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { | |||
9582 | for (unsigned i = 0; i != NumLaneElts; ++i) { | |||
9583 | Indices[i + l] = (Imm % NumLaneElts) + l; | |||
9584 | Imm /= NumLaneElts; | |||
9585 | } | |||
9586 | } | |||
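// Worked example: pshufd on a 4 x i32 vector with Imm = 0x1B produces
// Indices = {3, 2, 1, 0}; for wider types the splatted immediate feeds the
// same 8-bit pattern to every 128-bit lane.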
9587 | ||||
9588 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), | |||
9589 | makeArrayRef(Indices, NumElts), | |||
9590 | "permil"); | |||
9591 | } | |||
9592 | case X86::BI__builtin_ia32_shufpd: | |||
9593 | case X86::BI__builtin_ia32_shufpd256: | |||
9594 | case X86::BI__builtin_ia32_shufpd512: | |||
9595 | case X86::BI__builtin_ia32_shufps: | |||
9596 | case X86::BI__builtin_ia32_shufps256: | |||
9597 | case X86::BI__builtin_ia32_shufps512: { | |||
9598 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); | |||
9599 | llvm::Type *Ty = Ops[0]->getType(); | |||
9600 | unsigned NumElts = Ty->getVectorNumElements(); | |||
9601 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; | |||
9602 | unsigned NumLaneElts = NumElts / NumLanes; | |||
9603 | ||||
9604 | // Splat the 8 bits of the immediate 4 times to help the loop wrap around. | |||
9605 | Imm = (Imm & 0xff) * 0x01010101; | |||
9606 | ||||
9607 | uint32_t Indices[16]; | |||
9608 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { | |||
9609 | for (unsigned i = 0; i != NumLaneElts; ++i) { | |||
9610 | unsigned Index = Imm % NumLaneElts; | |||
9611 | Imm /= NumLaneElts; | |||
9612 | if (i >= (NumLaneElts / 2)) | |||
9613 | Index += NumElts; | |||
9614 | Indices[l + i] = l + Index; | |||
9615 | } | |||
9616 | } | |||
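// Worked example: shufps with Imm = 0x1B on 4 x float vectors produces
// Indices = {3, 2, 5, 4}: the low two result elements come from Ops[0] and
// the high two from Ops[1], per the i >= NumLaneElts / 2 test above.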
9617 | ||||
9618 | return Builder.CreateShuffleVector(Ops[0], Ops[1], | |||
9619 | makeArrayRef(Indices, NumElts), | |||
9620 | "shufp"); | |||
9621 | } | |||
9622 | case X86::BI__builtin_ia32_permdi256: | |||
9623 | case X86::BI__builtin_ia32_permdf256: | |||
9624 | case X86::BI__builtin_ia32_permdi512: | |||
9625 | case X86::BI__builtin_ia32_permdf512: { | |||
9626 | unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); | |||
9627 | llvm::Type *Ty = Ops[0]->getType(); | |||
9628 | unsigned NumElts = Ty->getVectorNumElements(); | |||
9629 | ||||
9630 | // These intrinsics operate on 256-bit lanes of four 64-bit elements. | |||
9631 | uint32_t Indices[8]; | |||
9632 | for (unsigned l = 0; l != NumElts; l += 4) | |||
9633 | for (unsigned i = 0; i != 4; ++i) | |||
9634 | Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3); | |||
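// Worked example: vpermq/vpermpd with Imm = 0x4E maps each group of four
// 64-bit elements to Indices = {2, 3, 0, 1}, i.e. it swaps the two 128-bit
// halves of every 256-bit lane.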
9635 | ||||
9636 | return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty), | |||
9637 | makeArrayRef(Indices, NumElts), | |||
9638 | "perm"); | |||
9639 | } | |||
9640 | case X86::BI__builtin_ia32_palignr128: | |||
9641 | case X86::BI__builtin_ia32_palignr256: | |||
9642 | case X86::BI__builtin_ia32_palignr512: { | |||
9643 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; | |||
9644 | ||||
9645 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9646 | assert(NumElts % 16 == 0); | |||
9647 | ||||
9648 | // If palignr is shifting the pair of vectors more than the size of two | |||
9649 | // lanes, emit zero. | |||
9650 | if (ShiftVal >= 32) | |||
9651 | return llvm::Constant::getNullValue(ConvertType(E->getType())); | |||
9652 | ||||
9653 | // If palignr is shifting the pair of input vectors more than one lane, | |||
9654 | // but less than two lanes, convert to shifting in zeroes. | |||
9655 | if (ShiftVal > 16) { | |||
9656 | ShiftVal -= 16; | |||
9657 | Ops[1] = Ops[0]; | |||
9658 | Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); | |||
9659 | } | |||
9660 | ||||
9661 | uint32_t Indices[64]; | |||
9662 | // 256-bit palignr operates on 128-bit lanes so we need to handle that | |||
9663 | for (unsigned l = 0; l != NumElts; l += 16) { | |||
9664 | for (unsigned i = 0; i != 16; ++i) { | |||
9665 | unsigned Idx = ShiftVal + i; | |||
9666 | if (Idx >= 16) | |||
9667 | Idx += NumElts - 16; // End of lane, switch operand. | |||
9668 | Indices[l + i] = Idx + l; | |||
9669 | } | |||
9670 | } | |||
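// Worked example: a 128-bit palignr with ShiftVal = 4 gives
// Indices = {4, ..., 19}, so result bytes 0..11 come from Ops[1] bytes 4..15
// and bytes 12..15 from Ops[0] bytes 0..3, i.e. the concatenation
// Ops[0]:Ops[1] shifted right by four bytes.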
9671 | ||||
9672 | return Builder.CreateShuffleVector(Ops[1], Ops[0], | |||
9673 | makeArrayRef(Indices, NumElts), | |||
9674 | "palignr"); | |||
9675 | } | |||
9676 | case X86::BI__builtin_ia32_alignd128: | |||
9677 | case X86::BI__builtin_ia32_alignd256: | |||
9678 | case X86::BI__builtin_ia32_alignd512: | |||
9679 | case X86::BI__builtin_ia32_alignq128: | |||
9680 | case X86::BI__builtin_ia32_alignq256: | |||
9681 | case X86::BI__builtin_ia32_alignq512: { | |||
9682 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9683 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; | |||
9684 | ||||
9685 | // Mask the shift amount to width of two vectors. | |||
9686 | ShiftVal &= (2 * NumElts) - 1; | |||
9687 | ||||
9688 | uint32_t Indices[16]; | |||
9689 | for (unsigned i = 0; i != NumElts; ++i) | |||
9690 | Indices[i] = i + ShiftVal; | |||
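// Worked example: valignd with ShiftVal = 3 on 4-element vectors gives
// Indices = {3, 4, 5, 6}: element 3 of Ops[1] followed by elements 0..2 of
// Ops[0], i.e. the concatenation Ops[0]:Ops[1] shifted right by three
// elements.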
9691 | ||||
9692 | return Builder.CreateShuffleVector(Ops[1], Ops[0], | |||
9693 | makeArrayRef(Indices, NumElts), | |||
9694 | "valign"); | |||
9695 | } | |||
9696 | case X86::BI__builtin_ia32_shuf_f32x4_256: | |||
9697 | case X86::BI__builtin_ia32_shuf_f64x2_256: | |||
9698 | case X86::BI__builtin_ia32_shuf_i32x4_256: | |||
9699 | case X86::BI__builtin_ia32_shuf_i64x2_256: | |||
9700 | case X86::BI__builtin_ia32_shuf_f32x4: | |||
9701 | case X86::BI__builtin_ia32_shuf_f64x2: | |||
9702 | case X86::BI__builtin_ia32_shuf_i32x4: | |||
9703 | case X86::BI__builtin_ia32_shuf_i64x2: { | |||
9704 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); | |||
9705 | llvm::Type *Ty = Ops[0]->getType(); | |||
9706 | unsigned NumElts = Ty->getVectorNumElements(); | |||
9707 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; | |||
9708 | unsigned NumLaneElts = NumElts / NumLanes; | |||
9709 | ||||
9710 | uint32_t Indices[16]; | |||
9711 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { | |||
9712 | unsigned Index = (Imm % NumLanes) * NumLaneElts; | |||
9713 | Imm /= NumLanes; // Discard the bits we just used. | |||
9714 | if (l >= (NumElts / 2)) | |||
9715 | Index += NumElts; // Switch to other source. | |||
9716 | for (unsigned i = 0; i != NumLaneElts; ++i) { | |||
9717 | Indices[l + i] = Index + i; | |||
9718 | } | |||
9719 | } | |||
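// Worked example: a 256-bit shuf_f64x2 with Imm = 3 picks the high 128-bit
// lane of each source, giving Indices = {2, 3, 6, 7}: Ops[0]'s upper lane
// followed by Ops[1]'s upper lane.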
9720 | ||||
9721 | return Builder.CreateShuffleVector(Ops[0], Ops[1], | |||
9722 | makeArrayRef(Indices, NumElts), | |||
9723 | "shuf"); | |||
9724 | } | |||
9725 | ||||
9726 | case X86::BI__builtin_ia32_vperm2f128_pd256: | |||
9727 | case X86::BI__builtin_ia32_vperm2f128_ps256: | |||
9728 | case X86::BI__builtin_ia32_vperm2f128_si256: | |||
9729 | case X86::BI__builtin_ia32_permti256: { | |||
9730 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); | |||
9731 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
9732 | ||||
9733 | // This takes a very simple approach since there are two lanes and a | |||
9734 | // shuffle can have 2 inputs. So we reserve the first input for the first | |||
9735 | // lane and the second input for the second lane. This may result in | |||
9736 | // duplicate sources, but this can be dealt with in the backend. | |||
9737 | ||||
9738 | Value *OutOps[2]; | |||
9739 | uint32_t Indices[8]; | |||
9740 | for (unsigned l = 0; l != 2; ++l) { | |||
9741 | // Determine the source for this lane. | |||
9742 | if (Imm & (1 << ((l * 4) + 3))) | |||
9743 | OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType()); | |||
9744 | else if (Imm & (1 << ((l * 4) + 1))) | |||
9745 | OutOps[l] = Ops[1]; | |||
9746 | else | |||
9747 | OutOps[l] = Ops[0]; | |||
9748 | ||||
9749 | for (unsigned i = 0; i != NumElts/2; ++i) { | |||
9750 | // Start with ith element of the source for this lane. | |||
9751 | unsigned Idx = (l * NumElts) + i; | |||
9752 | // If bit 0 of the immediate half is set, switch to the high half of | |||
9753 | // the source. | |||
9754 | if (Imm & (1 << (l * 4))) | |||
9755 | Idx += NumElts/2; | |||
9756 | Indices[(l * (NumElts/2)) + i] = Idx; | |||
9757 | } | |||
9758 | } | |||
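// Worked example: with Imm = 0x21 the low result lane is the high half of
// Ops[0] (bit 1 clear, bit 0 set) and the high result lane is the low half
// of Ops[1] (bit 5 set, bit 4 clear); neither zeroing bit (3 or 7) is set.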
9759 | ||||
9760 | return Builder.CreateShuffleVector(OutOps[0], OutOps[1], | |||
9761 | makeArrayRef(Indices, NumElts), | |||
9762 | "vperm"); | |||
9763 | } | |||
9764 | ||||
9765 | case X86::BI__builtin_ia32_pslldqi128_byteshift: | |||
9766 | case X86::BI__builtin_ia32_pslldqi256_byteshift: | |||
9767 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { | |||
9768 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; | |||
9769 | llvm::Type *ResultType = Ops[0]->getType(); | |||
9770 | // Builtin type is vXi64 so multiply by 8 to get bytes. | |||
9771 | unsigned NumElts = ResultType->getVectorNumElements() * 8; | |||
9772 | ||||
9773 | // If pslldq is shifting the vector more than 15 bytes, emit zero. | |||
9774 | if (ShiftVal >= 16) | |||
9775 | return llvm::Constant::getNullValue(ResultType); | |||
9776 | ||||
9777 | uint32_t Indices[64]; | |||
9778 | // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that | |||
9779 | for (unsigned l = 0; l != NumElts; l += 16) { | |||
9780 | for (unsigned i = 0; i != 16; ++i) { | |||
9781 | unsigned Idx = NumElts + i - ShiftVal; | |||
9782 | if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. | |||
9783 | Indices[l + i] = Idx + l; | |||
9784 | } | |||
9785 | } | |||
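// Worked example: a 128-bit pslldq with ShiftVal = 4 gives
// Indices = {12, ..., 27}; with Zero as the first shuffle operand, result
// bytes 0..3 are zero and bytes 4..15 are source bytes 0..11, i.e. a left
// shift by four bytes within the lane.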
9786 | ||||
9787 | llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts); | |||
9788 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); | |||
9789 | Value *Zero = llvm::Constant::getNullValue(VecTy); | |||
9790 | Value *SV = Builder.CreateShuffleVector(Zero, Cast, | |||
9791 | makeArrayRef(Indices, NumElts), | |||
9792 | "pslldq"); | |||
9793 | return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); | |||
9794 | } | |||
9795 | case X86::BI__builtin_ia32_psrldqi128_byteshift: | |||
9796 | case X86::BI__builtin_ia32_psrldqi256_byteshift: | |||
9797 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { | |||
9798 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; | |||
9799 | llvm::Type *ResultType = Ops[0]->getType(); | |||
9800 | // Builtin type is vXi64 so multiply by 8 to get bytes. | |||
9801 | unsigned NumElts = ResultType->getVectorNumElements() * 8; | |||
9802 | ||||
9803 | // If psrldq is shifting the vector more than 15 bytes, emit zero. | |||
9804 | if (ShiftVal >= 16) | |||
9805 | return llvm::Constant::getNullValue(ResultType); | |||
9806 | ||||
9807 | uint32_t Indices[64]; | |||
9808 | // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that | |||
9809 | for (unsigned l = 0; l != NumElts; l += 16) { | |||
9810 | for (unsigned i = 0; i != 16; ++i) { | |||
9811 | unsigned Idx = i + ShiftVal; | |||
9812 | if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. | |||
9813 | Indices[l + i] = Idx + l; | |||
9814 | } | |||
9815 | } | |||
9816 | ||||
9817 | llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts); | |||
9818 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); | |||
9819 | Value *Zero = llvm::Constant::getNullValue(VecTy); | |||
9820 | Value *SV = Builder.CreateShuffleVector(Cast, Zero, | |||
9821 | makeArrayRef(Indices, NumElts), | |||
9822 | "psrldq"); | |||
9823 | return Builder.CreateBitCast(SV, ResultType, "cast"); | |||
9824 | } | |||
9825 | case X86::BI__builtin_ia32_movnti: | |||
9826 | case X86::BI__builtin_ia32_movnti64: | |||
9827 | case X86::BI__builtin_ia32_movntsd: | |||
9828 | case X86::BI__builtin_ia32_movntss: { | |||
9829 | llvm::MDNode *Node = llvm::MDNode::get( | |||
9830 | getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); | |||
9831 | ||||
9832 | Value *Ptr = Ops[0]; | |||
9833 | Value *Src = Ops[1]; | |||
9834 | ||||
9835 | // Extract the 0'th element of the source vector. | |||
9836 | if (BuiltinID == X86::BI__builtin_ia32_movntsd || | |||
9837 | BuiltinID == X86::BI__builtin_ia32_movntss) | |||
9838 | Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract"); | |||
9839 | ||||
9840 | // Convert the type of the pointer to a pointer to the stored type. | |||
9841 | Value *BC = Builder.CreateBitCast( | |||
9842 | Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast"); | |||
9843 | ||||
9844 | // Unaligned nontemporal store of the scalar value. | |||
9845 | StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC); | |||
9846 | SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); | |||
9847 | SI->setAlignment(1); | |||
9848 | return SI; | |||
9849 | } | |||
9850 | ||||
9851 | case X86::BI__builtin_ia32_selectb_128: | |||
9852 | case X86::BI__builtin_ia32_selectb_256: | |||
9853 | case X86::BI__builtin_ia32_selectb_512: | |||
9854 | case X86::BI__builtin_ia32_selectw_128: | |||
9855 | case X86::BI__builtin_ia32_selectw_256: | |||
9856 | case X86::BI__builtin_ia32_selectw_512: | |||
9857 | case X86::BI__builtin_ia32_selectd_128: | |||
9858 | case X86::BI__builtin_ia32_selectd_256: | |||
9859 | case X86::BI__builtin_ia32_selectd_512: | |||
9860 | case X86::BI__builtin_ia32_selectq_128: | |||
9861 | case X86::BI__builtin_ia32_selectq_256: | |||
9862 | case X86::BI__builtin_ia32_selectq_512: | |||
9863 | case X86::BI__builtin_ia32_selectps_128: | |||
9864 | case X86::BI__builtin_ia32_selectps_256: | |||
9865 | case X86::BI__builtin_ia32_selectps_512: | |||
9866 | case X86::BI__builtin_ia32_selectpd_128: | |||
9867 | case X86::BI__builtin_ia32_selectpd_256: | |||
9868 | case X86::BI__builtin_ia32_selectpd_512: | |||
9869 | return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]); | |||
9870 | case X86::BI__builtin_ia32_selectss_128: | |||
9871 | case X86::BI__builtin_ia32_selectsd_128: { | |||
9872 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); | |||
9873 | Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0); | |||
9874 | A = EmitX86ScalarSelect(*this, Ops[0], A, B); | |||
9875 | return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0); | |||
9876 | } | |||
9877 | case X86::BI__builtin_ia32_cmpb128_mask: | |||
9878 | case X86::BI__builtin_ia32_cmpb256_mask: | |||
9879 | case X86::BI__builtin_ia32_cmpb512_mask: | |||
9880 | case X86::BI__builtin_ia32_cmpw128_mask: | |||
9881 | case X86::BI__builtin_ia32_cmpw256_mask: | |||
9882 | case X86::BI__builtin_ia32_cmpw512_mask: | |||
9883 | case X86::BI__builtin_ia32_cmpd128_mask: | |||
9884 | case X86::BI__builtin_ia32_cmpd256_mask: | |||
9885 | case X86::BI__builtin_ia32_cmpd512_mask: | |||
9886 | case X86::BI__builtin_ia32_cmpq128_mask: | |||
9887 | case X86::BI__builtin_ia32_cmpq256_mask: | |||
9888 | case X86::BI__builtin_ia32_cmpq512_mask: { | |||
9889 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; | |||
9890 | return EmitX86MaskedCompare(*this, CC, true, Ops); | |||
9891 | } | |||
9892 | case X86::BI__builtin_ia32_ucmpb128_mask: | |||
9893 | case X86::BI__builtin_ia32_ucmpb256_mask: | |||
9894 | case X86::BI__builtin_ia32_ucmpb512_mask: | |||
9895 | case X86::BI__builtin_ia32_ucmpw128_mask: | |||
9896 | case X86::BI__builtin_ia32_ucmpw256_mask: | |||
9897 | case X86::BI__builtin_ia32_ucmpw512_mask: | |||
9898 | case X86::BI__builtin_ia32_ucmpd128_mask: | |||
9899 | case X86::BI__builtin_ia32_ucmpd256_mask: | |||
9900 | case X86::BI__builtin_ia32_ucmpd512_mask: | |||
9901 | case X86::BI__builtin_ia32_ucmpq128_mask: | |||
9902 | case X86::BI__builtin_ia32_ucmpq256_mask: | |||
9903 | case X86::BI__builtin_ia32_ucmpq512_mask: { | |||
9904 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; | |||
9905 | return EmitX86MaskedCompare(*this, CC, false, Ops); | |||
9906 | } | |||
9907 | ||||
9908 | case X86::BI__builtin_ia32_kortestchi: | |||
9909 | case X86::BI__builtin_ia32_kortestzhi: { | |||
9910 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, 16, Ops); | |||
9911 | Value *C; | |||
9912 | if (BuiltinID == X86::BI__builtin_ia32_kortestchi) | |||
9913 | C = llvm::Constant::getAllOnesValue(Builder.getInt16Ty()); | |||
9914 | else | |||
9915 | C = llvm::Constant::getNullValue(Builder.getInt16Ty()); | |||
9916 | Value *Cmp = Builder.CreateICmpEQ(Or, C); | |||
9917 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); | |||
9918 | } | |||
9919 | ||||
9920 | case X86::BI__builtin_ia32_kandhi: | |||
9921 | return EmitX86MaskLogic(*this, Instruction::And, 16, Ops); | |||
9922 | case X86::BI__builtin_ia32_kandnhi: | |||
9923 | return EmitX86MaskLogic(*this, Instruction::And, 16, Ops, true); | |||
9924 | case X86::BI__builtin_ia32_korhi: | |||
9925 | return EmitX86MaskLogic(*this, Instruction::Or, 16, Ops); | |||
9926 | case X86::BI__builtin_ia32_kxnorhi: | |||
9927 | return EmitX86MaskLogic(*this, Instruction::Xor, 16, Ops, true); | |||
9928 | case X86::BI__builtin_ia32_kxorhi: | |||
9929 | return EmitX86MaskLogic(*this, Instruction::Xor, 16, Ops); | |||
9930 | case X86::BI__builtin_ia32_knothi: { | |||
9931 | Ops[0] = getMaskVecValue(*this, Ops[0], 16); | |||
9932 | return Builder.CreateBitCast(Builder.CreateNot(Ops[0]), | |||
9933 | Builder.getInt16Ty()); | |||
9934 | } | |||
9935 | ||||
9936 | case X86::BI__builtin_ia32_kunpckdi: | |||
9937 | case X86::BI__builtin_ia32_kunpcksi: | |||
9938 | case X86::BI__builtin_ia32_kunpckhi: { | |||
9939 | unsigned NumElts = Ops[0]->getType()->getScalarSizeInBits(); | |||
9940 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); | |||
9941 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); | |||
9942 | uint32_t Indices[64]; | |||
9943 | for (unsigned i = 0; i != NumElts; ++i) | |||
9944 | Indices[i] = i; | |||
9945 | ||||
9946 | // First extract half of each vector. This gives better codegen than | |||
9947 | // doing it in a single shuffle. | |||
9948 | LHS = Builder.CreateShuffleVector(LHS, LHS, | |||
9949 | makeArrayRef(Indices, NumElts / 2)); | |||
9950 | RHS = Builder.CreateShuffleVector(RHS, RHS, | |||
9951 | makeArrayRef(Indices, NumElts / 2)); | |||
9952 | // Concat the vectors. | |||
9953 | // NOTE: Operands are swapped to match the intrinsic definition. | |||
9954 | Value *Res = Builder.CreateShuffleVector(RHS, LHS, | |||
9955 | makeArrayRef(Indices, NumElts)); | |||
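// Worked example: for the 16-bit kunpck, the concatenation above places the
// low 8 bits of Ops[1] in result elements 0..7 and the low 8 bits of Ops[0]
// in elements 8..15, which is why the operands are swapped.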
9956 | return Builder.CreateBitCast(Res, Ops[0]->getType()); | |||
9957 | } | |||
9958 | ||||
9959 | case X86::BI__builtin_ia32_vplzcntd_128: | |||
9960 | case X86::BI__builtin_ia32_vplzcntd_256: | |||
9961 | case X86::BI__builtin_ia32_vplzcntd_512: | |||
9962 | case X86::BI__builtin_ia32_vplzcntq_128: | |||
9963 | case X86::BI__builtin_ia32_vplzcntq_256: | |||
9964 | case X86::BI__builtin_ia32_vplzcntq_512: { | |||
9965 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); | |||
9966 | return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}); | |||
9967 | } | |||
9968 | case X86::BI__builtin_ia32_sqrtss: | |||
9969 | case X86::BI__builtin_ia32_sqrtsd: { | |||
9970 | Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0); | |||
9971 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); | |||
9972 | A = Builder.CreateCall(F, {A}); | |||
9973 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); | |||
9974 | } | |||
9975 | case X86::BI__builtin_ia32_sqrtsd_round_mask: | |||
9976 | case X86::BI__builtin_ia32_sqrtss_round_mask: { | |||
9977 | unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); | |||
9978 | // Lower to generic IR only if the rounding mode is 4 (AKA CUR_DIRECTION); | |||
9979 | // otherwise keep the target intrinsic. | |||
9980 | if (CC != 4) { | |||
9981 | Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ? | |||
9982 | Intrinsic::x86_avx512_mask_sqrt_sd : | |||
9983 | Intrinsic::x86_avx512_mask_sqrt_ss; | |||
9984 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); | |||
9985 | } | |||
9986 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); | |||
9987 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); | |||
9988 | A = Builder.CreateCall(F, A); | |||
9989 | Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0); | |||
9990 | A = EmitX86ScalarSelect(*this, Ops[3], A, Src); | |||
9991 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); | |||
9992 | } | |||
9993 | case X86::BI__builtin_ia32_sqrtpd256: | |||
9994 | case X86::BI__builtin_ia32_sqrtpd: | |||
9995 | case X86::BI__builtin_ia32_sqrtps256: | |||
9996 | case X86::BI__builtin_ia32_sqrtps: | |||
9997 | case X86::BI__builtin_ia32_sqrtps512: | |||
9998 | case X86::BI__builtin_ia32_sqrtpd512: { | |||
9999 | if (Ops.size() == 2) { | |||
10000 | unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); | |||
10001 | // Lower to generic IR only if the rounding mode is 4 (AKA CUR_DIRECTION); | |||
10002 | // otherwise keep the target intrinsic. | |||
10003 | if (CC != 4) { | |||
10004 | Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ? | |||
10005 | Intrinsic::x86_avx512_sqrt_ps_512 : | |||
10006 | Intrinsic::x86_avx512_sqrt_pd_512; | |||
10007 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); | |||
10008 | } | |||
10009 | } | |||
10010 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); | |||
10011 | return Builder.CreateCall(F, Ops[0]); | |||
10012 | } | |||
10013 | case X86::BI__builtin_ia32_pabsb128: | |||
10014 | case X86::BI__builtin_ia32_pabsw128: | |||
10015 | case X86::BI__builtin_ia32_pabsd128: | |||
10016 | case X86::BI__builtin_ia32_pabsb256: | |||
10017 | case X86::BI__builtin_ia32_pabsw256: | |||
10018 | case X86::BI__builtin_ia32_pabsd256: | |||
10019 | case X86::BI__builtin_ia32_pabsq128: | |||
10020 | case X86::BI__builtin_ia32_pabsq256: | |||
10021 | case X86::BI__builtin_ia32_pabsb512: | |||
10022 | case X86::BI__builtin_ia32_pabsw512: | |||
10023 | case X86::BI__builtin_ia32_pabsd512: | |||
10024 | case X86::BI__builtin_ia32_pabsq512: | |||
10025 | return EmitX86Abs(*this, Ops); | |||
10026 | ||||
10027 | case X86::BI__builtin_ia32_pmaxsb128: | |||
10028 | case X86::BI__builtin_ia32_pmaxsw128: | |||
10029 | case X86::BI__builtin_ia32_pmaxsd128: | |||
10030 | case X86::BI__builtin_ia32_pmaxsq128: | |||
10031 | case X86::BI__builtin_ia32_pmaxsb256: | |||
10032 | case X86::BI__builtin_ia32_pmaxsw256: | |||
10033 | case X86::BI__builtin_ia32_pmaxsd256: | |||
10034 | case X86::BI__builtin_ia32_pmaxsq256: | |||
10035 | case X86::BI__builtin_ia32_pmaxsb512: | |||
10036 | case X86::BI__builtin_ia32_pmaxsw512: | |||
10037 | case X86::BI__builtin_ia32_pmaxsd512: | |||
10038 | case X86::BI__builtin_ia32_pmaxsq512: | |||
10039 | return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops); | |||
10040 | case X86::BI__builtin_ia32_pmaxub128: | |||
10041 | case X86::BI__builtin_ia32_pmaxuw128: | |||
10042 | case X86::BI__builtin_ia32_pmaxud128: | |||
10043 | case X86::BI__builtin_ia32_pmaxuq128: | |||
10044 | case X86::BI__builtin_ia32_pmaxub256: | |||
10045 | case X86::BI__builtin_ia32_pmaxuw256: | |||
10046 | case X86::BI__builtin_ia32_pmaxud256: | |||
10047 | case X86::BI__builtin_ia32_pmaxuq256: | |||
10048 | case X86::BI__builtin_ia32_pmaxub512: | |||
10049 | case X86::BI__builtin_ia32_pmaxuw512: | |||
10050 | case X86::BI__builtin_ia32_pmaxud512: | |||
10051 | case X86::BI__builtin_ia32_pmaxuq512: | |||
10052 | return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops); | |||
10053 | case X86::BI__builtin_ia32_pminsb128: | |||
10054 | case X86::BI__builtin_ia32_pminsw128: | |||
10055 | case X86::BI__builtin_ia32_pminsd128: | |||
10056 | case X86::BI__builtin_ia32_pminsq128: | |||
10057 | case X86::BI__builtin_ia32_pminsb256: | |||
10058 | case X86::BI__builtin_ia32_pminsw256: | |||
10059 | case X86::BI__builtin_ia32_pminsd256: | |||
10060 | case X86::BI__builtin_ia32_pminsq256: | |||
10061 | case X86::BI__builtin_ia32_pminsb512: | |||
10062 | case X86::BI__builtin_ia32_pminsw512: | |||
10063 | case X86::BI__builtin_ia32_pminsd512: | |||
10064 | case X86::BI__builtin_ia32_pminsq512: | |||
10065 | return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops); | |||
10066 | case X86::BI__builtin_ia32_pminub128: | |||
10067 | case X86::BI__builtin_ia32_pminuw128: | |||
10068 | case X86::BI__builtin_ia32_pminud128: | |||
10069 | case X86::BI__builtin_ia32_pminuq128: | |||
10070 | case X86::BI__builtin_ia32_pminub256: | |||
10071 | case X86::BI__builtin_ia32_pminuw256: | |||
10072 | case X86::BI__builtin_ia32_pminud256: | |||
10073 | case X86::BI__builtin_ia32_pminuq256: | |||
10074 | case X86::BI__builtin_ia32_pminub512: | |||
10075 | case X86::BI__builtin_ia32_pminuw512: | |||
10076 | case X86::BI__builtin_ia32_pminud512: | |||
10077 | case X86::BI__builtin_ia32_pminuq512: | |||
10078 | return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops); | |||
10079 | ||||
10080 | case X86::BI__builtin_ia32_pmuludq128: | |||
10081 | case X86::BI__builtin_ia32_pmuludq256: | |||
10082 | case X86::BI__builtin_ia32_pmuludq512: | |||
10083 | return EmitX86Muldq(*this, /*IsSigned*/false, Ops); | |||
10084 | ||||
10085 | case X86::BI__builtin_ia32_pmuldq128: | |||
10086 | case X86::BI__builtin_ia32_pmuldq256: | |||
10087 | case X86::BI__builtin_ia32_pmuldq512: | |||
10088 | return EmitX86Muldq(*this, /*IsSigned*/true, Ops); | |||
10089 | ||||
10090 | case X86::BI__builtin_ia32_pternlogd512_mask: | |||
10091 | case X86::BI__builtin_ia32_pternlogq512_mask: | |||
10092 | case X86::BI__builtin_ia32_pternlogd128_mask: | |||
10093 | case X86::BI__builtin_ia32_pternlogd256_mask: | |||
10094 | case X86::BI__builtin_ia32_pternlogq128_mask: | |||
10095 | case X86::BI__builtin_ia32_pternlogq256_mask: | |||
10096 | return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops); | |||
10097 | ||||
10098 | case X86::BI__builtin_ia32_pternlogd512_maskz: | |||
10099 | case X86::BI__builtin_ia32_pternlogq512_maskz: | |||
10100 | case X86::BI__builtin_ia32_pternlogd128_maskz: | |||
10101 | case X86::BI__builtin_ia32_pternlogd256_maskz: | |||
10102 | case X86::BI__builtin_ia32_pternlogq128_maskz: | |||
10103 | case X86::BI__builtin_ia32_pternlogq256_maskz: | |||
10104 | return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops); | |||
10105 | ||||
10106 | // 3DNow! | |||
10107 | case X86::BI__builtin_ia32_pswapdsf: | |||
10108 | case X86::BI__builtin_ia32_pswapdsi: { | |||
10109 | llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext()); | |||
10110 | Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast"); | |||
10111 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); | |||
10112 | return Builder.CreateCall(F, Ops, "pswapd"); | |||
10113 | } | |||
10114 | case X86::BI__builtin_ia32_rdrand16_step: | |||
10115 | case X86::BI__builtin_ia32_rdrand32_step: | |||
10116 | case X86::BI__builtin_ia32_rdrand64_step: | |||
10117 | case X86::BI__builtin_ia32_rdseed16_step: | |||
10118 | case X86::BI__builtin_ia32_rdseed32_step: | |||
10119 | case X86::BI__builtin_ia32_rdseed64_step: { | |||
10120 | Intrinsic::ID ID; | |||
10121 | switch (BuiltinID) { | |||
10122 | default: llvm_unreachable("Unsupported intrinsic!"); | |||
10123 | case X86::BI__builtin_ia32_rdrand16_step: | |||
10124 | ID = Intrinsic::x86_rdrand_16; | |||
10125 | break; | |||
10126 | case X86::BI__builtin_ia32_rdrand32_step: | |||
10127 | ID = Intrinsic::x86_rdrand_32; | |||
10128 | break; | |||
10129 | case X86::BI__builtin_ia32_rdrand64_step: | |||
10130 | ID = Intrinsic::x86_rdrand_64; | |||
10131 | break; | |||
10132 | case X86::BI__builtin_ia32_rdseed16_step: | |||
10133 | ID = Intrinsic::x86_rdseed_16; | |||
10134 | break; | |||
10135 | case X86::BI__builtin_ia32_rdseed32_step: | |||
10136 | ID = Intrinsic::x86_rdseed_32; | |||
10137 | break; | |||
10138 | case X86::BI__builtin_ia32_rdseed64_step: | |||
10139 | ID = Intrinsic::x86_rdseed_64; | |||
10140 | break; | |||
10141 | } | |||
10142 | ||||
10143 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID)); | |||
10144 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0), | |||
10145 | Ops[0]); | |||
10146 | return Builder.CreateExtractValue(Call, 1); | |||
10147 | } | |||
10148 | ||||
10149 | case X86::BI__builtin_ia32_fpclassps128_mask: | |||
10150 | case X86::BI__builtin_ia32_fpclassps256_mask: | |||
10151 | case X86::BI__builtin_ia32_fpclassps512_mask: | |||
10152 | case X86::BI__builtin_ia32_fpclasspd128_mask: | |||
10153 | case X86::BI__builtin_ia32_fpclasspd256_mask: | |||
10154 | case X86::BI__builtin_ia32_fpclasspd512_mask: { | |||
10155 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
10156 | Value *MaskIn = Ops[2]; | |||
10157 | Ops.erase(&Ops[2]); | |||
10158 | ||||
10159 | Intrinsic::ID ID; | |||
10160 | switch (BuiltinID) { | |||
10161 | default: llvm_unreachable("Unsupported intrinsic!"); | |||
10162 | case X86::BI__builtin_ia32_fpclassps128_mask: | |||
10163 | ID = Intrinsic::x86_avx512_fpclass_ps_128; | |||
10164 | break; | |||
10165 | case X86::BI__builtin_ia32_fpclassps256_mask: | |||
10166 | ID = Intrinsic::x86_avx512_fpclass_ps_256; | |||
10167 | break; | |||
10168 | case X86::BI__builtin_ia32_fpclassps512_mask: | |||
10169 | ID = Intrinsic::x86_avx512_fpclass_ps_512; | |||
10170 | break; | |||
10171 | case X86::BI__builtin_ia32_fpclasspd128_mask: | |||
10172 | ID = Intrinsic::x86_avx512_fpclass_pd_128; | |||
10173 | break; | |||
10174 | case X86::BI__builtin_ia32_fpclasspd256_mask: | |||
10175 | ID = Intrinsic::x86_avx512_fpclass_pd_256; | |||
10176 | break; | |||
10177 | case X86::BI__builtin_ia32_fpclasspd512_mask: | |||
10178 | ID = Intrinsic::x86_avx512_fpclass_pd_512; | |||
10179 | break; | |||
10180 | } | |||
10181 | ||||
10182 | Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); | |||
10183 | return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn); | |||
10184 | } | |||
10185 | ||||
10186 | // packed comparison intrinsics | |||
10187 | case X86::BI__builtin_ia32_cmpeqps: | |||
10188 | case X86::BI__builtin_ia32_cmpeqpd: | |||
10189 | return getVectorFCmpIR(CmpInst::FCMP_OEQ); | |||
10190 | case X86::BI__builtin_ia32_cmpltps: | |||
10191 | case X86::BI__builtin_ia32_cmpltpd: | |||
10192 | return getVectorFCmpIR(CmpInst::FCMP_OLT); | |||
10193 | case X86::BI__builtin_ia32_cmpleps: | |||
10194 | case X86::BI__builtin_ia32_cmplepd: | |||
10195 | return getVectorFCmpIR(CmpInst::FCMP_OLE); | |||
10196 | case X86::BI__builtin_ia32_cmpunordps: | |||
10197 | case X86::BI__builtin_ia32_cmpunordpd: | |||
10198 | return getVectorFCmpIR(CmpInst::FCMP_UNO); | |||
10199 | case X86::BI__builtin_ia32_cmpneqps: | |||
10200 | case X86::BI__builtin_ia32_cmpneqpd: | |||
10201 | return getVectorFCmpIR(CmpInst::FCMP_UNE); | |||
10202 | case X86::BI__builtin_ia32_cmpnltps: | |||
10203 | case X86::BI__builtin_ia32_cmpnltpd: | |||
10204 | return getVectorFCmpIR(CmpInst::FCMP_UGE); | |||
10205 | case X86::BI__builtin_ia32_cmpnleps: | |||
10206 | case X86::BI__builtin_ia32_cmpnlepd: | |||
10207 | return getVectorFCmpIR(CmpInst::FCMP_UGT); | |||
10208 | case X86::BI__builtin_ia32_cmpordps: | |||
10209 | case X86::BI__builtin_ia32_cmpordpd: | |||
10210 | return getVectorFCmpIR(CmpInst::FCMP_ORD); | |||
10211 | case X86::BI__builtin_ia32_cmpps: | |||
10212 | case X86::BI__builtin_ia32_cmpps256: | |||
10213 | case X86::BI__builtin_ia32_cmppd: | |||
10214 | case X86::BI__builtin_ia32_cmppd256: | |||
10215 | case X86::BI__builtin_ia32_cmpps128_mask: | |||
10216 | case X86::BI__builtin_ia32_cmpps256_mask: | |||
10217 | case X86::BI__builtin_ia32_cmpps512_mask: | |||
10218 | case X86::BI__builtin_ia32_cmppd128_mask: | |||
10219 | case X86::BI__builtin_ia32_cmppd256_mask: | |||
10220 | case X86::BI__builtin_ia32_cmppd512_mask: { | |||
10221 | // Lowering vector comparisons to fcmp instructions, while | |||
10222 | // ignoring any requested signalling behaviour and | |||
10223 | // ignoring any requested rounding mode. | |||
10224 | // This is only possible as long as FENV_ACCESS is not implemented. | |||
10225 | // See also: https://reviews.llvm.org/D45616 | |||
10226 | ||||
10227 | // The third argument is the comparison condition, an integer in the | |||
10228 | // range [0, 31]. | |||
10229 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f; | |||
10230 | ||||
10231 | // Lowering to IR fcmp instruction. | |||
10232 | // Ignoring requested signaling behaviour, | |||
10233 | // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT. | |||
10234 | FCmpInst::Predicate Pred; | |||
10235 | switch (CC) { | |||
10236 | case 0x00: Pred = FCmpInst::FCMP_OEQ; break; | |||
10237 | case 0x01: Pred = FCmpInst::FCMP_OLT; break; | |||
10238 | case 0x02: Pred = FCmpInst::FCMP_OLE; break; | |||
10239 | case 0x03: Pred = FCmpInst::FCMP_UNO; break; | |||
10240 | case 0x04: Pred = FCmpInst::FCMP_UNE; break; | |||
10241 | case 0x05: Pred = FCmpInst::FCMP_UGE; break; | |||
10242 | case 0x06: Pred = FCmpInst::FCMP_UGT; break; | |||
10243 | case 0x07: Pred = FCmpInst::FCMP_ORD; break; | |||
10244 | case 0x08: Pred = FCmpInst::FCMP_UEQ; break; | |||
10245 | case 0x09: Pred = FCmpInst::FCMP_ULT; break; | |||
10246 | case 0x0a: Pred = FCmpInst::FCMP_ULE; break; | |||
10247 | case 0x0b: Pred = FCmpInst::FCMP_FALSE; break; | |||
10248 | case 0x0c: Pred = FCmpInst::FCMP_ONE; break; | |||
10249 | case 0x0d: Pred = FCmpInst::FCMP_OGE; break; | |||
10250 | case 0x0e: Pred = FCmpInst::FCMP_OGT; break; | |||
10251 | case 0x0f: Pred = FCmpInst::FCMP_TRUE; break; | |||
10252 | case 0x10: Pred = FCmpInst::FCMP_OEQ; break; | |||
10253 | case 0x11: Pred = FCmpInst::FCMP_OLT; break; | |||
10254 | case 0x12: Pred = FCmpInst::FCMP_OLE; break; | |||
10255 | case 0x13: Pred = FCmpInst::FCMP_UNO; break; | |||
10256 | case 0x14: Pred = FCmpInst::FCMP_UNE; break; | |||
10257 | case 0x15: Pred = FCmpInst::FCMP_UGE; break; | |||
10258 | case 0x16: Pred = FCmpInst::FCMP_UGT; break; | |||
10259 | case 0x17: Pred = FCmpInst::FCMP_ORD; break; | |||
10260 | case 0x18: Pred = FCmpInst::FCMP_UEQ; break; | |||
10261 | case 0x19: Pred = FCmpInst::FCMP_ULT; break; | |||
10262 | case 0x1a: Pred = FCmpInst::FCMP_ULE; break; | |||
10263 | case 0x1b: Pred = FCmpInst::FCMP_FALSE; break; | |||
10264 | case 0x1c: Pred = FCmpInst::FCMP_ONE; break; | |||
10265 | case 0x1d: Pred = FCmpInst::FCMP_OGE; break; | |||
10266 | case 0x1e: Pred = FCmpInst::FCMP_OGT; break; | |||
10267 | case 0x1f: Pred = FCmpInst::FCMP_TRUE; break; | |||
10268 | default: llvm_unreachable("Unhandled CC"); | |||
10269 | } | |||
10270 | ||||
10271 | // Builtins without the _mask suffix return a vector of integers | |||
10272 | // of the same width as the input vectors | |||
10273 | switch (BuiltinID) { | |||
10274 | case X86::BI__builtin_ia32_cmpps512_mask: | |||
10275 | case X86::BI__builtin_ia32_cmppd512_mask: | |||
10276 | case X86::BI__builtin_ia32_cmpps128_mask: | |||
10277 | case X86::BI__builtin_ia32_cmpps256_mask: | |||
10278 | case X86::BI__builtin_ia32_cmppd128_mask: | |||
10279 | case X86::BI__builtin_ia32_cmppd256_mask: { | |||
10280 | unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); | |||
10281 | Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); | |||
10282 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]); | |||
10283 | } | |||
10284 | default: | |||
10285 | return getVectorFCmpIR(Pred); | |||
10286 | } | |||
10287 | } | |||
10288 | ||||
10289 | // SSE scalar comparison intrinsics | |||
10290 | case X86::BI__builtin_ia32_cmpeqss: | |||
10291 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); | |||
10292 | case X86::BI__builtin_ia32_cmpltss: | |||
10293 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); | |||
10294 | case X86::BI__builtin_ia32_cmpless: | |||
10295 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); | |||
10296 | case X86::BI__builtin_ia32_cmpunordss: | |||
10297 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); | |||
10298 | case X86::BI__builtin_ia32_cmpneqss: | |||
10299 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); | |||
10300 | case X86::BI__builtin_ia32_cmpnltss: | |||
10301 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); | |||
10302 | case X86::BI__builtin_ia32_cmpnless: | |||
10303 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); | |||
10304 | case X86::BI__builtin_ia32_cmpordss: | |||
10305 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); | |||
10306 | case X86::BI__builtin_ia32_cmpeqsd: | |||
10307 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); | |||
10308 | case X86::BI__builtin_ia32_cmpltsd: | |||
10309 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); | |||
10310 | case X86::BI__builtin_ia32_cmplesd: | |||
10311 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); | |||
10312 | case X86::BI__builtin_ia32_cmpunordsd: | |||
10313 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); | |||
10314 | case X86::BI__builtin_ia32_cmpneqsd: | |||
10315 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); | |||
10316 | case X86::BI__builtin_ia32_cmpnltsd: | |||
10317 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); | |||
10318 | case X86::BI__builtin_ia32_cmpnlesd: | |||
10319 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); | |||
10320 | case X86::BI__builtin_ia32_cmpordsd: | |||
10321 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); | |||
10322 | ||||
10323 | case X86::BI__emul: | |||
10324 | case X86::BI__emulu: { | |||
10325 | llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64); | |||
10326 | bool isSigned = (BuiltinID == X86::BI__emul); | |||
10327 | Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned); | |||
10328 | Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned); | |||
10329 | return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned); | |||
10330 | } | |||
10331 | case X86::BI__mulh: | |||
10332 | case X86::BI__umulh: | |||
10333 | case X86::BI_mul128: | |||
10334 | case X86::BI_umul128: { | |||
10335 | llvm::Type *ResType = ConvertType(E->getType()); | |||
10336 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); | |||
10337 | ||||
10338 | bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128); | |||
10339 | Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned); | |||
10340 | Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned); | |||
10341 | ||||
10342 | Value *MulResult, *HigherBits; | |||
10343 | if (IsSigned) { | |||
10344 | MulResult = Builder.CreateNSWMul(LHS, RHS); | |||
10345 | HigherBits = Builder.CreateAShr(MulResult, 64); | |||
10346 | } else { | |||
10347 | MulResult = Builder.CreateNUWMul(LHS, RHS); | |||
10348 | HigherBits = Builder.CreateLShr(MulResult, 64); | |||
10349 | } | |||
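// Worked example: __umulh(0x8000000000000000, 4) extends both operands to
// i128, the product is 2^65, and the logical shift right by 64 leaves 2 as
// the high 64 bits.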
10350 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); | |||
10351 | ||||
10352 | if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh) | |||
10353 | return HigherBits; | |||
10354 | ||||
10355 | Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2)); | |||
10356 | Builder.CreateStore(HigherBits, HighBitsAddress); | |||
10357 | return Builder.CreateIntCast(MulResult, ResType, IsSigned); | |||
10358 | } | |||
10359 | ||||
10360 | case X86::BI__faststorefence: { | |||
10361 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, | |||
10362 | llvm::SyncScope::System); | |||
10363 | } | |||
10364 | case X86::BI_ReadWriteBarrier: | |||
10365 | case X86::BI_ReadBarrier: | |||
10366 | case X86::BI_WriteBarrier: { | |||
10367 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, | |||
10368 | llvm::SyncScope::SingleThread); | |||
10369 | } | |||
10370 | case X86::BI_BitScanForward: | |||
10371 | case X86::BI_BitScanForward64: | |||
10372 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E); | |||
10373 | case X86::BI_BitScanReverse: | |||
10374 | case X86::BI_BitScanReverse64: | |||
10375 | return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E); | |||
10376 | ||||
10377 | case X86::BI_InterlockedAnd64: | |||
10378 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E); | |||
10379 | case X86::BI_InterlockedExchange64: | |||
10380 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E); | |||
10381 | case X86::BI_InterlockedExchangeAdd64: | |||
10382 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E); | |||
10383 | case X86::BI_InterlockedExchangeSub64: | |||
10384 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E); | |||
10385 | case X86::BI_InterlockedOr64: | |||
10386 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E); | |||
10387 | case X86::BI_InterlockedXor64: | |||
10388 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E); | |||
10389 | case X86::BI_InterlockedDecrement64: | |||
10390 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E); | |||
10391 | case X86::BI_InterlockedIncrement64: | |||
10392 | return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E); | |||
10393 | case X86::BI_InterlockedCompareExchange128: { | |||
10394 | // InterlockedCompareExchange128 doesn't directly refer to 128-bit ints; | |||
10395 | // instead it takes pointers to 64-bit ints for Destination and | |||
10396 | // ComparandResult, and the exchange value is passed as two 64-bit ints | |||
10397 | // (high & low). The previous value is written to ComparandResult, and | |||
10398 | // success is returned. | |||
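// For reference, the MSVC prototype is roughly:
//   unsigned char _InterlockedCompareExchange128(__int64 volatile *Destination,
//                                                __int64 ExchangeHigh,
//                                                __int64 ExchangeLow,
//                                                __int64 *ComparandResult);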
10399 | ||||
10400 | llvm::Type *Int128Ty = Builder.getInt128Ty(); | |||
10401 | llvm::Type *Int128PtrTy = Int128Ty->getPointerTo(); | |||
10402 | ||||
10403 | Value *Destination = | |||
10404 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PtrTy); | |||
10405 | Value *ExchangeHigh128 = | |||
10406 | Builder.CreateZExt(EmitScalarExpr(E->getArg(1)), Int128Ty); | |||
10407 | Value *ExchangeLow128 = | |||
10408 | Builder.CreateZExt(EmitScalarExpr(E->getArg(2)), Int128Ty); | |||
10409 | Address ComparandResult( | |||
10410 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int128PtrTy), | |||
10411 | getContext().toCharUnitsFromBits(128)); | |||
10412 | ||||
10413 | Value *Exchange = Builder.CreateOr( | |||
10414 | Builder.CreateShl(ExchangeHigh128, 64, "", false, false), | |||
10415 | ExchangeLow128); | |||
10416 | ||||
10417 | Value *Comparand = Builder.CreateLoad(ComparandResult); | |||
10418 | ||||
10419 | AtomicCmpXchgInst *CXI = | |||
10420 | Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, | |||
10421 | AtomicOrdering::SequentiallyConsistent, | |||
10422 | AtomicOrdering::SequentiallyConsistent); | |||
10423 | CXI->setVolatile(true); | |||
10424 | ||||
10425 | // Write the result back to the inout pointer. | |||
10426 | Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult); | |||
10427 | ||||
10428 | // Get the success boolean and zero extend it to i8. | |||
10429 | Value *Success = Builder.CreateExtractValue(CXI, 1); | |||
10430 | return Builder.CreateZExt(Success, ConvertType(E->getType())); | |||
10431 | } | |||
10432 | ||||
10433 | case X86::BI_AddressOfReturnAddress: { | |||
10434 | Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress); | |||
10435 | return Builder.CreateCall(F); | |||
10436 | } | |||
10437 | case X86::BI__stosb: { | |||
10438 | // We treat __stosb as a volatile memset - it may not generate a "rep stosb" | |||
10439 | // instruction, but it will create a memset that won't be optimized away. | |||
10440 | return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true); | |||
10441 | } | |||
10442 | case X86::BI__ud2: | |||
10443 | // llvm.trap makes a ud2a instruction on x86. | |||
10444 | return EmitTrapCall(Intrinsic::trap); | |||
10445 | case X86::BI__int2c: { | |||
10446 | // This syscall signals a driver assertion failure in x86 NT kernels. | |||
10447 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); | |||
10448 | llvm::InlineAsm *IA = | |||
10449 | llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*SideEffects=*/true); | |||
10450 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( | |||
10451 | getLLVMContext(), llvm::AttributeList::FunctionIndex, | |||
10452 | llvm::Attribute::NoReturn); | |||
10453 | CallSite CS = Builder.CreateCall(IA); | |||
10454 | CS.setAttributes(NoReturnAttr); | |||
10455 | return CS.getInstruction(); | |||
10456 | } | |||
10457 | case X86::BI__readfsbyte: | |||
10458 | case X86::BI__readfsword: | |||
10459 | case X86::BI__readfsdword: | |||
10460 | case X86::BI__readfsqword: { | |||
10461 | llvm::Type *IntTy = ConvertType(E->getType()); | |||
10462 | Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)), | |||
10463 | llvm::PointerType::get(IntTy, 257)); | |||
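// Address space 257 corresponds to the FS segment register in the X86
// backend (256 is GS), so the load below is relative to the FS base.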
10464 | LoadInst *Load = Builder.CreateAlignedLoad( | |||
10465 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); | |||
10466 | Load->setVolatile(true); | |||
10467 | return Load; | |||
10468 | } | |||
10469 | case X86::BI__readgsbyte: | |||
10470 | case X86::BI__readgsword: | |||
10471 | case X86::BI__readgsdword: | |||
10472 | case X86::BI__readgsqword: { | |||
10473 | llvm::Type *IntTy = ConvertType(E->getType()); | |||
10474 | Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)), | |||
10475 | llvm::PointerType::get(IntTy, 256)); | |||
10476 | LoadInst *Load = Builder.CreateAlignedLoad( | |||
10477 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); | |||
10478 | Load->setVolatile(true); | |||
10479 | return Load; | |||
10480 | } | |||
10481 | } | |||
10482 | } | |||
10483 | ||||
10484 | ||||
10485 | Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, | |||
10486 | const CallExpr *E) { | |||
10487 | SmallVector<Value*, 4> Ops; | |||
10488 | ||||
10489 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) | |||
10490 | Ops.push_back(EmitScalarExpr(E->getArg(i))); | |||
10491 | ||||
10492 | Intrinsic::ID ID = Intrinsic::not_intrinsic; | |||
10493 | ||||
10494 | switch (BuiltinID) { | |||
10495 | default: return nullptr; | |||
10496 | ||||
10497 | // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we | |||
10498 | // call __builtin_readcyclecounter. | |||
10499 | case PPC::BI__builtin_ppc_get_timebase: | |||
10500 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter)); | |||
10501 | ||||
10502 | // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr | |||
10503 | case PPC::BI__builtin_altivec_lvx: | |||
10504 | case PPC::BI__builtin_altivec_lvxl: | |||
10505 | case PPC::BI__builtin_altivec_lvebx: | |||
10506 | case PPC::BI__builtin_altivec_lvehx: | |||
10507 | case PPC::BI__builtin_altivec_lvewx: | |||
10508 | case PPC::BI__builtin_altivec_lvsl: | |||
10509 | case PPC::BI__builtin_altivec_lvsr: | |||
10510 | case PPC::BI__builtin_vsx_lxvd2x: | |||
10511 | case PPC::BI__builtin_vsx_lxvw4x: | |||
10512 | case PPC::BI__builtin_vsx_lxvd2x_be: | |||
10513 | case PPC::BI__builtin_vsx_lxvw4x_be: | |||
10514 | case PPC::BI__builtin_vsx_lxvl: | |||
10515 | case PPC::BI__builtin_vsx_lxvll: | |||
10516 | { | |||
10517 | if (BuiltinID == PPC::BI__builtin_vsx_lxvl || | |||
10518 | BuiltinID == PPC::BI__builtin_vsx_lxvll) { | |||
10519 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); | |||
10520 | } else { | |||
10521 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); | |||
10522 | Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]); | |||
10523 | Ops.pop_back(); | |||
10524 | } | |||
10525 | ||||
10526 | switch (BuiltinID) { | |||
10527 | default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!"); | |||
10528 | case PPC::BI__builtin_altivec_lvx: | |||
10529 | ID = Intrinsic::ppc_altivec_lvx; | |||
10530 | break; | |||
10531 | case PPC::BI__builtin_altivec_lvxl: | |||
10532 | ID = Intrinsic::ppc_altivec_lvxl; | |||
10533 | break; | |||
10534 | case PPC::BI__builtin_altivec_lvebx: | |||
10535 | ID = Intrinsic::ppc_altivec_lvebx; | |||
10536 | break; | |||
10537 | case PPC::BI__builtin_altivec_lvehx: | |||
10538 | ID = Intrinsic::ppc_altivec_lvehx; | |||
10539 | break; | |||
10540 | case PPC::BI__builtin_altivec_lvewx: | |||
10541 | ID = Intrinsic::ppc_altivec_lvewx; | |||
10542 | break; | |||
10543 | case PPC::BI__builtin_altivec_lvsl: | |||
10544 | ID = Intrinsic::ppc_altivec_lvsl; | |||
10545 | break; | |||
10546 | case PPC::BI__builtin_altivec_lvsr: | |||
10547 | ID = Intrinsic::ppc_altivec_lvsr; | |||
10548 | break; | |||
10549 | case PPC::BI__builtin_vsx_lxvd2x: | |||
10550 | ID = Intrinsic::ppc_vsx_lxvd2x; | |||
10551 | break; | |||
10552 | case PPC::BI__builtin_vsx_lxvw4x: | |||
10553 | ID = Intrinsic::ppc_vsx_lxvw4x; | |||
10554 | break; | |||
10555 | case PPC::BI__builtin_vsx_lxvd2x_be: | |||
10556 | ID = Intrinsic::ppc_vsx_lxvd2x_be; | |||
10557 | break; | |||
10558 | case PPC::BI__builtin_vsx_lxvw4x_be: | |||
10559 | ID = Intrinsic::ppc_vsx_lxvw4x_be; | |||
10560 | break; | |||
10561 | case PPC::BI__builtin_vsx_lxvl: | |||
10562 | ID = Intrinsic::ppc_vsx_lxvl; | |||
10563 | break; | |||
10564 | case PPC::BI__builtin_vsx_lxvll: | |||
10565 | ID = Intrinsic::ppc_vsx_lxvll; | |||
10566 | break; | |||
10567 | } | |||
10568 | llvm::Function *F = CGM.getIntrinsic(ID); | |||
10569 | return Builder.CreateCall(F, Ops, ""); | |||
10570 | } | |||
10571 | ||||
10572 | // vec_st, vec_xst_be | |||
10573 | case PPC::BI__builtin_altivec_stvx: | |||
10574 | case PPC::BI__builtin_altivec_stvxl: | |||
10575 | case PPC::BI__builtin_altivec_stvebx: | |||
10576 | case PPC::BI__builtin_altivec_stvehx: | |||
10577 | case PPC::BI__builtin_altivec_stvewx: | |||
10578 | case PPC::BI__builtin_vsx_stxvd2x: | |||
10579 | case PPC::BI__builtin_vsx_stxvw4x: | |||
10580 | case PPC::BI__builtin_vsx_stxvd2x_be: | |||
10581 | case PPC::BI__builtin_vsx_stxvw4x_be: | |||
10582 | case PPC::BI__builtin_vsx_stxvl: | |||
10583 | case PPC::BI__builtin_vsx_stxvll: | |||
10584 | { | |||
10585 | if (BuiltinID == PPC::BI__builtin_vsx_stxvl || | |||
10586 | BuiltinID == PPC::BI__builtin_vsx_stxvll) { | |||
10587 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); | |||
10588 | } else { | |||
10589 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); | |||
10590 | Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]); | |||
10591 | Ops.pop_back(); | |||
10592 | } | |||
10593 | ||||
10594 | switch (BuiltinID) { | |||
10595 | default: llvm_unreachable("Unsupported st intrinsic!"); | |||
10596 | case PPC::BI__builtin_altivec_stvx: | |||
10597 | ID = Intrinsic::ppc_altivec_stvx; | |||
10598 | break; | |||
10599 | case PPC::BI__builtin_altivec_stvxl: | |||
10600 | ID = Intrinsic::ppc_altivec_stvxl; | |||
10601 | break; | |||
10602 | case PPC::BI__builtin_altivec_stvebx: | |||
10603 | ID = Intrinsic::ppc_altivec_stvebx; | |||
10604 | break; | |||
10605 | case PPC::BI__builtin_altivec_stvehx: | |||
10606 | ID = Intrinsic::ppc_altivec_stvehx; | |||
10607 | break; | |||
10608 | case PPC::BI__builtin_altivec_stvewx: | |||
10609 | ID = Intrinsic::ppc_altivec_stvewx; | |||
10610 | break; | |||
10611 | case PPC::BI__builtin_vsx_stxvd2x: | |||
10612 | ID = Intrinsic::ppc_vsx_stxvd2x; | |||
10613 | break; | |||
10614 | case PPC::BI__builtin_vsx_stxvw4x: | |||
10615 | ID = Intrinsic::ppc_vsx_stxvw4x; | |||
10616 | break; | |||
10617 | case PPC::BI__builtin_vsx_stxvd2x_be: | |||
10618 | ID = Intrinsic::ppc_vsx_stxvd2x_be; | |||
10619 | break; | |||
10620 | case PPC::BI__builtin_vsx_stxvw4x_be: | |||
10621 | ID = Intrinsic::ppc_vsx_stxvw4x_be; | |||
10622 | break; | |||
10623 | case PPC::BI__builtin_vsx_stxvl: | |||
10624 | ID = Intrinsic::ppc_vsx_stxvl; | |||
10625 | break; | |||
10626 | case PPC::BI__builtin_vsx_stxvll: | |||
10627 | ID = Intrinsic::ppc_vsx_stxvll; | |||
10628 | break; | |||
10629 | } | |||
10630 | llvm::Function *F = CGM.getIntrinsic(ID); | |||
10631 | return Builder.CreateCall(F, Ops, ""); | |||
10632 | } | |||
10633 | // Square root | |||
10634 | case PPC::BI__builtin_vsx_xvsqrtsp: | |||
10635 | case PPC::BI__builtin_vsx_xvsqrtdp: { | |||
10636 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10637 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10638 | ID = Intrinsic::sqrt; | |||
10639 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); | |||
10640 | return Builder.CreateCall(F, X); | |||
10641 | } | |||
10642 | // Count leading zeros | |||
10643 | case PPC::BI__builtin_altivec_vclzb: | |||
10644 | case PPC::BI__builtin_altivec_vclzh: | |||
10645 | case PPC::BI__builtin_altivec_vclzw: | |||
10646 | case PPC::BI__builtin_altivec_vclzd: { | |||
10647 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10648 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10649 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); | |||
10650 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); | |||
10651 | return Builder.CreateCall(F, {X, Undef}); | |||
10652 | } | |||
10653 | case PPC::BI__builtin_altivec_vctzb: | |||
10654 | case PPC::BI__builtin_altivec_vctzh: | |||
10655 | case PPC::BI__builtin_altivec_vctzw: | |||
10656 | case PPC::BI__builtin_altivec_vctzd: { | |||
10657 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10658 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10659 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); | |||
10660 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); | |||
10661 | return Builder.CreateCall(F, {X, Undef}); | |||
10662 | } | |||
10663 | case PPC::BI__builtin_altivec_vpopcntb: | |||
10664 | case PPC::BI__builtin_altivec_vpopcnth: | |||
10665 | case PPC::BI__builtin_altivec_vpopcntw: | |||
10666 | case PPC::BI__builtin_altivec_vpopcntd: { | |||
10667 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10668 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10669 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); | |||
10670 | return Builder.CreateCall(F, X); | |||
10671 | } | |||
10672 | // Copy sign | |||
10673 | case PPC::BI__builtin_vsx_xvcpsgnsp: | |||
10674 | case PPC::BI__builtin_vsx_xvcpsgndp: { | |||
10675 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10676 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10677 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
10678 | ID = Intrinsic::copysign; | |||
10679 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); | |||
10680 | return Builder.CreateCall(F, {X, Y}); | |||
10681 | } | |||
10682 | // Rounding/truncation | |||
10683 | case PPC::BI__builtin_vsx_xvrspip: | |||
10684 | case PPC::BI__builtin_vsx_xvrdpip: | |||
10685 | case PPC::BI__builtin_vsx_xvrdpim: | |||
10686 | case PPC::BI__builtin_vsx_xvrspim: | |||
10687 | case PPC::BI__builtin_vsx_xvrdpi: | |||
10688 | case PPC::BI__builtin_vsx_xvrspi: | |||
10689 | case PPC::BI__builtin_vsx_xvrdpic: | |||
10690 | case PPC::BI__builtin_vsx_xvrspic: | |||
10691 | case PPC::BI__builtin_vsx_xvrdpiz: | |||
10692 | case PPC::BI__builtin_vsx_xvrspiz: { | |||
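// For reference, the builtin -> intrinsic mapping selected below is:
//   xvr*im -> llvm.floor, xvr*ip -> llvm.ceil, xvr*iz -> llvm.trunc,
//   xvr*ic -> llvm.nearbyint, and the plain xvr*i forms -> llvm.round.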
10693 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10694 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10695 | if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim || | |||
10696 | BuiltinID == PPC::BI__builtin_vsx_xvrspim) | |||
10697 | ID = Intrinsic::floor; | |||
10698 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi || | |||
10699 | BuiltinID == PPC::BI__builtin_vsx_xvrspi) | |||
10700 | ID = Intrinsic::round; | |||
10701 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic || | |||
10702 | BuiltinID == PPC::BI__builtin_vsx_xvrspic) | |||
10703 | ID = Intrinsic::nearbyint; | |||
10704 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip || | |||
10705 | BuiltinID == PPC::BI__builtin_vsx_xvrspip) | |||
10706 | ID = Intrinsic::ceil; | |||
10707 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz || | |||
10708 | BuiltinID == PPC::BI__builtin_vsx_xvrspiz) | |||
10709 | ID = Intrinsic::trunc; | |||
10710 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); | |||
10711 | return Builder.CreateCall(F, X); | |||
10712 | } | |||
10713 | ||||
10714 | // Absolute value | |||
10715 | case PPC::BI__builtin_vsx_xvabsdp: | |||
10716 | case PPC::BI__builtin_vsx_xvabssp: { | |||
10717 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10718 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10719 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); | |||
10720 | return Builder.CreateCall(F, X); | |||
10721 | } | |||
10722 | ||||
10723 | // FMA variations | |||
10724 | case PPC::BI__builtin_vsx_xvmaddadp: | |||
10725 | case PPC::BI__builtin_vsx_xvmaddasp: | |||
10726 | case PPC::BI__builtin_vsx_xvnmaddadp: | |||
10727 | case PPC::BI__builtin_vsx_xvnmaddasp: | |||
10728 | case PPC::BI__builtin_vsx_xvmsubadp: | |||
10729 | case PPC::BI__builtin_vsx_xvmsubasp: | |||
10730 | case PPC::BI__builtin_vsx_xvnmsubadp: | |||
10731 | case PPC::BI__builtin_vsx_xvnmsubasp: { | |||
10732 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
10733 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
10734 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
10735 | Value *Z = EmitScalarExpr(E->getArg(2)); | |||
10736 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); | |||
10737 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); | |||
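// The four variants differ only in the signs applied around the fused
// multiply-add:
//   madd  = fma(x, y, z)
//   nmadd = -fma(x, y, z)
//   msub  = fma(x, y, -z)
//   nmsub = -fma(x, y, -z)
// which is what the FSub-based negations below implement.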
10738 | switch (BuiltinID) { | |||
10739 | case PPC::BI__builtin_vsx_xvmaddadp: | |||
10740 | case PPC::BI__builtin_vsx_xvmaddasp: | |||
10741 | return Builder.CreateCall(F, {X, Y, Z}); | |||
10742 | case PPC::BI__builtin_vsx_xvnmaddadp: | |||
10743 | case PPC::BI__builtin_vsx_xvnmaddasp: | |||
10744 | return Builder.CreateFSub(Zero, | |||
10745 | Builder.CreateCall(F, {X, Y, Z}), "sub"); | |||
10746 | case PPC::BI__builtin_vsx_xvmsubadp: | |||
10747 | case PPC::BI__builtin_vsx_xvmsubasp: | |||
10748 | return Builder.CreateCall(F, | |||
10749 | {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); | |||
10750 | case PPC::BI__builtin_vsx_xvnmsubadp: | |||
10751 | case PPC::BI__builtin_vsx_xvnmsubasp: | |||
10752 | Value *FsubRes = | |||
10753 | Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); | |||
10754 | return Builder.CreateFSub(Zero, FsubRes, "sub"); | |||
10755 | } | |||
10756 | llvm_unreachable("Unknown FMA operation")::llvm::llvm_unreachable_internal("Unknown FMA operation", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 10756); | |||
10757 | return nullptr; // Suppress no-return warning | |||
10758 | } | |||
10759 | ||||
10760 | case PPC::BI__builtin_vsx_insertword: { | |||
10761 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw); | |||
10762 | ||||
10763 | // Third argument is a compile time constant int. It must be clamped to | |||
10764 | // the range [0, 12]. | |||
10765 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); | |||
10766 | assert(ArgCI && | |||
10767 | "Third arg to xxinsertw intrinsic must be constant integer"); | |||
10768 | const int64_t MaxIndex = 12; | |||
10769 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); | |||
10770 | ||||
10771 | // The builtin semantics don't exactly match the xxinsertw instruction's | |||
10772 | // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the | |||
10773 | // word from the first argument and inserts it into the second argument. The | |||
10774 | // instruction extracts the word from its second input register and inserts | |||
10775 | // it into its first input register, so swap the first and second arguments. | |||
10776 | std::swap(Ops[0], Ops[1]); | |||
10777 | ||||
10778 | // Need to cast the second argument from a vector of unsigned int to a | |||
10779 | // vector of long long. | |||
10780 | Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2)); | |||
10781 | ||||
10782 | if (getTarget().isLittleEndian()) { | |||
10783 | // Create a shuffle mask of (1, 0) | |||
10784 | Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1), | |||
10785 | ConstantInt::get(Int32Ty, 0) | |||
10786 | }; | |||
10787 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); | |||
10788 | ||||
10789 | // Reverse the double words in the vector we will extract from. | |||
10790 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); | |||
10791 | Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask); | |||
10792 | ||||
10793 | // Reverse the index. | |||
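// For example (illustrative), a builtin byte index of 4 becomes
// MaxIndex - 4 = 8 once the doublewords have been swapped.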
10794 | Index = MaxIndex - Index; | |||
10795 | } | |||
10796 | ||||
10797 | // Intrinsic expects the first arg to be a vector of int. | |||
10798 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); | |||
10799 | Ops[2] = ConstantInt::getSigned(Int32Ty, Index); | |||
10800 | return Builder.CreateCall(F, Ops); | |||
10801 | } | |||
10802 | ||||
10803 | case PPC::BI__builtin_vsx_extractuword: { | |||
10804 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw); | |||
10805 | ||||
10806 | // Intrinsic expects the first argument to be a vector of doublewords. | |||
10807 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); | |||
10808 | ||||
10809 | // The second argument is a compile time constant int that needs to | |||
10810 | // be clamped to the range [0, 12]. | |||
10811 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]); | |||
10812 | assert(ArgCI && | |||
10813 | "Second Arg to xxextractuw intrinsic must be a constant integer!"); | |||
10814 | const int64_t MaxIndex = 12; | |||
10815 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); | |||
10816 | ||||
10817 | if (getTarget().isLittleEndian()) { | |||
10818 | // Reverse the index. | |||
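// For example (illustrative), an extract index of 4 becomes 12 - 4 = 8;
// the doublewords of the result are then swapped back after the call.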
10819 | Index = MaxIndex - Index; | |||
10820 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); | |||
10821 | ||||
10822 | // Emit the call, then reverse the double words of the results vector. | |||
10823 | Value *Call = Builder.CreateCall(F, Ops); | |||
10824 | ||||
10825 | // Create a shuffle mask of (1, 0) | |||
10826 | Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1), | |||
10827 | ConstantInt::get(Int32Ty, 0) | |||
10828 | }; | |||
10829 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); | |||
10830 | ||||
10831 | Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask); | |||
10832 | return ShuffleCall; | |||
10833 | } else { | |||
10834 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); | |||
10835 | return Builder.CreateCall(F, Ops); | |||
10836 | } | |||
10837 | } | |||
10838 | ||||
10839 | case PPC::BI__builtin_vsx_xxpermdi: { | |||
10840 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); | |||
10841 | assert(ArgCI && "Third arg must be constant integer!")(static_cast <bool> (ArgCI && "Third arg must be constant integer!" ) ? void (0) : __assert_fail ("ArgCI && \"Third arg must be constant integer!\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 10841, __extension__ __PRETTY_FUNCTION__)); | |||
10842 | ||||
10843 | unsigned Index = ArgCI->getZExtValue(); | |||
10844 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2)); | |||
10845 | Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2)); | |||
10846 | ||||
10847 | // Account for endianness by treating this as just a shuffle, so we use the | |||
10848 | // same indices for both LE and BE in order to produce expected results in | |||
10849 | // both cases. | |||
10850 | unsigned ElemIdx0 = (Index & 2) >> 1; | |||
10851 | unsigned ElemIdx1 = 2 + (Index & 1); | |||
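// Worked examples (illustrative): Index == 0 selects doublewords {0, 2},
// Index == 3 selects {1, 3}.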
10852 | ||||
10853 | Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0), | |||
10854 | ConstantInt::get(Int32Ty, ElemIdx1)}; | |||
10855 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); | |||
10856 | ||||
10857 | Value *ShuffleCall = | |||
10858 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask); | |||
10859 | QualType BIRetType = E->getType(); | |||
10860 | auto RetTy = ConvertType(BIRetType); | |||
10861 | return Builder.CreateBitCast(ShuffleCall, RetTy); | |||
10862 | } | |||
10863 | ||||
10864 | case PPC::BI__builtin_vsx_xxsldwi: { | |||
10865 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); | |||
10866 | assert(ArgCI && "Third argument must be a compile time constant")(static_cast <bool> (ArgCI && "Third argument must be a compile time constant" ) ? void (0) : __assert_fail ("ArgCI && \"Third argument must be a compile time constant\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 10866, __extension__ __PRETTY_FUNCTION__)); | |||
10867 | unsigned Index = ArgCI->getZExtValue() & 0x3; | |||
10868 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4)); | |||
10869 | Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4)); | |||
10870 | ||||
10871 | // Create a shuffle mask | |||
10872 | unsigned ElemIdx0; | |||
10873 | unsigned ElemIdx1; | |||
10874 | unsigned ElemIdx2; | |||
10875 | unsigned ElemIdx3; | |||
10876 | if (getTarget().isLittleEndian()) { | |||
10877 | // Little endian element N comes from element 8+N-Index of the | |||
10878 | // concatenated wide vector (of course, using modulo arithmetic on | |||
10879 | // the total number of elements). | |||
10880 | ElemIdx0 = (8 - Index) % 8; | |||
10881 | ElemIdx1 = (9 - Index) % 8; | |||
10882 | ElemIdx2 = (10 - Index) % 8; | |||
10883 | ElemIdx3 = (11 - Index) % 8; | |||
10884 | } else { | |||
10885 | // Big endian ElemIdx<N> = Index + N | |||
10886 | ElemIdx0 = Index; | |||
10887 | ElemIdx1 = Index + 1; | |||
10888 | ElemIdx2 = Index + 2; | |||
10889 | ElemIdx3 = Index + 3; | |||
10890 | } | |||
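// Worked example (illustrative): for Index == 1 the mask is {1, 2, 3, 4} on
// big endian and {7, 0, 1, 2} on little endian.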
10891 | ||||
10892 | Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0), | |||
10893 | ConstantInt::get(Int32Ty, ElemIdx1), | |||
10894 | ConstantInt::get(Int32Ty, ElemIdx2), | |||
10895 | ConstantInt::get(Int32Ty, ElemIdx3)}; | |||
10896 | ||||
10897 | Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts); | |||
10898 | Value *ShuffleCall = | |||
10899 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask); | |||
10900 | QualType BIRetType = E->getType(); | |||
10901 | auto RetTy = ConvertType(BIRetType); | |||
10902 | return Builder.CreateBitCast(ShuffleCall, RetTy); | |||
10903 | } | |||
10904 | } | |||
10905 | } | |||
10906 | ||||
10907 | Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, | |||
10908 | const CallExpr *E) { | |||
10909 | switch (BuiltinID) { | |||
10910 | case AMDGPU::BI__builtin_amdgcn_div_scale: | |||
10911 | case AMDGPU::BI__builtin_amdgcn_div_scalef: { | |||
10912 | // Translate from the intrinsic's struct return to the builtin's out | |||
10913 | // argument. | |||
10914 | ||||
10915 | Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); | |||
10916 | ||||
10917 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); | |||
10918 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); | |||
10919 | llvm::Value *Z = EmitScalarExpr(E->getArg(2)); | |||
10920 | ||||
10921 | llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, | |||
10922 | X->getType()); | |||
10923 | ||||
10924 | llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); | |||
10925 | ||||
10926 | llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0); | |||
10927 | llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1); | |||
10928 | ||||
10929 | llvm::Type *RealFlagType | |||
10930 | = FlagOutPtr.getPointer()->getType()->getPointerElementType(); | |||
10931 | ||||
10932 | llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType); | |||
10933 | Builder.CreateStore(FlagExt, FlagOutPtr); | |||
10934 | return Result; | |||
10935 | } | |||
10936 | case AMDGPU::BI__builtin_amdgcn_div_fmas: | |||
10937 | case AMDGPU::BI__builtin_amdgcn_div_fmasf: { | |||
10938 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); | |||
10939 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); | |||
10940 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); | |||
10941 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); | |||
10942 | ||||
10943 | llvm::Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, | |||
10944 | Src0->getType()); | |||
10945 | llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); | |||
10946 | return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); | |||
10947 | } | |||
10948 | ||||
10949 | case AMDGPU::BI__builtin_amdgcn_ds_swizzle: | |||
10950 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle); | |||
10951 | case AMDGPU::BI__builtin_amdgcn_mov_dpp: { | |||
10952 | llvm::SmallVector<llvm::Value *, 5> Args; | |||
10953 | for (unsigned I = 0; I != 5; ++I) | |||
10954 | Args.push_back(EmitScalarExpr(E->getArg(I))); | |||
10955 | Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_mov_dpp, | |||
10956 | Args[0]->getType()); | |||
10957 | return Builder.CreateCall(F, Args); | |||
10958 | } | |||
10959 | case AMDGPU::BI__builtin_amdgcn_div_fixup: | |||
10960 | case AMDGPU::BI__builtin_amdgcn_div_fixupf: | |||
10961 | case AMDGPU::BI__builtin_amdgcn_div_fixuph: | |||
10962 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup); | |||
10963 | case AMDGPU::BI__builtin_amdgcn_trig_preop: | |||
10964 | case AMDGPU::BI__builtin_amdgcn_trig_preopf: | |||
10965 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop); | |||
10966 | case AMDGPU::BI__builtin_amdgcn_rcp: | |||
10967 | case AMDGPU::BI__builtin_amdgcn_rcpf: | |||
10968 | case AMDGPU::BI__builtin_amdgcn_rcph: | |||
10969 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); | |||
10970 | case AMDGPU::BI__builtin_amdgcn_rsq: | |||
10971 | case AMDGPU::BI__builtin_amdgcn_rsqf: | |||
10972 | case AMDGPU::BI__builtin_amdgcn_rsqh: | |||
10973 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); | |||
10974 | case AMDGPU::BI__builtin_amdgcn_rsq_clamp: | |||
10975 | case AMDGPU::BI__builtin_amdgcn_rsq_clampf: | |||
10976 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp); | |||
10977 | case AMDGPU::BI__builtin_amdgcn_sinf: | |||
10978 | case AMDGPU::BI__builtin_amdgcn_sinh: | |||
10979 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin); | |||
10980 | case AMDGPU::BI__builtin_amdgcn_cosf: | |||
10981 | case AMDGPU::BI__builtin_amdgcn_cosh: | |||
10982 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos); | |||
10983 | case AMDGPU::BI__builtin_amdgcn_log_clampf: | |||
10984 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp); | |||
10985 | case AMDGPU::BI__builtin_amdgcn_ldexp: | |||
10986 | case AMDGPU::BI__builtin_amdgcn_ldexpf: | |||
10987 | case AMDGPU::BI__builtin_amdgcn_ldexph: | |||
10988 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); | |||
10989 | case AMDGPU::BI__builtin_amdgcn_frexp_mant: | |||
10990 | case AMDGPU::BI__builtin_amdgcn_frexp_mantf: | |||
10991 | case AMDGPU::BI__builtin_amdgcn_frexp_manth: | |||
10992 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant); | |||
10993 | case AMDGPU::BI__builtin_amdgcn_frexp_exp: | |||
10994 | case AMDGPU::BI__builtin_amdgcn_frexp_expf: { | |||
10995 | Value *Src0 = EmitScalarExpr(E->getArg(0)); | |||
10996 | Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, | |||
10997 | { Builder.getInt32Ty(), Src0->getType() }); | |||
10998 | return Builder.CreateCall(F, Src0); | |||
10999 | } | |||
11000 | case AMDGPU::BI__builtin_amdgcn_frexp_exph: { | |||
11001 | Value *Src0 = EmitScalarExpr(E->getArg(0)); | |||
11002 | Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, | |||
11003 | { Builder.getInt16Ty(), Src0->getType() }); | |||
11004 | return Builder.CreateCall(F, Src0); | |||
11005 | } | |||
11006 | case AMDGPU::BI__builtin_amdgcn_fract: | |||
11007 | case AMDGPU::BI__builtin_amdgcn_fractf: | |||
11008 | case AMDGPU::BI__builtin_amdgcn_fracth: | |||
11009 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract); | |||
11010 | case AMDGPU::BI__builtin_amdgcn_lerp: | |||
11011 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp); | |||
11012 | case AMDGPU::BI__builtin_amdgcn_uicmp: | |||
11013 | case AMDGPU::BI__builtin_amdgcn_uicmpl: | |||
11014 | case AMDGPU::BI__builtin_amdgcn_sicmp: | |||
11015 | case AMDGPU::BI__builtin_amdgcn_sicmpl: | |||
11016 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp); | |||
11017 | case AMDGPU::BI__builtin_amdgcn_fcmp: | |||
11018 | case AMDGPU::BI__builtin_amdgcn_fcmpf: | |||
11019 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp); | |||
11020 | case AMDGPU::BI__builtin_amdgcn_class: | |||
11021 | case AMDGPU::BI__builtin_amdgcn_classf: | |||
11022 | case AMDGPU::BI__builtin_amdgcn_classh: | |||
11023 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class); | |||
11024 | case AMDGPU::BI__builtin_amdgcn_fmed3f: | |||
11025 | case AMDGPU::BI__builtin_amdgcn_fmed3h: | |||
11026 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3); | |||
11027 | case AMDGPU::BI__builtin_amdgcn_read_exec: { | |||
11028 | CallInst *CI = cast<CallInst>( | |||
11029 | EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec")); | |||
11030 | CI->setConvergent(); | |||
11031 | return CI; | |||
11032 | } | |||
11033 | case AMDGPU::BI__builtin_amdgcn_read_exec_lo: | |||
11034 | case AMDGPU::BI__builtin_amdgcn_read_exec_hi: { | |||
11035 | StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ? | |||
11036 | "exec_lo" : "exec_hi"; | |||
11037 | CallInst *CI = cast<CallInst>( | |||
11038 | EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName)); | |||
11039 | CI->setConvergent(); | |||
11040 | return CI; | |||
11041 | } | |||
11042 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: | |||
11043 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: | |||
11044 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: { | |||
11045 | llvm::SmallVector<llvm::Value *, 5> Args; | |||
11046 | for (unsigned I = 0; I != 5; ++I) | |||
11047 | Args.push_back(EmitScalarExpr(E->getArg(I))); | |||
11048 | const llvm::Type *PtrTy = Args[0]->getType(); | |||
11049 | // check pointer parameter | |||
11050 | if (!PtrTy->isPointerTy() || | |||
11051 | E->getArg(0) | |||
11052 | ->getType() | |||
11053 | ->getPointeeType() | |||
11054 | .getQualifiers() | |||
11055 | .getAddressSpace() != LangAS::opencl_local || | |||
11056 | !PtrTy->getPointerElementType()->isFloatTy()) { | |||
11057 | CGM.Error(E->getArg(0)->getLocStart(), | |||
11058 | "parameter should have type \"local float*\""); | |||
11059 | return nullptr; | |||
11060 | } | |||
11061 | // check float parameter | |||
11062 | if (!Args[1]->getType()->isFloatTy()) { | |||
11063 | CGM.Error(E->getArg(1)->getLocStart(), | |||
11064 | "parameter should have type \"float\""); | |||
11065 | return nullptr; | |||
11066 | } | |||
11067 | ||||
11068 | Intrinsic::ID ID; | |||
11069 | switch (BuiltinID) { | |||
11070 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: | |||
11071 | ID = Intrinsic::amdgcn_ds_fadd; | |||
11072 | break; | |||
11073 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: | |||
11074 | ID = Intrinsic::amdgcn_ds_fmin; | |||
11075 | break; | |||
11076 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: | |||
11077 | ID = Intrinsic::amdgcn_ds_fmax; | |||
11078 | break; | |||
11079 | default: | |||
11080 | llvm_unreachable("Unknown BuiltinID")::llvm::llvm_unreachable_internal("Unknown BuiltinID", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11080); | |||
11081 | } | |||
11082 | Value *F = CGM.getIntrinsic(ID); | |||
11083 | return Builder.CreateCall(F, Args); | |||
11084 | } | |||
11085 | ||||
11086 | // amdgcn workitem | |||
11087 | case AMDGPU::BI__builtin_amdgcn_workitem_id_x: | |||
11088 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024); | |||
11089 | case AMDGPU::BI__builtin_amdgcn_workitem_id_y: | |||
11090 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024); | |||
11091 | case AMDGPU::BI__builtin_amdgcn_workitem_id_z: | |||
11092 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024); | |||
11093 | ||||
11094 | // r600 intrinsics | |||
11095 | case AMDGPU::BI__builtin_r600_recipsqrt_ieee: | |||
11096 | case AMDGPU::BI__builtin_r600_recipsqrt_ieeef: | |||
11097 | return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee); | |||
11098 | case AMDGPU::BI__builtin_r600_read_tidig_x: | |||
11099 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024); | |||
11100 | case AMDGPU::BI__builtin_r600_read_tidig_y: | |||
11101 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024); | |||
11102 | case AMDGPU::BI__builtin_r600_read_tidig_z: | |||
11103 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024); | |||
11104 | default: | |||
11105 | return nullptr; | |||
11106 | } | |||
11107 | } | |||
11108 | ||||
11109 | /// Handle a SystemZ function in which the final argument is a pointer | |||
11110 | /// to an int that receives the post-instruction CC value. At the LLVM level | |||
11111 | /// this is represented as a function that returns a {result, cc} pair. | |||
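/// As an illustrative (hypothetical) shape: a builtin taking (a, b, int *cc)
/// is lowered to a call of an intrinsic returning {vector, i32}; element 1 of
/// the pair is stored through the cc pointer and element 0 is returned.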
11112 | static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, | |||
11113 | unsigned IntrinsicID, | |||
11114 | const CallExpr *E) { | |||
11115 | unsigned NumArgs = E->getNumArgs() - 1; | |||
11116 | SmallVector<Value *, 8> Args(NumArgs); | |||
11117 | for (unsigned I = 0; I < NumArgs; ++I) | |||
11118 | Args[I] = CGF.EmitScalarExpr(E->getArg(I)); | |||
11119 | Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); | |||
11120 | Value *F = CGF.CGM.getIntrinsic(IntrinsicID); | |||
11121 | Value *Call = CGF.Builder.CreateCall(F, Args); | |||
11122 | Value *CC = CGF.Builder.CreateExtractValue(Call, 1); | |||
11123 | CGF.Builder.CreateStore(CC, CCPtr); | |||
11124 | return CGF.Builder.CreateExtractValue(Call, 0); | |||
11125 | } | |||
11126 | ||||
11127 | Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, | |||
11128 | const CallExpr *E) { | |||
11129 | switch (BuiltinID) { | |||
11130 | case SystemZ::BI__builtin_tbegin: { | |||
11131 | Value *TDB = EmitScalarExpr(E->getArg(0)); | |||
11132 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); | |||
11133 | Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); | |||
11134 | return Builder.CreateCall(F, {TDB, Control}); | |||
11135 | } | |||
11136 | case SystemZ::BI__builtin_tbegin_nofloat: { | |||
11137 | Value *TDB = EmitScalarExpr(E->getArg(0)); | |||
11138 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); | |||
11139 | Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); | |||
11140 | return Builder.CreateCall(F, {TDB, Control}); | |||
11141 | } | |||
11142 | case SystemZ::BI__builtin_tbeginc: { | |||
11143 | Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); | |||
11144 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); | |||
11145 | Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); | |||
11146 | return Builder.CreateCall(F, {TDB, Control}); | |||
11147 | } | |||
11148 | case SystemZ::BI__builtin_tabort: { | |||
11149 | Value *Data = EmitScalarExpr(E->getArg(0)); | |||
11150 | Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort); | |||
11151 | return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort")); | |||
11152 | } | |||
11153 | case SystemZ::BI__builtin_non_tx_store: { | |||
11154 | Value *Address = EmitScalarExpr(E->getArg(0)); | |||
11155 | Value *Data = EmitScalarExpr(E->getArg(1)); | |||
11156 | Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); | |||
11157 | return Builder.CreateCall(F, {Data, Address}); | |||
11158 | } | |||
11159 | ||||
11160 | // Vector builtins. Note that most vector builtins are mapped automatically | |||
11161 | // to target-specific LLVM intrinsics. The ones handled specially here can | |||
11162 | // be represented via standard LLVM IR, which is preferable to enable common | |||
11163 | // LLVM optimizations. | |||
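// For example, the vpopct*, vclz* and vctz* builtins below become the generic
// llvm.ctpop, llvm.ctlz and llvm.cttz intrinsics on the vector type.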
11164 | ||||
11165 | case SystemZ::BI__builtin_s390_vpopctb: | |||
11166 | case SystemZ::BI__builtin_s390_vpopcth: | |||
11167 | case SystemZ::BI__builtin_s390_vpopctf: | |||
11168 | case SystemZ::BI__builtin_s390_vpopctg: { | |||
11169 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11170 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11171 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); | |||
11172 | return Builder.CreateCall(F, X); | |||
11173 | } | |||
11174 | ||||
11175 | case SystemZ::BI__builtin_s390_vclzb: | |||
11176 | case SystemZ::BI__builtin_s390_vclzh: | |||
11177 | case SystemZ::BI__builtin_s390_vclzf: | |||
11178 | case SystemZ::BI__builtin_s390_vclzg: { | |||
11179 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11180 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11181 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); | |||
11182 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); | |||
11183 | return Builder.CreateCall(F, {X, Undef}); | |||
11184 | } | |||
11185 | ||||
11186 | case SystemZ::BI__builtin_s390_vctzb: | |||
11187 | case SystemZ::BI__builtin_s390_vctzh: | |||
11188 | case SystemZ::BI__builtin_s390_vctzf: | |||
11189 | case SystemZ::BI__builtin_s390_vctzg: { | |||
11190 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11191 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11192 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); | |||
11193 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); | |||
11194 | return Builder.CreateCall(F, {X, Undef}); | |||
11195 | } | |||
11196 | ||||
11197 | case SystemZ::BI__builtin_s390_vfsqsb: | |||
11198 | case SystemZ::BI__builtin_s390_vfsqdb: { | |||
11199 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11200 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11201 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); | |||
11202 | return Builder.CreateCall(F, X); | |||
11203 | } | |||
11204 | case SystemZ::BI__builtin_s390_vfmasb: | |||
11205 | case SystemZ::BI__builtin_s390_vfmadb: { | |||
11206 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11207 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11208 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
11209 | Value *Z = EmitScalarExpr(E->getArg(2)); | |||
11210 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); | |||
11211 | return Builder.CreateCall(F, {X, Y, Z}); | |||
11212 | } | |||
11213 | case SystemZ::BI__builtin_s390_vfmssb: | |||
11214 | case SystemZ::BI__builtin_s390_vfmsdb: { | |||
11215 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11216 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11217 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
11218 | Value *Z = EmitScalarExpr(E->getArg(2)); | |||
11219 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); | |||
11220 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); | |||
11221 | return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); | |||
11222 | } | |||
11223 | case SystemZ::BI__builtin_s390_vfnmasb: | |||
11224 | case SystemZ::BI__builtin_s390_vfnmadb: { | |||
11225 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11226 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11227 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
11228 | Value *Z = EmitScalarExpr(E->getArg(2)); | |||
11229 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); | |||
11230 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); | |||
11231 | return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, Z}), "sub"); | |||
11232 | } | |||
11233 | case SystemZ::BI__builtin_s390_vfnmssb: | |||
11234 | case SystemZ::BI__builtin_s390_vfnmsdb: { | |||
11235 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11236 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11237 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
11238 | Value *Z = EmitScalarExpr(E->getArg(2)); | |||
11239 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); | |||
11240 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); | |||
11241 | Value *NegZ = Builder.CreateFSub(Zero, Z, "sub"); | |||
11242 | return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, NegZ})); | |||
11243 | } | |||
11244 | case SystemZ::BI__builtin_s390_vflpsb: | |||
11245 | case SystemZ::BI__builtin_s390_vflpdb: { | |||
11246 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11247 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11248 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); | |||
11249 | return Builder.CreateCall(F, X); | |||
11250 | } | |||
11251 | case SystemZ::BI__builtin_s390_vflnsb: | |||
11252 | case SystemZ::BI__builtin_s390_vflndb: { | |||
11253 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11254 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11255 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); | |||
11256 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); | |||
11257 | return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub"); | |||
11258 | } | |||
11259 | case SystemZ::BI__builtin_s390_vfisb: | |||
11260 | case SystemZ::BI__builtin_s390_vfidb: { | |||
11261 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11262 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11263 | // Constant-fold the M4 and M5 mask arguments. | |||
11264 | llvm::APSInt M4, M5; | |||
11265 | bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext()); | |||
11266 | bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext()); | |||
11267 | assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?"); | |||
11268 | (void)IsConstM4; (void)IsConstM5; | |||
11269 | // Check whether this instance can be represented via an LLVM standard | |||
11270 | // intrinsic. We only support some combinations of M4 and M5. | |||
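// The supported combinations below are: (M4, M5) == (0, 0) -> llvm.rint,
// (4, 0) -> llvm.nearbyint, (4, 1) -> llvm.round, (4, 5) -> llvm.trunc,
// (4, 6) -> llvm.ceil, (4, 7) -> llvm.floor; anything else falls back to the
// target-specific s390 intrinsic.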
11271 | Intrinsic::ID ID = Intrinsic::not_intrinsic; | |||
11272 | switch (M4.getZExtValue()) { | |||
11273 | default: break; | |||
11274 | case 0: // IEEE-inexact exception allowed | |||
11275 | switch (M5.getZExtValue()) { | |||
11276 | default: break; | |||
11277 | case 0: ID = Intrinsic::rint; break; | |||
11278 | } | |||
11279 | break; | |||
11280 | case 4: // IEEE-inexact exception suppressed | |||
11281 | switch (M5.getZExtValue()) { | |||
11282 | default: break; | |||
11283 | case 0: ID = Intrinsic::nearbyint; break; | |||
11284 | case 1: ID = Intrinsic::round; break; | |||
11285 | case 5: ID = Intrinsic::trunc; break; | |||
11286 | case 6: ID = Intrinsic::ceil; break; | |||
11287 | case 7: ID = Intrinsic::floor; break; | |||
11288 | } | |||
11289 | break; | |||
11290 | } | |||
11291 | if (ID != Intrinsic::not_intrinsic) { | |||
11292 | Function *F = CGM.getIntrinsic(ID, ResultType); | |||
11293 | return Builder.CreateCall(F, X); | |||
11294 | } | |||
11295 | switch (BuiltinID) { | |||
11296 | case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; | |||
11297 | case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; | |||
11298 | default: llvm_unreachable("Unknown BuiltinID")::llvm::llvm_unreachable_internal("Unknown BuiltinID", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11298); | |||
11299 | } | |||
11300 | Function *F = CGM.getIntrinsic(ID); | |||
11301 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); | |||
11302 | Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); | |||
11303 | return Builder.CreateCall(F, {X, M4Value, M5Value}); | |||
11304 | } | |||
11305 | case SystemZ::BI__builtin_s390_vfmaxsb: | |||
11306 | case SystemZ::BI__builtin_s390_vfmaxdb: { | |||
11307 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11308 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11309 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
11310 | // Constant-fold the M4 mask argument. | |||
11311 | llvm::APSInt M4; | |||
11312 | bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext()); | |||
11313 | assert(IsConstM4 && "Constant arg isn't actually constant?")(static_cast <bool> (IsConstM4 && "Constant arg isn't actually constant?" ) ? void (0) : __assert_fail ("IsConstM4 && \"Constant arg isn't actually constant?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11313, __extension__ __PRETTY_FUNCTION__)); | |||
11314 | (void)IsConstM4; | |||
11315 | // Check whether this instance can be represented via an LLVM standard | |||
11316 | // intrinsic. We only support some values of M4. | |||
11317 | Intrinsic::ID ID = Intrinsic::not_intrinsic; | |||
11318 | switch (M4.getZExtValue()) { | |||
11319 | default: break; | |||
11320 | case 4: ID = Intrinsic::maxnum; break; | |||
11321 | } | |||
11322 | if (ID != Intrinsic::not_intrinsic) { | |||
11323 | Function *F = CGM.getIntrinsic(ID, ResultType); | |||
11324 | return Builder.CreateCall(F, {X, Y}); | |||
11325 | } | |||
11326 | switch (BuiltinID) { | |||
11327 | case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; | |||
11328 | case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; | |||
11329 | default: llvm_unreachable("Unknown BuiltinID")::llvm::llvm_unreachable_internal("Unknown BuiltinID", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11329); | |||
11330 | } | |||
11331 | Function *F = CGM.getIntrinsic(ID); | |||
11332 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); | |||
11333 | return Builder.CreateCall(F, {X, Y, M4Value}); | |||
11334 | } | |||
11335 | case SystemZ::BI__builtin_s390_vfminsb: | |||
11336 | case SystemZ::BI__builtin_s390_vfmindb: { | |||
11337 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
11338 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
11339 | Value *Y = EmitScalarExpr(E->getArg(1)); | |||
11340 | // Constant-fold the M4 mask argument. | |||
11341 | llvm::APSInt M4; | |||
11342 | bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext()); | |||
11343 | assert(IsConstM4 && "Constant arg isn't actually constant?")(static_cast <bool> (IsConstM4 && "Constant arg isn't actually constant?" ) ? void (0) : __assert_fail ("IsConstM4 && \"Constant arg isn't actually constant?\"" , "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11343, __extension__ __PRETTY_FUNCTION__)); | |||
11344 | (void)IsConstM4; | |||
11345 | // Check whether this instance can be represented via an LLVM standard | |||
11346 | // intrinsic. We only support some values of M4. | |||
11347 | Intrinsic::ID ID = Intrinsic::not_intrinsic; | |||
11348 | switch (M4.getZExtValue()) { | |||
11349 | default: break; | |||
11350 | case 4: ID = Intrinsic::minnum; break; | |||
11351 | } | |||
11352 | if (ID != Intrinsic::not_intrinsic) { | |||
11353 | Function *F = CGM.getIntrinsic(ID, ResultType); | |||
11354 | return Builder.CreateCall(F, {X, Y}); | |||
11355 | } | |||
11356 | switch (BuiltinID) { | |||
11357 | case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; | |||
11358 | case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; | |||
11359 | default: llvm_unreachable("Unknown BuiltinID")::llvm::llvm_unreachable_internal("Unknown BuiltinID", "/build/llvm-toolchain-snapshot-7~svn338205/tools/clang/lib/CodeGen/CGBuiltin.cpp" , 11359); | |||
11360 | } | |||
11361 | Function *F = CGM.getIntrinsic(ID); | |||
11362 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); | |||
11363 | return Builder.CreateCall(F, {X, Y, M4Value}); | |||
11364 | } | |||
11365 | ||||
11366 | // Vector intrinsics that output the post-instruction CC value. | |||
11367 | ||||
11368 | #define INTRINSIC_WITH_CC(NAME) \ | |||
11369 | case SystemZ::BI__builtin_##NAME: \ | |||
11370 | return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) | |||
11371 | ||||
11372 | INTRINSIC_WITH_CC(s390_vpkshs); | |||
11373 | INTRINSIC_WITH_CC(s390_vpksfs); | |||
11374 | INTRINSIC_WITH_CC(s390_vpksgs); | |||
11375 | ||||
11376 | INTRINSIC_WITH_CC(s390_vpklshs); | |||
11377 | INTRINSIC_WITH_CC(s390_vpklsfs); | |||
11378 | INTRINSIC_WITH_CC(s390_vpklsgs); | |||
11379 | ||||
11380 | INTRINSIC_WITH_CC(s390_vceqbs); | |||
11381 | INTRINSIC_WITH_CC(s390_vceqhs); | |||
11382 | INTRINSIC_WITH_CC(s390_vceqfs); | |||
11383 | INTRINSIC_WITH_CC(s390_vceqgs); | |||
11384 | ||||
11385 | INTRINSIC_WITH_CC(s390_vchbs); | |||
11386 | INTRINSIC_WITH_CC(s390_vchhs); | |||
11387 | INTRINSIC_WITH_CC(s390_vchfs); | |||
11388 | INTRINSIC_WITH_CC(s390_vchgs); | |||
11389 | ||||
11390 | INTRINSIC_WITH_CC(s390_vchlbs); | |||
11391 | INTRINSIC_WITH_CC(s390_vchlhs); | |||
11392 | INTRINSIC_WITH_CC(s390_vchlfs); | |||
11393 | INTRINSIC_WITH_CC(s390_vchlgs); | |||
11394 | ||||
11395 | INTRINSIC_WITH_CC(s390_vfaebs); | |||
11396 | INTRINSIC_WITH_CC(s390_vfaehs); | |||
11397 | INTRINSIC_WITH_CC(s390_vfaefs); | |||
11398 | ||||
11399 | INTRINSIC_WITH_CC(s390_vfaezbs); | |||
11400 | INTRINSIC_WITH_CC(s390_vfaezhs); | |||
11401 | INTRINSIC_WITH_CC(s390_vfaezfs); | |||
11402 | ||||
11403 | INTRINSIC_WITH_CC(s390_vfeebs); | |||
11404 | INTRINSIC_WITH_CC(s390_vfeehs); | |||
11405 | INTRINSIC_WITH_CC(s390_vfeefs); | |||
11406 | ||||
11407 | INTRINSIC_WITH_CC(s390_vfeezbs); | |||
11408 | INTRINSIC_WITH_CC(s390_vfeezhs); | |||
11409 | INTRINSIC_WITH_CC(s390_vfeezfs); | |||
11410 | ||||
11411 | INTRINSIC_WITH_CC(s390_vfenebs); | |||
11412 | INTRINSIC_WITH_CC(s390_vfenehs); | |||
11413 | INTRINSIC_WITH_CC(s390_vfenefs); | |||
11414 | ||||
11415 | INTRINSIC_WITH_CC(s390_vfenezbs); | |||
11416 | INTRINSIC_WITH_CC(s390_vfenezhs); | |||
11417 | INTRINSIC_WITH_CC(s390_vfenezfs); | |||
11418 | ||||
11419 | INTRINSIC_WITH_CC(s390_vistrbs); | |||
11420 | INTRINSIC_WITH_CC(s390_vistrhs); | |||
11421 | INTRINSIC_WITH_CC(s390_vistrfs); | |||
11422 | ||||
11423 | INTRINSIC_WITH_CC(s390_vstrcbs); | |||
11424 | INTRINSIC_WITH_CC(s390_vstrchs); | |||
11425 | INTRINSIC_WITH_CC(s390_vstrcfs); | |||
11426 | ||||
11427 | INTRINSIC_WITH_CC(s390_vstrczbs); | |||
11428 | INTRINSIC_WITH_CC(s390_vstrczhs); | |||
11429 | INTRINSIC_WITH_CC(s390_vstrczfs); | |||
11430 | ||||
11431 | INTRINSIC_WITH_CC(s390_vfcesbs); | |||
11432 | INTRINSIC_WITH_CC(s390_vfcedbs); | |||
11433 | INTRINSIC_WITH_CC(s390_vfchsbs); | |||
11434 | INTRINSIC_WITH_CC(s390_vfchdbs); | |||
11435 | INTRINSIC_WITH_CC(s390_vfchesbs); | |||
11436 | INTRINSIC_WITH_CC(s390_vfchedbs); | |||
11437 | ||||
11438 | INTRINSIC_WITH_CC(s390_vftcisb); | |||
11439 | INTRINSIC_WITH_CC(s390_vftcidb); | |||
11440 | ||||
11441 | #undef INTRINSIC_WITH_CC | |||
11442 | ||||
11443 | default: | |||
11444 | return nullptr; | |||
11445 | } | |||
11446 | } | |||
11447 | ||||
11448 | Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, | |||
11449 | const CallExpr *E) { | |||
11450 | auto MakeLdg = [&](unsigned IntrinsicID) { | |||
11451 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11452 | clang::CharUnits Align = | |||
11453 | getNaturalPointeeTypeAlignment(E->getArg(0)->getType()); | |||
11454 | return Builder.CreateCall( | |||
11455 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), | |||
11456 | Ptr->getType()}), | |||
11457 | {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())}); | |||
11458 | }; | |||
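// Illustrative sketch (not part of the original source): for an int* argument,
// the MakeLdg helper above emits a call to the llvm.nvvm.ldg.global.i
// intrinsic, passing the pointer plus the pointee's natural alignment
// (4 for i32) as an i32 constant.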
11459 | auto MakeScopedAtomic = [&](unsigned IntrinsicID) { | |||
11460 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11461 | return Builder.CreateCall( | |||
11462 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), | |||
11463 | Ptr->getType()}), | |||
11464 | {Ptr, EmitScalarExpr(E->getArg(1))}); | |||
11465 | }; | |||
11466 | switch (BuiltinID) { | |||
11467 | case NVPTX::BI__nvvm_atom_add_gen_i: | |||
11468 | case NVPTX::BI__nvvm_atom_add_gen_l: | |||
11469 | case NVPTX::BI__nvvm_atom_add_gen_ll: | |||
11470 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E); | |||
11471 | ||||
11472 | case NVPTX::BI__nvvm_atom_sub_gen_i: | |||
11473 | case NVPTX::BI__nvvm_atom_sub_gen_l: | |||
11474 | case NVPTX::BI__nvvm_atom_sub_gen_ll: | |||
11475 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E); | |||
11476 | ||||
11477 | case NVPTX::BI__nvvm_atom_and_gen_i: | |||
11478 | case NVPTX::BI__nvvm_atom_and_gen_l: | |||
11479 | case NVPTX::BI__nvvm_atom_and_gen_ll: | |||
11480 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E); | |||
11481 | ||||
11482 | case NVPTX::BI__nvvm_atom_or_gen_i: | |||
11483 | case NVPTX::BI__nvvm_atom_or_gen_l: | |||
11484 | case NVPTX::BI__nvvm_atom_or_gen_ll: | |||
11485 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E); | |||
11486 | ||||
11487 | case NVPTX::BI__nvvm_atom_xor_gen_i: | |||
11488 | case NVPTX::BI__nvvm_atom_xor_gen_l: | |||
11489 | case NVPTX::BI__nvvm_atom_xor_gen_ll: | |||
11490 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E); | |||
11491 | ||||
11492 | case NVPTX::BI__nvvm_atom_xchg_gen_i: | |||
11493 | case NVPTX::BI__nvvm_atom_xchg_gen_l: | |||
11494 | case NVPTX::BI__nvvm_atom_xchg_gen_ll: | |||
11495 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E); | |||
11496 | ||||
11497 | case NVPTX::BI__nvvm_atom_max_gen_i: | |||
11498 | case NVPTX::BI__nvvm_atom_max_gen_l: | |||
11499 | case NVPTX::BI__nvvm_atom_max_gen_ll: | |||
11500 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E); | |||
11501 | ||||
11502 | case NVPTX::BI__nvvm_atom_max_gen_ui: | |||
11503 | case NVPTX::BI__nvvm_atom_max_gen_ul: | |||
11504 | case NVPTX::BI__nvvm_atom_max_gen_ull: | |||
11505 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E); | |||
11506 | ||||
11507 | case NVPTX::BI__nvvm_atom_min_gen_i: | |||
11508 | case NVPTX::BI__nvvm_atom_min_gen_l: | |||
11509 | case NVPTX::BI__nvvm_atom_min_gen_ll: | |||
11510 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E); | |||
11511 | ||||
11512 | case NVPTX::BI__nvvm_atom_min_gen_ui: | |||
11513 | case NVPTX::BI__nvvm_atom_min_gen_ul: | |||
11514 | case NVPTX::BI__nvvm_atom_min_gen_ull: | |||
11515 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E); | |||
11516 | ||||
11517 | case NVPTX::BI__nvvm_atom_cas_gen_i: | |||
11518 | case NVPTX::BI__nvvm_atom_cas_gen_l: | |||
11519 | case NVPTX::BI__nvvm_atom_cas_gen_ll: | |||
11520 | // __nvvm_atom_cas_gen_* should return the old value rather than the | |||
11521 | // success flag. | |||
11522 | return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false); | |||
11523 | ||||
11524 | case NVPTX::BI__nvvm_atom_add_gen_f: { | |||
11525 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11526 | Value *Val = EmitScalarExpr(E->getArg(1)); | |||
11527 | // atomicrmw only deals with integer arguments, so we need to use | |||
11528 | // LLVM's nvvm_atomic_load_add_f32 intrinsic for that. | |||
11529 | Value *FnALAF32 = | |||
11530 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType()); | |||
11531 | return Builder.CreateCall(FnALAF32, {Ptr, Val}); | |||
11532 | } | |||
11533 | ||||
11534 | case NVPTX::BI__nvvm_atom_add_gen_d: { | |||
11535 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11536 | Value *Val = EmitScalarExpr(E->getArg(1)); | |||
11537 | // atomicrmw only deals with integer arguments, so we need to use | |||
11538 | // LLVM's nvvm_atomic_load_add_f64 intrinsic. | |||
11539 | Value *FnALAF64 = | |||
11540 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f64, Ptr->getType()); | |||
11541 | return Builder.CreateCall(FnALAF64, {Ptr, Val}); | |||
11542 | } | |||
11543 | ||||
11544 | case NVPTX::BI__nvvm_atom_inc_gen_ui: { | |||
11545 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11546 | Value *Val = EmitScalarExpr(E->getArg(1)); | |||
11547 | Value *FnALI32 = | |||
11548 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); | |||
11549 | return Builder.CreateCall(FnALI32, {Ptr, Val}); | |||
11550 | } | |||
11551 | ||||
11552 | case NVPTX::BI__nvvm_atom_dec_gen_ui: { | |||
11553 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11554 | Value *Val = EmitScalarExpr(E->getArg(1)); | |||
11555 | Value *FnALD32 = | |||
11556 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); | |||
11557 | return Builder.CreateCall(FnALD32, {Ptr, Val}); | |||
11558 | } | |||
11559 | ||||
11560 | case NVPTX::BI__nvvm_ldg_c: | |||
11561 | case NVPTX::BI__nvvm_ldg_c2: | |||
11562 | case NVPTX::BI__nvvm_ldg_c4: | |||
11563 | case NVPTX::BI__nvvm_ldg_s: | |||
11564 | case NVPTX::BI__nvvm_ldg_s2: | |||
11565 | case NVPTX::BI__nvvm_ldg_s4: | |||
11566 | case NVPTX::BI__nvvm_ldg_i: | |||
11567 | case NVPTX::BI__nvvm_ldg_i2: | |||
11568 | case NVPTX::BI__nvvm_ldg_i4: | |||
11569 | case NVPTX::BI__nvvm_ldg_l: | |||
11570 | case NVPTX::BI__nvvm_ldg_ll: | |||
11571 | case NVPTX::BI__nvvm_ldg_ll2: | |||
11572 | case NVPTX::BI__nvvm_ldg_uc: | |||
11573 | case NVPTX::BI__nvvm_ldg_uc2: | |||
11574 | case NVPTX::BI__nvvm_ldg_uc4: | |||
11575 | case NVPTX::BI__nvvm_ldg_us: | |||
11576 | case NVPTX::BI__nvvm_ldg_us2: | |||
11577 | case NVPTX::BI__nvvm_ldg_us4: | |||
11578 | case NVPTX::BI__nvvm_ldg_ui: | |||
11579 | case NVPTX::BI__nvvm_ldg_ui2: | |||
11580 | case NVPTX::BI__nvvm_ldg_ui4: | |||
11581 | case NVPTX::BI__nvvm_ldg_ul: | |||
11582 | case NVPTX::BI__nvvm_ldg_ull: | |||
11583 | case NVPTX::BI__nvvm_ldg_ull2: | |||
11584 | // PTX Interoperability section 2.2: "For a vector with an even number of | |||
11585 | // elements, its alignment is set to number of elements times the alignment | |||
11586 | // of its member: n*alignof(t)." | |||
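// Illustrative example: loading an int4 through __nvvm_ldg_i4 therefore uses
// a 16-byte (4 * alignof(int)) alignment on the generated ldg intrinsic call.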
11587 | return MakeLdg(Intrinsic::nvvm_ldg_global_i); | |||
11588 | case NVPTX::BI__nvvm_ldg_f: | |||
11589 | case NVPTX::BI__nvvm_ldg_f2: | |||
11590 | case NVPTX::BI__nvvm_ldg_f4: | |||
11591 | case NVPTX::BI__nvvm_ldg_d: | |||
11592 | case NVPTX::BI__nvvm_ldg_d2: | |||
11593 | return MakeLdg(Intrinsic::nvvm_ldg_global_f); | |||
11594 | ||||
11595 | case NVPTX::BI__nvvm_atom_cta_add_gen_i: | |||
11596 | case NVPTX::BI__nvvm_atom_cta_add_gen_l: | |||
11597 | case NVPTX::BI__nvvm_atom_cta_add_gen_ll: | |||
11598 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta); | |||
11599 | case NVPTX::BI__nvvm_atom_sys_add_gen_i: | |||
11600 | case NVPTX::BI__nvvm_atom_sys_add_gen_l: | |||
11601 | case NVPTX::BI__nvvm_atom_sys_add_gen_ll: | |||
11602 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys); | |||
11603 | case NVPTX::BI__nvvm_atom_cta_add_gen_f: | |||
11604 | case NVPTX::BI__nvvm_atom_cta_add_gen_d: | |||
11605 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta); | |||
11606 | case NVPTX::BI__nvvm_atom_sys_add_gen_f: | |||
11607 | case NVPTX::BI__nvvm_atom_sys_add_gen_d: | |||
11608 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys); | |||
11609 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_i: | |||
11610 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_l: | |||
11611 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll: | |||
11612 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta); | |||
11613 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_i: | |||
11614 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_l: | |||
11615 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll: | |||
11616 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys); | |||
11617 | case NVPTX::BI__nvvm_atom_cta_max_gen_i: | |||
11618 | case NVPTX::BI__nvvm_atom_cta_max_gen_ui: | |||
11619 | case NVPTX::BI__nvvm_atom_cta_max_gen_l: | |||
11620 | case NVPTX::BI__nvvm_atom_cta_max_gen_ul: | |||
11621 | case NVPTX::BI__nvvm_atom_cta_max_gen_ll: | |||
11622 | case NVPTX::BI__nvvm_atom_cta_max_gen_ull: | |||
11623 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta); | |||
11624 | case NVPTX::BI__nvvm_atom_sys_max_gen_i: | |||
11625 | case NVPTX::BI__nvvm_atom_sys_max_gen_ui: | |||
11626 | case NVPTX::BI__nvvm_atom_sys_max_gen_l: | |||
11627 | case NVPTX::BI__nvvm_atom_sys_max_gen_ul: | |||
11628 | case NVPTX::BI__nvvm_atom_sys_max_gen_ll: | |||
11629 | case NVPTX::BI__nvvm_atom_sys_max_gen_ull: | |||
11630 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys); | |||
11631 | case NVPTX::BI__nvvm_atom_cta_min_gen_i: | |||
11632 | case NVPTX::BI__nvvm_atom_cta_min_gen_ui: | |||
11633 | case NVPTX::BI__nvvm_atom_cta_min_gen_l: | |||
11634 | case NVPTX::BI__nvvm_atom_cta_min_gen_ul: | |||
11635 | case NVPTX::BI__nvvm_atom_cta_min_gen_ll: | |||
11636 | case NVPTX::BI__nvvm_atom_cta_min_gen_ull: | |||
11637 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta); | |||
11638 | case NVPTX::BI__nvvm_atom_sys_min_gen_i: | |||
11639 | case NVPTX::BI__nvvm_atom_sys_min_gen_ui: | |||
11640 | case NVPTX::BI__nvvm_atom_sys_min_gen_l: | |||
11641 | case NVPTX::BI__nvvm_atom_sys_min_gen_ul: | |||
11642 | case NVPTX::BI__nvvm_atom_sys_min_gen_ll: | |||
11643 | case NVPTX::BI__nvvm_atom_sys_min_gen_ull: | |||
11644 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys); | |||
11645 | case NVPTX::BI__nvvm_atom_cta_inc_gen_ui: | |||
11646 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta); | |||
11647 | case NVPTX::BI__nvvm_atom_cta_dec_gen_ui: | |||
11648 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta); | |||
11649 | case NVPTX::BI__nvvm_atom_sys_inc_gen_ui: | |||
11650 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys); | |||
11651 | case NVPTX::BI__nvvm_atom_sys_dec_gen_ui: | |||
11652 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys); | |||
11653 | case NVPTX::BI__nvvm_atom_cta_and_gen_i: | |||
11654 | case NVPTX::BI__nvvm_atom_cta_and_gen_l: | |||
11655 | case NVPTX::BI__nvvm_atom_cta_and_gen_ll: | |||
11656 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta); | |||
11657 | case NVPTX::BI__nvvm_atom_sys_and_gen_i: | |||
11658 | case NVPTX::BI__nvvm_atom_sys_and_gen_l: | |||
11659 | case NVPTX::BI__nvvm_atom_sys_and_gen_ll: | |||
11660 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys); | |||
11661 | case NVPTX::BI__nvvm_atom_cta_or_gen_i: | |||
11662 | case NVPTX::BI__nvvm_atom_cta_or_gen_l: | |||
11663 | case NVPTX::BI__nvvm_atom_cta_or_gen_ll: | |||
11664 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta); | |||
11665 | case NVPTX::BI__nvvm_atom_sys_or_gen_i: | |||
11666 | case NVPTX::BI__nvvm_atom_sys_or_gen_l: | |||
11667 | case NVPTX::BI__nvvm_atom_sys_or_gen_ll: | |||
11668 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys); | |||
11669 | case NVPTX::BI__nvvm_atom_cta_xor_gen_i: | |||
11670 | case NVPTX::BI__nvvm_atom_cta_xor_gen_l: | |||
11671 | case NVPTX::BI__nvvm_atom_cta_xor_gen_ll: | |||
11672 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta); | |||
11673 | case NVPTX::BI__nvvm_atom_sys_xor_gen_i: | |||
11674 | case NVPTX::BI__nvvm_atom_sys_xor_gen_l: | |||
11675 | case NVPTX::BI__nvvm_atom_sys_xor_gen_ll: | |||
11676 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys); | |||
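// The compare-and-swap builtins below take three operands (pointer, expected
// value, new value), and the intrinsic is overloaded on both the pointed-to
// type and the pointer type, so they are emitted directly rather than through
// MakeScopedAtomic.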
11677 | case NVPTX::BI__nvvm_atom_cta_cas_gen_i: | |||
11678 | case NVPTX::BI__nvvm_atom_cta_cas_gen_l: | |||
11679 | case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: { | |||
11680 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11681 | return Builder.CreateCall( | |||
11682 | CGM.getIntrinsic( | |||
11683 | Intrinsic::nvvm_atomic_cas_gen_i_cta, | |||
11684 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), | |||
11685 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); | |||
11686 | } | |||
11687 | case NVPTX::BI__nvvm_atom_sys_cas_gen_i: | |||
11688 | case NVPTX::BI__nvvm_atom_sys_cas_gen_l: | |||
11689 | case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: { | |||
11690 | Value *Ptr = EmitScalarExpr(E->getArg(0)); | |||
11691 | return Builder.CreateCall( | |||
11692 | CGM.getIntrinsic( | |||
11693 | Intrinsic::nvvm_atomic_cas_gen_i_sys, | |||
11694 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), | |||
11695 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); | |||
11696 | } | |||
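// __nvvm_match_all_sync_* returns the matched-lanes mask and reports the
// all-equal predicate through its pointer argument. Illustrative use (types
// are assumptions, not taken from this file):
//   int pred;
//   unsigned mask = __nvvm_match_all_sync_i32p(member_mask, value, &pred);
// The {mask, pred} aggregate produced by the intrinsic is unpacked below.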
11697 | case NVPTX::BI__nvvm_match_all_sync_i32p: | |||
11698 | case NVPTX::BI__nvvm_match_all_sync_i64p: { | |||
11699 | Value *Mask = EmitScalarExpr(E->getArg(0)); | |||
11700 | Value *Val = EmitScalarExpr(E->getArg(1)); | |||
11701 | Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); | |||
11702 | Value *ResultPair = Builder.CreateCall( | |||
11703 | CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p | |||
11704 | ? Intrinsic::nvvm_match_all_sync_i32p | |||
11705 | : Intrinsic::nvvm_match_all_sync_i64p), | |||
11706 | {Mask, Val}); | |||
11707 | Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1), | |||
11708 | PredOutPtr.getElementType()); | |||
11709 | Builder.CreateStore(Pred, PredOutPtr); | |||
11710 | return Builder.CreateExtractValue(ResultPair, 0); | |||
11711 | } | |||
11712 | case NVPTX::BI__hmma_m16n16k16_ld_a: | |||
11713 | case NVPTX::BI__hmma_m16n16k16_ld_b: | |||
11714 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: | |||
11715 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: | |||
11716 | case NVPTX::BI__hmma_m32n8k16_ld_a: | |||
11717 | case NVPTX::BI__hmma_m32n8k16_ld_b: | |||
11718 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: | |||
11719 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: | |||
11720 | case NVPTX::BI__hmma_m8n32k16_ld_a: | |||
11721 | case NVPTX::BI__hmma_m8n32k16_ld_b: | |||
11722 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: | |||
11723 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: { | |||
11724 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); | |||
11725 | Value *Src = EmitScalarExpr(E->getArg(1)); | |||
11726 | Value *Ldm = EmitScalarExpr(E->getArg(2)); | |||
11727 | llvm::APSInt isColMajorArg; | |||
11728 | if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext())) | |||
11729 | return nullptr; | |||
11730 | bool isColMajor = isColMajorArg.getSExtValue(); | |||
11731 | unsigned IID; | |||
11732 | unsigned NumResults; | |||
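// NumResults is the number of elements in the loaded fragment: 8 for a/b
// fragments and f32 c fragments, 4 for f16 c fragments. Each element is
// bitcast to the destination element type and stored with 4-byte alignment
// below.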
11733 | switch (BuiltinID) { | |||
11734 | case NVPTX::BI__hmma_m16n16k16_ld_a: | |||
11735 | IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride | |||
11736 | : Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride; | |||
11737 | NumResults = 8; | |||
11738 | break; | |||
11739 | case NVPTX::BI__hmma_m16n16k16_ld_b: | |||
11740 | IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride | |||
11741 | : Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride; | |||
11742 | NumResults = 8; | |||
11743 | break; | |||
11744 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: | |||
11745 | IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride | |||
11746 | : Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride; | |||
11747 | NumResults = 4; | |||
11748 | break; | |||
11749 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: | |||
11750 | IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride | |||
11751 | : Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride; | |||
11752 | NumResults = 8; | |||
11753 | break; | |||
11754 | case NVPTX::BI__hmma_m32n8k16_ld_a: | |||
11755 | IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride | |||
11756 | : Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride; | |||
11757 | NumResults = 8; | |||
11758 | break; | |||
11759 | case NVPTX::BI__hmma_m32n8k16_ld_b: | |||
11760 | IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride | |||
11761 | : Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride; | |||
11762 | NumResults = 8; | |||
11763 | break; | |||
11764 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: | |||
11765 | IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride | |||
11766 | : Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride; | |||
11767 | NumResults = 4; | |||
11768 | break; | |||
11769 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: | |||
11770 | IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride | |||
11771 | : Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride; | |||
11772 | NumResults = 8; | |||
11773 | break; | |||
11774 | case NVPTX::BI__hmma_m8n32k16_ld_a: | |||
11775 | IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride | |||
11776 | : Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride; | |||
11777 | NumResults = 8; | |||
11778 | break; | |||
11779 | case NVPTX::BI__hmma_m8n32k16_ld_b: | |||
11780 | IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride | |||
11781 | : Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride; | |||
11782 | NumResults = 8; | |||
11783 | break; | |||
11784 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: | |||
11785 | IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride | |||
11786 | : Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride; | |||
11787 | NumResults = 4; | |||
11788 | break; | |||
11789 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: | |||
11790 | IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride | |||
11791 | : Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride; | |||
11792 | NumResults = 8; | |||
11793 | break; | |||
11794 | default: | |||
11795 | llvm_unreachable("Unexpected builtin ID."); | |||
11796 | } | |||
11797 | Value *Result = | |||
11798 | Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm}); | |||
11799 | ||||
11800 | // Save returned values. | |||
11801 | for (unsigned i = 0; i < NumResults; ++i) { | |||
11802 | Builder.CreateAlignedStore( | |||
11803 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), | |||
11804 | Dst.getElementType()), | |||
11805 | Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)), | |||
11806 | CharUnits::fromQuantity(4)); | |||
11807 | } | |||
11808 | return Result; | |||
11809 | } | |||
11810 | ||||
11811 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: | |||
11812 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: | |||
11813 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: | |||
11814 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: | |||
11815 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: | |||
11816 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: { | |||
11817 | Value *Dst = EmitScalarExpr(E->getArg(0)); | |||
11818 | Address Src = EmitPointerWithAlignment(E->getArg(1)); | |||
11819 | Value *Ldm = EmitScalarExpr(E->getArg(2)); | |||
11820 | llvm::APSInt isColMajorArg; | |||
11821 | if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext())) | |||
11822 | return nullptr; | |||
11823 | bool isColMajor = isColMajorArg.getSExtValue(); | |||
11824 | unsigned IID; | |||
11825 | unsigned NumResults = 8; | |||
11826 | // PTX Instructions (and LLVM intrinsics) are defined for slice _d_, yet | |||
11827 | // for some reason nvcc builtins use _c_. | |||
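// For example, __hmma_m16n16k16_st_c_f16 with a row-major layout lowers to
// Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride.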
11828 | switch (BuiltinID) { | |||
11829 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: | |||
11830 | IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride | |||
11831 | : Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride; | |||
11832 | NumResults = 4; | |||
11833 | break; | |||
11834 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: | |||
11835 | IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride | |||
11836 | : Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride; | |||
11837 | break; | |||
11838 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: | |||
11839 | IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride | |||
11840 | : Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride; | |||
11841 | NumResults = 4; | |||
11842 | break; | |||
11843 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: | |||
11844 | IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride | |||
11845 | : Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride; | |||
11846 | break; | |||
11847 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: | |||
11848 | IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride | |||
11849 | : Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride; | |||
11850 | NumResults = 4; | |||
11851 | break; | |||
11852 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: | |||
11853 | IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride | |||
11854 | : Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride; | |||
11855 | break; | |||
11856 | default: | |||
11857 | llvm_unreachable("Unexpected builtin ID."); | |||
11858 | } | |||
11859 | Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType()); | |||
11860 | llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); | |||
11861 | SmallVector<Value *, 10> Values = {Dst}; | |||
11862 | for (unsigned i = 0; i < NumResults; ++i) { | |||
11863 | Value *V = Builder.CreateAlignedLoad( | |||
11864 | Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)), | |||
11865 | CharUnits::fromQuantity(4)); | |||
11866 | Values.push_back(Builder.CreateBitCast(V, ParamType)); | |||
11867 | } | |||
11868 | Values.push_back(Ldm); | |||
11869 | Value *Result = Builder.CreateCall(Intrinsic, Values); | |||
11870 | return Result; | |||
11871 | } | |||
11872 | ||||
11873 | // BI__hmma_m16n16k16_mma_<DType><CType>(d, a, b, c, layout, satf) --> | |||
11874 | // Intrinsic::nvvm_wmma_m16n16k16_mma_<layout A,B>_<DType>_<CType>[_satfinite] | |||
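// For example, __hmma_m16n16k16_mma_f32f16(d, a, b, c, /*layout=*/0,
// /*satf=*/0) selects Intrinsic::nvvm_wmma_m16n16k16_mma_row_row_f32_f16.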
11875 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: | |||
11876 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: | |||
11877 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: | |||
11878 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: | |||
11879 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: | |||
11880 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: | |||
11881 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: | |||
11882 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: | |||
11883 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: | |||
11884 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: | |||
11885 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: | |||
11886 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: { | |||
11887 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); | |||
11888 | Address SrcA = EmitPointerWithAlignment(E->getArg(1)); | |||
11889 | Address SrcB = EmitPointerWithAlignment(E->getArg(2)); | |||
11890 | Address SrcC = EmitPointerWithAlignment(E->getArg(3)); | |||
11891 | llvm::APSInt LayoutArg; | |||
11892 | if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext())) | |||
11893 | return nullptr; | |||
11894 | int Layout = LayoutArg.getSExtValue(); | |||
11895 | if (Layout < 0 || Layout > 3) | |||
11896 | return nullptr; | |||
11897 | llvm::APSInt SatfArg; | |||
11898 | if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext())) | |||
11899 | return nullptr; | |||
11900 | bool Satf = SatfArg.getSExtValue(); | |||
11901 | ||||
11902 | // clang-format off | |||
11903 | #define MMA_VARIANTS(geom, type) {{ \ | |||
11904 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \ | |||
11905 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \ | |||
11906 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ | |||
11907 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ | |||
11908 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \ | |||
11909 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \ | |||
11910 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \ | |||
11911 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \ | |||
11912 | }} | |||
11913 | // clang-format on | |||
11914 | ||||
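// Layout encodes the A/B layouts as 0 = row_row, 1 = row_col, 2 = col_row,
// 3 = col_col; each layout has a plain and a satfinite variant, hence
// Index = Layout * 2 + Satf.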
11915 | auto getMMAIntrinsic = [Layout, Satf](std::array<unsigned, 8> Variants) { | |||
11916 | unsigned Index = Layout * 2 + Satf; | |||
11917 | assert(Index < 8); | |||
11918 | return Variants[Index]; | |||
11919 | }; | |||
11920 | unsigned IID; | |||
11921 | unsigned NumEltsC; | |||
11922 | unsigned NumEltsD; | |||
11923 | switch (BuiltinID) { | |||
11924 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: | |||
11925 | IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f16)); | |||
11926 | NumEltsC = 4; | |||
11927 | NumEltsD = 4; | |||
11928 | break; | |||
11929 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: | |||
11930 | IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f16)); | |||
11931 | NumEltsC = 4; | |||
11932 | NumEltsD = 8; | |||
11933 | break; | |||
11934 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: | |||
11935 | IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f32)); | |||
11936 | NumEltsC = 8; | |||
11937 | NumEltsD = 4; | |||
11938 | break; | |||
11939 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: | |||
11940 | IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f32)); | |||
11941 | NumEltsC = 8; | |||
11942 | NumEltsD = 8; | |||
11943 | break; | |||
11944 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: | |||
11945 | IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f16)); | |||
11946 | NumEltsC = 4; | |||
11947 | NumEltsD = 4; | |||
11948 | break; | |||
11949 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: | |||
11950 | IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f16)); | |||
11951 | NumEltsC = 4; | |||
11952 | NumEltsD = 8; | |||
11953 | break; | |||
11954 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: | |||
11955 | IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f32)); | |||
11956 | NumEltsC = 8; | |||
11957 | NumEltsD = 4; | |||
11958 | break; | |||
11959 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: | |||
11960 | IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f32)); | |||
11961 | NumEltsC = 8; | |||
11962 | NumEltsD = 8; | |||
11963 | break; | |||
11964 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: | |||
11965 | IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f16)); | |||
11966 | NumEltsC = 4; | |||
11967 | NumEltsD = 4; | |||
11968 | break; | |||
11969 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: | |||
11970 | IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f16)); | |||
11971 | NumEltsC = 4; | |||
11972 | NumEltsD = 8; | |||
11973 | break; | |||
11974 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: | |||
11975 | IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f32)); | |||
11976 | NumEltsC = 8; | |||
11977 | NumEltsD = 4; | |||
11978 | break; | |||
11979 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: | |||
11980 | IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f32)); | |||
11981 | NumEltsC = 8; | |||
11982 | NumEltsD = 8; | |||
11983 | break; | |||
11984 | default: | |||
11985 | llvm_unreachable("Unexpected builtin ID."); | |||
11986 | } | |||
11987 | #undef MMA_VARIANTS | |||
11988 | ||||
11989 | SmallVector<Value *, 24> Values; | |||
11990 | Function *Intrinsic = CGM.getIntrinsic(IID); | |||
11991 | llvm::Type *ABType = Intrinsic->getFunctionType()->getParamType(0); | |||
11992 | // Load A | |||
11993 | for (unsigned i = 0; i < 8; ++i) { | |||
11994 | Value *V = Builder.CreateAlignedLoad( | |||
11995 | Builder.CreateGEP(SrcA.getPointer(), | |||
11996 | llvm::ConstantInt::get(IntTy, i)), | |||
11997 | CharUnits::fromQuantity(4)); | |||
11998 | Values.push_back(Builder.CreateBitCast(V, ABType)); | |||
11999 | } | |||
12000 | // Load B | |||
12001 | for (unsigned i = 0; i < 8; ++i) { | |||
12002 | Value *V = Builder.CreateAlignedLoad( | |||
12003 | Builder.CreateGEP(SrcB.getPointer(), | |||
12004 | llvm::ConstantInt::get(IntTy, i)), | |||
12005 | CharUnits::fromQuantity(4)); | |||
12006 | Values.push_back(Builder.CreateBitCast(V, ABType)); | |||
12007 | } | |||
12008 | // Load C | |||
12009 | llvm::Type *CType = Intrinsic->getFunctionType()->getParamType(16); | |||
12010 | for (unsigned i = 0; i < NumEltsC; ++i) { | |||
12011 | Value *V = Builder.CreateAlignedLoad( | |||
12012 | Builder.CreateGEP(SrcC.getPointer(), | |||
12013 | llvm::ConstantInt::get(IntTy, i)), | |||
12014 | CharUnits::fromQuantity(4)); | |||
12015 | Values.push_back(Builder.CreateBitCast(V, CType)); | |||
12016 | } | |||
12017 | Value *Result = Builder.CreateCall(Intrinsic, Values); | |||
12018 | llvm::Type *DType = Dst.getElementType(); | |||
12019 | for (unsigned i = 0; i < NumEltsD; ++i) | |||
12020 | Builder.CreateAlignedStore( | |||
12021 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType), | |||
12022 | Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)), | |||
12023 | CharUnits::fromQuantity(4)); | |||
12024 | return Result; | |||
12025 | } | |||
12026 | default: | |||
12027 | return nullptr; | |||
12028 | } | |||
12029 | } | |||
12030 | ||||
12031 | Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, | |||
12032 | const CallExpr *E) { | |||
12033 | switch (BuiltinID) { | |||
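// Each builtin lowers to the like-named wasm intrinsic. The mem_size/mem_grow
// and current_memory/grow_memory forms below appear to be earlier spellings of
// memory_size/memory_grow (an assumption based on naming, not on this file).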
12034 | case WebAssembly::BI__builtin_wasm_memory_size: { | |||
12035 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
12036 | Value *I = EmitScalarExpr(E->getArg(0)); | |||
12037 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType); | |||
12038 | return Builder.CreateCall(Callee, I); | |||
12039 | } | |||
12040 | case WebAssembly::BI__builtin_wasm_memory_grow: { | |||
12041 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
12042 | Value *Args[] = { | |||
12043 | EmitScalarExpr(E->getArg(0)), | |||
12044 | EmitScalarExpr(E->getArg(1)) | |||
12045 | }; | |||
12046 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType); | |||
12047 | return Builder.CreateCall(Callee, Args); | |||
12048 | } | |||
12049 | case WebAssembly::BI__builtin_wasm_mem_size: { | |||
12050 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
12051 | Value *I = EmitScalarExpr(E->getArg(0)); | |||
12052 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_size, ResultType); | |||
12053 | return Builder.CreateCall(Callee, I); | |||
12054 | } | |||
12055 | case WebAssembly::BI__builtin_wasm_mem_grow: { | |||
12056 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
12057 | Value *Args[] = { | |||
12058 | EmitScalarExpr(E->getArg(0)), | |||
12059 | EmitScalarExpr(E->getArg(1)) | |||
12060 | }; | |||
12061 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_grow, ResultType); | |||
12062 | return Builder.CreateCall(Callee, Args); | |||
12063 | } | |||
12064 | case WebAssembly::BI__builtin_wasm_current_memory: { | |||
12065 | llvm::Type *ResultType = ConvertType(E->getType()); | |||
12066 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_current_memory, ResultType); | |||
12067 | return Builder.CreateCall(Callee); | |||
12068 | } | |||
12069 | case WebAssembly::BI__builtin_wasm_grow_memory: { | |||
12070 | Value *X = EmitScalarExpr(E->getArg(0)); | |||
12071 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_grow_memory, X->getType()); | |||
12072 | return Builder.CreateCall(Callee, X); | |||
12073 | } | |||
12074 | case WebAssembly::BI__builtin_wasm_throw: { | |||
12075 | Value *Tag = EmitScalarExpr(E->getArg(0)); | |||
12076 | Value *Obj = EmitScalarExpr(E->getArg(1)); | |||
12077 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw); | |||
12078 | return Builder.CreateCall(Callee, {Tag, Obj}); | |||
12079 | } | |||
12080 | case WebAssembly::BI__builtin_wasm_rethrow: { | |||
12081 | Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow); | |||
12082 | return Builder.CreateCall(Callee); | |||
12083 | } | |||
12084 | ||||
12085 | default: | |||
12086 | return nullptr; | |||
12087 | } | |||
12088 | } | |||
12089 | ||||
12090 | Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, | |||
12091 | const CallExpr *E) { | |||
12092 | SmallVector<llvm::Value *, 4> Ops; | |||
12093 | Intrinsic::ID ID = Intrinsic::not_intrinsic; | |||
12094 | ||||
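// The circular (pci/pcr) load and store builtins update the base pointer in
// place: the first argument is the address of the base pointer, which is
// loaded, passed to the intrinsic, and rewritten with the updated value
// afterwards.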
12095 | auto MakeCircLd = [&](unsigned IntID, bool HasImm) { | |||
12096 | // The base pointer is passed by address, so it needs to be loaded. | |||
12097 | Address BP = EmitPointerWithAlignment(E->getArg(0)); | |||
12098 | BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy), | |||
12099 | BP.getAlignment()); | |||
12100 | llvm::Value *Base = Builder.CreateLoad(BP); | |||
12101 | // Operands are Base, Increment, Modifier, Start. | |||
12102 | if (HasImm) | |||
12103 | Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), | |||
12104 | EmitScalarExpr(E->getArg(3)) }; | |||
12105 | else | |||
12106 | Ops = { Base, EmitScalarExpr(E->getArg(1)), | |||
12107 | EmitScalarExpr(E->getArg(2)) }; | |||
12108 | ||||
12109 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); | |||
12110 | llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1); | |||
12111 | llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), | |||
12112 | NewBase->getType()->getPointerTo()); | |||
12113 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
12114 | // The intrinsic generates two results. The new value for the base pointer | |||
12115 | // needs to be stored. | |||
12116 | Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); | |||
12117 | return Builder.CreateExtractValue(Result, 0); | |||
12118 | }; | |||
12119 | ||||
12120 | auto MakeCircSt = [&](unsigned IntID, bool HasImm) { | |||
12121 | // The base pointer is passed by address, so it needs to be loaded. | |||
12122 | Address BP = EmitPointerWithAlignment(E->getArg(0)); | |||
12123 | BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy), | |||
12124 | BP.getAlignment()); | |||
12125 | llvm::Value *Base = Builder.CreateLoad(BP); | |||
12126 | // Operands are Base, Increment, Modifier, Value, Start. | |||
12127 | if (HasImm) | |||
12128 | Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), | |||
12129 | EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) }; | |||
12130 | else | |||
12131 | Ops = { Base, EmitScalarExpr(E->getArg(1)), | |||
12132 | EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) }; | |||
12133 | ||||
12134 | llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); | |||
12135 | llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), | |||
12136 | NewBase->getType()->getPointerTo()); | |||
12137 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); | |||
12138 | // The intrinsic generates one result, which is the new value for the base | |||
12139 | // pointer. It needs to be stored. | |||
12140 | return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); | |||
12141 | }; | |||
12142 | ||||
12143 | // Handle the conversion of bit-reverse load intrinsics to bitcode. | |||
12144 | // The intrinsic call emitted here only reads from memory; the write to | |||
12145 | // memory is handled by the explicit store instruction. | |||
12146 | auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) { | |||
12147 | // The builtin's return value is the new value for the base pointer, so it | |||
12148 | // needs to be returned. The loaded value is passed back to the caller by | |||
12149 | // address, so it needs to be stored through the destination pointer. | |||
12150 | llvm::Value *BaseAddress = | |||
12151 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); | |||
12152 | ||||
12153 | // Expressions like &(*pt++) are incremented on each evaluation; | |||
12154 | // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression | |||
12155 | // once per call. | |||
12156 | Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); | |||
12157 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy), | |||
12158 | DestAddr.getAlignment()); | |||
12159 | llvm::Value *DestAddress = DestAddr.getPointer(); | |||
12160 | ||||
12161 | // Operands are Base, Dest, Modifier. | |||
12162 | // The intrinsic format in LLVM IR is defined as | |||
12163 | // { ValueType, i8* } (i8*, i32). | |||
12164 | Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))}; | |||
12165 | ||||
12166 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); | |||
12167 | // The value needs to be stored as the variable is passed by reference. | |||
12168 | llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0); | |||
12169 | ||||
12170 | // The stored value needs to be truncated to fit the destination type. | |||
12171 | // While i32 and i64 are natively supported on Hexagon, i8 and i16 need | |||
12172 | // to be handled with stores of the respective destination type. | |||
12173 | DestVal = Builder.CreateTrunc(DestVal, DestTy); | |||
12174 | ||||
12175 | llvm::Value *DestForStore = | |||
12176 | Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo()); | |||
12177 | Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment()); | |||
12178 | // The updated value of the base pointer is returned. | |||
12179 | return Builder.CreateExtractValue(Result, 1); | |||
12180 | }; | |||
12181 | ||||
12182 | switch (BuiltinID) { | |||
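// vaddcarry/vsubcarry take the carry predicate by address in the third
// argument: the predicate vector is loaded, passed to the intrinsic, and the
// carry-out (the second result) is stored back before the vector result is
// returned.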
12183 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry: | |||
12184 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: { | |||
12185 | Address Dest = EmitPointerWithAlignment(E->getArg(2)); | |||
12186 | unsigned Size; | |||
12187 | if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) { | |||
12188 | Size = 512; | |||
12189 | ID = Intrinsic::hexagon_V6_vaddcarry; | |||
12190 | } else { | |||
12191 | Size = 1024; | |||
12192 | ID = Intrinsic::hexagon_V6_vaddcarry_128B; | |||
12193 | } | |||
12194 | Dest = Builder.CreateBitCast(Dest, | |||
12195 | llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0)); | |||
12196 | LoadInst *QLd = Builder.CreateLoad(Dest); | |||
12197 | Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd }; | |||
12198 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); | |||
12199 | llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1); | |||
12200 | llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)), | |||
12201 | Vprd->getType()->getPointerTo(0)); | |||
12202 | Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment()); | |||
12203 | return Builder.CreateExtractValue(Result, 0); | |||
12204 | } | |||
12205 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry: | |||
12206 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: { | |||
12207 | Address Dest = EmitPointerWithAlignment(E->getArg(2)); | |||
12208 | unsigned Size; | |||
12209 | if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) { | |||
12210 | Size = 512; | |||
12211 | ID = Intrinsic::hexagon_V6_vsubcarry; | |||
12212 | } else { | |||
12213 | Size = 1024; | |||
12214 | ID = Intrinsic::hexagon_V6_vsubcarry_128B; | |||
12215 | } | |||
12216 | Dest = Builder.CreateBitCast(Dest, | |||
12217 | llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0)); | |||
12218 | LoadInst *QLd = Builder.CreateLoad(Dest); | |||
12219 | Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd }; | |||
12220 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); | |||
12221 | llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1); | |||
12222 | llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)), | |||
12223 | Vprd->getType()->getPointerTo(0)); | |||
12224 | Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment()); | |||
12225 | return Builder.CreateExtractValue(Result, 0); | |||
12226 | } | |||
12227 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci: | |||
12228 | return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true); | |||
12229 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci: | |||
12230 | return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true); | |||
12231 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci: | |||
12232 | return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true); | |||
12233 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci: | |||
12234 | return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true); | |||
12235 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci: | |||
12236 | return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true); | |||
12237 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci: | |||
12238 | return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true); | |||
12239 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr: | |||
12240 | return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false); | |||
12241 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr: | |||
12242 | return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false); | |||
12243 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr: | |||
12244 | return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false); | |||
12245 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr: | |||
12246 | return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false); | |||
12247 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr: | |||
12248 | return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false); | |||
12249 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr: | |||
12250 | return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false); | |||
12251 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci: | |||
12252 | return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true); | |||
12253 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci: | |||
12254 | return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true); | |||
12255 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci: | |||
12256 | return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true); | |||
12257 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci: | |||
12258 | return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true); | |||
12259 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci: | |||
12260 | return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true); | |||
12261 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr: | |||
12262 | return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false); | |||
12263 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr: | |||
12264 | return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false); | |||
12265 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr: | |||
12266 | return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false); | |||
12267 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr: | |||
12268 | return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false); | |||
12269 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr: | |||
12270 | return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false); | |||
12271 | case Hexagon::BI__builtin_brev_ldub: | |||
12272 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty); | |||
12273 | case Hexagon::BI__builtin_brev_ldb: | |||
12274 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty); | |||
12275 | case Hexagon::BI__builtin_brev_lduh: | |||
12276 | return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty); | |||
12277 | case Hexagon::BI__builtin_brev_ldh: | |||
12278 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty); | |||
12279 | case Hexagon::BI__builtin_brev_ldw: | |||
12280 | return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty); | |||
12281 | case Hexagon::BI__builtin_brev_ldd: | |||
12282 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty); | |||
12283 | default: | |||
12284 | break; | |||
12285 | } // switch | |||
12286 | ||||
12287 | return nullptr; | |||
12288 | } |