Bug Summary

File: clang/lib/Sema/SemaChecking.cpp
Warning: line 16526, column 15
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SemaChecking.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/lib/Sema -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/lib/Sema -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/lib/Sema -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/tools/clang/lib/Sema -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/lib/Sema/SemaChecking.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/clang/lib/Sema/SemaChecking.cpp

1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclarationName.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/FormatString.h"
30#include "clang/AST/NSAPI.h"
31#include "clang/AST/NonTrivialTypeVisitor.h"
32#include "clang/AST/OperationKinds.h"
33#include "clang/AST/RecordLayout.h"
34#include "clang/AST/Stmt.h"
35#include "clang/AST/TemplateBase.h"
36#include "clang/AST/Type.h"
37#include "clang/AST/TypeLoc.h"
38#include "clang/AST/UnresolvedSet.h"
39#include "clang/Basic/AddressSpaces.h"
40#include "clang/Basic/CharInfo.h"
41#include "clang/Basic/Diagnostic.h"
42#include "clang/Basic/IdentifierTable.h"
43#include "clang/Basic/LLVM.h"
44#include "clang/Basic/LangOptions.h"
45#include "clang/Basic/OpenCLOptions.h"
46#include "clang/Basic/OperatorKinds.h"
47#include "clang/Basic/PartialDiagnostic.h"
48#include "clang/Basic/SourceLocation.h"
49#include "clang/Basic/SourceManager.h"
50#include "clang/Basic/Specifiers.h"
51#include "clang/Basic/SyncScope.h"
52#include "clang/Basic/TargetBuiltins.h"
53#include "clang/Basic/TargetCXXABI.h"
54#include "clang/Basic/TargetInfo.h"
55#include "clang/Basic/TypeTraits.h"
56#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57#include "clang/Sema/Initialization.h"
58#include "clang/Sema/Lookup.h"
59#include "clang/Sema/Ownership.h"
60#include "clang/Sema/Scope.h"
61#include "clang/Sema/ScopeInfo.h"
62#include "clang/Sema/Sema.h"
63#include "clang/Sema/SemaInternal.h"
64#include "llvm/ADT/APFloat.h"
65#include "llvm/ADT/APInt.h"
66#include "llvm/ADT/APSInt.h"
67#include "llvm/ADT/ArrayRef.h"
68#include "llvm/ADT/DenseMap.h"
69#include "llvm/ADT/FoldingSet.h"
70#include "llvm/ADT/None.h"
71#include "llvm/ADT/Optional.h"
72#include "llvm/ADT/STLExtras.h"
73#include "llvm/ADT/SmallBitVector.h"
74#include "llvm/ADT/SmallPtrSet.h"
75#include "llvm/ADT/SmallString.h"
76#include "llvm/ADT/SmallVector.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/ADT/StringSet.h"
79#include "llvm/ADT/StringSwitch.h"
80#include "llvm/ADT/Triple.h"
81#include "llvm/Support/AtomicOrdering.h"
82#include "llvm/Support/Casting.h"
83#include "llvm/Support/Compiler.h"
84#include "llvm/Support/ConvertUTF.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/Format.h"
87#include "llvm/Support/Locale.h"
88#include "llvm/Support/MathExtras.h"
89#include "llvm/Support/SaveAndRestore.h"
90#include "llvm/Support/raw_ostream.h"
91#include <algorithm>
92#include <bitset>
93#include <cassert>
94#include <cctype>
95#include <cstddef>
96#include <cstdint>
97#include <functional>
98#include <limits>
99#include <string>
100#include <tuple>
101#include <utility>
102
103using namespace clang;
104using namespace sema;
105
106SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
107 unsigned ByteNo) const {
108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
109 Context.getTargetInfo());
110}
111
112/// Checks that a call expression's argument count is the desired number.
113/// This is useful when doing custom type-checking. Returns true on error.
114static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
115 unsigned argCount = call->getNumArgs();
116 if (argCount == desiredArgCount) return false;
117
118 if (argCount < desiredArgCount)
119 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
120 << 0 /*function call*/ << desiredArgCount << argCount
121 << call->getSourceRange();
122
123 // Highlight all the excess arguments.
124 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
125 call->getArg(argCount - 1)->getEndLoc());
126
127 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
128 << 0 /*function call*/ << desiredArgCount << argCount
129 << call->getArg(1)->getSourceRange();
130}
131
132/// Check that the first argument to __builtin_annotation is an integer
133/// and the second argument is a non-wide string literal.
134static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
135 if (checkArgCount(S, TheCall, 2))
136 return true;
137
138 // First argument should be an integer.
139 Expr *ValArg = TheCall->getArg(0);
140 QualType Ty = ValArg->getType();
141 if (!Ty->isIntegerType()) {
142 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
143 << ValArg->getSourceRange();
144 return true;
145 }
146
147 // Second argument should be a constant string.
148 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
149 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
150 if (!Literal || !Literal->isAscii()) {
151 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
152 << StrArg->getSourceRange();
153 return true;
154 }
155
156 TheCall->setType(Ty);
157 return false;
158}
159
160static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
161 // We need at least one argument.
162 if (TheCall->getNumArgs() < 1) {
163 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
164 << 0 << 1 << TheCall->getNumArgs()
165 << TheCall->getCallee()->getSourceRange();
166 return true;
167 }
168
169 // All arguments should be wide string literals.
170 for (Expr *Arg : TheCall->arguments()) {
171 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
172 if (!Literal || !Literal->isWide()) {
173 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
174 << Arg->getSourceRange();
175 return true;
176 }
177 }
178
179 return false;
180}
181
182/// Check that the argument to __builtin_addressof is a glvalue, and set the
183/// result type to the corresponding pointer type.
184static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
185 if (checkArgCount(S, TheCall, 1))
186 return true;
187
188 ExprResult Arg(TheCall->getArg(0));
189 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
190 if (ResultType.isNull())
191 return true;
192
193 TheCall->setArg(0, Arg.get());
194 TheCall->setType(ResultType);
195 return false;
196}
197
198/// Check the number of arguments and set the result type to
199/// the argument type.
200static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
201 if (checkArgCount(S, TheCall, 1))
202 return true;
203
204 TheCall->setType(TheCall->getArg(0)->getType());
205 return false;
206}
207
208/// Check that the value argument for __builtin_is_aligned(value, alignment) and
209/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
210/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  // __builtin_is_aligned returns bool; align_up/align_down return the
  // (possibly decayed) source type.
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Only plain integer types qualify: enums and bool are rejected.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  // The alignment operand must also be a plain integer.
  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    // Largest representable power of two for the source type's bit width.
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      // Alignment of 1 is always satisfied / a no-op: warn but accept.
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  // Convert both operands as if they were passed to parameters of their own
  // (possibly decayed) types.
  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}
288
/// Type-check __builtin_{add,sub,mul}_overflow: the first two arguments must
/// be integers and the third a pointer to a modifiable integer. Returns true
/// on error.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    // Apply the usual array/function/lvalue conversions first so the type
    // check sees the converted operand.
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to mul function until
  // we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}
345
/// Type-check __builtin_call_with_static_chain(call, chain): the first
/// argument must be an ordinary function call (not a block call, builtin
/// call, or pseudo-destructor call) and the second must convert to a pointer.
/// On success, the builtin call is rewritten to forward the wrapped call's
/// type, value kind and object kind. Returns true on error.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  // The first argument must be a plain CallExpr...
  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  // ...that is not a call through a block pointer...
  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  // ...nor a call to another builtin...
  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  // ...nor a pseudo-destructor call.
  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  // The chain argument must convert to some pointer type.
  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin callee the function-pointer type
  // 'Ret (Ret, ChainPtrTy)' so the call forwards the wrapped return type.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // Propagate the wrapped call's result category onto the builtin call and
  // install the converted callee/chain operands.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}
408
409namespace {
410
/// Walks a printf-style format string and accumulates a conservative lower
/// bound on the number of bytes a call like sprintf would write, used by the
/// _FORTIFY_SOURCE overflow diagnostics.
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size; // Running lower bound on the formatted output size (bytes).

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  // Called for each conversion specifier; adds that specifier's minimum
  // output size and subtracts the specifier text itself (it was counted as
  // literal bytes in the constructor). Returns true to continue parsing.
  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // '+' or ' ' flag forces one extra sign/space character.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // The specifier bytes were counted as literal text up front; remove them
    // now that the expansion has been accounted for.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  // Returns the constant field width, or 0 if unspecified/non-constant.
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  // Returns the effective precision: an explicit constant, or the
  // per-specifier default when none is given.
  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};
588
589} // namespace
590
/// Diagnose fortified (_FORTIFY_SOURCE-style) memory/string builtins whose
/// statically-known source size exceeds the destination's object size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // Sizes cannot (or must not) be evaluated in dependent or
  // constant-evaluated contexts.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  // Evaluates an explicit size argument (e.g. the bound passed to a *_chk
  // variant) as an integer constant, if possible.
  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(Index);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    return Result.Val.getInt();
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    const Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  // Statically computes strlen(arg) + 1 when the string contents are known.
  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  // Per-builtin selection of how to compute source/destination sizes and
  // which warning to emit.
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      // Estimate the minimum bytes sprintf will write from the format string.
      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
// case Builtin::BI__builtin___strlcat_chk:
// case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  // Silent when either size is unknown or the source fits.
  if (!SourceSize || !DestinationSize ||
      SourceSize.getValue().ule(DestinationSize.getValue()))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}
792
793static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
794 Scope::ScopeFlags NeededScopeFlags,
795 unsigned DiagID) {
796 // Scopes aren't available during instantiation. Fortunately, builtin
797 // functions cannot be template args so they cannot be formed through template
798 // instantiation. Therefore checking once during the parse is sufficient.
799 if (SemaRef.inTemplateInstantiation())
800 return false;
801
802 Scope *S = SemaRef.getCurScope();
803 while (S && !S->isSEHExceptScope())
804 S = S->getParent();
805 if (!S || !(S->getFlags() & NeededScopeFlags)) {
806 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
807 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
808 << DRE->getDecl()->getIdentifier();
809 return true;
810 }
811
812 return false;
813}
814
815static inline bool isBlockPointer(Expr *Arg) {
816 return Arg->getType()->isBlockPointerType();
817}
818
819/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
820/// void*, which is a requirement of device side enqueue.
821static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
822 const BlockPointerType *BPT =
823 cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
824 ArrayRef<QualType> Params =
825 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
826 unsigned ArgCounter = 0;
827 bool IllegalParams = false;
828 // Iterate through the block parameters until either one is found that is not
829 // a local void*, or the block is valid.
830 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
831 I != E; ++I, ++ArgCounter) {
832 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
833 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
834 LangAS::opencl_local) {
835 // Get the location of the error. If a block literal has been passed
836 // (BlockExpr) then we can point straight to the offending argument,
837 // else we just point to the variable reference.
838 SourceLocation ErrorLoc;
839 if (isa<BlockExpr>(BlockArg)) {
840 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
841 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
842 } else if (isa<DeclRefExpr>(BlockArg)) {
843 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
844 }
845 S.Diag(ErrorLoc,
846 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
847 IllegalParams = true;
848 }
849 }
850
851 return IllegalParams;
852}
853
854static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
855 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
856 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
857 << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
858 return true;
859 }
860 return false;
861}
862
/// Validates a 2-argument OpenCL builtin taking (ndrange_t, block): requires
/// the cl_khr_subgroups extension, an ndrange_t first argument, and a block
/// whose parameters are all 'local void *'. Returns true on error (after
/// emitting a diagnostic).
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  // NOTE(review): compared by type-name string; presumably ndrange_t is a
  // header-provided typedef rather than a builtin type Sema can test
  // directly -- confirm.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // Second argument must be a block; its parameter list is validated by
  // checkOpenCLBlockArgs.
  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}
886
887/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
888/// get_kernel_work_group_size
889/// and get_kernel_preferred_work_group_size_multiple builtin functions.
890static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
891 if (checkArgCount(S, TheCall, 1))
892 return true;
893
894 Expr *BlockArg = TheCall->getArg(0);
895 if (!isBlockPointer(BlockArg)) {
896 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
897 << TheCall->getDirectCallee() << "block";
898 return true;
899 }
900 return checkOpenCLBlockArgs(S, BlockArg);
901}
902
903/// Diagnose integer type and any valid implicit conversion to it.
904static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
905 const QualType &IntType);
906
907static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
908 unsigned Start, unsigned End) {
909 bool IllegalParams = false;
910 for (unsigned I = Start; I <= End; ++I)
911 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
912 S.Context.getSizeType());
913 return IllegalParams;
914}
915
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
/// \param NumNonVarArgs Number of fixed arguments preceding the variadic
///        local-size arguments in this enqueue_kernel overload.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}
939
/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // All overloads take at least (queue_t, flags, ndrange, block-or-events...).
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  // NOTE(review): compared by type-name string; presumably ndrange_t is a
  // header-provided typedef rather than a builtin type -- confirm.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    // This overload's block takes no parameters at all.
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs: the trailing arguments are local-memory
  // sizes, one per block parameter.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument (num_events_in_wait_list) has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t
    // (a null pointer constant is also accepted).
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t
    // (a null pointer constant is also accepted).
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Exactly seven arguments: the events form with a parameterless block.
    if (NumArgs == 7)
      return false;

    // Events form with a variadic block: validate the trailing size args.
    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}
1085
1086/// Returns OpenCL access qual.
1087static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1088 return D->getAttr<OpenCLAccessAttr>();
1089}
1090
/// Checks the first argument of a pipe builtin call: it must have pipe type,
/// and its access qualifier (read_only/write_only) must be compatible with
/// the builtin being called. Returns true if a diagnostic was emitted.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  // NOTE(review): assumes the pipe argument is a DeclRefExpr; the cast
  // asserts otherwise -- confirm callers/grammar guarantee this.
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    // Read builtins accept no qualifier (treated as read_only) or an
    // explicit read_only qualifier.
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    // Write builtins require an explicit write_only qualifier.
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}
1140
1141/// Returns true if pipe element type is different from the pointer.
1142static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1143 const Expr *Arg0 = Call->getArg(0);
1144 const Expr *ArgIdx = Call->getArg(Idx);
1145 const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
1146 const QualType EltTy = PipeTy->getElementType();
1147 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1148 // The Idx argument should be a pointer and the type of the pointer and
1149 // the type of pipe element should also be the same.
1150 if (!ArgTy ||
1151 !S.Context.hasSameType(
1152 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1153 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1154 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1155 << ArgIdx->getType() << ArgIdx->getSourceRange();
1156 return true;
1157 }
1158 return false;
1159}
1160
1161// Performs semantic analysis for the read/write_pipe call.
1162// \param S Reference to the semantic analyzer.
1163// \param Call A pointer to the builtin call.
1164// \return True if a semantic error has been found, false otherwise.
1165static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1166 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1167 // functions have two forms.
1168 switch (Call->getNumArgs()) {
1169 case 2:
1170 if (checkOpenCLPipeArg(S, Call))
1171 return true;
1172 // The call with 2 arguments should be
1173 // read/write_pipe(pipe T, T*).
1174 // Check packet type T.
1175 if (checkOpenCLPipePacketType(S, Call, 1))
1176 return true;
1177 break;
1178
1179 case 4: {
1180 if (checkOpenCLPipeArg(S, Call))
1181 return true;
1182 // The call with 4 arguments should be
1183 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1184 // Check reserve_id_t.
1185 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1186 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1187 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1188 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1189 return true;
1190 }
1191
1192 // Check the index.
1193 const Expr *Arg2 = Call->getArg(2);
1194 if (!Arg2->getType()->isIntegerType() &&
1195 !Arg2->getType()->isUnsignedIntegerType()) {
1196 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1197 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1198 << Arg2->getType() << Arg2->getSourceRange();
1199 return true;
1200 }
1201
1202 // Check packet type T.
1203 if (checkOpenCLPipePacketType(S, Call, 3))
1204 return true;
1205 } break;
1206 default:
1207 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1208 << Call->getDirectCallee() << Call->getSourceRange();
1209 return true;
1210 }
1211
1212 return false;
1213}
1214
1215// Performs a semantic analysis on the {work_group_/sub_group_
1216// /_}reserve_{read/write}_pipe
1217// \param S Reference to the semantic analyzer.
1218// \param Call The call to the builtin function to be analyzed.
1219// \return True if a semantic error was found, false otherwise.
1220static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1221 if (checkArgCount(S, Call, 2))
1222 return true;
1223
1224 if (checkOpenCLPipeArg(S, Call))
1225 return true;
1226
1227 // Check the reserve size.
1228 if (!Call->getArg(1)->getType()->isIntegerType() &&
1229 !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
1230 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1231 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1232 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1233 return true;
1234 }
1235
1236 // Since return type of reserve_read/write_pipe built-in function is
1237 // reserve_id_t, which is not defined in the builtin def file , we used int
1238 // as return type and need to override the return type of these functions.
1239 Call->setType(S.Context.OCLReserveIDTy);
1240
1241 return false;
1242}
1243
1244// Performs a semantic analysis on {work_group_/sub_group_
1245// /_}commit_{read/write}_pipe
1246// \param S Reference to the semantic analyzer.
1247// \param Call The call to the builtin function to be analyzed.
1248// \return True if a semantic error was found, false otherwise.
1249static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1250 if (checkArgCount(S, Call, 2))
1251 return true;
1252
1253 if (checkOpenCLPipeArg(S, Call))
1254 return true;
1255
1256 // Check reserve_id_t.
1257 if (!Call->getArg(1)->getType()->isReserveIDT()) {
1258 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1259 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1260 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1261 return true;
1262 }
1263
1264 return false;
1265}
1266
1267// Performs a semantic analysis on the call to built-in Pipe
1268// Query Functions.
1269// \param S Reference to the semantic analyzer.
1270// \param Call The call to the builtin function to be analyzed.
1271// \return True if a semantic error was found, false otherwise.
1272static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1273 if (checkArgCount(S, Call, 1))
1274 return true;
1275
1276 if (!Call->getArg(0)->getType()->isPipeType()) {
1277 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1278 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1279 return true;
1280 }
1281
1282 return false;
1283}
1284
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// On success, rewrites the call's result type to a pointer into the target
// address space.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  // The argument must be a pointer, and not into the constant address space.
  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // Warn (but still accept) when the pointer is not in the generic address
  // space.
  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  // Rebuild the pointee's qualifiers with the target address space, then
  // override the call's result type with the corresponding pointer type.
  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}
1331
// Checks and types a call to __builtin_launder, returning the (possibly
// updated) call or ExprError. The builtin's result type mirrors the
// (decayed) argument type, which must be a pointer to a complete object
// type -- not a function pointer and not void*.
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  // Select which "invalid argument" diagnostic applies, if any:
  // 0 = not a pointer, 1 = function pointer, 2 = void pointer.
  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Convert the argument as if initializing a parameter of type ParamTy.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}
1391
1392// Emit an error and return true if the current architecture is not in the list
1393// of supported architectures.
1394static bool
1395CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
1396 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
1397 llvm::Triple::ArchType CurArch =
1398 S.getASTContext().getTargetInfo().getTriple().getArch();
1399 if (llvm::is_contained(SupportedArchs, CurArch))
1400 return false;
1401 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
1402 << TheCall->getSourceRange();
1403 return true;
1404}
1405
1406static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
1407 SourceLocation CallSiteLoc);
1408
// Dispatches checking of a target-specific builtin call to the per-target
// checker matching the compilation target's architecture.
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}
1452
1453ExprResult
1454Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
1455 CallExpr *TheCall) {
1456 ExprResult TheCallResult(TheCall);
1457
1458 // Find out if any arguments are required to be integer constant expressions.
1459 unsigned ICEArguments = 0;
1460 ASTContext::GetBuiltinTypeError Error;
1461 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
1462 if (Error != ASTContext::GE_None)
1463 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1464
1465 // If any arguments are required to be ICE's, check and diagnose.
1466 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
1467 // Skip arguments not required to be ICE's.
1468 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
1469
1470 llvm::APSInt Result;
1471 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
1472 return true;
1473 ICEArguments &= ~(1 << ArgNo);
1474 }
1475
1476 switch (BuiltinID) {
1477 case Builtin::BI__builtin___CFStringMakeConstantString:
1478 assert(TheCall->getNumArgs() == 1 &&(static_cast<void> (0))
1479 "Wrong # arguments to builtin CFStringMakeConstantString")(static_cast<void> (0));
1480 if (CheckObjCString(TheCall->getArg(0)))
1481 return ExprError();
1482 break;
1483 case Builtin::BI__builtin_ms_va_start:
1484 case Builtin::BI__builtin_stdarg_start:
1485 case Builtin::BI__builtin_va_start:
1486 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1487 return ExprError();
1488 break;
1489 case Builtin::BI__va_start: {
1490 switch (Context.getTargetInfo().getTriple().getArch()) {
1491 case llvm::Triple::aarch64:
1492 case llvm::Triple::arm:
1493 case llvm::Triple::thumb:
1494 if (SemaBuiltinVAStartARMMicrosoft(TheCall))
1495 return ExprError();
1496 break;
1497 default:
1498 if (SemaBuiltinVAStart(BuiltinID, TheCall))
1499 return ExprError();
1500 break;
1501 }
1502 break;
1503 }
1504
1505 // The acquire, release, and no fence variants are ARM and AArch64 only.
1506 case Builtin::BI_interlockedbittestandset_acq:
1507 case Builtin::BI_interlockedbittestandset_rel:
1508 case Builtin::BI_interlockedbittestandset_nf:
1509 case Builtin::BI_interlockedbittestandreset_acq:
1510 case Builtin::BI_interlockedbittestandreset_rel:
1511 case Builtin::BI_interlockedbittestandreset_nf:
1512 if (CheckBuiltinTargetSupport(
1513 *this, BuiltinID, TheCall,
1514 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
1515 return ExprError();
1516 break;
1517
1518 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
1519 case Builtin::BI_bittest64:
1520 case Builtin::BI_bittestandcomplement64:
1521 case Builtin::BI_bittestandreset64:
1522 case Builtin::BI_bittestandset64:
1523 case Builtin::BI_interlockedbittestandreset64:
1524 case Builtin::BI_interlockedbittestandset64:
1525 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
1526 {llvm::Triple::x86_64, llvm::Triple::arm,
1527 llvm::Triple::thumb, llvm::Triple::aarch64}))
1528 return ExprError();
1529 break;
1530
1531 case Builtin::BI__builtin_isgreater:
1532 case Builtin::BI__builtin_isgreaterequal:
1533 case Builtin::BI__builtin_isless:
1534 case Builtin::BI__builtin_islessequal:
1535 case Builtin::BI__builtin_islessgreater:
1536 case Builtin::BI__builtin_isunordered:
1537 if (SemaBuiltinUnorderedCompare(TheCall))
1538 return ExprError();
1539 break;
1540 case Builtin::BI__builtin_fpclassify:
1541 if (SemaBuiltinFPClassification(TheCall, 6))
1542 return ExprError();
1543 break;
1544 case Builtin::BI__builtin_isfinite:
1545 case Builtin::BI__builtin_isinf:
1546 case Builtin::BI__builtin_isinf_sign:
1547 case Builtin::BI__builtin_isnan:
1548 case Builtin::BI__builtin_isnormal:
1549 case Builtin::BI__builtin_signbit:
1550 case Builtin::BI__builtin_signbitf:
1551 case Builtin::BI__builtin_signbitl:
1552 if (SemaBuiltinFPClassification(TheCall, 1))
1553 return ExprError();
1554 break;
1555 case Builtin::BI__builtin_shufflevector:
1556 return SemaBuiltinShuffleVector(TheCall);
1557 // TheCall will be freed by the smart pointer here, but that's fine, since
1558 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1559 case Builtin::BI__builtin_prefetch:
1560 if (SemaBuiltinPrefetch(TheCall))
1561 return ExprError();
1562 break;
1563 case Builtin::BI__builtin_alloca_with_align:
1564 if (SemaBuiltinAllocaWithAlign(TheCall))
1565 return ExprError();
1566 LLVM_FALLTHROUGH[[gnu::fallthrough]];
1567 case Builtin::BI__builtin_alloca:
1568 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
1569 << TheCall->getDirectCallee();
1570 break;
1571 case Builtin::BI__arithmetic_fence:
1572 if (SemaBuiltinArithmeticFence(TheCall))
1573 return ExprError();
1574 break;
1575 case Builtin::BI__assume:
1576 case Builtin::BI__builtin_assume:
1577 if (SemaBuiltinAssume(TheCall))
1578 return ExprError();
1579 break;
1580 case Builtin::BI__builtin_assume_aligned:
1581 if (SemaBuiltinAssumeAligned(TheCall))
1582 return ExprError();
1583 break;
1584 case Builtin::BI__builtin_dynamic_object_size:
1585 case Builtin::BI__builtin_object_size:
1586 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
1587 return ExprError();
1588 break;
1589 case Builtin::BI__builtin_longjmp:
1590 if (SemaBuiltinLongjmp(TheCall))
1591 return ExprError();
1592 break;
1593 case Builtin::BI__builtin_setjmp:
1594 if (SemaBuiltinSetjmp(TheCall))
1595 return ExprError();
1596 break;
1597 case Builtin::BI__builtin_classify_type:
1598 if (checkArgCount(*this, TheCall, 1)) return true;
1599 TheCall->setType(Context.IntTy);
1600 break;
1601 case Builtin::BI__builtin_complex:
1602 if (SemaBuiltinComplex(TheCall))
1603 return ExprError();
1604 break;
1605 case Builtin::BI__builtin_constant_p: {
1606 if (checkArgCount(*this, TheCall, 1)) return true;
1607 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
1608 if (Arg.isInvalid()) return true;
1609 TheCall->setArg(0, Arg.get());
1610 TheCall->setType(Context.IntTy);
1611 break;
1612 }
1613 case Builtin::BI__builtin_launder:
1614 return SemaBuiltinLaunder(*this, TheCall);
1615 case Builtin::BI__sync_fetch_and_add:
1616 case Builtin::BI__sync_fetch_and_add_1:
1617 case Builtin::BI__sync_fetch_and_add_2:
1618 case Builtin::BI__sync_fetch_and_add_4:
1619 case Builtin::BI__sync_fetch_and_add_8:
1620 case Builtin::BI__sync_fetch_and_add_16:
1621 case Builtin::BI__sync_fetch_and_sub:
1622 case Builtin::BI__sync_fetch_and_sub_1:
1623 case Builtin::BI__sync_fetch_and_sub_2:
1624 case Builtin::BI__sync_fetch_and_sub_4:
1625 case Builtin::BI__sync_fetch_and_sub_8:
1626 case Builtin::BI__sync_fetch_and_sub_16:
1627 case Builtin::BI__sync_fetch_and_or:
1628 case Builtin::BI__sync_fetch_and_or_1:
1629 case Builtin::BI__sync_fetch_and_or_2:
1630 case Builtin::BI__sync_fetch_and_or_4:
1631 case Builtin::BI__sync_fetch_and_or_8:
1632 case Builtin::BI__sync_fetch_and_or_16:
1633 case Builtin::BI__sync_fetch_and_and:
1634 case Builtin::BI__sync_fetch_and_and_1:
1635 case Builtin::BI__sync_fetch_and_and_2:
1636 case Builtin::BI__sync_fetch_and_and_4:
1637 case Builtin::BI__sync_fetch_and_and_8:
1638 case Builtin::BI__sync_fetch_and_and_16:
1639 case Builtin::BI__sync_fetch_and_xor:
1640 case Builtin::BI__sync_fetch_and_xor_1:
1641 case Builtin::BI__sync_fetch_and_xor_2:
1642 case Builtin::BI__sync_fetch_and_xor_4:
1643 case Builtin::BI__sync_fetch_and_xor_8:
1644 case Builtin::BI__sync_fetch_and_xor_16:
1645 case Builtin::BI__sync_fetch_and_nand:
1646 case Builtin::BI__sync_fetch_and_nand_1:
1647 case Builtin::BI__sync_fetch_and_nand_2:
1648 case Builtin::BI__sync_fetch_and_nand_4:
1649 case Builtin::BI__sync_fetch_and_nand_8:
1650 case Builtin::BI__sync_fetch_and_nand_16:
1651 case Builtin::BI__sync_add_and_fetch:
1652 case Builtin::BI__sync_add_and_fetch_1:
1653 case Builtin::BI__sync_add_and_fetch_2:
1654 case Builtin::BI__sync_add_and_fetch_4:
1655 case Builtin::BI__sync_add_and_fetch_8:
1656 case Builtin::BI__sync_add_and_fetch_16:
1657 case Builtin::BI__sync_sub_and_fetch:
1658 case Builtin::BI__sync_sub_and_fetch_1:
1659 case Builtin::BI__sync_sub_and_fetch_2:
1660 case Builtin::BI__sync_sub_and_fetch_4:
1661 case Builtin::BI__sync_sub_and_fetch_8:
1662 case Builtin::BI__sync_sub_and_fetch_16:
1663 case Builtin::BI__sync_and_and_fetch:
1664 case Builtin::BI__sync_and_and_fetch_1:
1665 case Builtin::BI__sync_and_and_fetch_2:
1666 case Builtin::BI__sync_and_and_fetch_4:
1667 case Builtin::BI__sync_and_and_fetch_8:
1668 case Builtin::BI__sync_and_and_fetch_16:
1669 case Builtin::BI__sync_or_and_fetch:
1670 case Builtin::BI__sync_or_and_fetch_1:
1671 case Builtin::BI__sync_or_and_fetch_2:
1672 case Builtin::BI__sync_or_and_fetch_4:
1673 case Builtin::BI__sync_or_and_fetch_8:
1674 case Builtin::BI__sync_or_and_fetch_16:
1675 case Builtin::BI__sync_xor_and_fetch:
1676 case Builtin::BI__sync_xor_and_fetch_1:
1677 case Builtin::BI__sync_xor_and_fetch_2:
1678 case Builtin::BI__sync_xor_and_fetch_4:
1679 case Builtin::BI__sync_xor_and_fetch_8:
1680 case Builtin::BI__sync_xor_and_fetch_16:
1681 case Builtin::BI__sync_nand_and_fetch:
1682 case Builtin::BI__sync_nand_and_fetch_1:
1683 case Builtin::BI__sync_nand_and_fetch_2:
1684 case Builtin::BI__sync_nand_and_fetch_4:
1685 case Builtin::BI__sync_nand_and_fetch_8:
1686 case Builtin::BI__sync_nand_and_fetch_16:
1687 case Builtin::BI__sync_val_compare_and_swap:
1688 case Builtin::BI__sync_val_compare_and_swap_1:
1689 case Builtin::BI__sync_val_compare_and_swap_2:
1690 case Builtin::BI__sync_val_compare_and_swap_4:
1691 case Builtin::BI__sync_val_compare_and_swap_8:
1692 case Builtin::BI__sync_val_compare_and_swap_16:
1693 case Builtin::BI__sync_bool_compare_and_swap:
1694 case Builtin::BI__sync_bool_compare_and_swap_1:
1695 case Builtin::BI__sync_bool_compare_and_swap_2:
1696 case Builtin::BI__sync_bool_compare_and_swap_4:
1697 case Builtin::BI__sync_bool_compare_and_swap_8:
1698 case Builtin::BI__sync_bool_compare_and_swap_16:
1699 case Builtin::BI__sync_lock_test_and_set:
1700 case Builtin::BI__sync_lock_test_and_set_1:
1701 case Builtin::BI__sync_lock_test_and_set_2:
1702 case Builtin::BI__sync_lock_test_and_set_4:
1703 case Builtin::BI__sync_lock_test_and_set_8:
1704 case Builtin::BI__sync_lock_test_and_set_16:
1705 case Builtin::BI__sync_lock_release:
1706 case Builtin::BI__sync_lock_release_1:
1707 case Builtin::BI__sync_lock_release_2:
1708 case Builtin::BI__sync_lock_release_4:
1709 case Builtin::BI__sync_lock_release_8:
1710 case Builtin::BI__sync_lock_release_16:
1711 case Builtin::BI__sync_swap:
1712 case Builtin::BI__sync_swap_1:
1713 case Builtin::BI__sync_swap_2:
1714 case Builtin::BI__sync_swap_4:
1715 case Builtin::BI__sync_swap_8:
1716 case Builtin::BI__sync_swap_16:
1717 return SemaBuiltinAtomicOverloaded(TheCallResult);
1718 case Builtin::BI__sync_synchronize:
1719 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
1720 << TheCall->getCallee()->getSourceRange();
1721 break;
1722 case Builtin::BI__builtin_nontemporal_load:
1723 case Builtin::BI__builtin_nontemporal_store:
1724 return SemaBuiltinNontemporalOverloaded(TheCallResult);
1725 case Builtin::BI__builtin_memcpy_inline: {
1726 clang::Expr *SizeOp = TheCall->getArg(2);
1727 // We warn about copying to or from `nullptr` pointers when `size` is
1728 // greater than 0. When `size` is value dependent we cannot evaluate its
1729 // value so we bail out.
1730 if (SizeOp->isValueDependent())
1731 break;
1732 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) {
1733 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
1734 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
1735 }
1736 break;
1737 }
1738#define BUILTIN(ID, TYPE, ATTRS)
1739#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
1740 case Builtin::BI##ID: \
1741 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
1742#include "clang/Basic/Builtins.def"
1743 case Builtin::BI__annotation:
1744 if (SemaBuiltinMSVCAnnotation(*this, TheCall))
1745 return ExprError();
1746 break;
1747 case Builtin::BI__builtin_annotation:
1748 if (SemaBuiltinAnnotation(*this, TheCall))
1749 return ExprError();
1750 break;
1751 case Builtin::BI__builtin_addressof:
1752 if (SemaBuiltinAddressof(*this, TheCall))
1753 return ExprError();
1754 break;
1755 case Builtin::BI__builtin_is_aligned:
1756 case Builtin::BI__builtin_align_up:
1757 case Builtin::BI__builtin_align_down:
1758 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
1759 return ExprError();
1760 break;
1761 case Builtin::BI__builtin_add_overflow:
1762 case Builtin::BI__builtin_sub_overflow:
1763 case Builtin::BI__builtin_mul_overflow:
1764 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
1765 return ExprError();
1766 break;
1767 case Builtin::BI__builtin_operator_new:
1768 case Builtin::BI__builtin_operator_delete: {
1769 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
1770 ExprResult Res =
1771 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
1772 if (Res.isInvalid())
1773 CorrectDelayedTyposInExpr(TheCallResult.get());
1774 return Res;
1775 }
1776 case Builtin::BI__builtin_dump_struct: {
1777 // We first want to ensure we are called with 2 arguments
1778 if (checkArgCount(*this, TheCall, 2))
1779 return ExprError();
1780 // Ensure that the first argument is of type 'struct XX *'
1781 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
1782 const QualType PtrArgType = PtrArg->getType();
1783 if (!PtrArgType->isPointerType() ||
1784 !PtrArgType->getPointeeType()->isRecordType()) {
1785 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1786 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
1787 << "structure pointer";
1788 return ExprError();
1789 }
1790
1791 // Ensure that the second argument is of type 'FunctionType'
1792 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
1793 const QualType FnPtrArgType = FnPtrArg->getType();
1794 if (!FnPtrArgType->isPointerType()) {
1795 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1796 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1797 << FnPtrArgType << "'int (*)(const char *, ...)'";
1798 return ExprError();
1799 }
1800
1801 const auto *FuncType =
1802 FnPtrArgType->getPointeeType()->getAs<FunctionType>();
1803
1804 if (!FuncType) {
1805 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1806 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
1807 << FnPtrArgType << "'int (*)(const char *, ...)'";
1808 return ExprError();
1809 }
1810
1811 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
1812 if (!FT->getNumParams()) {
1813 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1814 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1815 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1816 return ExprError();
1817 }
1818 QualType PT = FT->getParamType(0);
1819 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1820 !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1821 !PT->getPointeeType().isConstQualified()) {
1822 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1823 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1824 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1825 return ExprError();
1826 }
1827 }
1828
1829 TheCall->setType(Context.IntTy);
1830 break;
1831 }
1832 case Builtin::BI__builtin_expect_with_probability: {
1833 // We first want to ensure we are called with 3 arguments
1834 if (checkArgCount(*this, TheCall, 3))
1835 return ExprError();
1836 // then check probability is constant float in range [0.0, 1.0]
1837 const Expr *ProbArg = TheCall->getArg(2);
1838 SmallVector<PartialDiagnosticAt, 8> Notes;
1839 Expr::EvalResult Eval;
1840 Eval.Diag = &Notes;
1841 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
1842 !Eval.Val.isFloat()) {
1843 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
1844 << ProbArg->getSourceRange();
1845 for (const PartialDiagnosticAt &PDiag : Notes)
1846 Diag(PDiag.first, PDiag.second);
1847 return ExprError();
1848 }
1849 llvm::APFloat Probability = Eval.Val.getFloat();
1850 bool LoseInfo = false;
1851 Probability.convert(llvm::APFloat::IEEEdouble(),
1852 llvm::RoundingMode::Dynamic, &LoseInfo);
1853 if (!(Probability >= llvm::APFloat(0.0) &&
1854 Probability <= llvm::APFloat(1.0))) {
1855 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
1856 << ProbArg->getSourceRange();
1857 return ExprError();
1858 }
1859 break;
1860 }
1861 case Builtin::BI__builtin_preserve_access_index:
1862 if (SemaBuiltinPreserveAI(*this, TheCall))
1863 return ExprError();
1864 break;
1865 case Builtin::BI__builtin_call_with_static_chain:
1866 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1867 return ExprError();
1868 break;
1869 case Builtin::BI__exception_code:
1870 case Builtin::BI_exception_code:
1871 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1872 diag::err_seh___except_block))
1873 return ExprError();
1874 break;
1875 case Builtin::BI__exception_info:
1876 case Builtin::BI_exception_info:
1877 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1878 diag::err_seh___except_filter))
1879 return ExprError();
1880 break;
1881 case Builtin::BI__GetExceptionInfo:
1882 if (checkArgCount(*this, TheCall, 1))
1883 return ExprError();
1884
1885 if (CheckCXXThrowOperand(
1886 TheCall->getBeginLoc(),
1887 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1888 TheCall))
1889 return ExprError();
1890
1891 TheCall->setType(Context.VoidPtrTy);
1892 break;
1893 // OpenCL v2.0, s6.13.16 - Pipe functions
1894 case Builtin::BIread_pipe:
1895 case Builtin::BIwrite_pipe:
1896 // Since those two functions are declared with var args, we need a semantic
1897 // check for the argument.
1898 if (SemaBuiltinRWPipe(*this, TheCall))
1899 return ExprError();
1900 break;
1901 case Builtin::BIreserve_read_pipe:
1902 case Builtin::BIreserve_write_pipe:
1903 case Builtin::BIwork_group_reserve_read_pipe:
1904 case Builtin::BIwork_group_reserve_write_pipe:
1905 if (SemaBuiltinReserveRWPipe(*this, TheCall))
1906 return ExprError();
1907 break;
1908 case Builtin::BIsub_group_reserve_read_pipe:
1909 case Builtin::BIsub_group_reserve_write_pipe:
1910 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1911 SemaBuiltinReserveRWPipe(*this, TheCall))
1912 return ExprError();
1913 break;
1914 case Builtin::BIcommit_read_pipe:
1915 case Builtin::BIcommit_write_pipe:
1916 case Builtin::BIwork_group_commit_read_pipe:
1917 case Builtin::BIwork_group_commit_write_pipe:
1918 if (SemaBuiltinCommitRWPipe(*this, TheCall))
1919 return ExprError();
1920 break;
1921 case Builtin::BIsub_group_commit_read_pipe:
1922 case Builtin::BIsub_group_commit_write_pipe:
1923 if (checkOpenCLSubgroupExt(*this, TheCall) ||
1924 SemaBuiltinCommitRWPipe(*this, TheCall))
1925 return ExprError();
1926 break;
1927 case Builtin::BIget_pipe_num_packets:
1928 case Builtin::BIget_pipe_max_packets:
1929 if (SemaBuiltinPipePackets(*this, TheCall))
1930 return ExprError();
1931 break;
1932 case Builtin::BIto_global:
1933 case Builtin::BIto_local:
1934 case Builtin::BIto_private:
1935 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
1936 return ExprError();
1937 break;
1938 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
1939 case Builtin::BIenqueue_kernel:
1940 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
1941 return ExprError();
1942 break;
1943 case Builtin::BIget_kernel_work_group_size:
1944 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1945 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
1946 return ExprError();
1947 break;
1948 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1949 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1950 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
1951 return ExprError();
1952 break;
1953 case Builtin::BI__builtin_os_log_format:
1954 Cleanup.setExprNeedsCleanups(true);
1955 LLVM_FALLTHROUGH[[gnu::fallthrough]];
1956 case Builtin::BI__builtin_os_log_format_buffer_size:
1957 if (SemaBuiltinOSLogFormat(TheCall))
1958 return ExprError();
1959 break;
1960 case Builtin::BI__builtin_frame_address:
1961 case Builtin::BI__builtin_return_address: {
1962 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
1963 return ExprError();
1964
1965 // -Wframe-address warning if non-zero passed to builtin
1966 // return/frame address.
1967 Expr::EvalResult Result;
1968 if (!TheCall->getArg(0)->isValueDependent() &&
1969 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
1970 Result.Val.getInt() != 0)
1971 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
1972 << ((BuiltinID == Builtin::BI__builtin_return_address)
1973 ? "__builtin_return_address"
1974 : "__builtin_frame_address")
1975 << TheCall->getSourceRange();
1976 break;
1977 }
1978
1979 case Builtin::BI__builtin_matrix_transpose:
1980 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
1981
1982 case Builtin::BI__builtin_matrix_column_major_load:
1983 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);
1984
1985 case Builtin::BI__builtin_matrix_column_major_store:
1986 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
1987
1988 case Builtin::BI__builtin_get_device_side_mangled_name: {
1989 auto Check = [](CallExpr *TheCall) {
1990 if (TheCall->getNumArgs() != 1)
1991 return false;
1992 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
1993 if (!DRE)
1994 return false;
1995 auto *D = DRE->getDecl();
1996 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
1997 return false;
1998 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
1999 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
2000 };
2001 if (!Check(TheCall)) {
2002 Diag(TheCall->getBeginLoc(),
2003 diag::err_hip_invalid_args_builtin_mangled_name);
2004 return ExprError();
2005 }
2006 }
2007 }
2008
2009 // Since the target specific builtins for each arch overlap, only check those
2010 // of the arch we are compiling for.
2011 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
2012 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
2013 assert(Context.getAuxTargetInfo() &&(static_cast<void> (0))
2014 "Aux Target Builtin, but not an aux target?")(static_cast<void> (0));
2015
2016 if (CheckTSBuiltinFunctionCall(
2017 *Context.getAuxTargetInfo(),
2018 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2019 return ExprError();
2020 } else {
2021 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2022 TheCall))
2023 return ExprError();
2024 }
2025 }
2026
2027 return TheCallResult;
2028}
2029
2030// Get the valid immediate range for the specified NEON type code.
2031static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2032 NeonTypeFlags Type(t);
2033 int IsQuad = ForceQuad ? true : Type.isQuad();
2034 switch (Type.getEltType()) {
2035 case NeonTypeFlags::Int8:
2036 case NeonTypeFlags::Poly8:
2037 return shift ? 7 : (8 << IsQuad) - 1;
2038 case NeonTypeFlags::Int16:
2039 case NeonTypeFlags::Poly16:
2040 return shift ? 15 : (4 << IsQuad) - 1;
2041 case NeonTypeFlags::Int32:
2042 return shift ? 31 : (2 << IsQuad) - 1;
2043 case NeonTypeFlags::Int64:
2044 case NeonTypeFlags::Poly64:
2045 return shift ? 63 : (1 << IsQuad) - 1;
2046 case NeonTypeFlags::Poly128:
2047 return shift ? 127 : (1 << IsQuad) - 1;
2048 case NeonTypeFlags::Float16:
2049 assert(!shift && "cannot shift float types!")(static_cast<void> (0));
2050 return (4 << IsQuad) - 1;
2051 case NeonTypeFlags::Float32:
2052 assert(!shift && "cannot shift float types!")(static_cast<void> (0));
2053 return (2 << IsQuad) - 1;
2054 case NeonTypeFlags::Float64:
2055 assert(!shift && "cannot shift float types!")(static_cast<void> (0));
2056 return (1 << IsQuad) - 1;
2057 case NeonTypeFlags::BFloat16:
2058 assert(!shift && "cannot shift float types!")(static_cast<void> (0));
2059 return (4 << IsQuad) - 1;
2060 }
2061 llvm_unreachable("Invalid NeonTypeFlag!")__builtin_unreachable();
2062}
2063
2064/// getNeonEltType - Return the QualType corresponding to the elements of
2065/// the vector type specified by the NeonTypeFlags. This is used to check
2066/// the pointer arguments for Neon load/store intrinsics.
2067static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
2068 bool IsPolyUnsigned, bool IsInt64Long) {
2069 switch (Flags.getEltType()) {
2070 case NeonTypeFlags::Int8:
2071 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
2072 case NeonTypeFlags::Int16:
2073 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
2074 case NeonTypeFlags::Int32:
2075 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
2076 case NeonTypeFlags::Int64:
2077 if (IsInt64Long)
2078 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
2079 else
2080 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
2081 : Context.LongLongTy;
2082 case NeonTypeFlags::Poly8:
2083 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
2084 case NeonTypeFlags::Poly16:
2085 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
2086 case NeonTypeFlags::Poly64:
2087 if (IsInt64Long)
2088 return Context.UnsignedLongTy;
2089 else
2090 return Context.UnsignedLongLongTy;
2091 case NeonTypeFlags::Poly128:
2092 break;
2093 case NeonTypeFlags::Float16:
2094 return Context.HalfTy;
2095 case NeonTypeFlags::Float32:
2096 return Context.FloatTy;
2097 case NeonTypeFlags::Float64:
2098 return Context.DoubleTy;
2099 case NeonTypeFlags::BFloat16:
2100 return Context.BFloat16Ty;
2101 }
2102 llvm_unreachable("Invalid NeonTypeFlag!")__builtin_unreachable();
2103}
2104
2105bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2106 // Range check SVE intrinsics that take immediate values.
2107 SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
2108
2109 switch (BuiltinID) {
2110 default:
2111 return false;
2112#define GET_SVE_IMMEDIATE_CHECK
2113#include "clang/Basic/arm_sve_sema_rangechecks.inc"
2114#undef GET_SVE_IMMEDIATE_CHECK
2115 }
2116
2117 // Perform all the immediate checks for this builtin call.
2118 bool HasError = false;
2119 for (auto &I : ImmChecks) {
2120 int ArgNum, CheckTy, ElementSizeInBits;
2121 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
2122
2123 typedef bool(*OptionSetCheckFnTy)(int64_t Value);
2124
2125 // Function that checks whether the operand (ArgNum) is an immediate
2126 // that is one of the predefined values.
2127 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
2128 int ErrDiag) -> bool {
2129 // We can't check the value of a dependent argument.
2130 Expr *Arg = TheCall->getArg(ArgNum);
2131 if (Arg->isTypeDependent() || Arg->isValueDependent())
2132 return false;
2133
2134 // Check constant-ness first.
2135 llvm::APSInt Imm;
2136 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
2137 return true;
2138
2139 if (!CheckImm(Imm.getSExtValue()))
2140 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
2141 return false;
2142 };
2143
2144 switch ((SVETypeFlags::ImmCheckType)CheckTy) {
2145 case SVETypeFlags::ImmCheck0_31:
2146 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
2147 HasError = true;
2148 break;
2149 case SVETypeFlags::ImmCheck0_13:
2150 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
2151 HasError = true;
2152 break;
2153 case SVETypeFlags::ImmCheck1_16:
2154 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
2155 HasError = true;
2156 break;
2157 case SVETypeFlags::ImmCheck0_7:
2158 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
2159 HasError = true;
2160 break;
2161 case SVETypeFlags::ImmCheckExtract:
2162 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2163 (2048 / ElementSizeInBits) - 1))
2164 HasError = true;
2165 break;
2166 case SVETypeFlags::ImmCheckShiftRight:
2167 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
2168 HasError = true;
2169 break;
2170 case SVETypeFlags::ImmCheckShiftRightNarrow:
2171 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
2172 ElementSizeInBits / 2))
2173 HasError = true;
2174 break;
2175 case SVETypeFlags::ImmCheckShiftLeft:
2176 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2177 ElementSizeInBits - 1))
2178 HasError = true;
2179 break;
2180 case SVETypeFlags::ImmCheckLaneIndex:
2181 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2182 (128 / (1 * ElementSizeInBits)) - 1))
2183 HasError = true;
2184 break;
2185 case SVETypeFlags::ImmCheckLaneIndexCompRotate:
2186 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2187 (128 / (2 * ElementSizeInBits)) - 1))
2188 HasError = true;
2189 break;
2190 case SVETypeFlags::ImmCheckLaneIndexDot:
2191 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
2192 (128 / (4 * ElementSizeInBits)) - 1))
2193 HasError = true;
2194 break;
2195 case SVETypeFlags::ImmCheckComplexRot90_270:
2196 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
2197 diag::err_rotation_argument_to_cadd))
2198 HasError = true;
2199 break;
2200 case SVETypeFlags::ImmCheckComplexRotAll90:
2201 if (CheckImmediateInSet(
2202 [](int64_t V) {
2203 return V == 0 || V == 90 || V == 180 || V == 270;
2204 },
2205 diag::err_rotation_argument_to_cmla))
2206 HasError = true;
2207 break;
2208 case SVETypeFlags::ImmCheck0_1:
2209 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
2210 HasError = true;
2211 break;
2212 case SVETypeFlags::ImmCheck0_2:
2213 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
2214 HasError = true;
2215 break;
2216 case SVETypeFlags::ImmCheck0_3:
2217 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
2218 HasError = true;
2219 break;
2220 }
2221 }
2222
2223 return HasError;
2224}
2225
2226bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
2227 unsigned BuiltinID, CallExpr *TheCall) {
2228 llvm::APSInt Result;
2229 uint64_t mask = 0;
2230 unsigned TV = 0;
2231 int PtrArgNum = -1;
2232 bool HasConstPtr = false;
2233 switch (BuiltinID) {
2234#define GET_NEON_OVERLOAD_CHECK
2235#include "clang/Basic/arm_neon.inc"
2236#include "clang/Basic/arm_fp16.inc"
2237#undef GET_NEON_OVERLOAD_CHECK
2238 }
2239
2240 // For NEON intrinsics which are overloaded on vector element type, validate
2241 // the immediate which specifies which variant to emit.
2242 unsigned ImmArg = TheCall->getNumArgs()-1;
2243 if (mask) {
2244 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
2245 return true;
2246
2247 TV = Result.getLimitedValue(64);
2248 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
2249 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
2250 << TheCall->getArg(ImmArg)->getSourceRange();
2251 }
2252
2253 if (PtrArgNum >= 0) {
2254 // Check that pointer arguments have the specified type.
2255 Expr *Arg = TheCall->getArg(PtrArgNum);
2256 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
2257 Arg = ICE->getSubExpr();
2258 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
2259 QualType RHSTy = RHS.get()->getType();
2260
2261 llvm::Triple::ArchType Arch = TI.getTriple().getArch();
2262 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
2263 Arch == llvm::Triple::aarch64_32 ||
2264 Arch == llvm::Triple::aarch64_be;
2265 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
2266 QualType EltTy =
2267 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
2268 if (HasConstPtr)
2269 EltTy = EltTy.withConst();
2270 QualType LHSTy = Context.getPointerType(EltTy);
2271 AssignConvertType ConvTy;
2272 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
2273 if (RHS.isInvalid())
2274 return true;
2275 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
2276 RHS.get(), AA_Assigning))
2277 return true;
2278 }
2279
2280 // For NEON intrinsics which take an immediate value as part of the
2281 // instruction, range check them here.
2282 unsigned i = 0, l = 0, u = 0;
2283 switch (BuiltinID) {
2284 default:
2285 return false;
2286 #define GET_NEON_IMMEDIATE_CHECK
2287 #include "clang/Basic/arm_neon.inc"
2288 #include "clang/Basic/arm_fp16.inc"
2289 #undef GET_NEON_IMMEDIATE_CHECK
2290 }
2291
2292 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
2293}
2294
2295bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
2296 switch (BuiltinID) {
2297 default:
2298 return false;
2299 #include "clang/Basic/arm_mve_builtin_sema.inc"
2300 }
2301}
2302
2303bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2304 CallExpr *TheCall) {
2305 bool Err = false;
2306 switch (BuiltinID) {
2307 default:
2308 return false;
2309#include "clang/Basic/arm_cde_builtin_sema.inc"
2310 }
2311
2312 if (Err)
2313 return true;
2314
2315 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
2316}
2317
2318bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
2319 const Expr *CoprocArg, bool WantCDE) {
2320 if (isConstantEvaluated())
2321 return false;
2322
2323 // We can't check the value of a dependent argument.
2324 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
2325 return false;
2326
2327 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
2328 int64_t CoprocNo = CoprocNoAP.getExtValue();
2329 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative")(static_cast<void> (0));
2330
2331 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
2332 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
2333
2334 if (IsCDECoproc != WantCDE)
2335 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
2336 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
2337
2338 return false;
2339}
2340
2341bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
2342 unsigned MaxWidth) {
2343 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||(static_cast<void> (0))
2344 BuiltinID == ARM::BI__builtin_arm_ldaex ||(static_cast<void> (0))
2345 BuiltinID == ARM::BI__builtin_arm_strex ||(static_cast<void> (0))
2346 BuiltinID == ARM::BI__builtin_arm_stlex ||(static_cast<void> (0))
2347 BuiltinID == AArch64::BI__builtin_arm_ldrex ||(static_cast<void> (0))
2348 BuiltinID == AArch64::BI__builtin_arm_ldaex ||(static_cast<void> (0))
2349 BuiltinID == AArch64::BI__builtin_arm_strex ||(static_cast<void> (0))
2350 BuiltinID == AArch64::BI__builtin_arm_stlex) &&(static_cast<void> (0))
2351 "unexpected ARM builtin")(static_cast<void> (0));
2352 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
2353 BuiltinID == ARM::BI__builtin_arm_ldaex ||
2354 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
2355 BuiltinID == AArch64::BI__builtin_arm_ldaex;
2356
2357 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
2358
2359 // Ensure that we have the proper number of arguments.
2360 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
2361 return true;
2362
2363 // Inspect the pointer argument of the atomic builtin. This should always be
2364 // a pointer type, whose element is an integral scalar or pointer type.
2365 // Because it is a pointer type, we don't have to worry about any implicit
2366 // casts here.
2367 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
2368 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
2369 if (PointerArgRes.isInvalid())
2370 return true;
2371 PointerArg = PointerArgRes.get();
2372
2373 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
2374 if (!pointerType) {
2375 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
2376 << PointerArg->getType() << PointerArg->getSourceRange();
2377 return true;
2378 }
2379
2380 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
2381 // task is to insert the appropriate casts into the AST. First work out just
2382 // what the appropriate type is.
2383 QualType ValType = pointerType->getPointeeType();
2384 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
2385 if (IsLdrex)
2386 AddrType.addConst();
2387
2388 // Issue a warning if the cast is dodgy.
2389 CastKind CastNeeded = CK_NoOp;
2390 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
2391 CastNeeded = CK_BitCast;
2392 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
2393 << PointerArg->getType() << Context.getPointerType(AddrType)
2394 << AA_Passing << PointerArg->getSourceRange();
2395 }
2396
2397 // Finally, do the cast and replace the argument with the corrected version.
2398 AddrType = Context.getPointerType(AddrType);
2399 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
2400 if (PointerArgRes.isInvalid())
2401 return true;
2402 PointerArg = PointerArgRes.get();
2403
2404 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
2405
2406 // In general, we allow ints, floats and pointers to be loaded and stored.
2407 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
2408 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
2409 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
2410 << PointerArg->getType() << PointerArg->getSourceRange();
2411 return true;
2412 }
2413
2414 // But ARM doesn't have instructions to deal with 128-bit versions.
2415 if (Context.getTypeSize(ValType) > MaxWidth) {
2416 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate")(static_cast<void> (0));
2417 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
2418 << PointerArg->getType() << PointerArg->getSourceRange();
2419 return true;
2420 }
2421
2422 switch (ValType.getObjCLifetime()) {
2423 case Qualifiers::OCL_None:
2424 case Qualifiers::OCL_ExplicitNone:
2425 // okay
2426 break;
2427
2428 case Qualifiers::OCL_Weak:
2429 case Qualifiers::OCL_Strong:
2430 case Qualifiers::OCL_Autoreleasing:
2431 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
2432 << ValType << PointerArg->getSourceRange();
2433 return true;
2434 }
2435
2436 if (IsLdrex) {
2437 TheCall->setType(ValType);
2438 return false;
2439 }
2440
2441 // Initialize the argument to be stored.
2442 ExprResult ValArg = TheCall->getArg(0);
2443 InitializedEntity Entity = InitializedEntity::InitializeParameter(
2444 Context, ValType, /*consume*/ false);
2445 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
2446 if (ValArg.isInvalid())
2447 return true;
2448 TheCall->setArg(0, ValArg.get());
2449
2450 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
2451 // but the custom checker bypasses all default analysis.
2452 TheCall->setType(Context.IntTy);
2453 return false;
2454}
2455
/// Perform target-specific semantic checking of a call to a 32-bit ARM
/// builtin. Returns true (after emitting a diagnostic) if the call is
/// erroneous; false if it is well-formed.
bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // Exclusive load/store builtins share a checker with AArch64; on 32-bit
  // ARM the widest exclusive access (LDREXD/STREXD) is 64 bits.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  // Prefetch: args 1 and 2 are single-bit immediates (rw and cache-level
  // selectors per the builtin's signature).
  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  // 64-bit system-register access: register name in arg 0, expected to have
  // 3 fields; string-only register names are not accepted here.
  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  // 32-bit system-register access: up to 5 fields in the register name.
  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Delegate NEON / MVE / CDE builtins to their dedicated checkers; any of
  // them reporting an error ends checking immediately.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. The bounds below mirror the immediate encodings
  // of the underlying instructions.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    // Signed saturate: bit position 1..32.
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    // Unsigned saturate: bit position 0..31.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    // Barrier/debug options are 4-bit immediates.
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Coprocessor builtins: arg 0 is a 4-bit coprocessor number, further
    // constrained (reserved coprocessors, CDE ranges) by the helper.
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}
2531
/// Perform target-specific semantic checking of a call to an AArch64
/// builtin. Returns true (after emitting a diagnostic) if the call is
/// erroneous; false if it is well-formed.
bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // Exclusive load/store builtins share a checker with 32-bit ARM; AArch64
  // pair forms (LDXP/STXP) allow accesses up to 128 bits wide.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  // PRFM: four immediate selector operands, each with a small fixed range.
  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  // 64-bit system-register access: register name in arg 0, up to 5 fields.
  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  // 32-bit system-register access, same register-name shape as the 64-bit
  // variants above.
  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  // __getReg takes a general-purpose register number, 0..31.
  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  // Delegate NEON and SVE builtins to their dedicated checkers.
  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  // NOTE: every case above sets l to 0, so 'u + l' is simply the upper
  // bound; the addition only matters if a case with a nonzero lower bound
  // is ever added.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
2598
2599static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
2600 if (Arg->getType()->getAsPlaceholderType())
2601 return false;
2602
2603 // The first argument needs to be a record field access.
2604 // If it is an array element access, we delay decision
2605 // to BPF backend to check whether the access is a
2606 // field access or not.
2607 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
2608 dyn_cast<MemberExpr>(Arg->IgnoreParens()) ||
2609 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()));
2610}
2611
2612static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
2613 QualType VectorTy, QualType EltTy) {
2614 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
2615 if (!Context.hasSameType(VectorEltTy, EltTy)) {
2616 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
2617 << Call->getSourceRange() << VectorEltTy << EltTy;
2618 return false;
2619 }
2620 return true;
2621}
2622
2623static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
2624 QualType ArgType = Arg->getType();
2625 if (ArgType->getAsPlaceholderType())
2626 return false;
2627
2628 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
2629 // format:
2630 // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
2631 // 2. <type> var;
2632 // __builtin_preserve_type_info(var, flag);
2633 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) &&
2634 !dyn_cast<UnaryOperator>(Arg->IgnoreParens()))
2635 return false;
2636
2637 // Typedef type.
2638 if (ArgType->getAs<TypedefType>())
2639 return true;
2640
2641 // Record type or Enum type.
2642 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2643 if (const auto *RT = Ty->getAs<RecordType>()) {
2644 if (!RT->getDecl()->getDeclName().isEmpty())
2645 return true;
2646 } else if (const auto *ET = Ty->getAs<EnumType>()) {
2647 if (!ET->getDecl()->getDeclName().isEmpty())
2648 return true;
2649 }
2650
2651 return false;
2652}
2653
2654static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
2655 QualType ArgType = Arg->getType();
2656 if (ArgType->getAsPlaceholderType())
2657 return false;
2658
2659 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
2660 // format:
2661 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
2662 // flag);
2663 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
2664 if (!UO)
2665 return false;
2666
2667 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
2668 if (!CE)
2669 return false;
2670 if (CE->getCastKind() != CK_IntegralToPointer &&
2671 CE->getCastKind() != CK_NullToPointer)
2672 return false;
2673
2674 // The integer must be from an EnumConstantDecl.
2675 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
2676 if (!DR)
2677 return false;
2678
2679 const EnumConstantDecl *Enumerator =
2680 dyn_cast<EnumConstantDecl>(DR->getDecl());
2681 if (!Enumerator)
2682 return false;
2683
2684 // The type must be EnumType.
2685 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
2686 const auto *ET = Ty->getAs<EnumType>();
2687 if (!ET)
2688 return false;
2689
2690 // The enum value must be supported.
2691 for (auto *EDI : ET->getDecl()->enumerators()) {
2692 if (EDI == Enumerator)
2693 return true;
2694 }
2695
2696 return false;
2697}
2698
2699bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
2700 CallExpr *TheCall) {
2701 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||(static_cast<void> (0))
2702 BuiltinID == BPF::BI__builtin_btf_type_id ||(static_cast<void> (0))
2703 BuiltinID == BPF::BI__builtin_preserve_type_info ||(static_cast<void> (0))
2704 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&(static_cast<void> (0))
2705 "unexpected BPF builtin")(static_cast<void> (0));
2706
2707 if (checkArgCount(*this, TheCall, 2))
2708 return true;
2709
2710 // The second argument needs to be a constant int
2711 Expr *Arg = TheCall->getArg(1);
2712 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
2713 diag::kind kind;
2714 if (!Value) {
2715 if (BuiltinID == BPF::BI__builtin_preserve_field_info)
2716 kind = diag::err_preserve_field_info_not_const;
2717 else if (BuiltinID == BPF::BI__builtin_btf_type_id)
2718 kind = diag::err_btf_type_id_not_const;
2719 else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
2720 kind = diag::err_preserve_type_info_not_const;
2721 else
2722 kind = diag::err_preserve_enum_value_not_const;
2723 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
2724 return true;
2725 }
2726
2727 // The first argument
2728 Arg = TheCall->getArg(0);
2729 bool InvalidArg = false;
2730 bool ReturnUnsignedInt = true;
2731 if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
2732 if (!isValidBPFPreserveFieldInfoArg(Arg)) {
2733 InvalidArg = true;
2734 kind = diag::err_preserve_field_info_not_field;
2735 }
2736 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
2737 if (!isValidBPFPreserveTypeInfoArg(Arg)) {
2738 InvalidArg = true;
2739 kind = diag::err_preserve_type_info_invalid;
2740 }
2741 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
2742 if (!isValidBPFPreserveEnumValueArg(Arg)) {
2743 InvalidArg = true;
2744 kind = diag::err_preserve_enum_value_invalid;
2745 }
2746 ReturnUnsignedInt = false;
2747 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
2748 ReturnUnsignedInt = false;
2749 }
2750
2751 if (InvalidArg) {
2752 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
2753 return true;
2754 }
2755
2756 if (ReturnUnsignedInt)
2757 TheCall->setType(Context.UnsignedIntTy);
2758 else
2759 TheCall->setType(Context.UnsignedLongTy);
2760 return false;
2761}
2762
/// Range-check the immediate operands of a Hexagon builtin against a table
/// of per-builtin immediate descriptions. Returns true (after diagnosing)
/// if any immediate is out of range or not a multiple of its required
/// alignment; false if the builtin is not in the table or all operands pass.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // Description of one immediate operand: which argument it is, whether the
  // immediate is signed, how many bits it encodes, and a log2 scale factor
  // (the operand must then be a multiple of 1 << Align).
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  // A builtin with up to two immediate operands; unused slots are
  // zero-initialized (BitWidth == 0) and skipped below.
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd,       {{ 3, true,  4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw,       {{ 3, true,  4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh,       {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh,      {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_ldb,       {{ 3, true,  4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub,      {{ 3, true,  4, 0 }} },
    { Hexagon::BI__builtin_circ_std,       {{ 3, true,  4, 3 }} },
    { Hexagon::BI__builtin_circ_stw,       {{ 3, true,  4, 2 }} },
    { Hexagon::BI__builtin_circ_sth,       {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi,     {{ 3, true,  4, 1 }} },
    { Hexagon::BI__builtin_circ_stb,       {{ 3, true,  4, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci,  {{ 1, true,  4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci,   {{ 1, true,  4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci,  {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci,   {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci,   {{ 1, true,  4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci,   {{ 1, true,  4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci,  {{ 1, true,  4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci,  {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci,  {{ 1, true,  4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci,  {{ 1, true,  4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci,  {{ 1, true,  4, 3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii,    {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih,        {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril,        {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi,        {{ 0, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi,      {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti,      {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi,     {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti,     {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi,     {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti,     {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi,     {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti,     {{ 1, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri,     {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii,        {{ 2, true,  8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri,    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri,  {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
                                                    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd,  {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
                                                    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu,     {{ 1, false, 5, 0 },
                                                     { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup,    {{ 1, false, 6, 0 },
                                                     { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert,       {{ 2, false, 5, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp,      {{ 2, false, 6, 0 },
                                                     { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi,      {{ 1, true,  6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi,     {{ 1, true,  6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract,      {{ 1, false, 5, 0 },
                                                     { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp,     {{ 1, false, 6, 0 },
                                                     { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli,         {{ 0, true,  6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate,    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
                                                    {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat,   {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
                                                    {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi,    {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi,     {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi,     {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run, so the binary search below is valid.
  static const bool SortOnce =
      (llvm::sort(Infos,
                 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                   return LHS.BuiltinID < RHS.BuiltinID;
                 }),
       true);
  (void)SortOnce;

  // Binary search: partition_point returns the first entry whose ID is not
  // less than BuiltinID; a miss means no immediates to check.
  const BuiltinInfo *F = llvm::partition_point(
      Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    // Derive the inclusive [Min, Max] range from the signedness and width.
    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // Scaled immediates: widen the range by 1 << Align and additionally
      // require the value to be a multiple of that scale. Non-short-circuit
      // '|' is intentional so both diagnostics can fire.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
               SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}
2997
/// Perform target-specific semantic checking for Hexagon builtins. This
/// currently consists entirely of validating immediate operands.
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
3002
3003bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
3004 unsigned BuiltinID, CallExpr *TheCall) {
3005 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
3006 CheckMipsBuiltinArgument(BuiltinID, TheCall);
3007}
3008
3009bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
3010 CallExpr *TheCall) {
3011
3012 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
3013 BuiltinID <= Mips::BI__builtin_mips_lwx) {
3014 if (!TI.hasFeature("dsp"))
3015 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
3016 }
3017
3018 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
3019 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
3020 if (!TI.hasFeature("dspr2"))
3021 return Diag(TheCall->getBeginLoc(),
3022 diag::err_mips_builtin_requires_dspr2);
3023 }
3024
3025 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
3026 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
3027 if (!TI.hasFeature("msa"))
3028 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
3029 }
3030
3031 return false;
3032}
3033
3034// CheckMipsBuiltinArgument - Checks the constant value passed to the
3035// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
3036// ordering for DSP is unspecified. MSA is ordered by the data format used
3037// by the underlying instruction i.e., df/m, df/n and then by size.
3038//
3039// FIXME: The size tests here should instead be tablegen'd along with the
3040// definitions from include/clang/Basic/BuiltinsMips.def.
3041// FIXME: GCC is strict on signedness for some of these intrinsics, we should
3042// be too.
3043bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
3044 unsigned i = 0, l = 0, u = 0, m = 0;
3045 switch (BuiltinID) {
3046 default: return false;
3047 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
3048 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
3049 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
3050 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
3051 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
3052 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
3053 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
3054 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
3055 // df/m field.
3056 // These intrinsics take an unsigned 3 bit immediate.
3057 case Mips::BI__builtin_msa_bclri_b:
3058 case Mips::BI__builtin_msa_bnegi_b:
3059 case Mips::BI__builtin_msa_bseti_b:
3060 case Mips::BI__builtin_msa_sat_s_b:
3061 case Mips::BI__builtin_msa_sat_u_b:
3062 case Mips::BI__builtin_msa_slli_b:
3063 case Mips::BI__builtin_msa_srai_b:
3064 case Mips::BI__builtin_msa_srari_b:
3065 case Mips::BI__builtin_msa_srli_b:
3066 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
3067 case Mips::BI__builtin_msa_binsli_b:
3068 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
3069 // These intrinsics take an unsigned 4 bit immediate.
3070 case Mips::BI__builtin_msa_bclri_h:
3071 case Mips::BI__builtin_msa_bnegi_h:
3072 case Mips::BI__builtin_msa_bseti_h:
3073 case Mips::BI__builtin_msa_sat_s_h:
3074 case Mips::BI__builtin_msa_sat_u_h:
3075 case Mips::BI__builtin_msa_slli_h:
3076 case Mips::BI__builtin_msa_srai_h:
3077 case Mips::BI__builtin_msa_srari_h:
3078 case Mips::BI__builtin_msa_srli_h:
3079 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
3080 case Mips::BI__builtin_msa_binsli_h:
3081 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
3082 // These intrinsics take an unsigned 5 bit immediate.
3083 // The first block of intrinsics actually have an unsigned 5 bit field,
3084 // not a df/n field.
3085 case Mips::BI__builtin_msa_cfcmsa:
3086 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
3087 case Mips::BI__builtin_msa_clei_u_b:
3088 case Mips::BI__builtin_msa_clei_u_h:
3089 case Mips::BI__builtin_msa_clei_u_w:
3090 case Mips::BI__builtin_msa_clei_u_d:
3091 case Mips::BI__builtin_msa_clti_u_b:
3092 case Mips::BI__builtin_msa_clti_u_h:
3093 case Mips::BI__builtin_msa_clti_u_w:
3094 case Mips::BI__builtin_msa_clti_u_d:
3095 case Mips::BI__builtin_msa_maxi_u_b:
3096 case Mips::BI__builtin_msa_maxi_u_h:
3097 case Mips::BI__builtin_msa_maxi_u_w:
3098 case Mips::BI__builtin_msa_maxi_u_d:
3099 case Mips::BI__builtin_msa_mini_u_b:
3100 case Mips::BI__builtin_msa_mini_u_h:
3101 case Mips::BI__builtin_msa_mini_u_w:
3102 case Mips::BI__builtin_msa_mini_u_d:
3103 case Mips::BI__builtin_msa_addvi_b:
3104 case Mips::BI__builtin_msa_addvi_h:
3105 case Mips::BI__builtin_msa_addvi_w:
3106 case Mips::BI__builtin_msa_addvi_d:
3107 case Mips::BI__builtin_msa_bclri_w:
3108 case Mips::BI__builtin_msa_bnegi_w:
3109 case Mips::BI__builtin_msa_bseti_w:
3110 case Mips::BI__builtin_msa_sat_s_w:
3111 case Mips::BI__builtin_msa_sat_u_w:
3112 case Mips::BI__builtin_msa_slli_w:
3113 case Mips::BI__builtin_msa_srai_w:
3114 case Mips::BI__builtin_msa_srari_w:
3115 case Mips::BI__builtin_msa_srli_w:
3116 case Mips::BI__builtin_msa_srlri_w:
3117 case Mips::BI__builtin_msa_subvi_b:
3118 case Mips::BI__builtin_msa_subvi_h:
3119 case Mips::BI__builtin_msa_subvi_w:
3120 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
3121 case Mips::BI__builtin_msa_binsli_w:
3122 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
3123 // These intrinsics take an unsigned 6 bit immediate.
3124 case Mips::BI__builtin_msa_bclri_d:
3125 case Mips::BI__builtin_msa_bnegi_d:
3126 case Mips::BI__builtin_msa_bseti_d:
3127 case Mips::BI__builtin_msa_sat_s_d:
3128 case Mips::BI__builtin_msa_sat_u_d:
3129 case Mips::BI__builtin_msa_slli_d:
3130 case Mips::BI__builtin_msa_srai_d:
3131 case Mips::BI__builtin_msa_srari_d:
3132 case Mips::BI__builtin_msa_srli_d:
3133 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
3134 case Mips::BI__builtin_msa_binsli_d:
3135 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
3136 // These intrinsics take a signed 5 bit immediate.
3137 case Mips::BI__builtin_msa_ceqi_b:
3138 case Mips::BI__builtin_msa_ceqi_h:
3139 case Mips::BI__builtin_msa_ceqi_w:
3140 case Mips::BI__builtin_msa_ceqi_d:
3141 case Mips::BI__builtin_msa_clti_s_b:
3142 case Mips::BI__builtin_msa_clti_s_h:
3143 case Mips::BI__builtin_msa_clti_s_w:
3144 case Mips::BI__builtin_msa_clti_s_d:
3145 case Mips::BI__builtin_msa_clei_s_b:
3146 case Mips::BI__builtin_msa_clei_s_h:
3147 case Mips::BI__builtin_msa_clei_s_w:
3148 case Mips::BI__builtin_msa_clei_s_d:
3149 case Mips::BI__builtin_msa_maxi_s_b:
3150 case Mips::BI__builtin_msa_maxi_s_h:
3151 case Mips::BI__builtin_msa_maxi_s_w:
3152 case Mips::BI__builtin_msa_maxi_s_d:
3153 case Mips::BI__builtin_msa_mini_s_b:
3154 case Mips::BI__builtin_msa_mini_s_h:
3155 case Mips::BI__builtin_msa_mini_s_w:
3156 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
3157 // These intrinsics take an unsigned 8 bit immediate.
3158 case Mips::BI__builtin_msa_andi_b:
3159 case Mips::BI__builtin_msa_nori_b:
3160 case Mips::BI__builtin_msa_ori_b:
3161 case Mips::BI__builtin_msa_shf_b:
3162 case Mips::BI__builtin_msa_shf_h:
3163 case Mips::BI__builtin_msa_shf_w:
3164 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
3165 case Mips::BI__builtin_msa_bseli_b:
3166 case Mips::BI__builtin_msa_bmnzi_b:
3167 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
3168 // df/n format
3169 // These intrinsics take an unsigned 4 bit immediate.
3170 case Mips::BI__builtin_msa_copy_s_b:
3171 case Mips::BI__builtin_msa_copy_u_b:
3172 case Mips::BI__builtin_msa_insve_b:
3173 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
3174 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
3175 // These intrinsics take an unsigned 3 bit immediate.
3176 case Mips::BI__builtin_msa_copy_s_h:
3177 case Mips::BI__builtin_msa_copy_u_h:
3178 case Mips::BI__builtin_msa_insve_h:
3179 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
3180 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
3181 // These intrinsics take an unsigned 2 bit immediate.
3182 case Mips::BI__builtin_msa_copy_s_w:
3183 case Mips::BI__builtin_msa_copy_u_w:
3184 case Mips::BI__builtin_msa_insve_w:
3185 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
3186 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
3187 // These intrinsics take an unsigned 1 bit immediate.
3188 case Mips::BI__builtin_msa_copy_s_d:
3189 case Mips::BI__builtin_msa_copy_u_d:
3190 case Mips::BI__builtin_msa_insve_d:
3191 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
3192 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
3193 // Memory offsets and immediate loads.
3194 // These intrinsics take a signed 10 bit immediate.
3195 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
3196 case Mips::BI__builtin_msa_ldi_h:
3197 case Mips::BI__builtin_msa_ldi_w:
3198 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
3199 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
3200 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
3201 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
3202 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
3203 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
3204 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
3205 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
3206 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
3207 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
3208 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
3209 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
3210 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
3211 }
3212
3213 if (!m)
3214 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3215
3216 return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
3217 SemaBuiltinConstantArgMultiple(TheCall, i, m);
3218}
3219
/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': an AltiVec vector of 16 unsigned chars.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i<n>': an int argument that must be a constant; the decimal value <n>
    // following the 'i' is reported back through Mask as the constraint.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<n>': a PPC MMA vector type of <n> bits, selected from PPCTypes.def,
    // optionally followed by '*' (pointer to) and/or 'C' (const) modifiers.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      // Consume any trailing '*' / 'C' modifiers; the first character that is
      // neither terminates this descriptor and is left for the caller.
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is handled by the generic builtin type decoder.
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}
3275
/// Returns true for PPC builtins that are only available when compiling for a
/// 64-bit PPC target; the caller diagnoses uses of these on 32-bit targets.
static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
    return true;
  }
  return false;
}
3302
3303static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
3304 StringRef FeatureToCheck, unsigned DiagID,
3305 StringRef DiagArg = "") {
3306 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
3307 return false;
3308
3309 if (DiagArg.empty())
3310 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
3311 else
3312 S.Diag(TheCall->getBeginLoc(), DiagID)
3313 << DiagArg << TheCall->getSourceRange();
3314
3315 return true;
3316}
3317
3318/// Returns true if the argument consists of one contiguous run of 1s with any
3319/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
3320/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
3321/// since all 1s are not contiguous.
3322bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
3323 llvm::APSInt Result;
3324 // We can't check the value of a dependent argument.
3325 Expr *Arg = TheCall->getArg(ArgNum);
3326 if (Arg->isTypeDependent() || Arg->isValueDependent())
3327 return false;
3328
3329 // Check constant-ness first.
3330 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3331 return true;
3332
3333 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
3334 if (Result.isShiftedMask() || (~Result).isShiftedMask())
3335 return false;
3336
3337 return Diag(TheCall->getBeginLoc(),
3338 diag::err_argument_not_contiguous_bit_field)
3339 << ArgNum << Arg->getSourceRange();
3340}
3341
3342bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3343 CallExpr *TheCall) {
3344 unsigned i = 0, l = 0, u = 0;
3345 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
3346 llvm::APSInt Result;
3347
3348 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
3349 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
3350 << TheCall->getSourceRange();
3351
3352 switch (BuiltinID) {
3353 default: return false;
3354 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
3355 case PPC::BI__builtin_altivec_crypto_vshasigmad:
3356 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3357 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
3358 case PPC::BI__builtin_altivec_dss:
3359 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
3360 case PPC::BI__builtin_tbegin:
3361 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
3362 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
3363 case PPC::BI__builtin_tabortwc:
3364 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
3365 case PPC::BI__builtin_tabortwci:
3366 case PPC::BI__builtin_tabortdci:
3367 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
3368 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
3369 case PPC::BI__builtin_altivec_dst:
3370 case PPC::BI__builtin_altivec_dstt:
3371 case PPC::BI__builtin_altivec_dstst:
3372 case PPC::BI__builtin_altivec_dststt:
3373 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
3374 case PPC::BI__builtin_vsx_xxpermdi:
3375 case PPC::BI__builtin_vsx_xxsldwi:
3376 return SemaBuiltinVSX(TheCall);
3377 case PPC::BI__builtin_divwe:
3378 case PPC::BI__builtin_divweu:
3379 case PPC::BI__builtin_divde:
3380 case PPC::BI__builtin_divdeu:
3381 return SemaFeatureCheck(*this, TheCall, "extdiv",
3382 diag::err_ppc_builtin_only_on_arch, "7");
3383 case PPC::BI__builtin_bpermd:
3384 return SemaFeatureCheck(*this, TheCall, "bpermd",
3385 diag::err_ppc_builtin_only_on_arch, "7");
3386 case PPC::BI__builtin_unpack_vector_int128:
3387 return SemaFeatureCheck(*this, TheCall, "vsx",
3388 diag::err_ppc_builtin_only_on_arch, "7") ||
3389 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3390 case PPC::BI__builtin_pack_vector_int128:
3391 return SemaFeatureCheck(*this, TheCall, "vsx",
3392 diag::err_ppc_builtin_only_on_arch, "7");
3393 case PPC::BI__builtin_altivec_vgnb:
3394 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
3395 case PPC::BI__builtin_altivec_vec_replace_elt:
3396 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
3397 QualType VecTy = TheCall->getArg(0)->getType();
3398 QualType EltTy = TheCall->getArg(1)->getType();
3399 unsigned Width = Context.getIntWidth(EltTy);
3400 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
3401 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
3402 }
3403 case PPC::BI__builtin_vsx_xxeval:
3404 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
3405 case PPC::BI__builtin_altivec_vsldbi:
3406 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3407 case PPC::BI__builtin_altivec_vsrdbi:
3408 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3409 case PPC::BI__builtin_vsx_xxpermx:
3410 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
3411 case PPC::BI__builtin_ppc_tw:
3412 case PPC::BI__builtin_ppc_tdw:
3413 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
3414 case PPC::BI__builtin_ppc_cmpeqb:
3415 case PPC::BI__builtin_ppc_setb:
3416 case PPC::BI__builtin_ppc_maddhd:
3417 case PPC::BI__builtin_ppc_maddhdu:
3418 case PPC::BI__builtin_ppc_maddld:
3419 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
3420 diag::err_ppc_builtin_only_on_arch, "9");
3421 case PPC::BI__builtin_ppc_cmprb:
3422 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
3423 diag::err_ppc_builtin_only_on_arch, "9") ||
3424 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
3425 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
3426 // be a constant that represents a contiguous bit field.
3427 case PPC::BI__builtin_ppc_rlwnm:
3428 return SemaBuiltinConstantArg(TheCall, 1, Result) ||
3429 SemaValueIsRunOfOnes(TheCall, 2);
3430 case PPC::BI__builtin_ppc_rlwimi:
3431 case PPC::BI__builtin_ppc_rldimi:
3432 return SemaBuiltinConstantArg(TheCall, 2, Result) ||
3433 SemaValueIsRunOfOnes(TheCall, 3);
3434 case PPC::BI__builtin_ppc_extract_exp:
3435 case PPC::BI__builtin_ppc_extract_sig:
3436 case PPC::BI__builtin_ppc_insert_exp:
3437 return SemaFeatureCheck(*this, TheCall, "power9-vector",
3438 diag::err_ppc_builtin_only_on_arch, "9");
3439 case PPC::BI__builtin_ppc_addex: {
3440 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
3441 diag::err_ppc_builtin_only_on_arch, "9") ||
3442 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
3443 return true;
3444 // Output warning for reserved values 1 to 3.
3445 int ArgValue =
3446 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
3447 if (ArgValue != 0)
3448 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
3449 << ArgValue;
3450 return false;
3451 }
3452 case PPC::BI__builtin_ppc_mtfsb0:
3453 case PPC::BI__builtin_ppc_mtfsb1:
3454 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
3455 case PPC::BI__builtin_ppc_mtfsf:
3456 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
3457 case PPC::BI__builtin_ppc_mtfsfi:
3458 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
3459 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
3460 case PPC::BI__builtin_ppc_alignx:
3461 return SemaBuiltinConstantArgPower2(TheCall, 0);
3462 case PPC::BI__builtin_ppc_rdlam:
3463 return SemaValueIsRunOfOnes(TheCall, 2);
3464 case PPC::BI__builtin_ppc_icbt:
3465 case PPC::BI__builtin_ppc_sthcx:
3466 case PPC::BI__builtin_ppc_stbcx:
3467 case PPC::BI__builtin_ppc_lharx:
3468 case PPC::BI__builtin_ppc_lbarx:
3469 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
3470 diag::err_ppc_builtin_only_on_arch, "8");
3471 case PPC::BI__builtin_vsx_ldrmb:
3472 case PPC::BI__builtin_vsx_strmb:
3473 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
3474 diag::err_ppc_builtin_only_on_arch, "8") ||
3475 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
3476#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
3477 case PPC::BI__builtin_##Name: \
3478 return SemaBuiltinPPCMMACall(TheCall, Types);
3479#include "clang/Basic/BuiltinsPPC.def"
3480 }
3481 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3482}
3483
// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  // Pointers to / arrays of MMA types are allowed; only direct values are
  // restricted.
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
  // The macro expands to "|| CoreType == Context.<Id>Ty" for every PPC MMA
  // type in PPCTypes.def, so the condition is true iff CoreType is an MMA
  // type.
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}
3500
/// Validate the memory-order and synchronization-scope arguments of the
/// AMDGCN atomic inc/dec and fence builtins.
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    // Other builtins need no checking here.
    return false;
  }

  // The memory-order argument must evaluate to an integer constant.
  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // relaxed/consume orderings are rejected for the fence builtin only.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}
3561
3562bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
3563 llvm::APSInt Result;
3564
3565 // We can't check the value of a dependent argument.
3566 Expr *Arg = TheCall->getArg(ArgNum);
3567 if (Arg->isTypeDependent() || Arg->isValueDependent())
3568 return false;
3569
3570 // Check constant-ness first.
3571 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3572 return true;
3573
3574 int64_t Val = Result.getSExtValue();
3575 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
3576 return false;
3577
3578 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
3579 << Arg->getSourceRange();
3580}
3581
/// Perform RISC-V-specific semantic checks: verify required extensions are
/// enabled and range-check the constant operands of the vector builtins.
bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  // CodeGenFunction can also detect this, but this gives a better error
  // message.
  bool FeatureMissing = false;
  SmallVector<StringRef> ReqFeatures;
  StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
  Features.split(ReqFeatures, ',');

  // Check if each required feature is included
  for (StringRef F : ReqFeatures) {
    if (TI.hasFeature(F))
      continue;

    // If the feature is 64bit, alter the string so it will print better in
    // the diagnostic.
    if (F == "64bit")
      F = "RV64";

    // Convert features like "zbr" and "experimental-zbr" to "Zbr".
    F.consume_front("experimental-");
    std::string FeatureStr = F.str();
    FeatureStr[0] = std::toupper(FeatureStr[0]);

    // Error message
    FeatureMissing = true;
    Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
        << TheCall->getSourceRange() << StringRef(FeatureStr);
  }

  if (FeatureMissing)
    return true;

  // Range-check the constant index/SEW/LMUL operands. For vget/vset the
  // legal index range depends on the ratio between the tuple LMUL and the
  // element LMUL encoded in the builtin name (m2/m1 -> 0..1, m4/m1 -> 0..3,
  // m8/m1 -> 0..7, etc.).
  switch (BuiltinID) {
  case RISCV::BI__builtin_rvv_vsetvli:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
           CheckRISCVLMUL(TheCall, 2);
  case RISCV::BI__builtin_rvv_vsetvlimax:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           CheckRISCVLMUL(TheCall, 1);
  case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1:
  case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1:
  case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1:
  case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1:
  case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1:
  case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1:
  case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1:
  case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1:
  case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1:
  case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1:
  case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2:
  case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2:
  case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2:
  case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2:
  case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2:
  case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2:
  case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2:
  case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2:
  case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2:
  case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2:
  case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4:
  case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4:
  case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4:
  case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4:
  case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4:
  case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4:
  case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4:
  case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4:
  case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4:
  case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1:
  case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1:
  case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1:
  case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1:
  case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1:
  case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1:
  case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1:
  case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1:
  case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1:
  case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1:
  case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2:
  case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2:
  case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2:
  case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2:
  case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2:
  case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2:
  case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2:
  case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2:
  case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2:
  case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1:
  case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1:
  case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1:
  case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1:
  case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1:
  case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1:
  case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1:
  case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1:
  case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1:
  case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
  case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2:
  case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2:
  case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2:
  case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2:
  case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2:
  case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2:
  case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2:
  case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2:
  case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2:
  case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2:
  case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4:
  case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4:
  case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4:
  case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4:
  case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4:
  case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4:
  case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4:
  case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4:
  case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4:
  case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4:
  case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8:
  case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8:
  case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8:
  case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8:
  case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8:
  case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8:
  case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8:
  case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8:
  case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8:
  case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4:
  case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4:
  case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4:
  case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4:
  case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4:
  case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4:
  case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4:
  case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4:
  case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4:
  case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4:
  case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8:
  case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8:
  case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8:
  case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8:
  case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8:
  case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8:
  case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8:
  case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8:
  case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8:
  case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8:
  case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8:
  case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8:
  case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8:
  case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8:
  case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8:
  case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8:
  case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8:
  case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8:
  case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
  }

  return false;
}
3753
/// Perform SystemZ-specific semantic checks: reject reserved __builtin_tabort
/// codes and range-check immediate operands of the vector builtins.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    // Constant abort codes in [0, 255] are diagnosed as invalid; only checked
    // when the argument evaluates to an integer constant.
    Expr *Arg = TheCall->getArg(0);
    if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    // These take two separate immediates, so check both here.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
3822
3823/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
3824/// This checks that the target supports __builtin_cpu_supports and
3825/// that the string argument is constant and valid.
3826static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
3827 CallExpr *TheCall) {
3828 Expr *Arg = TheCall->getArg(0);
3829
3830 // Check if the argument is a string literal.
3831 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3832 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3833 << Arg->getSourceRange();
3834
3835 // Check the contents of the string.
3836 StringRef Feature =
3837 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3838 if (!TI.validateCpuSupports(Feature))
3839 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
3840 << Arg->getSourceRange();
3841 return false;
3842}
3843
3844/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
3845/// This checks that the target supports __builtin_cpu_is and
3846/// that the string argument is constant and valid.
3847static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
3848 Expr *Arg = TheCall->getArg(0);
3849
3850 // Check if the argument is a string literal.
3851 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
3852 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
3853 << Arg->getSourceRange();
3854
3855 // Check the contents of the string.
3856 StringRef Feature =
3857 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
3858 if (!TI.validateCpuIs(Feature))
3859 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
3860 << Arg->getSourceRange();
3861 return false;
3862}
3863
3864// Check if the rounding mode is legal.
3865bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
3866 // Indicates if this instruction has rounding control or just SAE.
3867 bool HasRC = false;
3868
3869 unsigned ArgNum = 0;
3870 switch (BuiltinID) {
3871 default:
3872 return false;
3873 case X86::BI__builtin_ia32_vcvttsd2si32:
3874 case X86::BI__builtin_ia32_vcvttsd2si64:
3875 case X86::BI__builtin_ia32_vcvttsd2usi32:
3876 case X86::BI__builtin_ia32_vcvttsd2usi64:
3877 case X86::BI__builtin_ia32_vcvttss2si32:
3878 case X86::BI__builtin_ia32_vcvttss2si64:
3879 case X86::BI__builtin_ia32_vcvttss2usi32:
3880 case X86::BI__builtin_ia32_vcvttss2usi64:
3881 case X86::BI__builtin_ia32_vcvttsh2si32:
3882 case X86::BI__builtin_ia32_vcvttsh2si64:
3883 case X86::BI__builtin_ia32_vcvttsh2usi32:
3884 case X86::BI__builtin_ia32_vcvttsh2usi64:
3885 ArgNum = 1;
3886 break;
3887 case X86::BI__builtin_ia32_maxpd512:
3888 case X86::BI__builtin_ia32_maxps512:
3889 case X86::BI__builtin_ia32_minpd512:
3890 case X86::BI__builtin_ia32_minps512:
3891 case X86::BI__builtin_ia32_maxph512:
3892 case X86::BI__builtin_ia32_minph512:
3893 ArgNum = 2;
3894 break;
3895 case X86::BI__builtin_ia32_vcvtph2pd512_mask:
3896 case X86::BI__builtin_ia32_vcvtph2psx512_mask:
3897 case X86::BI__builtin_ia32_cvtps2pd512_mask:
3898 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
3899 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
3900 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
3901 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
3902 case X86::BI__builtin_ia32_cvttps2dq512_mask:
3903 case X86::BI__builtin_ia32_cvttps2qq512_mask:
3904 case X86::BI__builtin_ia32_cvttps2udq512_mask:
3905 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
3906 case X86::BI__builtin_ia32_vcvttph2w512_mask:
3907 case X86::BI__builtin_ia32_vcvttph2uw512_mask:
3908 case X86::BI__builtin_ia32_vcvttph2dq512_mask:
3909 case X86::BI__builtin_ia32_vcvttph2udq512_mask:
3910 case X86::BI__builtin_ia32_vcvttph2qq512_mask:
3911 case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
3912 case X86::BI__builtin_ia32_exp2pd_mask:
3913 case X86::BI__builtin_ia32_exp2ps_mask:
3914 case X86::BI__builtin_ia32_getexppd512_mask:
3915 case X86::BI__builtin_ia32_getexpps512_mask:
3916 case X86::BI__builtin_ia32_getexpph512_mask:
3917 case X86::BI__builtin_ia32_rcp28pd_mask:
3918 case X86::BI__builtin_ia32_rcp28ps_mask:
3919 case X86::BI__builtin_ia32_rsqrt28pd_mask:
3920 case X86::BI__builtin_ia32_rsqrt28ps_mask:
3921 case X86::BI__builtin_ia32_vcomisd:
3922 case X86::BI__builtin_ia32_vcomiss:
3923 case X86::BI__builtin_ia32_vcomish:
3924 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
3925 ArgNum = 3;
3926 break;
3927 case X86::BI__builtin_ia32_cmppd512_mask:
3928 case X86::BI__builtin_ia32_cmpps512_mask:
3929 case X86::BI__builtin_ia32_cmpsd_mask:
3930 case X86::BI__builtin_ia32_cmpss_mask:
3931 case X86::BI__builtin_ia32_cmpsh_mask:
3932 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
3933 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
3934 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
3935 case X86::BI__builtin_ia32_getexpsd128_round_mask:
3936 case X86::BI__builtin_ia32_getexpss128_round_mask:
3937 case X86::BI__builtin_ia32_getexpsh128_round_mask:
3938 case X86::BI__builtin_ia32_getmantpd512_mask:
3939 case X86::BI__builtin_ia32_getmantps512_mask:
3940 case X86::BI__builtin_ia32_getmantph512_mask:
3941 case X86::BI__builtin_ia32_maxsd_round_mask:
3942 case X86::BI__builtin_ia32_maxss_round_mask:
3943 case X86::BI__builtin_ia32_maxsh_round_mask:
3944 case X86::BI__builtin_ia32_minsd_round_mask:
3945 case X86::BI__builtin_ia32_minss_round_mask:
3946 case X86::BI__builtin_ia32_minsh_round_mask:
3947 case X86::BI__builtin_ia32_rcp28sd_round_mask:
3948 case X86::BI__builtin_ia32_rcp28ss_round_mask:
3949 case X86::BI__builtin_ia32_reducepd512_mask:
3950 case X86::BI__builtin_ia32_reduceps512_mask:
3951 case X86::BI__builtin_ia32_reduceph512_mask:
3952 case X86::BI__builtin_ia32_rndscalepd_mask:
3953 case X86::BI__builtin_ia32_rndscaleps_mask:
3954 case X86::BI__builtin_ia32_rndscaleph_mask:
3955 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
3956 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
3957 ArgNum = 4;
3958 break;
3959 case X86::BI__builtin_ia32_fixupimmpd512_mask:
3960 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
3961 case X86::BI__builtin_ia32_fixupimmps512_mask:
3962 case X86::BI__builtin_ia32_fixupimmps512_maskz:
3963 case X86::BI__builtin_ia32_fixupimmsd_mask:
3964 case X86::BI__builtin_ia32_fixupimmsd_maskz:
3965 case X86::BI__builtin_ia32_fixupimmss_mask:
3966 case X86::BI__builtin_ia32_fixupimmss_maskz:
3967 case X86::BI__builtin_ia32_getmantsd_round_mask:
3968 case X86::BI__builtin_ia32_getmantss_round_mask:
3969 case X86::BI__builtin_ia32_getmantsh_round_mask:
3970 case X86::BI__builtin_ia32_rangepd512_mask:
3971 case X86::BI__builtin_ia32_rangeps512_mask:
3972 case X86::BI__builtin_ia32_rangesd128_round_mask:
3973 case X86::BI__builtin_ia32_rangess128_round_mask:
3974 case X86::BI__builtin_ia32_reducesd_mask:
3975 case X86::BI__builtin_ia32_reducess_mask:
3976 case X86::BI__builtin_ia32_reducesh_mask:
3977 case X86::BI__builtin_ia32_rndscalesd_round_mask:
3978 case X86::BI__builtin_ia32_rndscaless_round_mask:
3979 case X86::BI__builtin_ia32_rndscalesh_round_mask:
3980 ArgNum = 5;
3981 break;
3982 case X86::BI__builtin_ia32_vcvtsd2si64:
3983 case X86::BI__builtin_ia32_vcvtsd2si32:
3984 case X86::BI__builtin_ia32_vcvtsd2usi32:
3985 case X86::BI__builtin_ia32_vcvtsd2usi64:
3986 case X86::BI__builtin_ia32_vcvtss2si32:
3987 case X86::BI__builtin_ia32_vcvtss2si64:
3988 case X86::BI__builtin_ia32_vcvtss2usi32:
3989 case X86::BI__builtin_ia32_vcvtss2usi64:
3990 case X86::BI__builtin_ia32_vcvtsh2si32:
3991 case X86::BI__builtin_ia32_vcvtsh2si64:
3992 case X86::BI__builtin_ia32_vcvtsh2usi32:
3993 case X86::BI__builtin_ia32_vcvtsh2usi64:
3994 case X86::BI__builtin_ia32_sqrtpd512:
3995 case X86::BI__builtin_ia32_sqrtps512:
3996 case X86::BI__builtin_ia32_sqrtph512:
3997 ArgNum = 1;
3998 HasRC = true;
3999 break;
4000 case X86::BI__builtin_ia32_addph512:
4001 case X86::BI__builtin_ia32_divph512:
4002 case X86::BI__builtin_ia32_mulph512:
4003 case X86::BI__builtin_ia32_subph512:
4004 case X86::BI__builtin_ia32_addpd512:
4005 case X86::BI__builtin_ia32_addps512:
4006 case X86::BI__builtin_ia32_divpd512:
4007 case X86::BI__builtin_ia32_divps512:
4008 case X86::BI__builtin_ia32_mulpd512:
4009 case X86::BI__builtin_ia32_mulps512:
4010 case X86::BI__builtin_ia32_subpd512:
4011 case X86::BI__builtin_ia32_subps512:
4012 case X86::BI__builtin_ia32_cvtsi2sd64:
4013 case X86::BI__builtin_ia32_cvtsi2ss32:
4014 case X86::BI__builtin_ia32_cvtsi2ss64:
4015 case X86::BI__builtin_ia32_cvtusi2sd64:
4016 case X86::BI__builtin_ia32_cvtusi2ss32:
4017 case X86::BI__builtin_ia32_cvtusi2ss64:
4018 case X86::BI__builtin_ia32_vcvtusi2sh:
4019 case X86::BI__builtin_ia32_vcvtusi642sh:
4020 case X86::BI__builtin_ia32_vcvtsi2sh:
4021 case X86::BI__builtin_ia32_vcvtsi642sh:
4022 ArgNum = 2;
4023 HasRC = true;
4024 break;
4025 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
4026 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
4027 case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
4028 case X86::BI__builtin_ia32_vcvtps2phx512_mask:
4029 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
4030 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
4031 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
4032 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
4033 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
4034 case X86::BI__builtin_ia32_cvtps2dq512_mask:
4035 case X86::BI__builtin_ia32_cvtps2qq512_mask:
4036 case X86::BI__builtin_ia32_cvtps2udq512_mask:
4037 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
4038 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
4039 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
4040 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
4041 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
4042 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
4043 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
4044 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
4045 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
4046 case X86::BI__builtin_ia32_vcvtph2w512_mask:
4047 case X86::BI__builtin_ia32_vcvtph2uw512_mask:
4048 case X86::BI__builtin_ia32_vcvtph2dq512_mask:
4049 case X86::BI__builtin_ia32_vcvtph2udq512_mask:
4050 case X86::BI__builtin_ia32_vcvtph2qq512_mask:
4051 case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
4052 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
4053 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
4054 ArgNum = 3;
4055 HasRC = true;
4056 break;
4057 case X86::BI__builtin_ia32_addsh_round_mask:
4058 case X86::BI__builtin_ia32_addss_round_mask:
4059 case X86::BI__builtin_ia32_addsd_round_mask:
4060 case X86::BI__builtin_ia32_divsh_round_mask:
4061 case X86::BI__builtin_ia32_divss_round_mask:
4062 case X86::BI__builtin_ia32_divsd_round_mask:
4063 case X86::BI__builtin_ia32_mulsh_round_mask:
4064 case X86::BI__builtin_ia32_mulss_round_mask:
4065 case X86::BI__builtin_ia32_mulsd_round_mask:
4066 case X86::BI__builtin_ia32_subsh_round_mask:
4067 case X86::BI__builtin_ia32_subss_round_mask:
4068 case X86::BI__builtin_ia32_subsd_round_mask:
4069 case X86::BI__builtin_ia32_scalefph512_mask:
4070 case X86::BI__builtin_ia32_scalefpd512_mask:
4071 case X86::BI__builtin_ia32_scalefps512_mask:
4072 case X86::BI__builtin_ia32_scalefsd_round_mask:
4073 case X86::BI__builtin_ia32_scalefss_round_mask:
4074 case X86::BI__builtin_ia32_scalefsh_round_mask:
4075 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
4076 case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
4077 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
4078 case X86::BI__builtin_ia32_sqrtsd_round_mask:
4079 case X86::BI__builtin_ia32_sqrtss_round_mask:
4080 case X86::BI__builtin_ia32_sqrtsh_round_mask:
4081 case X86::BI__builtin_ia32_vfmaddsd3_mask:
4082 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
4083 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
4084 case X86::BI__builtin_ia32_vfmaddss3_mask:
4085 case X86::BI__builtin_ia32_vfmaddss3_maskz:
4086 case X86::BI__builtin_ia32_vfmaddss3_mask3:
4087 case X86::BI__builtin_ia32_vfmaddsh3_mask:
4088 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
4089 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
4090 case X86::BI__builtin_ia32_vfmaddpd512_mask:
4091 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
4092 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
4093 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
4094 case X86::BI__builtin_ia32_vfmaddps512_mask:
4095 case X86::BI__builtin_ia32_vfmaddps512_maskz:
4096 case X86::BI__builtin_ia32_vfmaddps512_mask3:
4097 case X86::BI__builtin_ia32_vfmsubps512_mask3:
4098 case X86::BI__builtin_ia32_vfmaddph512_mask:
4099 case X86::BI__builtin_ia32_vfmaddph512_maskz:
4100 case X86::BI__builtin_ia32_vfmaddph512_mask3:
4101 case X86::BI__builtin_ia32_vfmsubph512_mask3:
4102 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
4103 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
4104 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
4105 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
4106 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
4107 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
4108 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
4109 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
4110 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
4111 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
4112 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
4113 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
4114 case X86::BI__builtin_ia32_vfmaddcsh_mask:
4115 case X86::BI__builtin_ia32_vfmaddcph512_mask:
4116 case X86::BI__builtin_ia32_vfmaddcph512_maskz:
4117 case X86::BI__builtin_ia32_vfcmaddcsh_mask:
4118 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
4119 case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
4120 case X86::BI__builtin_ia32_vfmulcsh_mask:
4121 case X86::BI__builtin_ia32_vfmulcph512_mask:
4122 case X86::BI__builtin_ia32_vfcmulcsh_mask:
4123 case X86::BI__builtin_ia32_vfcmulcph512_mask:
4124 ArgNum = 4;
4125 HasRC = true;
4126 break;
4127 }
4128
4129 llvm::APSInt Result;
4130
4131 // We can't check the value of a dependent argument.
4132 Expr *Arg = TheCall->getArg(ArgNum);
4133 if (Arg->isTypeDependent() || Arg->isValueDependent())
4134 return false;
4135
4136 // Check constant-ness first.
4137 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4138 return true;
4139
4140 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
4141 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
4142 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
4143 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
4144 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
4145 Result == 8/*ROUND_NO_EXC*/ ||
4146 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
4147 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
4148 return false;
4149
4150 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
4151 << Arg->getSourceRange();
4152}
4153
4154// Check if the gather/scatter scale is legal.
4155bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
4156 CallExpr *TheCall) {
4157 unsigned ArgNum = 0;
4158 switch (BuiltinID) {
4159 default:
4160 return false;
4161 case X86::BI__builtin_ia32_gatherpfdpd:
4162 case X86::BI__builtin_ia32_gatherpfdps:
4163 case X86::BI__builtin_ia32_gatherpfqpd:
4164 case X86::BI__builtin_ia32_gatherpfqps:
4165 case X86::BI__builtin_ia32_scatterpfdpd:
4166 case X86::BI__builtin_ia32_scatterpfdps:
4167 case X86::BI__builtin_ia32_scatterpfqpd:
4168 case X86::BI__builtin_ia32_scatterpfqps:
4169 ArgNum = 3;
4170 break;
4171 case X86::BI__builtin_ia32_gatherd_pd:
4172 case X86::BI__builtin_ia32_gatherd_pd256:
4173 case X86::BI__builtin_ia32_gatherq_pd:
4174 case X86::BI__builtin_ia32_gatherq_pd256:
4175 case X86::BI__builtin_ia32_gatherd_ps:
4176 case X86::BI__builtin_ia32_gatherd_ps256:
4177 case X86::BI__builtin_ia32_gatherq_ps:
4178 case X86::BI__builtin_ia32_gatherq_ps256:
4179 case X86::BI__builtin_ia32_gatherd_q:
4180 case X86::BI__builtin_ia32_gatherd_q256:
4181 case X86::BI__builtin_ia32_gatherq_q:
4182 case X86::BI__builtin_ia32_gatherq_q256:
4183 case X86::BI__builtin_ia32_gatherd_d:
4184 case X86::BI__builtin_ia32_gatherd_d256:
4185 case X86::BI__builtin_ia32_gatherq_d:
4186 case X86::BI__builtin_ia32_gatherq_d256:
4187 case X86::BI__builtin_ia32_gather3div2df:
4188 case X86::BI__builtin_ia32_gather3div2di:
4189 case X86::BI__builtin_ia32_gather3div4df:
4190 case X86::BI__builtin_ia32_gather3div4di:
4191 case X86::BI__builtin_ia32_gather3div4sf:
4192 case X86::BI__builtin_ia32_gather3div4si:
4193 case X86::BI__builtin_ia32_gather3div8sf:
4194 case X86::BI__builtin_ia32_gather3div8si:
4195 case X86::BI__builtin_ia32_gather3siv2df:
4196 case X86::BI__builtin_ia32_gather3siv2di:
4197 case X86::BI__builtin_ia32_gather3siv4df:
4198 case X86::BI__builtin_ia32_gather3siv4di:
4199 case X86::BI__builtin_ia32_gather3siv4sf:
4200 case X86::BI__builtin_ia32_gather3siv4si:
4201 case X86::BI__builtin_ia32_gather3siv8sf:
4202 case X86::BI__builtin_ia32_gather3siv8si:
4203 case X86::BI__builtin_ia32_gathersiv8df:
4204 case X86::BI__builtin_ia32_gathersiv16sf:
4205 case X86::BI__builtin_ia32_gatherdiv8df:
4206 case X86::BI__builtin_ia32_gatherdiv16sf:
4207 case X86::BI__builtin_ia32_gathersiv8di:
4208 case X86::BI__builtin_ia32_gathersiv16si:
4209 case X86::BI__builtin_ia32_gatherdiv8di:
4210 case X86::BI__builtin_ia32_gatherdiv16si:
4211 case X86::BI__builtin_ia32_scatterdiv2df:
4212 case X86::BI__builtin_ia32_scatterdiv2di:
4213 case X86::BI__builtin_ia32_scatterdiv4df:
4214 case X86::BI__builtin_ia32_scatterdiv4di:
4215 case X86::BI__builtin_ia32_scatterdiv4sf:
4216 case X86::BI__builtin_ia32_scatterdiv4si:
4217 case X86::BI__builtin_ia32_scatterdiv8sf:
4218 case X86::BI__builtin_ia32_scatterdiv8si:
4219 case X86::BI__builtin_ia32_scattersiv2df:
4220 case X86::BI__builtin_ia32_scattersiv2di:
4221 case X86::BI__builtin_ia32_scattersiv4df:
4222 case X86::BI__builtin_ia32_scattersiv4di:
4223 case X86::BI__builtin_ia32_scattersiv4sf:
4224 case X86::BI__builtin_ia32_scattersiv4si:
4225 case X86::BI__builtin_ia32_scattersiv8sf:
4226 case X86::BI__builtin_ia32_scattersiv8si:
4227 case X86::BI__builtin_ia32_scattersiv8df:
4228 case X86::BI__builtin_ia32_scattersiv16sf:
4229 case X86::BI__builtin_ia32_scatterdiv8df:
4230 case X86::BI__builtin_ia32_scatterdiv16sf:
4231 case X86::BI__builtin_ia32_scattersiv8di:
4232 case X86::BI__builtin_ia32_scattersiv16si:
4233 case X86::BI__builtin_ia32_scatterdiv8di:
4234 case X86::BI__builtin_ia32_scatterdiv16si:
4235 ArgNum = 4;
4236 break;
4237 }
4238
4239 llvm::APSInt Result;
4240
4241 // We can't check the value of a dependent argument.
4242 Expr *Arg = TheCall->getArg(ArgNum);
4243 if (Arg->isTypeDependent() || Arg->isValueDependent())
4244 return false;
4245
4246 // Check constant-ness first.
4247 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4248 return true;
4249
4250 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
4251 return false;
4252
4253 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
4254 << Arg->getSourceRange();
4255}
4256
// Inclusive range of valid AMX tile register numbers (tmm0 through tmm7).
enum { TileRegLow = 0, TileRegHigh = 7 };
4258
4259bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
4260 ArrayRef<int> ArgNums) {
4261 for (int ArgNum : ArgNums) {
4262 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
4263 return true;
4264 }
4265 return false;
4266}
4267
4268bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
4269 ArrayRef<int> ArgNums) {
4270 // Because the max number of tile register is TileRegHigh + 1, so here we use
4271 // each bit to represent the usage of them in bitset.
4272 std::bitset<TileRegHigh + 1> ArgValues;
4273 for (int ArgNum : ArgNums) {
4274 Expr *Arg = TheCall->getArg(ArgNum);
4275 if (Arg->isTypeDependent() || Arg->isValueDependent())
4276 continue;
4277
4278 llvm::APSInt Result;
4279 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4280 return true;
4281 int ArgExtValue = Result.getExtValue();
4282 assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&(static_cast<void> (0))
4283 "Incorrect tile register num.")(static_cast<void> (0));
4284 if (ArgValues.test(ArgExtValue))
4285 return Diag(TheCall->getBeginLoc(),
4286 diag::err_x86_builtin_tile_arg_duplicate)
4287 << TheCall->getArg(ArgNum)->getSourceRange();
4288 ArgValues.set(ArgExtValue);
4289 }
4290 return false;
4291}
4292
4293bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
4294 ArrayRef<int> ArgNums) {
4295 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
4296 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
4297}
4298
4299bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
4300 switch (BuiltinID) {
4301 default:
4302 return false;
4303 case X86::BI__builtin_ia32_tileloadd64:
4304 case X86::BI__builtin_ia32_tileloaddt164:
4305 case X86::BI__builtin_ia32_tilestored64:
4306 case X86::BI__builtin_ia32_tilezero:
4307 return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
4308 case X86::BI__builtin_ia32_tdpbssd:
4309 case X86::BI__builtin_ia32_tdpbsud:
4310 case X86::BI__builtin_ia32_tdpbusd:
4311 case X86::BI__builtin_ia32_tdpbuud:
4312 case X86::BI__builtin_ia32_tdpbf16ps:
4313 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
4314 }
4315}
4316static bool isX86_32Builtin(unsigned BuiltinID) {
4317 // These builtins only work on x86-32 targets.
4318 switch (BuiltinID) {
4319 case X86::BI__builtin_ia32_readeflags_u32:
4320 case X86::BI__builtin_ia32_writeeflags_u32:
4321 return true;
4322 }
4323
4324 return false;
4325}
4326
4327bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
4328 CallExpr *TheCall) {
4329 if (BuiltinID == X86::BI__builtin_cpu_supports)
4330 return SemaBuiltinCpuSupports(*this, TI, TheCall);
4331
4332 if (BuiltinID == X86::BI__builtin_cpu_is)
4333 return SemaBuiltinCpuIs(*this, TI, TheCall);
4334
4335 // Check for 32-bit only builtins on a 64-bit target.
4336 const llvm::Triple &TT = TI.getTriple();
4337 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
4338 return Diag(TheCall->getCallee()->getBeginLoc(),
4339 diag::err_32_bit_builtin_64_bit_tgt);
4340
4341 // If the intrinsic has rounding or SAE make sure its valid.
4342 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
4343 return true;
4344
4345 // If the intrinsic has a gather/scatter scale immediate make sure its valid.
4346 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
4347 return true;
4348
4349 // If the intrinsic has a tile arguments, make sure they are valid.
4350 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
4351 return true;
4352
4353 // For intrinsics which take an immediate value as part of the instruction,
4354 // range check them here.
4355 int i = 0, l = 0, u = 0;
4356 switch (BuiltinID) {
4357 default:
4358 return false;
4359 case X86::BI__builtin_ia32_vec_ext_v2si:
4360 case X86::BI__builtin_ia32_vec_ext_v2di:
4361 case X86::BI__builtin_ia32_vextractf128_pd256:
4362 case X86::BI__builtin_ia32_vextractf128_ps256:
4363 case X86::BI__builtin_ia32_vextractf128_si256:
4364 case X86::BI__builtin_ia32_extract128i256:
4365 case X86::BI__builtin_ia32_extractf64x4_mask:
4366 case X86::BI__builtin_ia32_extracti64x4_mask:
4367 case X86::BI__builtin_ia32_extractf32x8_mask:
4368 case X86::BI__builtin_ia32_extracti32x8_mask:
4369 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4370 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4371 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4372 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4373 i = 1; l = 0; u = 1;
4374 break;
4375 case X86::BI__builtin_ia32_vec_set_v2di:
4376 case X86::BI__builtin_ia32_vinsertf128_pd256:
4377 case X86::BI__builtin_ia32_vinsertf128_ps256:
4378 case X86::BI__builtin_ia32_vinsertf128_si256:
4379 case X86::BI__builtin_ia32_insert128i256:
4380 case X86::BI__builtin_ia32_insertf32x8:
4381 case X86::BI__builtin_ia32_inserti32x8:
4382 case X86::BI__builtin_ia32_insertf64x4:
4383 case X86::BI__builtin_ia32_inserti64x4:
4384 case X86::BI__builtin_ia32_insertf64x2_256:
4385 case X86::BI__builtin_ia32_inserti64x2_256:
4386 case X86::BI__builtin_ia32_insertf32x4_256:
4387 case X86::BI__builtin_ia32_inserti32x4_256:
4388 i = 2; l = 0; u = 1;
4389 break;
4390 case X86::BI__builtin_ia32_vpermilpd:
4391 case X86::BI__builtin_ia32_vec_ext_v4hi:
4392 case X86::BI__builtin_ia32_vec_ext_v4si:
4393 case X86::BI__builtin_ia32_vec_ext_v4sf:
4394 case X86::BI__builtin_ia32_vec_ext_v4di:
4395 case X86::BI__builtin_ia32_extractf32x4_mask:
4396 case X86::BI__builtin_ia32_extracti32x4_mask:
4397 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4398 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4399 i = 1; l = 0; u = 3;
4400 break;
4401 case X86::BI_mm_prefetch:
4402 case X86::BI__builtin_ia32_vec_ext_v8hi:
4403 case X86::BI__builtin_ia32_vec_ext_v8si:
4404 i = 1; l = 0; u = 7;
4405 break;
4406 case X86::BI__builtin_ia32_sha1rnds4:
4407 case X86::BI__builtin_ia32_blendpd:
4408 case X86::BI__builtin_ia32_shufpd:
4409 case X86::BI__builtin_ia32_vec_set_v4hi:
4410 case X86::BI__builtin_ia32_vec_set_v4si:
4411 case X86::BI__builtin_ia32_vec_set_v4di:
4412 case X86::BI__builtin_ia32_shuf_f32x4_256:
4413 case X86::BI__builtin_ia32_shuf_f64x2_256:
4414 case X86::BI__builtin_ia32_shuf_i32x4_256:
4415 case X86::BI__builtin_ia32_shuf_i64x2_256:
4416 case X86::BI__builtin_ia32_insertf64x2_512:
4417 case X86::BI__builtin_ia32_inserti64x2_512:
4418 case X86::BI__builtin_ia32_insertf32x4:
4419 case X86::BI__builtin_ia32_inserti32x4:
4420 i = 2; l = 0; u = 3;
4421 break;
4422 case X86::BI__builtin_ia32_vpermil2pd:
4423 case X86::BI__builtin_ia32_vpermil2pd256:
4424 case X86::BI__builtin_ia32_vpermil2ps:
4425 case X86::BI__builtin_ia32_vpermil2ps256:
4426 i = 3; l = 0; u = 3;
4427 break;
4428 case X86::BI__builtin_ia32_cmpb128_mask:
4429 case X86::BI__builtin_ia32_cmpw128_mask:
4430 case X86::BI__builtin_ia32_cmpd128_mask:
4431 case X86::BI__builtin_ia32_cmpq128_mask:
4432 case X86::BI__builtin_ia32_cmpb256_mask:
4433 case X86::BI__builtin_ia32_cmpw256_mask:
4434 case X86::BI__builtin_ia32_cmpd256_mask:
4435 case X86::BI__builtin_ia32_cmpq256_mask:
4436 case X86::BI__builtin_ia32_cmpb512_mask:
4437 case X86::BI__builtin_ia32_cmpw512_mask:
4438 case X86::BI__builtin_ia32_cmpd512_mask:
4439 case X86::BI__builtin_ia32_cmpq512_mask:
4440 case X86::BI__builtin_ia32_ucmpb128_mask:
4441 case X86::BI__builtin_ia32_ucmpw128_mask:
4442 case X86::BI__builtin_ia32_ucmpd128_mask:
4443 case X86::BI__builtin_ia32_ucmpq128_mask:
4444 case X86::BI__builtin_ia32_ucmpb256_mask:
4445 case X86::BI__builtin_ia32_ucmpw256_mask:
4446 case X86::BI__builtin_ia32_ucmpd256_mask:
4447 case X86::BI__builtin_ia32_ucmpq256_mask:
4448 case X86::BI__builtin_ia32_ucmpb512_mask:
4449 case X86::BI__builtin_ia32_ucmpw512_mask:
4450 case X86::BI__builtin_ia32_ucmpd512_mask:
4451 case X86::BI__builtin_ia32_ucmpq512_mask:
4452 case X86::BI__builtin_ia32_vpcomub:
4453 case X86::BI__builtin_ia32_vpcomuw:
4454 case X86::BI__builtin_ia32_vpcomud:
4455 case X86::BI__builtin_ia32_vpcomuq:
4456 case X86::BI__builtin_ia32_vpcomb:
4457 case X86::BI__builtin_ia32_vpcomw:
4458 case X86::BI__builtin_ia32_vpcomd:
4459 case X86::BI__builtin_ia32_vpcomq:
4460 case X86::BI__builtin_ia32_vec_set_v8hi:
4461 case X86::BI__builtin_ia32_vec_set_v8si:
4462 i = 2; l = 0; u = 7;
4463 break;
4464 case X86::BI__builtin_ia32_vpermilpd256:
4465 case X86::BI__builtin_ia32_roundps:
4466 case X86::BI__builtin_ia32_roundpd:
4467 case X86::BI__builtin_ia32_roundps256:
4468 case X86::BI__builtin_ia32_roundpd256:
4469 case X86::BI__builtin_ia32_getmantpd128_mask:
4470 case X86::BI__builtin_ia32_getmantpd256_mask:
4471 case X86::BI__builtin_ia32_getmantps128_mask:
4472 case X86::BI__builtin_ia32_getmantps256_mask:
4473 case X86::BI__builtin_ia32_getmantpd512_mask:
4474 case X86::BI__builtin_ia32_getmantps512_mask:
4475 case X86::BI__builtin_ia32_getmantph128_mask:
4476 case X86::BI__builtin_ia32_getmantph256_mask:
4477 case X86::BI__builtin_ia32_getmantph512_mask:
4478 case X86::BI__builtin_ia32_vec_ext_v16qi:
4479 case X86::BI__builtin_ia32_vec_ext_v16hi:
4480 i = 1; l = 0; u = 15;
4481 break;
4482 case X86::BI__builtin_ia32_pblendd128:
4483 case X86::BI__builtin_ia32_blendps:
4484 case X86::BI__builtin_ia32_blendpd256:
4485 case X86::BI__builtin_ia32_shufpd256:
4486 case X86::BI__builtin_ia32_roundss:
4487 case X86::BI__builtin_ia32_roundsd:
4488 case X86::BI__builtin_ia32_rangepd128_mask:
4489 case X86::BI__builtin_ia32_rangepd256_mask:
4490 case X86::BI__builtin_ia32_rangepd512_mask:
4491 case X86::BI__builtin_ia32_rangeps128_mask:
4492 case X86::BI__builtin_ia32_rangeps256_mask:
4493 case X86::BI__builtin_ia32_rangeps512_mask:
4494 case X86::BI__builtin_ia32_getmantsd_round_mask:
4495 case X86::BI__builtin_ia32_getmantss_round_mask:
4496 case X86::BI__builtin_ia32_getmantsh_round_mask:
4497 case X86::BI__builtin_ia32_vec_set_v16qi:
4498 case X86::BI__builtin_ia32_vec_set_v16hi:
4499 i = 2; l = 0; u = 15;
4500 break;
4501 case X86::BI__builtin_ia32_vec_ext_v32qi:
4502 i = 1; l = 0; u = 31;
4503 break;
4504 case X86::BI__builtin_ia32_cmpps:
4505 case X86::BI__builtin_ia32_cmpss:
4506 case X86::BI__builtin_ia32_cmppd:
4507 case X86::BI__builtin_ia32_cmpsd:
4508 case X86::BI__builtin_ia32_cmpps256:
4509 case X86::BI__builtin_ia32_cmppd256:
4510 case X86::BI__builtin_ia32_cmpps128_mask:
4511 case X86::BI__builtin_ia32_cmppd128_mask:
4512 case X86::BI__builtin_ia32_cmpps256_mask:
4513 case X86::BI__builtin_ia32_cmppd256_mask:
4514 case X86::BI__builtin_ia32_cmpps512_mask:
4515 case X86::BI__builtin_ia32_cmppd512_mask:
4516 case X86::BI__builtin_ia32_cmpsd_mask:
4517 case X86::BI__builtin_ia32_cmpss_mask:
4518 case X86::BI__builtin_ia32_vec_set_v32qi:
4519 i = 2; l = 0; u = 31;
4520 break;
4521 case X86::BI__builtin_ia32_permdf256:
4522 case X86::BI__builtin_ia32_permdi256:
4523 case X86::BI__builtin_ia32_permdf512:
4524 case X86::BI__builtin_ia32_permdi512:
4525 case X86::BI__builtin_ia32_vpermilps:
4526 case X86::BI__builtin_ia32_vpermilps256:
4527 case X86::BI__builtin_ia32_vpermilpd512:
4528 case X86::BI__builtin_ia32_vpermilps512:
4529 case X86::BI__builtin_ia32_pshufd:
4530 case X86::BI__builtin_ia32_pshufd256:
4531 case X86::BI__builtin_ia32_pshufd512:
4532 case X86::BI__builtin_ia32_pshufhw:
4533 case X86::BI__builtin_ia32_pshufhw256:
4534 case X86::BI__builtin_ia32_pshufhw512:
4535 case X86::BI__builtin_ia32_pshuflw:
4536 case X86::BI__builtin_ia32_pshuflw256:
4537 case X86::BI__builtin_ia32_pshuflw512:
4538 case X86::BI__builtin_ia32_vcvtps2ph:
4539 case X86::BI__builtin_ia32_vcvtps2ph_mask:
4540 case X86::BI__builtin_ia32_vcvtps2ph256:
4541 case X86::BI__builtin_ia32_vcvtps2ph256_mask:
4542 case X86::BI__builtin_ia32_vcvtps2ph512_mask:
4543 case X86::BI__builtin_ia32_rndscaleps_128_mask:
4544 case X86::BI__builtin_ia32_rndscalepd_128_mask:
4545 case X86::BI__builtin_ia32_rndscaleps_256_mask:
4546 case X86::BI__builtin_ia32_rndscalepd_256_mask:
4547 case X86::BI__builtin_ia32_rndscaleps_mask:
4548 case X86::BI__builtin_ia32_rndscalepd_mask:
4549 case X86::BI__builtin_ia32_rndscaleph_mask:
4550 case X86::BI__builtin_ia32_reducepd128_mask:
4551 case X86::BI__builtin_ia32_reducepd256_mask:
4552 case X86::BI__builtin_ia32_reducepd512_mask:
4553 case X86::BI__builtin_ia32_reduceps128_mask:
4554 case X86::BI__builtin_ia32_reduceps256_mask:
4555 case X86::BI__builtin_ia32_reduceps512_mask:
4556 case X86::BI__builtin_ia32_reduceph128_mask:
4557 case X86::BI__builtin_ia32_reduceph256_mask:
4558 case X86::BI__builtin_ia32_reduceph512_mask:
4559 case X86::BI__builtin_ia32_prold512:
4560 case X86::BI__builtin_ia32_prolq512:
4561 case X86::BI__builtin_ia32_prold128:
4562 case X86::BI__builtin_ia32_prold256:
4563 case X86::BI__builtin_ia32_prolq128:
4564 case X86::BI__builtin_ia32_prolq256:
4565 case X86::BI__builtin_ia32_prord512:
4566 case X86::BI__builtin_ia32_prorq512:
4567 case X86::BI__builtin_ia32_prord128:
4568 case X86::BI__builtin_ia32_prord256:
4569 case X86::BI__builtin_ia32_prorq128:
4570 case X86::BI__builtin_ia32_prorq256:
4571 case X86::BI__builtin_ia32_fpclasspd128_mask:
4572 case X86::BI__builtin_ia32_fpclasspd256_mask:
4573 case X86::BI__builtin_ia32_fpclassps128_mask:
4574 case X86::BI__builtin_ia32_fpclassps256_mask:
4575 case X86::BI__builtin_ia32_fpclassps512_mask:
4576 case X86::BI__builtin_ia32_fpclasspd512_mask:
4577 case X86::BI__builtin_ia32_fpclassph128_mask:
4578 case X86::BI__builtin_ia32_fpclassph256_mask:
4579 case X86::BI__builtin_ia32_fpclassph512_mask:
4580 case X86::BI__builtin_ia32_fpclasssd_mask:
4581 case X86::BI__builtin_ia32_fpclassss_mask:
4582 case X86::BI__builtin_ia32_fpclasssh_mask:
4583 case X86::BI__builtin_ia32_pslldqi128_byteshift:
4584 case X86::BI__builtin_ia32_pslldqi256_byteshift:
4585 case X86::BI__builtin_ia32_pslldqi512_byteshift:
4586 case X86::BI__builtin_ia32_psrldqi128_byteshift:
4587 case X86::BI__builtin_ia32_psrldqi256_byteshift:
4588 case X86::BI__builtin_ia32_psrldqi512_byteshift:
4589 case X86::BI__builtin_ia32_kshiftliqi:
4590 case X86::BI__builtin_ia32_kshiftlihi:
4591 case X86::BI__builtin_ia32_kshiftlisi:
4592 case X86::BI__builtin_ia32_kshiftlidi:
4593 case X86::BI__builtin_ia32_kshiftriqi:
4594 case X86::BI__builtin_ia32_kshiftrihi:
4595 case X86::BI__builtin_ia32_kshiftrisi:
4596 case X86::BI__builtin_ia32_kshiftridi:
4597 i = 1; l = 0; u = 255;
4598 break;
4599 case X86::BI__builtin_ia32_vperm2f128_pd256:
4600 case X86::BI__builtin_ia32_vperm2f128_ps256:
4601 case X86::BI__builtin_ia32_vperm2f128_si256:
4602 case X86::BI__builtin_ia32_permti256:
4603 case X86::BI__builtin_ia32_pblendw128:
4604 case X86::BI__builtin_ia32_pblendw256:
4605 case X86::BI__builtin_ia32_blendps256:
4606 case X86::BI__builtin_ia32_pblendd256:
4607 case X86::BI__builtin_ia32_palignr128:
4608 case X86::BI__builtin_ia32_palignr256:
4609 case X86::BI__builtin_ia32_palignr512:
4610 case X86::BI__builtin_ia32_alignq512:
4611 case X86::BI__builtin_ia32_alignd512:
4612 case X86::BI__builtin_ia32_alignd128:
4613 case X86::BI__builtin_ia32_alignd256:
4614 case X86::BI__builtin_ia32_alignq128:
4615 case X86::BI__builtin_ia32_alignq256:
4616 case X86::BI__builtin_ia32_vcomisd:
4617 case X86::BI__builtin_ia32_vcomiss:
4618 case X86::BI__builtin_ia32_shuf_f32x4:
4619 case X86::BI__builtin_ia32_shuf_f64x2:
4620 case X86::BI__builtin_ia32_shuf_i32x4:
4621 case X86::BI__builtin_ia32_shuf_i64x2:
4622 case X86::BI__builtin_ia32_shufpd512:
4623 case X86::BI__builtin_ia32_shufps:
4624 case X86::BI__builtin_ia32_shufps256:
4625 case X86::BI__builtin_ia32_shufps512:
4626 case X86::BI__builtin_ia32_dbpsadbw128:
4627 case X86::BI__builtin_ia32_dbpsadbw256:
4628 case X86::BI__builtin_ia32_dbpsadbw512:
4629 case X86::BI__builtin_ia32_vpshldd128:
4630 case X86::BI__builtin_ia32_vpshldd256:
4631 case X86::BI__builtin_ia32_vpshldd512:
4632 case X86::BI__builtin_ia32_vpshldq128:
4633 case X86::BI__builtin_ia32_vpshldq256:
4634 case X86::BI__builtin_ia32_vpshldq512:
4635 case X86::BI__builtin_ia32_vpshldw128:
4636 case X86::BI__builtin_ia32_vpshldw256:
4637 case X86::BI__builtin_ia32_vpshldw512:
4638 case X86::BI__builtin_ia32_vpshrdd128:
4639 case X86::BI__builtin_ia32_vpshrdd256:
4640 case X86::BI__builtin_ia32_vpshrdd512:
4641 case X86::BI__builtin_ia32_vpshrdq128:
4642 case X86::BI__builtin_ia32_vpshrdq256:
4643 case X86::BI__builtin_ia32_vpshrdq512:
4644 case X86::BI__builtin_ia32_vpshrdw128:
4645 case X86::BI__builtin_ia32_vpshrdw256:
4646 case X86::BI__builtin_ia32_vpshrdw512:
4647 i = 2; l = 0; u = 255;
4648 break;
4649 case X86::BI__builtin_ia32_fixupimmpd512_mask:
4650 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
4651 case X86::BI__builtin_ia32_fixupimmps512_mask:
4652 case X86::BI__builtin_ia32_fixupimmps512_maskz:
4653 case X86::BI__builtin_ia32_fixupimmsd_mask:
4654 case X86::BI__builtin_ia32_fixupimmsd_maskz:
4655 case X86::BI__builtin_ia32_fixupimmss_mask:
4656 case X86::BI__builtin_ia32_fixupimmss_maskz:
4657 case X86::BI__builtin_ia32_fixupimmpd128_mask:
4658 case X86::BI__builtin_ia32_fixupimmpd128_maskz:
4659 case X86::BI__builtin_ia32_fixupimmpd256_mask:
4660 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
4661 case X86::BI__builtin_ia32_fixupimmps128_mask:
4662 case X86::BI__builtin_ia32_fixupimmps128_maskz:
4663 case X86::BI__builtin_ia32_fixupimmps256_mask:
4664 case X86::BI__builtin_ia32_fixupimmps256_maskz:
4665 case X86::BI__builtin_ia32_pternlogd512_mask:
4666 case X86::BI__builtin_ia32_pternlogd512_maskz:
4667 case X86::BI__builtin_ia32_pternlogq512_mask:
4668 case X86::BI__builtin_ia32_pternlogq512_maskz:
4669 case X86::BI__builtin_ia32_pternlogd128_mask:
4670 case X86::BI__builtin_ia32_pternlogd128_maskz:
4671 case X86::BI__builtin_ia32_pternlogd256_mask:
4672 case X86::BI__builtin_ia32_pternlogd256_maskz:
4673 case X86::BI__builtin_ia32_pternlogq128_mask:
4674 case X86::BI__builtin_ia32_pternlogq128_maskz:
4675 case X86::BI__builtin_ia32_pternlogq256_mask:
4676 case X86::BI__builtin_ia32_pternlogq256_maskz:
4677 i = 3; l = 0; u = 255;
4678 break;
4679 case X86::BI__builtin_ia32_gatherpfdpd:
4680 case X86::BI__builtin_ia32_gatherpfdps:
4681 case X86::BI__builtin_ia32_gatherpfqpd:
4682 case X86::BI__builtin_ia32_gatherpfqps:
4683 case X86::BI__builtin_ia32_scatterpfdpd:
4684 case X86::BI__builtin_ia32_scatterpfdps:
4685 case X86::BI__builtin_ia32_scatterpfqpd:
4686 case X86::BI__builtin_ia32_scatterpfqps:
4687 i = 4; l = 2; u = 3;
4688 break;
4689 case X86::BI__builtin_ia32_reducesd_mask:
4690 case X86::BI__builtin_ia32_reducess_mask:
4691 case X86::BI__builtin_ia32_rndscalesd_round_mask:
4692 case X86::BI__builtin_ia32_rndscaless_round_mask:
4693 case X86::BI__builtin_ia32_rndscalesh_round_mask:
4694 case X86::BI__builtin_ia32_reducesh_mask:
4695 i = 4; l = 0; u = 255;
4696 break;
4697 }
4698
4699 // Note that we don't force a hard error on the range check here, allowing
4700 // template-generated or macro-generated dead code to potentially have out-of-
4701 // range values. These need to code generate, but don't need to necessarily
4702 // make any sense. We use a warning that defaults to an error.
4703 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
4704}
4705
4706/// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
4707/// parameter with the FormatAttr's correct format_idx and firstDataArg.
4708/// Returns true when the format fits the function and the FormatStringInfo has
4709/// been populated.
4710bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4711 FormatStringInfo *FSI) {
4712 FSI->HasVAListArg = Format->getFirstArg() == 0;
4713 FSI->FormatIdx = Format->getFormatIdx() - 1;
4714 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4715
4716 // The way the format attribute works in GCC, the implicit this argument
4717 // of member functions is counted. However, it doesn't appear in our own
4718 // lists, so decrement format_idx in that case.
4719 if (IsCXXMember) {
4720 if(FSI->FormatIdx == 0)
4721 return false;
4722 --FSI->FormatIdx;
4723 if (FSI->FirstDataArg != 0)
4724 --FSI->FirstDataArg;
4725 }
4726 return true;
4727}
4728
4729/// Checks if a the given expression evaluates to null.
4730///
4731/// Returns true if the value evaluates to null.
4732static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4733 // If the expression has non-null type, it doesn't evaluate to null.
4734 if (auto nullability
4735 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4736 if (*nullability == NullabilityKind::NonNull)
4737 return false;
4738 }
4739
4740 // As a special case, transparent unions initialized with zero are
4741 // considered null for the purposes of the nonnull attribute.
4742 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
4743 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
4744 if (const CompoundLiteralExpr *CLE =
4745 dyn_cast<CompoundLiteralExpr>(Expr))
4746 if (const InitListExpr *ILE =
4747 dyn_cast<InitListExpr>(CLE->getInitializer()))
4748 Expr = ILE->getInit(0);
4749 }
4750
4751 bool Result;
4752 return (!Expr->isValueDependent() &&
4753 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
4754 !Result);
4755}
4756
4757static void CheckNonNullArgument(Sema &S,
4758 const Expr *ArgExpr,
4759 SourceLocation CallSiteLoc) {
4760 if (CheckNonNullExpr(S, ArgExpr))
4761 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
4762 S.PDiag(diag::warn_null_arg)
4763 << ArgExpr->getSourceRange());
4764}
4765
4766bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4767 FormatStringInfo FSI;
4768 if ((GetFormatStringType(Format) == FST_NSString) &&
4769 getFormatStringInfo(Format, false, &FSI)) {
4770 Idx = FSI.FormatIdx;
4771 return true;
4772 }
4773 return false;
4774}
4775
4776/// Diagnose use of %s directive in an NSString which is being passed
4777/// as formatting string to formatting method.
4778static void
4779DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
4780 const NamedDecl *FDecl,
4781 Expr **Args,
4782 unsigned NumArgs) {
4783 unsigned Idx = 0;
4784 bool Format = false;
4785 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
4786 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
4787 Idx = 2;
4788 Format = true;
4789 }
4790 else
4791 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4792 if (S.GetFormatNSStringIdx(I, Idx)) {
4793 Format = true;
4794 break;
4795 }
4796 }
4797 if (!Format || NumArgs <= Idx)
4798 return;
4799 const Expr *FormatExpr = Args[Idx];
4800 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
4801 FormatExpr = CSCE->getSubExpr();
4802 const StringLiteral *FormatString;
4803 if (const ObjCStringLiteral *OSL =
4804 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
4805 FormatString = OSL->getString();
4806 else
4807 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
4808 if (!FormatString)
4809 return;
4810 if (S.FormatStringHasSArg(FormatString)) {
4811 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
4812 << "%s" << 1 << 1;
4813 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
4814 << FDecl->getDeclName();
4815 }
4816}
4817
4818/// Determine whether the given type has a non-null nullability annotation.
4819static bool isNonNullType(ASTContext &ctx, QualType type) {
4820 if (auto nullability = type->getNullability(ctx))
4821 return *nullability == NullabilityKind::NonNull;
4822
4823 return false;
4824}
4825
/// Warn about arguments that are known to evaluate to null but are passed
/// in positions marked nonnull — via the function's `nonnull` attribute,
/// `nonnull`/`_Nonnull` on individual parameters, or nullability on the
/// prototype's parameter types. FDecl may be null if only a prototype is
/// available, and vice versa.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype")(static_cast<void> (0));

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  // NonNullArgs is allocated lazily: it stays empty until some parameter is
  // actually known to be nonnull.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull. Diagnose immediately
        // and return — no per-index bookkeeping is needed.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      // Record each attribute-listed index that is in range for this call.
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      // A parameter is nonnull if it carries the attribute directly or its
      // type has a _Nonnull annotation.
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    // (If FDecl is null here, the assert above guarantees Proto is set, so
    // this branch is not entered with a null FDecl.)
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments. NonNullArgs may be shorter than Args if
  // it was never resized; iterating to its own size handles that.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}
4922
4923/// Warn if a pointer or reference argument passed to a function points to an
4924/// object that is less aligned than the parameter. This can happen when
4925/// creating a typedef with a lower alignment than the original type and then
4926/// calling functions defined in terms of the original type.
4927void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
4928 StringRef ParamName, QualType ArgTy,
4929 QualType ParamTy) {
4930
4931 // If a function accepts a pointer or reference type
4932 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
4933 return;
4934
4935 // If the parameter is a pointer type, get the pointee type for the
4936 // argument too. If the parameter is a reference type, don't try to get
4937 // the pointee type for the argument.
4938 if (ParamTy->isPointerType())
4939 ArgTy = ArgTy->getPointeeType();
4940
4941 // Remove reference or pointer
4942 ParamTy = ParamTy->getPointeeType();
4943
4944 // Find expected alignment, and the actual alignment of the passed object.
4945 // getTypeAlignInChars requires complete types
4946 if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
4947 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
4948 ArgTy->isUndeducedType())
4949 return;
4950
4951 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
4952 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
4953
4954 // If the argument is less aligned than the parameter, there is a
4955 // potential alignment issue.
4956 if (ArgAlign < ParamAlign)
4957 Diag(Loc, diag::warn_param_mismatched_alignment)
4958 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
4959 << ParamName << FDecl;
4960}
4961
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, argument
/// alignment, alloc_align values, and diagnose_if attributes.
/// Either FDecl or Proto (or both) may be null depending on how the call
/// site was resolved.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      // CheckFormatArguments marks in CheckedVarArgs the variadic arguments
      // it already validated, so the POD check below can skip them.
      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above. __noop is exempt: it accepts and ignores anything.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Parameter count comes from the prototype if available, otherwise from
    // the function or Objective-C method declaration.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    // Every argument past the declared parameters is a variadic argument.
    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  // Validate a constant alloc_align argument: it must be a power of two and
  // not exceed the maximum supported alignment.
  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    // NOTE(review): this indexes Args with the attribute's AST index without
    // a bounds check — presumably upstream guarantees enough arguments here;
    // verify for malformed calls with too few arguments.
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}
5062
5063/// CheckConstructorCall - Check a constructor call for correctness and safety
5064/// properties not enforced by the C type system.
5065void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
5066 ArrayRef<const Expr *> Args,
5067 const FunctionProtoType *Proto,
5068 SourceLocation Loc) {
5069 VariadicCallType CallType =
5070 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
5071
5072 auto *Ctor = cast<CXXConstructorDecl>(FDecl);
5073 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
5074 Context.getPointerType(Ctor->getThisObjectType()));
5075
5076 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
5077 Loc, SourceRange(), CallType);
5078}
5079
/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  // Member operator calls are modeled with the object as the first argument;
  // member calls carry an implicit object argument instead.
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction)
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType())(static_cast<void> (0));
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    // Validate the alignment of the object the method is invoked on.
    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  // Run the checks shared by all call kinds (format strings, nonnull,
  // variadic arguments, alignment, diagnose_if).
  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  CheckTCBEnforcement(TheCall, FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  // Handle memory setting and copying functions; kind 0 means the callee is
  // not a recognized memory function.
  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    return false;
/* case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;*/
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}
5161
5162bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
5163 ArrayRef<const Expr *> Args) {
5164 VariadicCallType CallType =
5165 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
5166
5167 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
5168 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
5169 CallType);
5170
5171 return false;
5172}
5173
5174bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
5175 const FunctionProtoType *Proto) {
5176 QualType Ty;
5177 if (const auto *V = dyn_cast<VarDecl>(NDecl))
5178 Ty = V->getType().getNonReferenceType();
5179 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
5180 Ty = F->getType().getNonReferenceType();
5181 else
5182 return false;
5183
5184 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
5185 !Ty->isFunctionProtoType())
5186 return false;
5187
5188 VariadicCallType CallType;
5189 if (!Proto || !Proto->isVariadic()) {
5190 CallType = VariadicDoesNotApply;
5191 } else if (Ty->isBlockPointerType()) {
5192 CallType = VariadicBlock;
5193 } else { // Ty->isFunctionPointerType()
5194 CallType = VariadicFunction;
5195 }
5196
5197 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
5198 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5199 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5200 TheCall->getCallee()->getSourceRange(), CallType);
5201
5202 return false;
5203}
5204
5205/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
5206/// such as function pointers returned from functions.
5207bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
5208 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
5209 TheCall->getCallee());
5210 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
5211 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
5212 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
5213 TheCall->getCallee()->getSourceRange(), CallType);
5214
5215 return false;
5216}
5217
5218static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
5219 if (!llvm::isValidAtomicOrderingCABI(Ordering))
5220 return false;
5221
5222 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
5223 switch (Op) {
5224 case AtomicExpr::AO__c11_atomic_init:
5225 case AtomicExpr::AO__opencl_atomic_init:
5226 llvm_unreachable("There is no ordering argument for an init")__builtin_unreachable();
5227
5228 case AtomicExpr::AO__c11_atomic_load:
5229 case AtomicExpr::AO__opencl_atomic_load:
5230 case AtomicExpr::AO__atomic_load_n:
5231 case AtomicExpr::AO__atomic_load:
5232 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
5233 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5234
5235 case AtomicExpr::AO__c11_atomic_store:
5236 case AtomicExpr::AO__opencl_atomic_store:
5237 case AtomicExpr::AO__atomic_store:
5238 case AtomicExpr::AO__atomic_store_n:
5239 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
5240 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
5241 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
5242
5243 default:
5244 return true;
5245 }
5246}
5247
5248ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
5249 AtomicExpr::AtomicOp Op) {
5250 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
5251 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5252 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
5253 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
5254 DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
5255 Op);
5256}
5257
5258ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
5259 SourceLocation RParenLoc, MultiExprArg Args,
5260 AtomicExpr::AtomicOp Op,
5261 AtomicArgumentOrder ArgOrder) {
5262 // All the non-OpenCL operations take one of the following forms.
5263 // The OpenCL operations take the __c11 forms with one extra argument for
5264 // synchronization scope.
5265 enum {
5266 // C __c11_atomic_init(A *, C)
5267 Init,
5268
5269 // C __c11_atomic_load(A *, int)
5270 Load,
5271
5272 // void __atomic_load(A *, CP, int)
5273 LoadCopy,
5274
5275 // void __atomic_store(A *, CP, int)
5276 Copy,
5277
5278 // C __c11_atomic_add(A *, M, int)
5279 Arithmetic,
5280
5281 // C __atomic_exchange_n(A *, CP, int)
5282 Xchg,
5283
5284 // void __atomic_exchange(A *, C *, CP, int)
5285 GNUXchg,
5286
5287 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
5288 C11CmpXchg,
5289
5290 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
5291 GNUCmpXchg
5292 } Form = Init;
5293
5294 const unsigned NumForm = GNUCmpXchg + 1;
5295 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
5296 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
5297 // where:
5298 // C is an appropriate type,
5299 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
5300 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
5301 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
5302 // the int parameters are for orderings.
5303
5304 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
5305 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
5306 "need to update code for modified forms");
5307 static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
5308 AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
5309 AtomicExpr::AO__atomic_load,
5310 "need to update code for modified C11 atomics");
5311 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
5312 Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
5313 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
5314 Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
5315 IsOpenCL;
5316 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
5317 Op == AtomicExpr::AO__atomic_store_n ||
5318 Op == AtomicExpr::AO__atomic_exchange_n ||
5319 Op == AtomicExpr::AO__atomic_compare_exchange_n;
5320 bool IsAddSub = false;
5321
5322 switch (Op) {
5323 case AtomicExpr::AO__c11_atomic_init:
5324 case AtomicExpr::AO__opencl_atomic_init:
5325 Form = Init;
5326 break;
5327
5328 case AtomicExpr::AO__c11_atomic_load:
5329 case AtomicExpr::AO__opencl_atomic_load:
5330 case AtomicExpr::AO__atomic_load_n:
5331 Form = Load;
5332 break;
5333
5334 case AtomicExpr::AO__atomic_load:
5335 Form = LoadCopy;
5336 break;
5337
5338 case AtomicExpr::AO__c11_atomic_store:
5339 case AtomicExpr::AO__opencl_atomic_store:
5340 case AtomicExpr::AO__atomic_store:
5341 case AtomicExpr::AO__atomic_store_n:
5342 Form = Copy;
5343 break;
5344
5345 case AtomicExpr::AO__c11_atomic_fetch_add:
5346 case AtomicExpr::AO__c11_atomic_fetch_sub:
5347 case AtomicExpr::AO__opencl_atomic_fetch_add:
5348 case AtomicExpr::AO__opencl_atomic_fetch_sub:
5349 case AtomicExpr::AO__atomic_fetch_add:
5350 case AtomicExpr::AO__atomic_fetch_sub:
5351 case AtomicExpr::AO__atomic_add_fetch:
5352 case AtomicExpr::AO__atomic_sub_fetch:
5353 IsAddSub = true;
5354 Form = Arithmetic;
5355 break;
5356 case AtomicExpr::AO__c11_atomic_fetch_and:
5357 case AtomicExpr::AO__c11_atomic_fetch_or:
5358 case AtomicExpr::AO__c11_atomic_fetch_xor:
5359 case AtomicExpr::AO__opencl_atomic_fetch_and:
5360 case AtomicExpr::AO__opencl_atomic_fetch_or:
5361 case AtomicExpr::AO__opencl_atomic_fetch_xor:
5362 case AtomicExpr::AO__atomic_fetch_and:
5363 case AtomicExpr::AO__atomic_fetch_or:
5364 case AtomicExpr::AO__atomic_fetch_xor:
5365 case AtomicExpr::AO__atomic_fetch_nand:
5366 case AtomicExpr::AO__atomic_and_fetch:
5367 case AtomicExpr::AO__atomic_or_fetch:
5368 case AtomicExpr::AO__atomic_xor_fetch:
5369 case AtomicExpr::AO__atomic_nand_fetch:
5370 Form = Arithmetic;
5371 break;
5372 case AtomicExpr::AO__c11_atomic_fetch_min:
5373 case AtomicExpr::AO__c11_atomic_fetch_max:
5374 case AtomicExpr::AO__opencl_atomic_fetch_min:
5375 case AtomicExpr::AO__opencl_atomic_fetch_max:
5376 case AtomicExpr::AO__atomic_min_fetch:
5377 case AtomicExpr::AO__atomic_max_fetch:
5378 case AtomicExpr::AO__atomic_fetch_min:
5379 case AtomicExpr::AO__atomic_fetch_max:
5380 Form = Arithmetic;
5381 break;
5382
5383 case AtomicExpr::AO__c11_atomic_exchange:
5384 case AtomicExpr::AO__opencl_atomic_exchange:
5385 case AtomicExpr::AO__atomic_exchange_n:
5386 Form = Xchg;
5387 break;
5388
5389 case AtomicExpr::AO__atomic_exchange:
5390 Form = GNUXchg;
5391 break;
5392
5393 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
5394 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
5395 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
5396 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
5397 Form = C11CmpXchg;
5398 break;
5399
5400 case AtomicExpr::AO__atomic_compare_exchange:
5401 case AtomicExpr::AO__atomic_compare_exchange_n:
5402 Form = GNUCmpXchg;
5403 break;
5404 }
5405
5406 unsigned AdjustedNumArgs = NumArgs[Form];
5407 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
5408 ++AdjustedNumArgs;
5409 // Check we have the right number of arguments.
5410 if (Args.size() < AdjustedNumArgs) {
5411 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
5412 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
5413 << ExprRange;
5414 return ExprError();
5415 } else if (Args.size() > AdjustedNumArgs) {
5416 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
5417 diag::err_typecheck_call_too_many_args)
5418 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
5419 << ExprRange;
5420 return ExprError();
5421 }
5422
5423 // Inspect the first argument of the atomic operation.
5424 Expr *Ptr = Args[0];
5425 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
5426 if (ConvertedPtr.isInvalid())
5427 return ExprError();
5428
5429 Ptr = ConvertedPtr.get();
5430 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
5431 if (!pointerType) {
5432 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
5433 << Ptr->getType() << Ptr->getSourceRange();
5434 return ExprError();
5435 }
5436
5437 // For a __c11 builtin, this should be a pointer to an _Atomic type.
5438 QualType AtomTy = pointerType->getPointeeType(); // 'A'
5439 QualType ValType = AtomTy; // 'C'
5440 if (IsC11) {
5441 if (!AtomTy->isAtomicType()) {
5442 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
5443 << Ptr->getType() << Ptr->getSourceRange();
5444 return ExprError();
5445 }
5446 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
5447 AtomTy.getAddressSpace() == LangAS::opencl_constant) {
5448 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
5449 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
5450 << Ptr->getSourceRange();
5451 return ExprError();
5452 }
5453 ValType = AtomTy->castAs<AtomicType>()->getValueType();
5454 } else if (Form != Load && Form != LoadCopy) {
5455 if (ValType.isConstQualified()) {
5456 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
5457 << Ptr->getType() << Ptr->getSourceRange();
5458 return ExprError();
5459 }
5460 }
5461
5462 // For an arithmetic operation, the implied arithmetic must be well-formed.
5463 if (Form == Arithmetic) {
5464 // gcc does not enforce these rules for GNU atomics, but we do so for
5465 // sanity.
5466 auto IsAllowedValueType = [&](QualType ValType) {
5467 if (ValType->isIntegerType())
5468 return true;
5469 if (ValType->isPointerType())
5470 return true;
5471 if (!ValType->isFloatingType())
5472 return false;
5473 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
5474 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
5475 &Context.getTargetInfo().getLongDoubleFormat() ==
5476 &llvm::APFloat::x87DoubleExtended())
5477 return false;
5478 return true;
5479 };
5480 if (IsAddSub && !IsAllowedValueType(ValType)) {
5481 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
5482 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5483 return ExprError();
5484 }
5485 if (!IsAddSub && !ValType->isIntegerType()) {
5486 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
5487 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5488 return ExprError();
5489 }
5490 if (IsC11 && ValType->isPointerType() &&
5491 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
5492 diag::err_incomplete_type)) {
5493 return ExprError();
5494 }
5495 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
5496 // For __atomic_*_n operations, the value type must be a scalar integral or
5497 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
5498 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
5499 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
5500 return ExprError();
5501 }
5502
5503 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
5504 !AtomTy->isScalarType()) {
5505 // For GNU atomics, require a trivially-copyable type. This is not part of
5506 // the GNU atomics specification, but we enforce it for sanity.
5507 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
5508 << Ptr->getType() << Ptr->getSourceRange();
5509 return ExprError();
5510 }
5511
5512 switch (ValType.getObjCLifetime()) {
5513 case Qualifiers::OCL_None:
5514 case Qualifiers::OCL_ExplicitNone:
5515 // okay
5516 break;
5517
5518 case Qualifiers::OCL_Weak:
5519 case Qualifiers::OCL_Strong:
5520 case Qualifiers::OCL_Autoreleasing:
5521 // FIXME: Can this happen? By this point, ValType should be known
5522 // to be trivially copyable.
5523 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
5524 << ValType << Ptr->getSourceRange();
5525 return ExprError();
5526 }
5527
5528 // All atomic operations have an overload which takes a pointer to a volatile
5529 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
5530 // into the result or the other operands. Similarly atomic_load takes a
5531 // pointer to a const 'A'.
5532 ValType.removeLocalVolatile();
5533 ValType.removeLocalConst();
5534 QualType ResultType = ValType;
5535 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
5536 Form == Init)
5537 ResultType = Context.VoidTy;
5538 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
5539 ResultType = Context.BoolTy;
5540
5541 // The type of a parameter passed 'by value'. In the GNU atomics, such
5542 // arguments are actually passed as pointers.
5543 QualType ByValType = ValType; // 'CP'
5544 bool IsPassedByAddress = false;
5545 if (!IsC11 && !IsN) {
5546 ByValType = Ptr->getType();
5547 IsPassedByAddress = true;
5548 }
5549
5550 SmallVector<Expr *, 5> APIOrderedArgs;
5551 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
5552 APIOrderedArgs.push_back(Args[0]);
5553 switch (Form) {
5554 case Init:
5555 case Load:
5556 APIOrderedArgs.push_back(Args[1]); // Val1/Order
5557 break;
5558 case LoadCopy:
5559 case Copy:
5560 case Arithmetic:
5561 case Xchg:
5562 APIOrderedArgs.push_back(Args[2]); // Val1
5563 APIOrderedArgs.push_back(Args[1]); // Order
5564 break;
5565 case GNUXchg:
5566 APIOrderedArgs.push_back(Args[2]); // Val1
5567 APIOrderedArgs.push_back(Args[3]); // Val2
5568 APIOrderedArgs.push_back(Args[1]); // Order
5569 break;
5570 case C11CmpXchg:
5571 APIOrderedArgs.push_back(Args[2]); // Val1
5572 APIOrderedArgs.push_back(Args[4]); // Val2
5573 APIOrderedArgs.push_back(Args[1]); // Order
5574 APIOrderedArgs.push_back(Args[3]); // OrderFail
5575 break;
5576 case GNUCmpXchg:
5577 APIOrderedArgs.push_back(Args[2]); // Val1
5578 APIOrderedArgs.push_back(Args[4]); // Val2
5579 APIOrderedArgs.push_back(Args[5]); // Weak
5580 APIOrderedArgs.push_back(Args[1]); // Order
5581 APIOrderedArgs.push_back(Args[3]); // OrderFail
5582 break;
5583 }
5584 } else
5585 APIOrderedArgs.append(Args.begin(), Args.end());
5586
5587 // The first argument's non-CV pointer type is used to deduce the type of
5588 // subsequent arguments, except for:
5589 // - weak flag (always converted to bool)
5590 // - memory order (always converted to int)
5591 // - scope (always converted to int)
5592 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
5593 QualType Ty;
5594 if (i < NumVals[Form] + 1) {
5595 switch (i) {
5596 case 0:
5597 // The first argument is always a pointer. It has a fixed type.
5598 // It is always dereferenced, a nullptr is undefined.
5599 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
5600 // Nothing else to do: we already know all we want about this pointer.
5601 continue;
5602 case 1:
5603 // The second argument is the non-atomic operand. For arithmetic, this
5604 // is always passed by value, and for a compare_exchange it is always
5605 // passed by address. For the rest, GNU uses by-address and C11 uses
5606 // by-value.
5607 assert(Form != Load)(static_cast<void> (0));
5608 if (Form == Arithmetic && ValType->isPointerType())
5609 Ty = Context.getPointerDiffType();
5610 else if (Form == Init || Form == Arithmetic)
5611 Ty = ValType;
5612 else if (Form == Copy || Form == Xchg) {
5613 if (IsPassedByAddress) {
5614 // The value pointer is always dereferenced, a nullptr is undefined.
5615 CheckNonNullArgument(*this, APIOrderedArgs[i],
5616 ExprRange.getBegin());
5617 }
5618 Ty = ByValType;
5619 } else {
5620 Expr *ValArg = APIOrderedArgs[i];
5621 // The value pointer is always dereferenced, a nullptr is undefined.
5622 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
5623 LangAS AS = LangAS::Default;
5624 // Keep address space of non-atomic pointer type.
5625 if (const PointerType *PtrTy =
5626 ValArg->getType()->getAs<PointerType>()) {
5627 AS = PtrTy->getPointeeType().getAddressSpace();
5628 }
5629 Ty = Context.getPointerType(
5630 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
5631 }
5632 break;
5633 case 2:
5634 // The third argument to compare_exchange / GNU exchange is the desired
5635 // value, either by-value (for the C11 and *_n variant) or as a pointer.
5636 if (IsPassedByAddress)
5637 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
5638 Ty = ByValType;
5639 break;
5640 case 3:
5641 // The fourth argument to GNU compare_exchange is a 'weak' flag.
5642 Ty = Context.BoolTy;
5643 break;
5644 }
5645 } else {
5646 // The order(s) and scope are always converted to int.
5647 Ty = Context.IntTy;
5648 }
5649
5650 InitializedEntity Entity =
5651 InitializedEntity::InitializeParameter(Context, Ty, false);
5652 ExprResult Arg = APIOrderedArgs[i];
5653 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5654 if (Arg.isInvalid())
5655 return true;
5656 APIOrderedArgs[i] = Arg.get();
5657 }
5658
5659 // Permute the arguments into a 'consistent' order.
5660 SmallVector<Expr*, 5> SubExprs;
5661 SubExprs.push_back(Ptr);
5662 switch (Form) {
5663 case Init:
5664 // Note, AtomicExpr::getVal1() has a special case for this atomic.
5665 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5666 break;
5667 case Load:
5668 SubExprs.push_back(APIOrderedArgs[1]); // Order
5669 break;
5670 case LoadCopy:
5671 case Copy:
5672 case Arithmetic:
5673 case Xchg:
5674 SubExprs.push_back(APIOrderedArgs[2]); // Order
5675 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5676 break;
5677 case GNUXchg:
5678 // Note, AtomicExpr::getVal2() has a special case for this atomic.
5679 SubExprs.push_back(APIOrderedArgs[3]); // Order
5680 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5681 SubExprs.push_back(APIOrderedArgs[2]); // Val2
5682 break;
5683 case C11CmpXchg:
5684 SubExprs.push_back(APIOrderedArgs[3]); // Order
5685 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5686 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
5687 SubExprs.push_back(APIOrderedArgs[2]); // Val2
5688 break;
5689 case GNUCmpXchg:
5690 SubExprs.push_back(APIOrderedArgs[4]); // Order
5691 SubExprs.push_back(APIOrderedArgs[1]); // Val1
5692 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
5693 SubExprs.push_back(APIOrderedArgs[2]); // Val2
5694 SubExprs.push_back(APIOrderedArgs[3]); // Weak
5695 break;
5696 }
5697
5698 if (SubExprs.size() >= 2 && Form != Init) {
5699 if (Optional<llvm::APSInt> Result =
5700 SubExprs[1]->getIntegerConstantExpr(Context))
5701 if (!isValidOrderingForOp(Result->getSExtValue(), Op))
5702 Diag(SubExprs[1]->getBeginLoc(),
5703 diag::warn_atomic_op_has_invalid_memory_order)
5704 << SubExprs[1]->getSourceRange();
5705 }
5706
5707 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
5708 auto *Scope = Args[Args.size() - 1];
5709 if (Optional<llvm::APSInt> Result =
5710 Scope->getIntegerConstantExpr(Context)) {
5711 if (!ScopeModel->isValid(Result->getZExtValue()))
5712 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
5713 << Scope->getSourceRange();
5714 }
5715 SubExprs.push_back(Scope);
5716 }
5717
5718 AtomicExpr *AE = new (Context)
5719 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
5720
5721 if ((Op == AtomicExpr::AO__c11_atomic_load ||
5722 Op == AtomicExpr::AO__c11_atomic_store ||
5723 Op == AtomicExpr::AO__opencl_atomic_load ||
5724 Op == AtomicExpr::AO__opencl_atomic_store ) &&
5725 Context.AtomicUsesUnsupportedLibcall(AE))
5726 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
5727 << ((Op == AtomicExpr::AO__c11_atomic_load ||
5728 Op == AtomicExpr::AO__opencl_atomic_load)
5729 ? 0
5730 : 1);
5731
5732 if (ValType->isExtIntType()) {
5733 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
5734 return ExprError();
5735 }
5736
5737 return AE;
5738}
5739
5740/// checkBuiltinArgument - Given a call to a builtin function, perform
5741/// normal type-checking on the given argument, updating the call in
5742/// place. This is useful when a builtin function requires custom
5743/// type-checking for some of its arguments but not necessarily all of
5744/// them.
5745///
5746/// Returns true on error.
5747static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
5748 FunctionDecl *Fn = E->getDirectCallee();
5749 assert(Fn && "builtin call without direct callee!")(static_cast<void> (0));
5750
5751 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
5752 InitializedEntity Entity =
5753 InitializedEntity::InitializeParameter(S.Context, Param);
5754
5755 ExprResult Arg = E->getArg(0);
5756 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
5757 if (Arg.isInvalid())
5758 return true;
5759
5760 E->setArg(ArgIndex, Arg.get());
5761 return false;
5762}
5763
5764/// We have a call to a function like __sync_fetch_and_add, which is an
5765/// overloaded function based on the pointer type of its first argument.
5766/// The main BuildCallExpr routines have already promoted the types of
5767/// arguments because all of these calls are prototyped as void(...).
5768///
5769/// This function goes through and does final semantic checking for these
5770/// builtins, as well as generating any warnings.
5771ExprResult
5772Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
5773 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
5774 Expr *Callee = TheCall->getCallee();
5775 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
5776 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5777
5778 // Ensure that we have at least one argument to do type inference from.
5779 if (TheCall->getNumArgs() < 1) {
5780 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
5781 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
5782 return ExprError();
5783 }
5784
5785 // Inspect the first argument of the atomic builtin. This should always be
5786 // a pointer type, whose element is an integral scalar or pointer type.
5787 // Because it is a pointer type, we don't have to worry about any implicit
5788 // casts here.
5789 // FIXME: We don't allow floating point scalars as input.
5790 Expr *FirstArg = TheCall->getArg(0);
5791 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
5792 if (FirstArgResult.isInvalid())
5793 return ExprError();
5794 FirstArg = FirstArgResult.get();
5795 TheCall->setArg(0, FirstArg);
5796
5797 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
5798 if (!pointerType) {
5799 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
5800 << FirstArg->getType() << FirstArg->getSourceRange();
5801 return ExprError();
5802 }
5803
5804 QualType ValType = pointerType->getPointeeType();
5805 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5806 !ValType->isBlockPointerType()) {
5807 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
5808 << FirstArg->getType() << FirstArg->getSourceRange();
5809 return ExprError();
5810 }
5811
5812 if (ValType.isConstQualified()) {
5813 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
5814 << FirstArg->getType() << FirstArg->getSourceRange();
5815 return ExprError();
5816 }
5817
5818 switch (ValType.getObjCLifetime()) {
5819 case Qualifiers::OCL_None:
5820 case Qualifiers::OCL_ExplicitNone:
5821 // okay
5822 break;
5823
5824 case Qualifiers::OCL_Weak:
5825 case Qualifiers::OCL_Strong:
5826 case Qualifiers::OCL_Autoreleasing:
5827 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
5828 << ValType << FirstArg->getSourceRange();
5829 return ExprError();
5830 }
5831
5832 // Strip any qualifiers off ValType.
5833 ValType = ValType.getUnqualifiedType();
5834
5835 // The majority of builtins return a value, but a few have special return
5836 // types, so allow them to override appropriately below.
5837 QualType ResultType = ValType;
5838
5839 // We need to figure out which concrete builtin this maps onto. For example,
5840 // __sync_fetch_and_add with a 2 byte object turns into
5841 // __sync_fetch_and_add_2.
5842#define BUILTIN_ROW(x) \
5843 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
5844 Builtin::BI##x##_8, Builtin::BI##x##_16 }
5845
5846 static const unsigned BuiltinIndices[][5] = {
5847 BUILTIN_ROW(__sync_fetch_and_add),
5848 BUILTIN_ROW(__sync_fetch_and_sub),
5849 BUILTIN_ROW(__sync_fetch_and_or),
5850 BUILTIN_ROW(__sync_fetch_and_and),
5851 BUILTIN_ROW(__sync_fetch_and_xor),
5852 BUILTIN_ROW(__sync_fetch_and_nand),
5853
5854 BUILTIN_ROW(__sync_add_and_fetch),
5855 BUILTIN_ROW(__sync_sub_and_fetch),
5856 BUILTIN_ROW(__sync_and_and_fetch),
5857 BUILTIN_ROW(__sync_or_and_fetch),
5858 BUILTIN_ROW(__sync_xor_and_fetch),
5859 BUILTIN_ROW(__sync_nand_and_fetch),
5860
5861 BUILTIN_ROW(__sync_val_compare_and_swap),
5862 BUILTIN_ROW(__sync_bool_compare_and_swap),
5863 BUILTIN_ROW(__sync_lock_test_and_set),
5864 BUILTIN_ROW(__sync_lock_release),
5865 BUILTIN_ROW(__sync_swap)
5866 };
5867#undef BUILTIN_ROW
5868
5869 // Determine the index of the size.
5870 unsigned SizeIndex;
5871 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
5872 case 1: SizeIndex = 0; break;
5873 case 2: SizeIndex = 1; break;
5874 case 4: SizeIndex = 2; break;
5875 case 8: SizeIndex = 3; break;
5876 case 16: SizeIndex = 4; break;
5877 default:
5878 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
5879 << FirstArg->getType() << FirstArg->getSourceRange();
5880 return ExprError();
5881 }
5882
5883 // Each of these builtins has one pointer argument, followed by some number of
5884 // values (0, 1 or 2) followed by a potentially empty varags list of stuff
5885 // that we ignore. Find out which row of BuiltinIndices to read from as well
5886 // as the number of fixed args.
5887 unsigned BuiltinID = FDecl->getBuiltinID();
5888 unsigned BuiltinIndex, NumFixed = 1;
5889 bool WarnAboutSemanticsChange = false;
5890 switch (BuiltinID) {
5891 default: llvm_unreachable("Unknown overloaded atomic builtin!")__builtin_unreachable();
5892 case Builtin::BI__sync_fetch_and_add:
5893 case Builtin::BI__sync_fetch_and_add_1:
5894 case Builtin::BI__sync_fetch_and_add_2:
5895 case Builtin::BI__sync_fetch_and_add_4:
5896 case Builtin::BI__sync_fetch_and_add_8:
5897 case Builtin::BI__sync_fetch_and_add_16:
5898 BuiltinIndex = 0;
5899 break;
5900
5901 case Builtin::BI__sync_fetch_and_sub:
5902 case Builtin::BI__sync_fetch_and_sub_1:
5903 case Builtin::BI__sync_fetch_and_sub_2:
5904 case Builtin::BI__sync_fetch_and_sub_4:
5905 case Builtin::BI__sync_fetch_and_sub_8:
5906 case Builtin::BI__sync_fetch_and_sub_16:
5907 BuiltinIndex = 1;
5908 break;
5909
5910 case Builtin::BI__sync_fetch_and_or:
5911 case Builtin::BI__sync_fetch_and_or_1:
5912 case Builtin::BI__sync_fetch_and_or_2:
5913 case Builtin::BI__sync_fetch_and_or_4:
5914 case Builtin::BI__sync_fetch_and_or_8:
5915 case Builtin::BI__sync_fetch_and_or_16:
5916 BuiltinIndex = 2;
5917 break;
5918
5919 case Builtin::BI__sync_fetch_and_and:
5920 case Builtin::BI__sync_fetch_and_and_1:
5921 case Builtin::BI__sync_fetch_and_and_2:
5922 case Builtin::BI__sync_fetch_and_and_4:
5923 case Builtin::BI__sync_fetch_and_and_8:
5924 case Builtin::BI__sync_fetch_and_and_16:
5925 BuiltinIndex = 3;
5926 break;
5927
5928