Bug Summary

File: clang/lib/CodeGen/TargetInfo.cpp
Warning: line 10397, column 24
Called C++ object pointer is null
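
The warning "Called C++ object pointer is null" is produced by the analyzer's core.CallAndMessage checker when a non-static member function is invoked through a pointer that is null on at least one feasible path. A minimal sketch of the pattern (hypothetical code, not taken from TargetInfo.cpp):

    struct Widget {
      void draw() const;
    };

    void render(Widget *W) {
      if (W == nullptr)
        ;            // error noticed here, but no early return
      W->draw();     // warning: Called C++ object pointer is null
    }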

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name TargetInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/build-llvm/tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/clang/include -I /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/build-llvm/tools/clang/include -I /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/build-llvm/tools/clang/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-11-24-172238-38865-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/clang/lib/CodeGen/TargetInfo.cpp

/build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/clang/lib/CodeGen/TargetInfo.cpp

1//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliance.
11//
12//===----------------------------------------------------------------------===//
13
14#include "TargetInfo.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGValue.h"
19#include "CodeGenFunction.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/RecordLayout.h"
22#include "clang/Basic/CodeGenOptions.h"
23#include "clang/Basic/DiagnosticFrontend.h"
24#include "clang/CodeGen/CGFunctionInfo.h"
25#include "clang/CodeGen/SwiftCallingConv.h"
26#include "llvm/ADT/SmallBitVector.h"
27#include "llvm/ADT/StringExtras.h"
28#include "llvm/ADT/StringSwitch.h"
29#include "llvm/ADT/Triple.h"
30#include "llvm/ADT/Twine.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/IntrinsicsNVPTX.h"
33#include "llvm/IR/Type.h"
34#include "llvm/Support/raw_ostream.h"
35#include <algorithm> // std::sort
36
37using namespace clang;
38using namespace CodeGen;
39
40// Helper for coercing an aggregate argument or return value into an integer
41// array of the same size (including padding) and alignment. This alternate
42// coercion happens only for the RenderScript ABI and can be removed after
43// runtimes that rely on it are no longer supported.
44//
45// RenderScript assumes that the size of the argument / return value in the IR
46// is the same as the size of the corresponding qualified type. This helper
47// coerces the aggregate type into an array of the same size (including
48// padding). This coercion is used in lieu of expansion of struct members or
49// other canonical coercions that return a coerced-type of larger size.
50//
51// Ty - The argument / return value type
52// Context - The associated ASTContext
53// LLVMContext - The associated LLVMContext
54static ABIArgInfo coerceToIntArray(QualType Ty,
55 ASTContext &Context,
56 llvm::LLVMContext &LLVMContext) {
57 // Alignment and Size are measured in bits.
58 const uint64_t Size = Context.getTypeSize(Ty);
59 const uint64_t Alignment = Context.getTypeAlign(Ty);
60 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
61 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
62 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
63}
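// Worked example (illustrative, not part of the original file): for a struct
// with Size = 96 bits and Alignment = 32 bits, such as struct { int a, b, c; }
// on a typical 32-bit-int target, IntType is i32 and
// NumElements = (96 + 32 - 1) / 32 = 3, so the value is coerced to [3 x i32].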
64
65static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
66 llvm::Value *Array,
67 llvm::Value *Value,
68 unsigned FirstIndex,
69 unsigned LastIndex) {
70 // Alternatively, we could emit this as a loop in the source.
71 for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
72 llvm::Value *Cell =
73 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
74 Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
75 }
76}
77
78static bool isAggregateTypeForABI(QualType T) {
79 return !CodeGenFunction::hasScalarEvaluationKind(T) ||
80 T->isMemberFunctionPointerType();
81}
82
83ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
84 bool Realign,
85 llvm::Type *Padding) const {
86 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
87 Realign, Padding);
88}
89
90ABIArgInfo
91ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
92 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
93 /*ByVal*/ false, Realign);
94}
95
96Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
97 QualType Ty) const {
98 return Address::invalid();
99}
100
101bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
102 if (Ty->isPromotableIntegerType())
103 return true;
104
105 if (const auto *EIT = Ty->getAs<ExtIntType>())
106 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
107 return true;
108
109 return false;
110}
111
112ABIInfo::~ABIInfo() {}
113
114/// Does the given lowering require more than the given number of
115/// registers when expanded?
116///
117/// This is intended to be the basis of a reasonable basic implementation
118/// of should{Pass,Return}IndirectlyForSwift.
119///
120/// For most targets, a limit of four total registers is reasonable; this
121/// limits the amount of code required in order to move around the value
122/// in case it wasn't produced immediately prior to the call by the caller
123/// (or wasn't produced in exactly the right registers) or isn't used
124/// immediately within the callee. But some targets may need to further
125/// limit the register count due to an inability to support that many
126/// return registers.
127static bool occupiesMoreThan(CodeGenTypes &cgt,
128 ArrayRef<llvm::Type*> scalarTypes,
129 unsigned maxAllRegisters) {
130 unsigned intCount = 0, fpCount = 0;
131 for (llvm::Type *type : scalarTypes) {
132 if (type->isPointerTy()) {
133 intCount++;
134 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
135 auto ptrWidth = cgt.getTarget().getPointerWidth(0);
136 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
137 } else {
138 assert(type->isVectorTy() || type->isFloatingPointTy());
139 fpCount++;
140 }
141 }
142
143 return (intCount + fpCount > maxAllRegisters);
144}
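// Worked example (illustrative only): on a target with 64-bit pointers,
// scalarTypes = { i8*, i128, float } gives intCount = 1 + (128 + 63) / 64 = 3
// and fpCount = 1, so with maxAllRegisters = 4 the lowering does not occupy
// more than four registers and the value can stay direct.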
145
146bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
147 llvm::Type *eltTy,
148 unsigned numElts) const {
149 // The default implementation of this assumes that the target guarantees
150 // 128-bit SIMD support but nothing more.
151 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
152}
153
154static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
155 CGCXXABI &CXXABI) {
156 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
157 if (!RD) {
158 if (!RT->getDecl()->canPassInRegisters())
159 return CGCXXABI::RAA_Indirect;
160 return CGCXXABI::RAA_Default;
161 }
162 return CXXABI.getRecordArgABI(RD);
163}
164
165static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
166 CGCXXABI &CXXABI) {
167 const RecordType *RT = T->getAs<RecordType>();
168 if (!RT)
169 return CGCXXABI::RAA_Default;
170 return getRecordArgABI(RT, CXXABI);
171}
172
173static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
174 const ABIInfo &Info) {
175 QualType Ty = FI.getReturnType();
176
177 if (const auto *RT = Ty->getAs<RecordType>())
178 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
179 !RT->getDecl()->canPassInRegisters()) {
180 FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
181 return true;
182 }
183
184 return CXXABI.classifyReturnType(FI);
185}
186
187/// Pass transparent unions as if they were the type of the first element. Sema
188/// should ensure that all elements of the union have the same "machine type".
189static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
190 if (const RecordType *UT = Ty->getAsUnionType()) {
191 const RecordDecl *UD = UT->getDecl();
192 if (UD->hasAttr<TransparentUnionAttr>()) {
193 assert(!UD->field_empty() && "sema created an empty transparent union");
194 return UD->field_begin()->getType();
195 }
196 }
197 return Ty;
198}
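// Illustrative example (hypothetical, not from this file): given the C type
//
//   union IntPtr {
//     int       *IP;
//     const int *CIP;   // same machine type as the first field
//   } __attribute__((transparent_union));
//
// an argument of type IntPtr is classified exactly as if it were int *, the
// type of the union's first field.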
199
200CGCXXABI &ABIInfo::getCXXABI() const {
201 return CGT.getCXXABI();
202}
203
204ASTContext &ABIInfo::getContext() const {
205 return CGT.getContext();
206}
207
208llvm::LLVMContext &ABIInfo::getVMContext() const {
209 return CGT.getLLVMContext();
210}
211
212const llvm::DataLayout &ABIInfo::getDataLayout() const {
213 return CGT.getDataLayout();
214}
215
216const TargetInfo &ABIInfo::getTarget() const {
217 return CGT.getTarget();
218}
219
220const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
221 return CGT.getCodeGenOpts();
222}
223
224bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
225
226bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
227 return false;
228}
229
230bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
231 uint64_t Members) const {
232 return false;
233}
234
235LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
236 raw_ostream &OS = llvm::errs();
237 OS << "(ABIArgInfo Kind=";
238 switch (TheKind) {
239 case Direct:
240 OS << "Direct Type=";
241 if (llvm::Type *Ty = getCoerceToType())
242 Ty->print(OS);
243 else
244 OS << "null";
245 break;
246 case Extend:
247 OS << "Extend";
248 break;
249 case Ignore:
250 OS << "Ignore";
251 break;
252 case InAlloca:
253 OS << "InAlloca Offset=" << getInAllocaFieldIndex();
254 break;
255 case Indirect:
256 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
257 << " ByVal=" << getIndirectByVal()
258 << " Realign=" << getIndirectRealign();
259 break;
260 case IndirectAliased:
261 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
262 << " AadrSpace=" << getIndirectAddrSpace()
263 << " Realign=" << getIndirectRealign();
264 break;
265 case Expand:
266 OS << "Expand";
267 break;
268 case CoerceAndExpand:
269 OS << "CoerceAndExpand Type=";
270 getCoerceAndExpandType()->print(OS);
271 break;
272 }
273 OS << ")\n";
274}
275
276// Dynamically round a pointer up to a multiple of the given alignment.
277static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
278 llvm::Value *Ptr,
279 CharUnits Align) {
280 llvm::Value *PtrAsInt = Ptr;
281 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
282 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
283 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
284 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
285 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
286 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
287 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
288 Ptr->getType(),
289 Ptr->getName() + ".aligned");
290 return PtrAsInt;
291}
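// Worked example (illustrative only): with Align = 8, a pointer value of
// 0x1003 becomes (0x1003 + 7) & -8 = 0x1008, the next 8-byte boundary, while
// an already-aligned value such as 0x1008 maps to itself.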
292
293/// Emit va_arg for a platform using the common void* representation,
294/// where arguments are simply emitted in an array of slots on the stack.
295///
296/// This version implements the core direct-value passing rules.
297///
298/// \param SlotSize - The size and alignment of a stack slot.
299/// Each argument will be allocated to a multiple of this number of
300/// slots, and all the slots will be aligned to this value.
301/// \param AllowHigherAlign - The slot alignment is not a cap;
302/// an argument type with an alignment greater than the slot size
303/// will be emitted on a higher-alignment address, potentially
304/// leaving one or more empty slots behind as padding. If this
305/// is false, the returned address might be less-aligned than
306/// DirectAlign.
307static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
308 Address VAListAddr,
309 llvm::Type *DirectTy,
310 CharUnits DirectSize,
311 CharUnits DirectAlign,
312 CharUnits SlotSize,
313 bool AllowHigherAlign) {
314 // Cast the element type to i8* if necessary. Some platforms define
315 // va_list as a struct containing an i8* instead of just an i8*.
316 if (VAListAddr.getElementType() != CGF.Int8PtrTy)
317 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
318
319 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
320
321 // If the CC aligns values higher than the slot size, do so if needed.
322 Address Addr = Address::invalid();
323 if (AllowHigherAlign && DirectAlign > SlotSize) {
324 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
325 DirectAlign);
326 } else {
327 Addr = Address(Ptr, SlotSize);
328 }
329
330 // Advance the pointer past the argument, then store that back.
331 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
332 Address NextPtr =
333 CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
334 CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
335
336 // If the argument is smaller than a slot, and this is a big-endian
337 // target, the argument will be right-adjusted in its slot.
338 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
339 !DirectTy->isStructTy()) {
340 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
341 }
342
343 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
344 return Addr;
345}
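// Worked example (illustrative only): with SlotSize = 4, an 8-byte double
// with DirectAlign = 8 and AllowHigherAlign = true first rounds argp.cur up
// to an 8-byte boundary (possibly leaving one empty 4-byte slot as padding),
// then advances argp.next by FullDirectSize = 8 bytes past the argument.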
346
347/// Emit va_arg for a platform using the common void* representation,
348/// where arguments are simply emitted in an array of slots on the stack.
349///
350/// \param IsIndirect - Values of this type are passed indirectly.
351/// \param ValueInfo - The size and alignment of this type, generally
352/// computed with getContext().getTypeInfoInChars(ValueTy).
353/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
354/// Each argument will be allocated to a multiple of this number of
355/// slots, and all the slots will be aligned to this value.
356/// \param AllowHigherAlign - The slot alignment is not a cap;
357/// an argument type with an alignment greater than the slot size
358/// will be emitted on a higher-alignment address, potentially
359/// leaving one or more empty slots behind as padding.
360static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
361 QualType ValueTy, bool IsIndirect,
362 TypeInfoChars ValueInfo,
363 CharUnits SlotSizeAndAlign,
364 bool AllowHigherAlign) {
365 // The size and alignment of the value that was passed directly.
366 CharUnits DirectSize, DirectAlign;
367 if (IsIndirect) {
368 DirectSize = CGF.getPointerSize();
369 DirectAlign = CGF.getPointerAlign();
370 } else {
371 DirectSize = ValueInfo.Width;
372 DirectAlign = ValueInfo.Align;
373 }
374
375 // Cast the address we've calculated to the right type.
376 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
377 if (IsIndirect)
378 DirectTy = DirectTy->getPointerTo(0);
379
380 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
381 DirectSize, DirectAlign,
382 SlotSizeAndAlign,
383 AllowHigherAlign);
384
385 if (IsIndirect) {
386 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
387 }
388
389 return Addr;
390
391}
392
393static Address emitMergePHI(CodeGenFunction &CGF,
394 Address Addr1, llvm::BasicBlock *Block1,
395 Address Addr2, llvm::BasicBlock *Block2,
396 const llvm::Twine &Name = "") {
397 assert(Addr1.getType() == Addr2.getType());
398 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
399 PHI->addIncoming(Addr1.getPointer(), Block1);
400 PHI->addIncoming(Addr2.getPointer(), Block2);
401 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
402 return Address(PHI, Align);
403}
404
405TargetCodeGenInfo::~TargetCodeGenInfo() = default;
406
407// If someone can figure out a general rule for this, that would be great.
408// It's probably just doomed to be platform-dependent, though.
409unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
410 // Verified for:
411 // x86-64 FreeBSD, Linux, Darwin
412 // x86-32 FreeBSD, Linux, Darwin
413 // PowerPC Linux, Darwin
414 // ARM Darwin (*not* EABI)
415 // AArch64 Linux
416 return 32;
417}
418
419bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
420 const FunctionNoProtoType *fnType) const {
421 // The following conventions are known to require this to be false:
422 // x86_stdcall
423 // MIPS
424 // For everything else, we just prefer false unless we opt out.
425 return false;
426}
427
428void
429TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
430 llvm::SmallString<24> &Opt) const {
431 // This assumes the user is passing a library name like "rt" instead of a
432 // filename like "librt.a/so", and that they don't care whether it's static or
433 // dynamic.
434 Opt = "-l";
435 Opt += Lib;
436}
437
438unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
439 // OpenCL kernels are called via an explicit runtime API with arguments
440 // set with clSetKernelArg(), not as normal sub-functions.
441 // Return SPIR_KERNEL by default as the kernel calling convention to
442// ensure the fingerprint is fixed in such a way that each OpenCL argument
443 // gets one matching argument in the produced kernel function argument
444 // list to enable feasible implementation of clSetKernelArg() with
445 // aggregates etc. In case we would use the default C calling conv here,
446 // clSetKernelArg() might break depending on the target-specific
447 // conventions; different targets might split structs passed as values
448 // to multiple function arguments etc.
449 return llvm::CallingConv::SPIR_KERNEL;
450}
451
452llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
453 llvm::PointerType *T, QualType QT) const {
454 return llvm::ConstantPointerNull::get(T);
455}
456
457LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
458 const VarDecl *D) const {
459 assert(!CGM.getLangOpts().OpenCL &&
460        !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
461        "Address space agnostic languages only");
462 return D ? D->getType().getAddressSpace() : LangAS::Default;
463}
464
465llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
466 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
467 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
468 // Since the target may map different address spaces in AST to the same address
469 // space, an address space conversion may end up as a bitcast.
470 if (auto *C = dyn_cast<llvm::Constant>(Src))
471 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
472 // Try to preserve the source's name to make IR more readable.
473 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
474 Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
475}
476
477llvm::Constant *
478TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
479 LangAS SrcAddr, LangAS DestAddr,
480 llvm::Type *DestTy) const {
481 // Since the target may map different address spaces in AST to the same address
482 // space, an address space conversion may end up as a bitcast.
483 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
484}
485
486llvm::SyncScope::ID
487TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
488 SyncScope Scope,
489 llvm::AtomicOrdering Ordering,
490 llvm::LLVMContext &Ctx) const {
491 return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
492}
493
494static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
495
496/// isEmptyField - Return true iff the field is "empty", that is, it
497/// is an unnamed bit-field or an (array of) empty record(s).
498static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
499 bool AllowArrays) {
500 if (FD->isUnnamedBitfield())
501 return true;
502
503 QualType FT = FD->getType();
504
505 // Constant arrays of empty records count as empty, strip them off.
506 // Constant arrays of zero length always count as empty.
507 bool WasArray = false;
508 if (AllowArrays)
509 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
510 if (AT->getSize() == 0)
511 return true;
512 FT = AT->getElementType();
513 // The [[no_unique_address]] special case below does not apply to
514 // arrays of C++ empty records, so we need to remember this fact.
515 WasArray = true;
516 }
517
518 const RecordType *RT = FT->getAs<RecordType>();
519 if (!RT)
520 return false;
521
522 // C++ record fields are never empty, at least in the Itanium ABI.
523 //
524 // FIXME: We should use a predicate for whether this behavior is true in the
525 // current ABI.
526 //
527 // The exception to the above rule are fields marked with the
528 // [[no_unique_address]] attribute (since C++20). Those do count as empty
529 // according to the Itanium ABI. The exception applies only to records,
530 // not arrays of records, so we must also check whether we stripped off an
531 // array type above.
532 if (isa<CXXRecordDecl>(RT->getDecl()) &&
533 (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
534 return false;
535
536 return isEmptyRecord(Context, FT, AllowArrays);
537}
538
539/// isEmptyRecord - Return true iff a structure contains only empty
540/// fields. Note that a structure with a flexible array member is not
541/// considered empty.
542static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
543 const RecordType *RT = T->getAs<RecordType>();
544 if (!RT)
545 return false;
546 const RecordDecl *RD = RT->getDecl();
547 if (RD->hasFlexibleArrayMember())
548 return false;
549
550 // If this is a C++ record, check the bases first.
551 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
552 for (const auto &I : CXXRD->bases())
553 if (!isEmptyRecord(Context, I.getType(), true))
554 return false;
555
556 for (const auto *I : RD->fields())
557 if (!isEmptyField(Context, I, AllowArrays))
558 return false;
559 return true;
560}
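// Illustrative example (hypothetical, compiled as C): both of
//
//   struct Empty { };
//   struct AlsoEmpty { int : 0; struct Empty E[2]; };
//
// are empty records here with AllowArrays == true: the unnamed bit-field and
// the constant array of empty records are both ignored.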
561
562/// isSingleElementStruct - Determine if a structure is a "single
563/// element struct", i.e. it has exactly one non-empty field or
564/// exactly one field which is itself a single element
565/// struct. Structures with flexible array members are never
566/// considered single element structs.
567///
568/// \return The type of the single non-empty field, if
569/// it exists.
570static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
571 const RecordType *RT = T->getAs<RecordType>();
572 if (!RT)
573 return nullptr;
574
575 const RecordDecl *RD = RT->getDecl();
576 if (RD->hasFlexibleArrayMember())
577 return nullptr;
578
579 const Type *Found = nullptr;
580
581 // If this is a C++ record, check the bases first.
582 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
583 for (const auto &I : CXXRD->bases()) {
584 // Ignore empty records.
585 if (isEmptyRecord(Context, I.getType(), true))
586 continue;
587
588 // If we already found an element then this isn't a single-element struct.
589 if (Found)
590 return nullptr;
591
592 // If this is non-empty and not a single element struct, the composite
593 // cannot be a single element struct.
594 Found = isSingleElementStruct(I.getType(), Context);
595 if (!Found)
596 return nullptr;
597 }
598 }
599
600 // Check for single element.
601 for (const auto *FD : RD->fields()) {
602 QualType FT = FD->getType();
603
604 // Ignore empty fields.
605 if (isEmptyField(Context, FD, true))
606 continue;
607
608 // If we already found an element then this isn't a single-element
609 // struct.
610 if (Found)
611 return nullptr;
612
613 // Treat single element arrays as the element.
614 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
615 if (AT->getSize().getZExtValue() != 1)
616 break;
617 FT = AT->getElementType();
618 }
619
620 if (!isAggregateTypeForABI(FT)) {
621 Found = FT.getTypePtr();
622 } else {
623 Found = isSingleElementStruct(FT, Context);
624 if (!Found)
625 return nullptr;
626 }
627 }
628
629 // We don't consider a struct a single-element struct if it has
630 // padding beyond the element type.
631 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
632 return nullptr;
633
634 return Found;
635}
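// Illustrative example (hypothetical): for
//
//   struct S { float F[1]; };
//
// the single-element array is unwrapped and the function returns the type
// 'float', provided sizeof(S) == sizeof(float), i.e. there is no padding
// beyond the element type.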
636
637namespace {
638Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
639 const ABIArgInfo &AI) {
640 // This default implementation defers to the llvm backend's va_arg
641 // instruction. It can handle only passing arguments directly
642 // (typically only handled in the backend for primitive types), or
643 // aggregates passed indirectly by pointer (NOTE: if the "byval"
644 // flag has ABI impact in the callee, this implementation cannot
645 // work.)
646
647 // Only a few cases are covered here at the moment -- those needed
648 // by the default abi.
649 llvm::Value *Val;
650
651 if (AI.isIndirect()) {
652 assert(!AI.getPaddingType() &&
653        "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
654 assert(
655     !AI.getIndirectRealign() &&
656     "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
657
658 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
659 CharUnits TyAlignForABI = TyInfo.Align;
660
661 llvm::Type *BaseTy =
662 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
663 llvm::Value *Addr =
664 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
665 return Address(Addr, TyAlignForABI);
666 } else {
667 assert((AI.isDirect() || AI.isExtend()) &&
668        "Unexpected ArgInfo Kind in generic VAArg emitter!");
669
670 assert(!AI.getInReg() &&
671        "Unexpected InReg seen in arginfo in generic VAArg emitter!");
672 assert(!AI.getPaddingType() &&
673        "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
674 assert(!AI.getDirectOffset() &&
675        "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
676 assert(!AI.getCoerceToType() &&
677        "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
678
679 Address Temp = CGF.CreateMemTemp(Ty, "varet");
680 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
681 CGF.Builder.CreateStore(Val, Temp);
682 return Temp;
683 }
684}
685
686/// DefaultABIInfo - The default implementation for ABI specific
687/// details. This implementation provides information which results in
688/// self-consistent and sensible LLVM IR generation, but does not
689/// conform to any particular ABI.
690class DefaultABIInfo : public ABIInfo {
691public:
692 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
693
694 ABIArgInfo classifyReturnType(QualType RetTy) const;
695 ABIArgInfo classifyArgumentType(QualType RetTy) const;
696
697 void computeInfo(CGFunctionInfo &FI) const override {
698 if (!getCXXABI().classifyReturnType(FI))
699 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
700 for (auto &I : FI.arguments())
701 I.info = classifyArgumentType(I.type);
702 }
703
704 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
705 QualType Ty) const override {
706 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
707 }
708};
709
710class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
711public:
712 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
713 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
714};
715
716ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
717 Ty = useFirstFieldIfTransparentUnion(Ty);
718
719 if (isAggregateTypeForABI(Ty)) {
720 // Records with non-trivial destructors/copy-constructors should not be
721 // passed by value.
722 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
723 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
724
725 return getNaturalAlignIndirect(Ty);
726 }
727
728 // Treat an enum type as its underlying type.
729 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
730 Ty = EnumTy->getDecl()->getIntegerType();
731
732 ASTContext &Context = getContext();
733 if (const auto *EIT = Ty->getAs<ExtIntType>())
734 if (EIT->getNumBits() >
735 Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
736 ? Context.Int128Ty
737 : Context.LongLongTy))
738 return getNaturalAlignIndirect(Ty);
739
740 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
741 : ABIArgInfo::getDirect());
742}
743
744ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
745 if (RetTy->isVoidType())
746 return ABIArgInfo::getIgnore();
747
748 if (isAggregateTypeForABI(RetTy))
749 return getNaturalAlignIndirect(RetTy);
750
751 // Treat an enum type as its underlying type.
752 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
753 RetTy = EnumTy->getDecl()->getIntegerType();
754
755 if (const auto *EIT = RetTy->getAs<ExtIntType>())
756 if (EIT->getNumBits() >
757 getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
758 ? getContext().Int128Ty
759 : getContext().LongLongTy))
760 return getNaturalAlignIndirect(RetTy);
761
762 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
763 : ABIArgInfo::getDirect());
764}
765
766//===----------------------------------------------------------------------===//
767// WebAssembly ABI Implementation
768//
769// This is a very simple ABI that relies a lot on DefaultABIInfo.
770//===----------------------------------------------------------------------===//
771
772class WebAssemblyABIInfo final : public SwiftABIInfo {
773public:
774 enum ABIKind {
775 MVP = 0,
776 ExperimentalMV = 1,
777 };
778
779private:
780 DefaultABIInfo defaultInfo;
781 ABIKind Kind;
782
783public:
784 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
785 : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
786
787private:
788 ABIArgInfo classifyReturnType(QualType RetTy) const;
789 ABIArgInfo classifyArgumentType(QualType Ty) const;
790
791 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
792 // non-virtual, but computeInfo and EmitVAArg are virtual, so we
793 // override them.
794 void computeInfo(CGFunctionInfo &FI) const override {
795 if (!getCXXABI().classifyReturnType(FI))
796 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
797 for (auto &Arg : FI.arguments())
798 Arg.info = classifyArgumentType(Arg.type);
799 }
800
801 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
802 QualType Ty) const override;
803
804 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
805 bool asReturnValue) const override {
806 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
807 }
808
809 bool isSwiftErrorInRegister() const override {
810 return false;
811 }
812};
813
814class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
815public:
816 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
817 WebAssemblyABIInfo::ABIKind K)
818 : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
819
820 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
821 CodeGen::CodeGenModule &CGM) const override {
822 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
823 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
824 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
825 llvm::Function *Fn = cast<llvm::Function>(GV);
826 llvm::AttrBuilder B;
827 B.addAttribute("wasm-import-module", Attr->getImportModule());
828 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
829 }
830 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
831 llvm::Function *Fn = cast<llvm::Function>(GV);
832 llvm::AttrBuilder B;
833 B.addAttribute("wasm-import-name", Attr->getImportName());
834 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
835 }
836 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
837 llvm::Function *Fn = cast<llvm::Function>(GV);
838 llvm::AttrBuilder B;
839 B.addAttribute("wasm-export-name", Attr->getExportName());
840 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
841 }
842 }
843
844 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
845 llvm::Function *Fn = cast<llvm::Function>(GV);
846 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
847 Fn->addFnAttr("no-prototype");
848 }
849 }
850};
851
852/// Classify argument of given type \p Ty.
853ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
854 Ty = useFirstFieldIfTransparentUnion(Ty);
855
856 if (isAggregateTypeForABI(Ty)) {
857 // Records with non-trivial destructors/copy-constructors should not be
858 // passed by value.
859 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
860 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
861 // Ignore empty structs/unions.
862 if (isEmptyRecord(getContext(), Ty, true))
863 return ABIArgInfo::getIgnore();
864 // Lower single-element structs to just pass a regular value. TODO: We
865 // could do reasonable-size multiple-element structs too, using getExpand(),
866 // though watch out for things like bitfields.
867 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
868 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
869 // For the experimental multivalue ABI, fully expand all other aggregates
870 if (Kind == ABIKind::ExperimentalMV) {
871 const RecordType *RT = Ty->getAs<RecordType>();
872 assert(RT);
873 bool HasBitField = false;
874 for (auto *Field : RT->getDecl()->fields()) {
875 if (Field->isBitField()) {
876 HasBitField = true;
877 break;
878 }
879 }
880 if (!HasBitField)
881 return ABIArgInfo::getExpand();
882 }
883 }
884
885 // Otherwise just do the default thing.
886 return defaultInfo.classifyArgumentType(Ty);
887}
888
889ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
890 if (isAggregateTypeForABI(RetTy)) {
891 // Records with non-trivial destructors/copy-constructors should not be
892 // returned by value.
893 if (!getRecordArgABI(RetTy, getCXXABI())) {
894 // Ignore empty structs/unions.
895 if (isEmptyRecord(getContext(), RetTy, true))
896 return ABIArgInfo::getIgnore();
897 // Lower single-element structs to just return a regular value. TODO: We
898 // could do reasonable-size multiple-element structs too, using
899 // ABIArgInfo::getDirect().
900 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
901 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
902 // For the experimental multivalue ABI, return all other aggregates
903 if (Kind == ABIKind::ExperimentalMV)
904 return ABIArgInfo::getDirect();
905 }
906 }
907
908 // Otherwise just do the default thing.
909 return defaultInfo.classifyReturnType(RetTy);
910}
911
912Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
913 QualType Ty) const {
914 bool IsIndirect = isAggregateTypeForABI(Ty) &&
915 !isEmptyRecord(getContext(), Ty, true) &&
916 !isSingleElementStruct(Ty, getContext());
917 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
918 getContext().getTypeInfoInChars(Ty),
919 CharUnits::fromQuantity(4),
920 /*AllowHigherAlign=*/true);
921}
922
923//===----------------------------------------------------------------------===//
924// le32/PNaCl bitcode ABI Implementation
925//
926// This is a simplified version of the x86_32 ABI. Arguments and return values
927// are always passed on the stack.
928//===----------------------------------------------------------------------===//
929
930class PNaClABIInfo : public ABIInfo {
931 public:
932 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
933
934 ABIArgInfo classifyReturnType(QualType RetTy) const;
935 ABIArgInfo classifyArgumentType(QualType RetTy) const;
936
937 void computeInfo(CGFunctionInfo &FI) const override;
938 Address EmitVAArg(CodeGenFunction &CGF,
939 Address VAListAddr, QualType Ty) const override;
940};
941
942class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
943 public:
944 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
945 : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
946};
947
948void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
949 if (!getCXXABI().classifyReturnType(FI))
950 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
951
952 for (auto &I : FI.arguments())
953 I.info = classifyArgumentType(I.type);
954}
955
956Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
957 QualType Ty) const {
958 // The PNaCL ABI is a bit odd, in that varargs don't use normal
959 // function classification. Structs get passed directly for varargs
960 // functions, through a rewriting transform in
961 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
962// this target to actually support a va_arg instruction with an
963 // aggregate type, unlike other targets.
964 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
965}
966
967/// Classify argument of given type \p Ty.
968ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
969 if (isAggregateTypeForABI(Ty)) {
970 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
971 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
972 return getNaturalAlignIndirect(Ty);
973 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
974 // Treat an enum type as its underlying type.
975 Ty = EnumTy->getDecl()->getIntegerType();
976 } else if (Ty->isFloatingType()) {
977 // Floating-point types don't go inreg.
978 return ABIArgInfo::getDirect();
979 } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
980 // Treat extended integers as integers if <=64, otherwise pass indirectly.
981 if (EIT->getNumBits() > 64)
982 return getNaturalAlignIndirect(Ty);
983 return ABIArgInfo::getDirect();
984 }
985
986 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
987 : ABIArgInfo::getDirect());
988}
989
990ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
991 if (RetTy->isVoidType())
992 return ABIArgInfo::getIgnore();
993
994 // In the PNaCl ABI we always return records/structures on the stack.
995 if (isAggregateTypeForABI(RetTy))
996 return getNaturalAlignIndirect(RetTy);
997
998 // Treat extended integers as integers if <=64, otherwise pass indirectly.
999 if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
1000 if (EIT->getNumBits() > 64)
1001 return getNaturalAlignIndirect(RetTy);
1002 return ABIArgInfo::getDirect();
1003 }
1004
1005 // Treat an enum type as its underlying type.
1006 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1007 RetTy = EnumTy->getDecl()->getIntegerType();
1008
1009 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1010 : ABIArgInfo::getDirect());
1011}
1012
1013/// IsX86_MMXType - Return true if this is an MMX type.
1014bool IsX86_MMXType(llvm::Type *IRType) {
1015 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
1016 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
1017 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
1018 IRType->getScalarSizeInBits() != 64;
1019}
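// Illustrative examples: <2 x i32>, <4 x i16>, and <8 x i8> satisfy this
// predicate; <1 x i64> does not (its scalar size is 64 bits), and
// <2 x float> does not (its elements are not integers).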
1020
1021static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1022 StringRef Constraint,
1023 llvm::Type* Ty) {
1024 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
1025 .Cases("y", "&y", "^Ym", true)
1026 .Default(false);
1027 if (IsMMXCons && Ty->isVectorTy()) {
1028 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
1029 64) {
1030 // Invalid MMX constraint
1031 return nullptr;
1032 }
1033
1034 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
1035 }
1036
1037 // No operation needed
1038 return Ty;
1039}
1040
1041/// Returns true if this type can be passed in SSE registers with the
1042/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1043static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
1044 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1045 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
1046 if (BT->getKind() == BuiltinType::LongDouble) {
1047 if (&Context.getTargetInfo().getLongDoubleFormat() ==
1048 &llvm::APFloat::x87DoubleExtended())
1049 return false;
1050 }
1051 return true;
1052 }
1053 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
1054 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
1055 // registers specially.
1056 unsigned VecSize = Context.getTypeSize(VT);
1057 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
1058 return true;
1059 }
1060 return false;
1061}
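// Illustrative examples: float, double, and 128-, 256-, or 512-bit vector
// types qualify; Half does not, and long double qualifies only when the
// target's long double is not the 80-bit x87 extended format.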
1062
1063/// Returns true if this aggregate is small enough to be passed in SSE registers
1064/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1065static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
1066 return NumMembers <= 4;
1067}
1068
1069/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
1070static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
1071 auto AI = ABIArgInfo::getDirect(T);
1072 AI.setInReg(true);
1073 AI.setCanBeFlattened(false);
1074 return AI;
1075}
1076
1077//===----------------------------------------------------------------------===//
1078// X86-32 ABI Implementation
1079//===----------------------------------------------------------------------===//
1080
1081/// Similar to llvm::CCState, but for Clang.
1082struct CCState {
1083 CCState(CGFunctionInfo &FI)
1084 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1085
1086 llvm::SmallBitVector IsPreassigned;
1087 unsigned CC = CallingConv::CC_C;
1088 unsigned FreeRegs = 0;
1089 unsigned FreeSSERegs = 0;
1090};
1091
1092enum {
1093 // Vectorcall only allows the first 6 parameters to be passed in registers.
1094 VectorcallMaxParamNumAsReg = 6
1095};
1096
1097/// X86_32ABIInfo - The X86-32 ABI information.
1098class X86_32ABIInfo : public SwiftABIInfo {
1099 enum Class {
1100 Integer,
1101 Float
1102 };
1103
1104 static const unsigned MinABIStackAlignInBytes = 4;
1105
1106 bool IsDarwinVectorABI;
1107 bool IsRetSmallStructInRegABI;
1108 bool IsWin32StructABI;
1109 bool IsSoftFloatABI;
1110 bool IsMCUABI;
1111 unsigned DefaultNumRegisterParameters;
1112
1113 static bool isRegisterSize(unsigned Size) {
1114 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1115 }
1116
1117 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1118 // FIXME: Assumes vectorcall is in use.
1119 return isX86VectorTypeForVectorCall(getContext(), Ty);
1120 }
1121
1122 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1123 uint64_t NumMembers) const override {
1124 // FIXME: Assumes vectorcall is in use.
1125 return isX86VectorCallAggregateSmallEnough(NumMembers);
1126 }
1127
1128 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
1129
1130 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1131 /// such that the argument will be passed in memory.
1132 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
1133
1134 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
1135
1136 /// Return the alignment to use for the given type on the stack.
1137 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1138
1139 Class classify(QualType Ty) const;
1140 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
1141 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
1142
1143 /// Updates the number of available free registers, returns
1144 /// true if any registers were allocated.
1145 bool updateFreeRegs(QualType Ty, CCState &State) const;
1146
1147 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1148 bool &NeedsPadding) const;
1149 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1150
1151 bool canExpandIndirectArgument(QualType Ty) const;
1152
1153 /// Rewrite the function info so that all memory arguments use
1154 /// inalloca.
1155 void rewriteWithInAlloca(CGFunctionInfo &FI) const;
1156
1157 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1158 CharUnits &StackOffset, ABIArgInfo &Info,
1159 QualType Type) const;
1160 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
1161
1162public:
1163
1164 void computeInfo(CGFunctionInfo &FI) const override;
1165 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1166 QualType Ty) const override;
1167
1168 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1169 bool RetSmallStructInRegABI, bool Win32StructABI,
1170 unsigned NumRegisterParameters, bool SoftFloatABI)
1171 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1172 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1173 IsWin32StructABI(Win32StructABI),
1174 IsSoftFloatABI(SoftFloatABI),
1175 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1176 DefaultNumRegisterParameters(NumRegisterParameters) {}
1177
1178 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
1179 bool asReturnValue) const override {
1180 // LLVM's x86-32 lowering currently only assigns up to three
1181 // integer registers and three fp registers. Oddly, it'll use up to
1182 // four vector registers for vectors, but those can overlap with the
1183 // scalar registers.
1184 return occupiesMoreThan(CGT, scalars, /*total*/ 3);
1185 }
1186
1187 bool isSwiftErrorInRegister() const override {
1188 // x86-32 lowering does not support passing swifterror in a register.
1189 return false;
1190 }
1191};
1192
1193class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
1194public:
1195 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1196 bool RetSmallStructInRegABI, bool Win32StructABI,
1197 unsigned NumRegisterParameters, bool SoftFloatABI)
1198 : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
1199 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1200 NumRegisterParameters, SoftFloatABI)) {}
1201
1202 static bool isStructReturnInRegABI(
1203 const llvm::Triple &Triple, const CodeGenOptions &Opts);
1204
1205 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1206 CodeGen::CodeGenModule &CGM) const override;
1207
1208 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1209 // Darwin uses different dwarf register numbers for EH.
1210 if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
1211 return 4;
1212 }
1213
1214 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1215 llvm::Value *Address) const override;
1216
1217 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1218 StringRef Constraint,
1219 llvm::Type* Ty) const override {
1220 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1221 }
1222
1223 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
1224 std::string &Constraints,
1225 std::vector<llvm::Type *> &ResultRegTypes,
1226 std::vector<llvm::Type *> &ResultTruncRegTypes,
1227 std::vector<LValue> &ResultRegDests,
1228 std::string &AsmString,
1229 unsigned NumOutputs) const override;
1230
1231 llvm::Constant *
1232 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1233 unsigned Sig = (0xeb << 0) | // jmp rel8
1234 (0x06 << 8) | // .+0x08
1235 ('v' << 16) |
1236 ('2' << 24);
1237 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1238 }
1239
1240 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1241 return "movl\t%ebp, %ebp"
1242 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1243 }
1244};
1245
1246}
1247
1248/// Rewrite input constraint references after adding some output constraints.
1249/// In the case where there is one output and one input and we add one output,
1250/// we need to replace all operand references greater than or equal to 1:
1251/// mov $0, $1
1252/// mov eax, $1
1253/// The result will be:
1254/// mov $0, $2
1255/// mov eax, $2
1256static void rewriteInputConstraintReferences(unsigned FirstIn,
1257 unsigned NumNewOuts,
1258 std::string &AsmString) {
1259 std::string Buf;
1260 llvm::raw_string_ostream OS(Buf);
1261 size_t Pos = 0;
1262 while (Pos < AsmString.size()) {
1263 size_t DollarStart = AsmString.find('$', Pos);
1264 if (DollarStart == std::string::npos)
1265 DollarStart = AsmString.size();
1266 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1267 if (DollarEnd == std::string::npos)
1268 DollarEnd = AsmString.size();
1269 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1270 Pos = DollarEnd;
1271 size_t NumDollars = DollarEnd - DollarStart;
1272 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1273 // We have an operand reference.
1274 size_t DigitStart = Pos;
1275 if (AsmString[DigitStart] == '{') {
1276 OS << '{';
1277 ++DigitStart;
1278 }
1279 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1280 if (DigitEnd == std::string::npos)
1281 DigitEnd = AsmString.size();
1282 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1283 unsigned OperandIndex;
1284 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1285 if (OperandIndex >= FirstIn)
1286 OperandIndex += NumNewOuts;
1287 OS << OperandIndex;
1288 } else {
1289 OS << OperandStr;
1290 }
1291 Pos = DigitEnd;
1292 }
1293 }
1294 AsmString = std::move(OS.str());
1295}
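
An illustration of the renumbering (a sketch, not part of TargetInfo.cpp; it
assumes FirstIn is the number of pre-existing outputs, which is how
addReturnRegisterOutputs below calls it):

  // One pre-existing output, so input references start at operand $1.
  std::string Asm = "mov $0, $1\n"
                    "mov eax, $1";
  rewriteInputConstraintReferences(/*FirstIn=*/1, /*NumNewOuts=*/1, Asm);
  // Asm == "mov $0, $2\nmov eax, $2": every reference >= FirstIn is
  // shifted up by NumNewOuts. A "$$" escape is left alone, since only an
  // odd run of '$' characters introduces an operand reference.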
1296
1297/// Add output constraints for EAX:EDX because they are return registers.
1298void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1299 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1300 std::vector<llvm::Type *> &ResultRegTypes,
1301 std::vector<llvm::Type *> &ResultTruncRegTypes,
1302 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1303 unsigned NumOutputs) const {
1304 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1305
1306 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1307 // larger.
1308 if (!Constraints.empty())
1309 Constraints += ',';
1310 if (RetWidth <= 32) {
1311 Constraints += "={eax}";
1312 ResultRegTypes.push_back(CGF.Int32Ty);
1313 } else {
1314 // Use the 'A' constraint for EAX:EDX.
1315 Constraints += "=A";
1316 ResultRegTypes.push_back(CGF.Int64Ty);
1317 }
1318
1319 // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1320 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1321 ResultTruncRegTypes.push_back(CoerceTy);
1322
1323 // Coerce the integer by bitcasting the return slot pointer.
1324 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
1325 CoerceTy->getPointerTo()));
1326 ResultRegDests.push_back(ReturnSlot);
1327
1328 rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1329}
1330
1331/// shouldReturnTypeInRegister - Determine if the given type should be
1332/// returned in a register (for the Darwin and MCU ABI).
1333bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1334 ASTContext &Context) const {
1335 uint64_t Size = Context.getTypeSize(Ty);
1336
1337 // For i386, type must be register sized.
1338 // For the MCU ABI, it only needs to be <= 8 bytes.
1339 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1340 return false;
1341
1342 if (Ty->isVectorType()) {
1343 // 64- and 128- bit vectors inside structures are not returned in
1344 // registers.
1345 if (Size == 64 || Size == 128)
1346 return false;
1347
1348 return true;
1349 }
1350
1351 // If this is a builtin, pointer, enum, complex type, member pointer, or
1352 // member function pointer it is ok.
1353 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1354 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1355 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1356 return true;
1357
1358 // Arrays are treated like records.
1359 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1360 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1361
1362 // Otherwise, it must be a record type.
1363 const RecordType *RT = Ty->getAs<RecordType>();
1364 if (!RT) return false;
1365
1366 // FIXME: Traverse bases here too.
1367
1368 // Structure types are passed in register if all fields would be
1369 // passed in a register.
1370 for (const auto *FD : RT->getDecl()->fields()) {
1371 // Empty fields are ignored.
1372 if (isEmptyField(Context, FD, true))
1373 continue;
1374
1375 // Check fields recursively.
1376 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1377 return false;
1378 }
1379 return true;
1380}
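
Some concrete inputs and the answers the predicate above should give
(illustrative assumptions for an i386 Darwin target, not analyzer output):

  struct S1 { int x; };          // 32 bits, register sized    -> register
  struct S2 { char c[3]; };      // 24 bits, not 8/16/32/64    -> memory
  struct S3 { int x; float y; }; // 64 bits, both fields pass  -> register
  typedef float V4 __attribute__((vector_size(16)));
  struct S4 { V4 v; };           // 128-bit vector field       -> memory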
1381
1382static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1383 // Treat complex types as the element type.
1384 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1385 Ty = CTy->getElementType();
1386
1387 // Check for a type which we know has a simple scalar argument-passing
1388 // convention without any padding. (We're specifically looking for 32
1389 // and 64-bit integer and integer-equivalents, float, and double.)
1390 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1391 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1392 return false;
1393
1394 uint64_t Size = Context.getTypeSize(Ty);
1395 return Size == 32 || Size == 64;
1396}
1397
1398static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1399 uint64_t &Size) {
1400 for (const auto *FD : RD->fields()) {
1401 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1402 // argument is smaller than 32-bits, expanding the struct will create
1403 // alignment padding.
1404 if (!is32Or64BitBasicType(FD->getType(), Context))
1405 return false;
1406
1407 // FIXME: Reject bit-fields wholesale; there are two problems: we don't know
1408 // how to expand them yet, and the predicate for telling if a bitfield still
1409 // counts as "basic" is more complicated than what we were doing previously.
1410 if (FD->isBitField())
1411 return false;
1412
1413 Size += Context.getTypeSize(FD->getType());
1414 }
1415 return true;
1416}
1417
1418static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1419 uint64_t &Size) {
1420 // Don't do this if there are any non-empty bases.
1421 for (const CXXBaseSpecifier &Base : RD->bases()) {
1422 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1423 Size))
1424 return false;
1425 }
1426 if (!addFieldSizes(Context, RD, Size))
1427 return false;
1428 return true;
1429}
1430
1431/// Test whether an argument type which is to be passed indirectly (on the
1432/// stack) would have the equivalent layout if it was expanded into separate
1433/// arguments. If so, we prefer to do the latter to avoid inhibiting
1434/// optimizations.
1435bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1436 // We can only expand structure types.
1437 const RecordType *RT = Ty->getAs<RecordType>();
1438 if (!RT)
1439 return false;
1440 const RecordDecl *RD = RT->getDecl();
1441 uint64_t Size = 0;
1442 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1443 if (!IsWin32StructABI) {
1444 // On non-Windows, we have to conservatively match our old bitcode
1445 // prototypes in order to be ABI-compatible at the bitcode level.
1446 if (!CXXRD->isCLike())
1447 return false;
1448 } else {
1449 // Don't do this for dynamic classes.
1450 if (CXXRD->isDynamicClass())
1451 return false;
1452 }
1453 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1454 return false;
1455 } else {
1456 if (!addFieldSizes(getContext(), RD, Size))
1457 return false;
1458 }
1459
1460 // We can do this if there was no alignment padding.
1461 return Size == getContext().getTypeSize(Ty);
1462}
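
For example (assumed results; the exact answers depend on the target's
field alignment rules):

  struct A { int x; int y; };    // 32+32 == 64 == type size -> expandable
  struct B { short s; int x; };  // short is not a 32/64-bit basic type
                                 //                      -> not expandable
  struct C { int x; double d; }; // where double is 8-byte aligned (e.g.
                                 // MSVC), padding makes 96 != 128 bits
                                 //                      -> not expandable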
1463
1464ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1465 // If the return value is indirect, then the hidden argument is consuming one
1466 // integer register.
1467 if (State.FreeRegs) {
1468 --State.FreeRegs;
1469 if (!IsMCUABI)
1470 return getNaturalAlignIndirectInReg(RetTy);
1471 }
1472 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1473}
1474
1475ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1476 CCState &State) const {
1477 if (RetTy->isVoidType())
1478 return ABIArgInfo::getIgnore();
1479
1480 const Type *Base = nullptr;
1481 uint64_t NumElts = 0;
1482 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1483 State.CC == llvm::CallingConv::X86_RegCall) &&
1484 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1485 // The LLVM struct type for such an aggregate should lower properly.
1486 return ABIArgInfo::getDirect();
1487 }
1488
1489 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1490 // On Darwin, some vectors are returned in registers.
1491 if (IsDarwinVectorABI) {
1492 uint64_t Size = getContext().getTypeSize(RetTy);
1493
1494 // 128-bit vectors are a special case; they are returned in
1495 // registers and we need to make sure to pick a type the LLVM
1496 // backend will like.
1497 if (Size == 128)
1498 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1499 llvm::Type::getInt64Ty(getVMContext()), 2));
1500
1501 // Always return in register if it fits in a general purpose
1502 // register, or if it is 64 bits and has a single element.
1503 if ((Size == 8 || Size == 16 || Size == 32) ||
1504 (Size == 64 && VT->getNumElements() == 1))
1505 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1506 Size));
1507
1508 return getIndirectReturnResult(RetTy, State);
1509 }
1510
1511 return ABIArgInfo::getDirect();
1512 }
1513
1514 if (isAggregateTypeForABI(RetTy)) {
1515 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1516 // Structures with flexible arrays are always indirect.
1517 if (RT->getDecl()->hasFlexibleArrayMember())
1518 return getIndirectReturnResult(RetTy, State);
1519 }
1520
1521 // If specified, structs and unions are always indirect.
1522 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1523 return getIndirectReturnResult(RetTy, State);
1524
1525 // Ignore empty structs/unions.
1526 if (isEmptyRecord(getContext(), RetTy, true))
1527 return ABIArgInfo::getIgnore();
1528
1529 // Small structures which are register sized are generally returned
1530 // in a register.
1531 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1532 uint64_t Size = getContext().getTypeSize(RetTy);
1533
1534 // As a special-case, if the struct is a "single-element" struct, and
1535 // the field is of type "float" or "double", return it in a
1536 // floating-point register. (MSVC does not apply this special case.)
1537 // We apply a similar transformation for pointer types to improve the
1538 // quality of the generated IR.
1539 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1540 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1541 || SeltTy->hasPointerRepresentation())
1542 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1543
1544 // FIXME: We should be able to narrow this integer in cases with dead
1545 // padding.
1546 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1547 }
1548
1549 return getIndirectReturnResult(RetTy, State);
1550 }
1551
1552 // Treat an enum type as its underlying type.
1553 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1554 RetTy = EnumTy->getDecl()->getIntegerType();
1555
1556 if (const auto *EIT = RetTy->getAs<ExtIntType>())
1557 if (EIT->getNumBits() > 64)
1558 return getIndirectReturnResult(RetTy, State);
1559
1560 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1561 : ABIArgInfo::getDirect());
1562}
1563
1564static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
1565 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1566}
1567
1568static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
1569 const RecordType *RT = Ty->getAs<RecordType>();
1570 if (!RT)
1571 return false;
1572 const RecordDecl *RD = RT->getDecl();
1573
1574 // If this is a C++ record, check the bases first.
1575 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1576 for (const auto &I : CXXRD->bases())
1577 if (!isRecordWithSIMDVectorType(Context, I.getType()))
1578 return false;
1579
1580 for (const auto *i : RD->fields()) {
1581 QualType FT = i->getType();
1582
1583 if (isSIMDVectorType(Context, FT))
1584 return true;
1585
1586 if (isRecordWithSIMDVectorType(Context, FT))
1587 return true;
1588 }
1589
1590 return false;
1591}
1592
1593unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1594 unsigned Align) const {
1595 // If the alignment is less than or equal to the minimum ABI
1596 // alignment, just use the default; the backend will handle this.
1597 if (Align <= MinABIStackAlignInBytes)
1598 return 0; // Use default alignment.
1599
1600 // On non-Darwin, the stack type alignment is always 4.
1601 if (!IsDarwinVectorABI) {
1602 // Set explicit alignment, since we may need to realign the top.
1603 return MinABIStackAlignInBytes;
1604 }
1605
1606 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1607 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
1608 isRecordWithSIMDVectorType(getContext(), Ty)))
1609 return 16;
1610
1611 return MinABIStackAlignInBytes;
1612}
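
Assumed outcomes of this routine, taking MinABIStackAlignInBytes == 4 (a
sketch of the branches above, not verified output):

  // int,    Align 4              -> 0  (keep the default alignment)
  // __m128, Align 16, non-Darwin -> 4  (stack type alignment capped at 4)
  // __m128, Align 16, Darwin     -> 16 (SSE type keeps its 16-byte slot)
  // 16-byte-aligned struct with no vector field, Darwin -> 4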
1613
1614ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1615 CCState &State) const {
1616 if (!ByVal) {
1617 if (State.FreeRegs) {
1618 --State.FreeRegs; // Non-byval indirects just use one pointer.
1619 if (!IsMCUABI)
1620 return getNaturalAlignIndirectInReg(Ty);
1621 }
1622 return getNaturalAlignIndirect(Ty, false);
1623 }
1624
1625 // Compute the byval alignment.
1626 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1627 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1628 if (StackAlign == 0)
1629 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1630
1631 // If the stack alignment is less than the type alignment, realign the
1632 // argument.
1633 bool Realign = TypeAlign > StackAlign;
1634 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1635 /*ByVal=*/true, Realign);
1636}
1637
1638X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1639 const Type *T = isSingleElementStruct(Ty, getContext());
1640 if (!T)
1641 T = Ty.getTypePtr();
1642
1643 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1644 BuiltinType::Kind K = BT->getKind();
1645 if (K == BuiltinType::Float || K == BuiltinType::Double)
1646 return Float;
1647 }
1648 return Integer;
1649}
1650
1651bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1652 if (!IsSoftFloatABI) {
1653 Class C = classify(Ty);
1654 if (C == Float)
1655 return false;
1656 }
1657
1658 unsigned Size = getContext().getTypeSize(Ty);
1659 unsigned SizeInRegs = (Size + 31) / 32;
1660
1661 if (SizeInRegs == 0)
1662 return false;
1663
1664 if (!IsMCUABI) {
1665 if (SizeInRegs > State.FreeRegs) {
1666 State.FreeRegs = 0;
1667 return false;
1668 }
1669 } else {
1670 // The MCU psABI allows passing parameters in-reg even if there are
1671 // earlier parameters that are passed on the stack. Also,
1672 // it does not allow passing >8-byte structs in-register,
1673 // even if there are 3 free registers available.
1674 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1675 return false;
1676 }
1677
1678 State.FreeRegs -= SizeInRegs;
1679 return true;
1680}
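
A worked example of the accounting (hypothetical, assuming the non-MCU
path starts with 3 free registers, as with -mregparm=3):

  // int                  SizeInRegs 1 <= FreeRegs 3 -> 2, returns true
  // long long            SizeInRegs 2 <= FreeRegs 2 -> 0, returns true
  // struct { int a[3]; } SizeInRegs 3 >  FreeRegs 0 -> FreeRegs := 0,
  //                      returns false
  // double (hard float)  classified Float -> returns false immediately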
1681
1682bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1683 bool &InReg,
1684 bool &NeedsPadding) const {
1685 // On Windows, aggregates other than HFAs are never passed in registers, and
1686 // they do not consume register slots. Homogeneous floating-point aggregates
1687 // (HFAs) have already been dealt with at this point.
1688 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1689 return false;
1690
1691 NeedsPadding = false;
1692 InReg = !IsMCUABI;
1693
1694 if (!updateFreeRegs(Ty, State))
1695 return false;
1696
1697 if (IsMCUABI)
1698 return true;
1699
1700 if (State.CC == llvm::CallingConv::X86_FastCall ||
1701 State.CC == llvm::CallingConv::X86_VectorCall ||
1702 State.CC == llvm::CallingConv::X86_RegCall) {
1703 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1704 NeedsPadding = true;
1705
1706 return false;
1707 }
1708
1709 return true;
1710}
1711
1712bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1713 if (!updateFreeRegs(Ty, State))
1714 return false;
1715
1716 if (IsMCUABI)
1717 return false;
1718
1719 if (State.CC == llvm::CallingConv::X86_FastCall ||
1720 State.CC == llvm::CallingConv::X86_VectorCall ||
1721 State.CC == llvm::CallingConv::X86_RegCall) {
1722 if (getContext().getTypeSize(Ty) > 32)
1723 return false;
1724
1725 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1726 Ty->isReferenceType());
1727 }
1728
1729 return true;
1730}
1731
1732void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1733 // Vectorcall on x86 works subtly differently than on x64, so the format is
1734 // a bit different from the x64 version. First, all vector types (not HVAs)
1735 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1736 // This differs from the x64 implementation, where the first 6 arguments by
1737 // index get registers.
1738 // In the second pass over the arguments, HVAs are passed in the remaining
1739 // vector registers if possible, or indirectly by address. The address will be
1740 // passed in ECX/EDX if available. Any other arguments are passed according to
1741 // the usual fastcall rules.
1742 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1743 for (int I = 0, E = Args.size(); I < E; ++I) {
1744 const Type *Base = nullptr;
1745 uint64_t NumElts = 0;
1746 const QualType &Ty = Args[I].type;
1747 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1748 isHomogeneousAggregate(Ty, Base, NumElts)) {
1749 if (State.FreeSSERegs >= NumElts) {
1750 State.FreeSSERegs -= NumElts;
1751 Args[I].info = ABIArgInfo::getDirectInReg();
1752 State.IsPreassigned.set(I);
1753 }
1754 }
1755 }
1756}
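
A hypothetical prototype showing the two-pass split (HVA4 stands for a
homogeneous aggregate of four __m128; the outcome is assumed, not taken
from a test):

  // void __vectorcall f(int i, __m128 a, struct HVA4 h, __m128 b);
  //
  // This first pass hands 'a' and 'b' XMM registers immediately and
  // marks them in State.IsPreassigned. 'i' and the HVA 'h' are left for
  // the second pass, where 'h' competes for the XMM registers remaining.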
1757
1758ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1759 CCState &State) const {
1760 // FIXME: Set alignment on indirect arguments.
1761 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1762 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1763 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1764
1765 Ty = useFirstFieldIfTransparentUnion(Ty);
1766 TypeInfo TI = getContext().getTypeInfo(Ty);
1767
1768 // Check with the C++ ABI first.
1769 const RecordType *RT = Ty->getAs<RecordType>();
1770 if (RT) {
1771 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1772 if (RAA == CGCXXABI::RAA_Indirect) {
1773 return getIndirectResult(Ty, false, State);
1774 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1775 // The field index doesn't matter, we'll fix it up later.
1776 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1777 }
1778 }
1779
1780 // Regcall uses the concept of a homogeneous vector aggregate, similar
1781 // to other targets.
1782 const Type *Base = nullptr;
1783 uint64_t NumElts = 0;
1784 if ((IsRegCall || IsVectorCall) &&
1785 isHomogeneousAggregate(Ty, Base, NumElts)) {
1786 if (State.FreeSSERegs >= NumElts) {
1787 State.FreeSSERegs -= NumElts;
1788
1789 // Vectorcall passes HVAs directly and does not flatten them, but regcall
1790 // does.
1791 if (IsVectorCall)
1792 return getDirectX86Hva();
1793
1794 if (Ty->isBuiltinType() || Ty->isVectorType())
1795 return ABIArgInfo::getDirect();
1796 return ABIArgInfo::getExpand();
1797 }
1798 return getIndirectResult(Ty, /*ByVal=*/false, State);
1799 }
1800
1801 if (isAggregateTypeForABI(Ty)) {
1802 // Structures with flexible arrays are always indirect.
1803 // FIXME: This should not be byval!
1804 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1805 return getIndirectResult(Ty, true, State);
1806
1807 // Ignore empty structs/unions on non-Windows.
1808 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1809 return ABIArgInfo::getIgnore();
1810
1811 llvm::LLVMContext &LLVMContext = getVMContext();
1812 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1813 bool NeedsPadding = false;
1814 bool InReg;
1815 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1816 unsigned SizeInRegs = (TI.Width + 31) / 32;
1817 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1818 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1819 if (InReg)
1820 return ABIArgInfo::getDirectInReg(Result);
1821 else
1822 return ABIArgInfo::getDirect(Result);
1823 }
1824 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1825
1826 // Pass over-aligned aggregates on Windows indirectly. This behavior was
1827 // added in MSVC 2015.
1828 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
1829 return getIndirectResult(Ty, /*ByVal=*/false, State);
1830
1831 // Expand small (<= 128-bit) record types when we know that the stack layout
1832 // of those arguments will match the struct. This is important because the
1833 // LLVM backend isn't smart enough to remove byval, which inhibits many
1834 // optimizations.
1835 // Don't do this for the MCU if there are still free integer registers
1836 // (see X86_64 ABI for full explanation).
1837 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1838 canExpandIndirectArgument(Ty))
1839 return ABIArgInfo::getExpandWithPadding(
1840 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1841
1842 return getIndirectResult(Ty, true, State);
1843 }
1844
1845 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1846 // On Windows, vectors are passed directly if registers are available, or
1847 // indirectly if not. This avoids the need to align argument memory. Pass
1848 // user-defined vector types larger than 512 bits indirectly for simplicity.
1849 if (IsWin32StructABI) {
1850 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1851 --State.FreeSSERegs;
1852 return ABIArgInfo::getDirectInReg();
1853 }
1854 return getIndirectResult(Ty, /*ByVal=*/false, State);
1855 }
1856
1857 // On Darwin, some vectors are passed in memory; we handle this by passing
1858 // them as an i8/i16/i32/i64.
1859 if (IsDarwinVectorABI) {
1860 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
1861 (TI.Width == 64 && VT->getNumElements() == 1))
1862 return ABIArgInfo::getDirect(
1863 llvm::IntegerType::get(getVMContext(), TI.Width));
1864 }
1865
1866 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1867 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1868
1869 return ABIArgInfo::getDirect();
1870 }
1871
1872
1873 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1874 Ty = EnumTy->getDecl()->getIntegerType();
1875
1876 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1877
1878 if (isPromotableIntegerTypeForABI(Ty)) {
1879 if (InReg)
1880 return ABIArgInfo::getExtendInReg(Ty);
1881 return ABIArgInfo::getExtend(Ty);
1882 }
1883
1884 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
1885 if (EIT->getNumBits() <= 64) {
1886 if (InReg)
1887 return ABIArgInfo::getDirectInReg();
1888 return ABIArgInfo::getDirect();
1889 }
1890 return getIndirectResult(Ty, /*ByVal=*/false, State);
1891 }
1892
1893 if (InReg)
1894 return ABIArgInfo::getDirectInReg();
1895 return ABIArgInfo::getDirect();
1896}
1897
1898void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1899 CCState State(FI);
1900 if (IsMCUABI)
1901 State.FreeRegs = 3;
1902 else if (State.CC == llvm::CallingConv::X86_FastCall) {
1903 State.FreeRegs = 2;
1904 State.FreeSSERegs = 3;
1905 } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1906 State.FreeRegs = 2;
1907 State.FreeSSERegs = 6;
1908 } else if (FI.getHasRegParm())
1909 State.FreeRegs = FI.getRegParm();
1910 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1911 State.FreeRegs = 5;
1912 State.FreeSSERegs = 8;
1913 } else if (IsWin32StructABI) {
1914 // Since MSVC 2015, the first three SSE vectors have been passed in
1915 // registers. The rest are passed indirectly.
1916 State.FreeRegs = DefaultNumRegisterParameters;
1917 State.FreeSSERegs = 3;
1918 } else
1919 State.FreeRegs = DefaultNumRegisterParameters;
1920
1921 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1922 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1923 } else if (FI.getReturnInfo().isIndirect()) {
1924 // The C++ ABI is not aware of register usage, so we have to check if the
1925 // return value was sret and put it in a register ourselves if appropriate.
1926 if (State.FreeRegs) {
1927 --State.FreeRegs; // The sret parameter consumes a register.
1928 if (!IsMCUABI)
1929 FI.getReturnInfo().setInReg(true);
1930 }
1931 }
1932
1933 // The chain argument effectively gives us another free register.
1934 if (FI.isChainCall())
1935 ++State.FreeRegs;
1936
1937 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1938 // arguments to XMM registers as available.
1939 if (State.CC == llvm::CallingConv::X86_VectorCall)
1940 runVectorCallFirstPass(FI, State);
1941
1942 bool UsedInAlloca = false;
1943 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1944 for (int I = 0, E = Args.size(); I < E; ++I) {
1945 // Skip arguments that have already been assigned.
1946 if (State.IsPreassigned.test(I))
1947 continue;
1948
1949 Args[I].info = classifyArgumentType(Args[I].type, State);
1950 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
1951 }
1952
1953 // If we needed to use inalloca for any argument, do a second pass and rewrite
1954 // all the memory arguments to use inalloca.
1955 if (UsedInAlloca)
1956 rewriteWithInAlloca(FI);
1957}
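
Summarizing the initial budgets chosen above (a sketch; note that
DefaultNumRegisterParameters comes from -mregparm and defaults to 0):

  // cdecl        FreeRegs = DefaultNumRegisterParameters
  // fastcall     FreeRegs = 2 (ECX, EDX), FreeSSERegs = 3
  // vectorcall   FreeRegs = 2,            FreeSSERegs = 6
  // regcall      FreeRegs = 5,            FreeSSERegs = 8
  // Win32        FreeRegs = DefaultNumRegisterParameters, FreeSSERegs = 3
  // MCU          FreeRegs = 3
  //
  // e.g. __attribute__((fastcall)) void f(int a, int b, int c);
  // should place 'a' and 'b' in ECX/EDX and 'c' on the stack.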
1958
1959void
1960X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1961 CharUnits &StackOffset, ABIArgInfo &Info,
1962 QualType Type) const {
1963 // Arguments are always 4-byte-aligned.
1964 CharUnits WordSize = CharUnits::fromQuantity(4);
1965 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
1966
1967 // sret pointers and indirect things will require an extra pointer
1968 // indirection, unless they are byval. Most things are byval, and will not
1969 // require this indirection.
1970 bool IsIndirect = false;
1971 if (Info.isIndirect() && !Info.getIndirectByVal())
1972 IsIndirect = true;
1973 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
1974 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
1975 if (IsIndirect)
1976 LLTy = LLTy->getPointerTo(0);
1977 FrameFields.push_back(LLTy);
1978 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
1979
1980 // Insert padding bytes to respect alignment.
1981 CharUnits FieldEnd = StackOffset;
1982 StackOffset = FieldEnd.alignTo(WordSize);
1983 if (StackOffset != FieldEnd) {
1984 CharUnits NumBytes = StackOffset - FieldEnd;
1985 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1986 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1987 FrameFields.push_back(Ty);
1988 }
1989}
1990
1991static bool isArgInAlloca(const ABIArgInfo &Info) {
1992 // Leave ignored and inreg arguments alone.
1993 switch (Info.getKind()) {
1994 case ABIArgInfo::InAlloca:
1995 return true;
1996 case ABIArgInfo::Ignore:
1997 case ABIArgInfo::IndirectAliased:
1998 return false;
1999 case ABIArgInfo::Indirect:
2000 case ABIArgInfo::Direct:
2001 case ABIArgInfo::Extend:
2002 return !Info.getInReg();
2003 case ABIArgInfo::Expand:
2004 case ABIArgInfo::CoerceAndExpand:
2005 // These are aggregate types which are never passed in registers when
2006 // inalloca is involved.
2007 return true;
2008 }
2009 llvm_unreachable("invalid enum");
2010}
2011
2012void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2013 assert(IsWin32StructABI && "inalloca only supported on win32");
2014
2015 // Build a packed struct type for all of the arguments in memory.
2016 SmallVector<llvm::Type *, 6> FrameFields;
2017
2018 // The stack alignment is always 4.
2019 CharUnits StackAlign = CharUnits::fromQuantity(4);
2020
2021 CharUnits StackOffset;
2022 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2023
2024 // Put 'this' into the struct before 'sret', if necessary.
2025 bool IsThisCall =
2026 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2027 ABIArgInfo &Ret = FI.getReturnInfo();
2028 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2029 isArgInAlloca(I->info)) {
2030 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2031 ++I;
2032 }
2033
2034 // Put the sret parameter into the inalloca struct if it's in memory.
2035 if (Ret.isIndirect() && !Ret.getInReg()) {
2036 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2037 // On Windows, the hidden sret parameter is always returned in eax.
2038 Ret.setInAllocaSRet(IsWin32StructABI);
2039 }
2040
2041 // Skip the 'this' parameter in ecx.
2042 if (IsThisCall)
2043 ++I;
2044
2045 // Put arguments passed in memory into the struct.
2046 for (; I != E; ++I) {
2047 if (isArgInAlloca(I->info))
2048 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2049 }
2050
2051 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2052 /*isPacked=*/true),
2053 StackAlign);
2054}
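
As an illustration (an assumed layout, not compiler output), consider a
win32 thiscall method taking a non-trivially-copyable struct by value:

  struct S { S(const S &); int x; };
  struct C { int m(S s, int i); };

  // 'this' stays in ECX and is skipped above. 's' is RAA_DirectInMemory,
  // which forces inalloca, so the frame becomes the packed struct type
  // <{ %struct.S, i32 }> and 'i' is swept into it as well, each field
  // padded out to the next 4-byte boundary.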
2055
2056Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2057 Address VAListAddr, QualType Ty) const {
2058
2059 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2060
2061 // x86-32 changes the alignment of certain arguments on the stack.
2062 //
2063 // Just messing with TypeInfo like this works because we never pass
2064 // anything indirectly.
2065 TypeInfo.Align = CharUnits::fromQuantity(
2066 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
2067
2068 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2069 TypeInfo, CharUnits::fromQuantity(4),
2070 /*AllowHigherAlign*/ true);
2071}
2072
2073bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2074 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2075 assert(Triple.getArch() == llvm::Triple::x86);
2076
2077 switch (Opts.getStructReturnConvention()) {
2078 case CodeGenOptions::SRCK_Default:
2079 break;
2080 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
2081 return false;
2082 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
2083 return true;
2084 }
2085
2086 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2087 return true;
2088
2089 switch (Triple.getOS()) {
2090 case llvm::Triple::DragonFly:
2091 case llvm::Triple::FreeBSD:
2092 case llvm::Triple::OpenBSD:
2093 case llvm::Triple::Win32:
2094 return true;
2095 default:
2096 return false;
2097 }
2098}
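
For instance (assumed behavior), given

  struct P { int x, y; };
  P get(void);

on i386 Linux with the default convention this returns false and 'get'
takes a hidden sret pointer; with -freg-struct-return, or on Darwin,
Windows, or the listed BSDs, it returns true and P comes back in EAX:EDX.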
2099
2100void X86_32TargetCodeGenInfo::setTargetAttributes(
2101 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2102 if (GV->isDeclaration())
2103 return;
2104 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2105 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2106 llvm::Function *Fn = cast<llvm::Function>(GV);
2107 Fn->addFnAttr("stackrealign");
2108 }
2109 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2110 llvm::Function *Fn = cast<llvm::Function>(GV);
2111 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2112 }
2113 }
2114}
2115
2116bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2117 CodeGen::CodeGenFunction &CGF,
2118 llvm::Value *Address) const {
2119 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2120
2121 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2122
2123 // 0-7 are the eight integer registers; the order is different
2124 // on Darwin (for EH), but the range is the same.
2125 // 8 is %eip.
2126 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2127
2128 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2129 // 12-16 are st(0..4). Not sure why we stop at 4.
2130 // These have size 16, which is sizeof(long double) on
2131 // platforms with 8-byte alignment for that type.
2132 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2133 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2134
2135 } else {
2136 // 9 is %eflags, which doesn't get a size on Darwin for some
2137 // reason.
2138 Builder.CreateAlignedStore(
2139 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2140 CharUnits::One());
2141
2142 // 11-16 are st(0..5). Not sure why we stop at 5.
2143 // These have size 12, which is sizeof(long double) on
2144 // platforms with 4-byte alignment for that type.
2145 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2146 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2147 }
2148
2149 return false;
2150}
2151
2152//===----------------------------------------------------------------------===//
2153// X86-64 ABI Implementation
2154//===----------------------------------------------------------------------===//
2155
2156
2157namespace {
2158/// The AVX ABI level for X86 targets.
2159enum class X86AVXABILevel {
2160 None,
2161 AVX,
2162 AVX512
2163};
2164
2165 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2166static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2167 switch (AVXLevel) {
2168 case X86AVXABILevel::AVX512:
2169 return 512;
2170 case X86AVXABILevel::AVX:
2171 return 256;
2172 case X86AVXABILevel::None:
2173 return 128;
2174 }
2175 llvm_unreachable("Unknown AVXLevel")::llvm::llvm_unreachable_internal("Unknown AVXLevel", "/build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/clang/lib/CodeGen/TargetInfo.cpp"
, 2175)
;
2176}
2177
2178/// X86_64ABIInfo - The X86_64 ABI information.
2179class X86_64ABIInfo : public SwiftABIInfo {
2180 enum Class {
2181 Integer = 0,
2182 SSE,
2183 SSEUp,
2184 X87,
2185 X87Up,
2186 ComplexX87,
2187 NoClass,
2188 Memory
2189 };
2190
2191 /// merge - Implement the X86_64 ABI merging algorithm.
2192 ///
2193 /// Merge an accumulating classification \arg Accum with a field
2194 /// classification \arg Field.
2195 ///
2196 /// \param Accum - The accumulating classification. This should
2197 /// always be either NoClass or the result of a previous merge
2198 /// call. In addition, this should never be Memory (the caller
2199 /// should just return Memory for the aggregate).
2200 static Class merge(Class Accum, Class Field);
2201
2202 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2203 ///
2204 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2205 /// final MEMORY or SSE classes when necessary.
2206 ///
2207 /// \param AggregateSize - The size of the current aggregate in
2208 /// the classification process.
2209 ///
2210 /// \param Lo - The classification for the parts of the type
2211 /// residing in the low word of the containing object.
2212 ///
2213 /// \param Hi - The classification for the parts of the type
2214 /// residing in the higher words of the containing object.
2215 ///
2216 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2217
2218 /// classify - Determine the x86_64 register classes in which the
2219 /// given type T should be passed.
2220 ///
2221 /// \param Lo - The classification for the parts of the type
2222 /// residing in the low word of the containing object.
2223 ///
2224 /// \param Hi - The classification for the parts of the type
2225 /// residing in the high word of the containing object.
2226 ///
2227 /// \param OffsetBase - The bit offset of this type in the
2228 /// containing object. Some parameters are classified different
2229 /// depending on whether they straddle an eightbyte boundary.
2230 ///
2231 /// \param isNamedArg - Whether the argument in question is a "named"
2232 /// argument, as used in AMD64-ABI 3.5.7.
2233 ///
2234 /// If a word is unused its result will be NoClass; if a type should
2235 /// be passed in Memory then at least the classification of \arg Lo
2236 /// will be Memory.
2237 ///
2238 /// The \arg Lo class will be NoClass iff the argument is ignored.
2239 ///
2240 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2241 /// also be ComplexX87.
2242 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2243 bool isNamedArg) const;
2244
2245 llvm::Type *GetByteVectorType(QualType Ty) const;
2246 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2247 unsigned IROffset, QualType SourceTy,
2248 unsigned SourceOffset) const;
2249 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2250 unsigned IROffset, QualType SourceTy,
2251 unsigned SourceOffset) const;
2252
2253 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2254 /// result such that the argument will be returned in memory.
2255 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2256
2257 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2258 /// such that the argument will be passed in memory.
2259 ///
2260 /// \param freeIntRegs - The number of free integer registers remaining
2261 /// available.
2262 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2263
2264 ABIArgInfo classifyReturnType(QualType RetTy) const;
2265
2266 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2267 unsigned &neededInt, unsigned &neededSSE,
2268 bool isNamedArg) const;
2269
2270 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2271 unsigned &NeededSSE) const;
2272
2273 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2274 unsigned &NeededSSE) const;
2275
2276 bool IsIllegalVectorType(QualType Ty) const;
2277
2278 /// The 0.98 ABI revision clarified a lot of ambiguities,
2279 /// unfortunately in ways that were not always consistent with
2280 /// certain previous compilers. In particular, platforms which
2281 /// required strict binary compatibility with older versions of GCC
2282 /// may need to exempt themselves.
2283 bool honorsRevision0_98() const {
2284 return !getTarget().getTriple().isOSDarwin();
2285 }
2286
2287 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2288 /// classify it as INTEGER (for compatibility with older clang compilers).
2289 bool classifyIntegerMMXAsSSE() const {
2290 // Clang <= 3.8 did not do this.
2291 if (getContext().getLangOpts().getClangABICompat() <=
2292 LangOptions::ClangABI::Ver3_8)
2293 return false;
2294
2295 const llvm::Triple &Triple = getTarget().getTriple();
2296 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2297 return false;
2298 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2299 return false;
2300 return true;
2301 }
2302
2303 // GCC classifies vectors of __int128 as memory.
2304 bool passInt128VectorsInMem() const {
2305 // Clang <= 9.0 did not do this.
2306 if (getContext().getLangOpts().getClangABICompat() <=
2307 LangOptions::ClangABI::Ver9)
2308 return false;
2309
2310 const llvm::Triple &T = getTarget().getTriple();
2311 return T.isOSLinux() || T.isOSNetBSD();
2312 }
2313
2314 X86AVXABILevel AVXLevel;
2315 // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
2316 // 64-bit hardware.
2317 bool Has64BitPointers;
2318
2319public:
2320 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2321 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2322 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2323 }
2324
2325 bool isPassedUsingAVXType(QualType type) const {
2326 unsigned neededInt, neededSSE;
2327 // The freeIntRegs argument doesn't matter here.
2328 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2329 /*isNamedArg*/true);
2330 if (info.isDirect()) {
2331 llvm::Type *ty = info.getCoerceToType();
2332 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2333 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2334 }
2335 return false;
2336 }
2337
2338 void computeInfo(CGFunctionInfo &FI) const override;
2339
2340 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2341 QualType Ty) const override;
2342 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2343 QualType Ty) const override;
2344
2345 bool has64BitPointers() const {
2346 return Has64BitPointers;
2347 }
2348
2349 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2350 bool asReturnValue) const override {
2351 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2352 }
2353 bool isSwiftErrorInRegister() const override {
2354 return true;
2355 }
2356};
2357
2358/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2359class WinX86_64ABIInfo : public SwiftABIInfo {
2360public:
2361 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2362 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2363 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2364
2365 void computeInfo(CGFunctionInfo &FI) const override;
2366
2367 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2368 QualType Ty) const override;
2369
2370 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2371 // FIXME: Assumes vectorcall is in use.
2372 return isX86VectorTypeForVectorCall(getContext(), Ty);
2373 }
2374
2375 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2376 uint64_t NumMembers) const override {
2377 // FIXME: Assumes vectorcall is in use.
2378 return isX86VectorCallAggregateSmallEnough(NumMembers);
2379 }
2380
2381 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2382 bool asReturnValue) const override {
2383 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2384 }
2385
2386 bool isSwiftErrorInRegister() const override {
2387 return true;
2388 }
2389
2390private:
2391 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2392 bool IsVectorCall, bool IsRegCall) const;
2393 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2394 const ABIArgInfo &current) const;
2395 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2396 bool IsVectorCall, bool IsRegCall) const;
2397
2398 X86AVXABILevel AVXLevel;
2399
2400 bool IsMingw64;
2401};
2402
2403class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2404public:
2405 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2406 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2407
2408 const X86_64ABIInfo &getABIInfo() const {
2409 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2410 }
2411
2412 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2413 /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
2414 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2415
2416 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2417 return 7;
2418 }
2419
2420 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2421 llvm::Value *Address) const override {
2422 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2423
2424 // 0-15 are the 16 integer registers.
2425 // 16 is %rip.
2426 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2427 return false;
2428 }
2429
2430 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2431 StringRef Constraint,
2432 llvm::Type* Ty) const override {
2433 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2434 }
2435
2436 bool isNoProtoCallVariadic(const CallArgList &args,
2437 const FunctionNoProtoType *fnType) const override {
2438 // The default CC on x86-64 sets %al to the number of SSE
2439 // registers used, and GCC sets this when calling an unprototyped
2440 // function, so we override the default behavior. However, don't do
2441 // that when AVX types are involved: the ABI explicitly states it is
2442 // undefined, and it doesn't work in practice because of how the ABI
2443 // defines varargs anyway.
2444 if (fnType->getCallConv() == CC_C) {
2445 bool HasAVXType = false;
2446 for (CallArgList::const_iterator
2447 it = args.begin(), ie = args.end(); it != ie; ++it) {
2448 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2449 HasAVXType = true;
2450 break;
2451 }
2452 }
2453
2454 if (!HasAVXType)
2455 return true;
2456 }
2457
2458 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2459 }
2460
2461 llvm::Constant *
2462 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2463 unsigned Sig = (0xeb << 0) | // jmp rel8
2464 (0x06 << 8) | // .+0x08
2465 ('v' << 16) |
2466 ('2' << 24);
2467 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2468 }
2469
2470 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2471 CodeGen::CodeGenModule &CGM) const override {
2472 if (GV->isDeclaration())
2473 return;
2474 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2475 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2476 llvm::Function *Fn = cast<llvm::Function>(GV);
2477 Fn->addFnAttr("stackrealign");
2478 }
2479 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2480 llvm::Function *Fn = cast<llvm::Function>(GV);
2481 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2482 }
2483 }
2484 }
2485
2486 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2487 const FunctionDecl *Caller,
2488 const FunctionDecl *Callee,
2489 const CallArgList &Args) const override;
2490};
2491
2492static void initFeatureMaps(const ASTContext &Ctx,
2493 llvm::StringMap<bool> &CallerMap,
2494 const FunctionDecl *Caller,
2495 llvm::StringMap<bool> &CalleeMap,
2496 const FunctionDecl *Callee) {
2497 if (CalleeMap.empty() && CallerMap.empty()) {
2498 // The caller is potentially nullptr in the case where the call isn't in a
2499 // function. In this case, the getFunctionFeatureMap ensures we just get
2500 // the TU level setting (since it cannot be modified by 'target').
2501 Ctx.getFunctionFeatureMap(CallerMap, Caller);
2502 Ctx.getFunctionFeatureMap(CalleeMap, Callee);
2503 }
2504}
2505
2506static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2507 SourceLocation CallLoc,
2508 const llvm::StringMap<bool> &CallerMap,
2509 const llvm::StringMap<bool> &CalleeMap,
2510 QualType Ty, StringRef Feature,
2511 bool IsArgument) {
2512 bool CallerHasFeat = CallerMap.lookup(Feature);
2513 bool CalleeHasFeat = CalleeMap.lookup(Feature);
2514 if (!CallerHasFeat && !CalleeHasFeat)
2515 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2516 << IsArgument << Ty << Feature;
2517
2518 // Mixing calling conventions here is very clearly an error.
2519 if (!CallerHasFeat || !CalleeHasFeat)
2520 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2521 << IsArgument << Ty << Feature;
2522
2523 // Else, both caller and callee have the required feature, so there is no need
2524 // to diagnose.
2525 return false;
2526}
2527
2528static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2529 SourceLocation CallLoc,
2530 const llvm::StringMap<bool> &CallerMap,
2531 const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2532 bool IsArgument) {
2533 uint64_t Size = Ctx.getTypeSize(Ty);
2534 if (Size > 256)
2535 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2536 "avx512f", IsArgument);
2537
2538 if (Size > 128)
2539 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2540 IsArgument);
2541
2542 return false;
2543}
2544
2545void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2546 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2547 const FunctionDecl *Callee, const CallArgList &Args) const {
2548 llvm::StringMap<bool> CallerMap;
2549 llvm::StringMap<bool> CalleeMap;
2550 unsigned ArgIndex = 0;
2551
2552 // We need to loop through the actual call arguments rather than the
2553 // function's parameters, in case this call is variadic.
2554 for (const CallArg &Arg : Args) {
2555 // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
2556 // additionally changes how vectors >256 in size are passed. Like GCC, we
2557 // warn when a function is called with an argument where this will change.
2558 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2559 // the caller and callee features are mismatched.
2560 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2561 // change its ABI with attribute-target after this call.
2562 if (Arg.getType()->isVectorType() &&
2563 CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2564 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2565 QualType Ty = Arg.getType();
2566 // The CallArg seems to have desugared the type already, so for clearer
2567 // diagnostics, replace it with the type in the FunctionDecl if possible.
2568 if (ArgIndex < Callee->getNumParams())
2569 Ty = Callee->getParamDecl(ArgIndex)->getType();
2570
2571 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2572 CalleeMap, Ty, /*IsArgument*/ true))
2573 return;
2574 }
2575 ++ArgIndex;
2576 }
2577
2578 // Check return always, as we don't have a good way of knowing in codegen
2579 // whether this value is used, tail-called, etc.
2580 if (Callee->getReturnType()->isVectorType() &&
2581 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2582 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2583 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2584 CalleeMap, Callee->getReturnType(),
2585 /*IsArgument*/ false);
2586 }
2587}
2588
2589static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2590 // If the argument does not end in .lib, automatically add the suffix.
2591 // If the argument contains a space, enclose it in quotes.
2592 // This matches the behavior of MSVC.
2593 bool Quote = (Lib.find(" ") != StringRef::npos);
2594 std::string ArgStr = Quote ? "\"" : "";
2595 ArgStr += Lib;
2596 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
2597 ArgStr += ".lib";
2598 ArgStr += Quote ? "\"" : "";
2599 return ArgStr;
2600}
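
Concrete results of the normalization above:

  // qualifyWindowsLibrary("msvcrt")  == "msvcrt.lib"
  // qualifyWindowsLibrary("foo.lib") == "foo.lib"         (suffix kept)
  // qualifyWindowsLibrary("libm.a")  == "libm.a"          (.a kept too)
  // qualifyWindowsLibrary("my lib")  == "\"my lib.lib\""  (space => quotes)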
2601
2602class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2603public:
2604 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2605 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2606 unsigned NumRegisterParameters)
2607 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2608 Win32StructABI, NumRegisterParameters, false) {}
2609
2610 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2611 CodeGen::CodeGenModule &CGM) const override;
2612
2613 void getDependentLibraryOption(llvm::StringRef Lib,
2614 llvm::SmallString<24> &Opt) const override {
2615 Opt = "/DEFAULTLIB:";
2616 Opt += qualifyWindowsLibrary(Lib);
2617 }
2618
2619 void getDetectMismatchOption(llvm::StringRef Name,
2620 llvm::StringRef Value,
2621 llvm::SmallString<32> &Opt) const override {
2622 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2623 }
2624};
2625
2626static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2627 CodeGen::CodeGenModule &CGM) {
2628 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2629
2630 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2631 Fn->addFnAttr("stack-probe-size",
2632 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2633 if (CGM.getCodeGenOpts().NoStackArgProbe)
2634 Fn->addFnAttr("no-stack-arg-probe");
2635 }
2636}
2637
2638void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2639 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2640 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2641 if (GV->isDeclaration())
2642 return;
2643 addStackProbeTargetAttributes(D, GV, CGM);
2644}
2645
2646class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2647public:
2648 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2649 X86AVXABILevel AVXLevel)
2650 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
2651
2652 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2653 CodeGen::CodeGenModule &CGM) const override;
2654
2655 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2656 return 7;
2657 }
2658
2659 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2660 llvm::Value *Address) const override {
2661 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2662
2663 // 0-15 are the 16 integer registers.
2664 // 16 is %rip.
2665 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2666 return false;
2667 }
2668
2669 void getDependentLibraryOption(llvm::StringRef Lib,
2670 llvm::SmallString<24> &Opt) const override {
2671 Opt = "/DEFAULTLIB:";
2672 Opt += qualifyWindowsLibrary(Lib);
2673 }
2674
2675 void getDetectMismatchOption(llvm::StringRef Name,
2676 llvm::StringRef Value,
2677 llvm::SmallString<32> &Opt) const override {
2678 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2679 }
2680};
2681
2682void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2683 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2684 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2685 if (GV->isDeclaration())
2686 return;
2687 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2688 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2689 llvm::Function *Fn = cast<llvm::Function>(GV);
2690 Fn->addFnAttr("stackrealign");
2691 }
2692 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2693 llvm::Function *Fn = cast<llvm::Function>(GV);
2694 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2695 }
2696 }
2697
2698 addStackProbeTargetAttributes(D, GV, CGM);
2699}
2700}
2701
2702void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2703 Class &Hi) const {
2704 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2705 //
2706 // (a) If one of the classes is Memory, the whole argument is passed in
2707 // memory.
2708 //
2709 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2710 // memory.
2711 //
2712 // (c) If the size of the aggregate exceeds two eightbytes and the first
2713 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2714 // argument is passed in memory. NOTE: This is necessary to keep the
2715 // ABI working for processors that don't support the __m256 type.
2716 //
2717 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2718 //
2719 // Some of these are enforced by the merging logic. Others can arise
2720 // only with unions; for example:
2721 // union { _Complex double; unsigned; }
2722 //
2723 // Note that clauses (b) and (c) were added in 0.98.
2724 //
2725 if (Hi == Memory)
2726 Lo = Memory;
2727 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2728 Lo = Memory;
2729 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2730 Lo = Memory;
2731 if (Hi == SSEUp && Lo != SSE)
2732 Hi = SSE;
2733}
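// Illustrative sketch (not part of TargetInfo.cpp): assuming an x86-64
// target with the 80-bit x87 long double and revision 0.98 semantics, the
// clauses above fire for hypothetical unions such as these:
namespace postmerge_examples {
typedef float v4sf __attribute__((vector_size(16))); // <4 x float>
union U1 { long double ld; int i; }; // fields merge to Lo=Integer, Hi=X87Up;
                                     // clause (b) forces U1 into MEMORY.
union U2 { v4sf v; int i; };         // fields merge to Lo=Integer, Hi=SSEUp;
                                     // clause (d) demotes Hi to SSE, so U2
                                     // travels in one GPR plus one XMM.
} // namespace postmerge_examples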
2734
2735X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2736 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2737 // classified recursively so that always two fields are
2738 // considered. The resulting class is calculated according to
2739 // the classes of the fields in the eightbyte:
2740 //
2741 // (a) If both classes are equal, this is the resulting class.
2742 //
2743 // (b) If one of the classes is NO_CLASS, the resulting class is
2744 // the other class.
2745 //
2746 // (c) If one of the classes is MEMORY, the result is the MEMORY
2747 // class.
2748 //
2749 // (d) If one of the classes is INTEGER, the result is the
2750 // INTEGER.
2751 //
2752 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2753 // MEMORY is used as class.
2754 //
2755 // (f) Otherwise class SSE is used.
2756
2757 // Accum should never be memory (we should have returned) or
2758 // ComplexX87 (because this cannot be passed in a structure).
2759   assert((Accum != Memory && Accum != ComplexX87) &&
2760          "Invalid accumulated classification during merge.");
2761 if (Accum == Field || Field == NoClass)
2762 return Accum;
2763 if (Field == Memory)
2764 return Memory;
2765 if (Accum == NoClass)
2766 return Field;
2767 if (Accum == Integer || Field == Integer)
2768 return Integer;
2769 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2770 Accum == X87 || Accum == X87Up)
2771 return Memory;
2772 return SSE;
2773}
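// Illustrative sketch (not part of TargetInfo.cpp): two hypothetical
// single-eightbyte structs and the classes the rules above produce.
namespace merge_examples {
struct P { int i; float f; };   // merge(Integer, SSE) == Integer: rule (d)
                                // wins, so P travels in one GPR.
struct Q { float a; float b; }; // merge(SSE, SSE) == SSE: rule (a), so Q
                                // travels in one XMM register.
} // namespace merge_examples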
2774
2775void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2776 Class &Lo, Class &Hi, bool isNamedArg) const {
2777 // FIXME: This code can be simplified by introducing a simple value class for
2778 // Class pairs with appropriate constructor methods for the various
2779 // situations.
2780
2781 // FIXME: Some of the split computations are wrong; unaligned vectors
2782 // shouldn't be passed in registers for example, so there is no chance they
2783 // can straddle an eightbyte. Verify & simplify.
2784
2785 Lo = Hi = NoClass;
2786
2787 Class &Current = OffsetBase < 64 ? Lo : Hi;
2788 Current = Memory;
2789
2790 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2791 BuiltinType::Kind k = BT->getKind();
2792
2793 if (k == BuiltinType::Void) {
2794 Current = NoClass;
2795 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2796 Lo = Integer;
2797 Hi = Integer;
2798 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2799 Current = Integer;
2800 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2801 Current = SSE;
2802 } else if (k == BuiltinType::LongDouble) {
2803 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2804 if (LDF == &llvm::APFloat::IEEEquad()) {
2805 Lo = SSE;
2806 Hi = SSEUp;
2807 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2808 Lo = X87;
2809 Hi = X87Up;
2810 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2811 Current = SSE;
2812 } else
2813        llvm_unreachable("unexpected long double representation!");
2814 }
2815 // FIXME: _Decimal32 and _Decimal64 are SSE.
2816 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2817 return;
2818 }
2819
2820 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2821 // Classify the underlying integer type.
2822 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2823 return;
2824 }
2825
2826 if (Ty->hasPointerRepresentation()) {
2827 Current = Integer;
2828 return;
2829 }
2830
2831 if (Ty->isMemberPointerType()) {
2832 if (Ty->isMemberFunctionPointerType()) {
2833 if (Has64BitPointers) {
2834 // If Has64BitPointers, this is an {i64, i64}, so classify both
2835 // Lo and Hi now.
2836 Lo = Hi = Integer;
2837 } else {
2838 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2839 // straddles an eightbyte boundary, Hi should be classified as well.
2840 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2841 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2842 if (EB_FuncPtr != EB_ThisAdj) {
2843 Lo = Hi = Integer;
2844 } else {
2845 Current = Integer;
2846 }
2847 }
2848 } else {
2849 Current = Integer;
2850 }
2851 return;
2852 }
2853
2854 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2855 uint64_t Size = getContext().getTypeSize(VT);
2856 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2857 // gcc passes the following as integer:
2858 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2859 // 2 bytes - <2 x char>, <1 x short>
2860 // 1 byte - <1 x char>
2861 Current = Integer;
2862
2863 // If this type crosses an eightbyte boundary, it should be
2864 // split.
2865 uint64_t EB_Lo = (OffsetBase) / 64;
2866 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2867 if (EB_Lo != EB_Hi)
2868 Hi = Lo;
2869 } else if (Size == 64) {
2870 QualType ElementType = VT->getElementType();
2871
2872 // gcc passes <1 x double> in memory. :(
2873 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2874 return;
2875
2876 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2877 // pass them as integer. For platforms where clang is the de facto
2878 // platform compiler, we must continue to use integer.
2879 if (!classifyIntegerMMXAsSSE() &&
2880 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2881 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2882 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2883 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2884 Current = Integer;
2885 else
2886 Current = SSE;
2887
2888 // If this type crosses an eightbyte boundary, it should be
2889 // split.
2890 if (OffsetBase && OffsetBase != 64)
2891 Hi = Lo;
2892 } else if (Size == 128 ||
2893 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2894 QualType ElementType = VT->getElementType();
2895
2896 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2897 if (passInt128VectorsInMem() && Size != 128 &&
2898 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2899 ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2900 return;
2901
2902 // Arguments of 256-bits are split into four eightbyte chunks. The
2903 // least significant one belongs to class SSE and all the others to class
2904 // SSEUP. The original Lo and Hi design considers that types can't be
2905 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2906      // This design isn't correct for 256-bits, but since there are no cases
2907 // where the upper parts would need to be inspected, avoid adding
2908 // complexity and just consider Hi to match the 64-256 part.
2909 //
2910 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2911 // registers if they are "named", i.e. not part of the "..." of a
2912 // variadic function.
2913 //
2914 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2915 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2916 Lo = SSE;
2917 Hi = SSEUp;
2918 }
2919 return;
2920 }
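      // Illustrative examples (not part of TargetInfo.cpp) for the size
      // buckets above, assuming named arguments and an AVX level that admits
      // 256-bit vectors in registers:
      //   typedef short v2hi __attribute__((vector_size(4)));  // 32-bit: Integer
      //   typedef float v4sf __attribute__((vector_size(16))); // Lo=SSE, Hi=SSEUp -> one XMM
      //   typedef float v8sf __attribute__((vector_size(32))); // named 256-bit: SSE + SSEUP -> one YMM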
2921
2922 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2923 QualType ET = getContext().getCanonicalType(CT->getElementType());
2924
2925 uint64_t Size = getContext().getTypeSize(Ty);
2926 if (ET->isIntegralOrEnumerationType()) {
2927 if (Size <= 64)
2928 Current = Integer;
2929 else if (Size <= 128)
2930 Lo = Hi = Integer;
2931 } else if (ET == getContext().FloatTy) {
2932 Current = SSE;
2933 } else if (ET == getContext().DoubleTy) {
2934 Lo = Hi = SSE;
2935 } else if (ET == getContext().LongDoubleTy) {
2936 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2937 if (LDF == &llvm::APFloat::IEEEquad())
2938 Current = Memory;
2939 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2940 Current = ComplexX87;
2941 else if (LDF == &llvm::APFloat::IEEEdouble())
2942 Lo = Hi = SSE;
2943 else
2944       llvm_unreachable("unexpected long double representation!");
2945 }
2946
2947 // If this complex type crosses an eightbyte boundary then it
2948 // should be split.
2949 uint64_t EB_Real = (OffsetBase) / 64;
2950 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2951 if (Hi == NoClass && EB_Real != EB_Imag)
2952 Hi = Lo;
2953
2954 return;
2955 }
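  // Illustrative examples (not part of TargetInfo.cpp) for the branches
  // above, using the GNU _Complex extension:
  //   _Complex float        // 8 bytes: Current = SSE, one XMM register
  //   _Complex double       // 16 bytes: Lo = Hi = SSE, two XMM eightbytes
  //   _Complex long double  // x87 form: ComplexX87, returned in %st0/%st1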
2956
2957 if (const auto *EITy = Ty->getAs<ExtIntType>()) {
2958 if (EITy->getNumBits() <= 64)
2959 Current = Integer;
2960 else if (EITy->getNumBits() <= 128)
2961 Lo = Hi = Integer;
2962 // Larger values need to get passed in memory.
2963 return;
2964 }
2965
2966 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2967 // Arrays are treated like structures.
2968
2969 uint64_t Size = getContext().getTypeSize(Ty);
2970
2971 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2972 // than eight eightbytes, ..., it has class MEMORY.
2973 if (Size > 512)
2974 return;
2975
2976 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2977 // fields, it has class MEMORY.
2978 //
2979 // Only need to check alignment of array base.
2980 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2981 return;
2982
2983 // Otherwise implement simplified merge. We could be smarter about
2984 // this, but it isn't worth it and would be harder to verify.
2985 Current = NoClass;
2986 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2987 uint64_t ArraySize = AT->getSize().getZExtValue();
2988
2989 // The only case a 256-bit wide vector could be used is when the array
2990 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2991 // to work for sizes wider than 128, early check and fallback to memory.
2992 //
2993 if (Size > 128 &&
2994 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2995 return;
2996
2997 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2998 Class FieldLo, FieldHi;
2999 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
3000 Lo = merge(Lo, FieldLo);
3001 Hi = merge(Hi, FieldHi);
3002 if (Lo == Memory || Hi == Memory)
3003 break;
3004 }
3005
3006 postMerge(Size, Lo, Hi);
3007   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
3008 return;
3009 }
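  // Illustrative examples (not part of TargetInfo.cpp); arrays are wrapped
  // in structs because bare array parameters decay to pointers:
  //   struct D2 { double d[2]; }; // elements merge to SSE; Lo = Hi = SSE,
  //                               // two XMM eightbytes
  //   struct C5 { char c[5]; };   // everything lands in eightbyte 0 as
  //                               // Integer, so one GPR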
3010
3011 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3012 uint64_t Size = getContext().getTypeSize(Ty);
3013
3014 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
3015 // than eight eightbytes, ..., it has class MEMORY.
3016 if (Size > 512)
3017 return;
3018
3019 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
3020 // copy constructor or a non-trivial destructor, it is passed by invisible
3021 // reference.
3022 if (getRecordArgABI(RT, getCXXABI()))
3023 return;
3024
3025 const RecordDecl *RD = RT->getDecl();
3026
3027 // Assume variable sized types are passed in memory.
3028 if (RD->hasFlexibleArrayMember())
3029 return;
3030
3031 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3032
3033 // Reset Lo class, this will be recomputed.
3034 Current = NoClass;
3035
3036 // If this is a C++ record, classify the bases first.
3037 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3038 for (const auto &I : CXXRD->bases()) {
3039        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3040               "Unexpected base class!");
3041 const auto *Base =
3042 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3043
3044 // Classify this field.
3045 //
3046 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
3047 // single eightbyte, each is classified separately. Each eightbyte gets
3048 // initialized to class NO_CLASS.
3049 Class FieldLo, FieldHi;
3050 uint64_t Offset =
3051 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
3052 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
3053 Lo = merge(Lo, FieldLo);
3054 Hi = merge(Hi, FieldHi);
3055 if (Lo == Memory || Hi == Memory) {
3056 postMerge(Size, Lo, Hi);
3057 return;
3058 }
3059 }
3060 }
3061
3062 // Classify the fields one at a time, merging the results.
3063 unsigned idx = 0;
3064 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
3065 LangOptions::ClangABI::Ver11 ||
3066 getContext().getTargetInfo().getTriple().isPS4();
3067 bool IsUnion = RT->isUnionType() && !UseClang11Compat;
3068
3069 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3070 i != e; ++i, ++idx) {
3071 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3072 bool BitField = i->isBitField();
3073
3074 // Ignore padding bit-fields.
3075 if (BitField && i->isUnnamedBitfield())
3076 continue;
3077
3078 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
3079 // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
3080 //
3081 // The only case a 256-bit or a 512-bit wide vector could be used is when
3082 // the struct contains a single 256-bit or 512-bit element. Early check
3083 // and fallback to memory.
3084 //
3085       // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
3086       // than 128.
3087 if (Size > 128 &&
3088 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
3089 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
3090 Lo = Memory;
3091 postMerge(Size, Lo, Hi);
3092 return;
3093 }
3094 // Note, skip this test for bit-fields, see below.
3095 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
3096 Lo = Memory;
3097 postMerge(Size, Lo, Hi);
3098 return;
3099 }
3100
3101 // Classify this field.
3102 //
3103 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
3104 // exceeds a single eightbyte, each is classified
3105 // separately. Each eightbyte gets initialized to class
3106 // NO_CLASS.
3107 Class FieldLo, FieldHi;
3108
3109 // Bit-fields require special handling, they do not force the
3110 // structure to be passed in memory even if unaligned, and
3111 // therefore they can straddle an eightbyte.
3112 if (BitField) {
3113        assert(!i->isUnnamedBitfield());
3114 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3115 uint64_t Size = i->getBitWidthValue(getContext());
3116
3117 uint64_t EB_Lo = Offset / 64;
3118 uint64_t EB_Hi = (Offset + Size - 1) / 64;
3119
3120 if (EB_Lo) {
3121          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
3122 FieldLo = NoClass;
3123 FieldHi = Integer;
3124 } else {
3125 FieldLo = Integer;
3126 FieldHi = EB_Hi ? Integer : NoClass;
3127 }
3128 } else
3129 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
3130 Lo = merge(Lo, FieldLo);
3131 Hi = merge(Hi, FieldHi);
3132 if (Lo == Memory || Hi == Memory)
3133 break;
3134 }
3135
3136 postMerge(Size, Lo, Hi);
3137 }
3138}
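// Illustrative sketch (not part of TargetInfo.cpp): two hypothetical records
// walked through the field loop above.
namespace record_examples {
struct S { double d; int i; }; // eightbyte 0: SSE, eightbyte 1: Integer;
                               // S is passed in one XMM plus one GPR.
struct T { long a, b, c; };    // 24 bytes: Size > 128 with no single wide
                               // vector field, so the early check above
                               // sends T to MEMORY.
} // namespace record_examples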
3139
3140ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
3141 // If this is a scalar LLVM value then assume LLVM will pass it in the right
3142 // place naturally.
3143 if (!isAggregateTypeForABI(Ty)) {
3144 // Treat an enum type as its underlying type.
3145 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3146 Ty = EnumTy->getDecl()->getIntegerType();
3147
3148 if (Ty->isExtIntType())
3149 return getNaturalAlignIndirect(Ty);
3150
3151 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3152 : ABIArgInfo::getDirect());
3153 }
3154
3155 return getNaturalAlignIndirect(Ty);
3156}
3157
3158bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
3159 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
3160 uint64_t Size = getContext().getTypeSize(VecTy);
3161 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
3162 if (Size <= 64 || Size > LargestVector)
3163 return true;
3164 QualType EltTy = VecTy->getElementType();
3165 if (passInt128VectorsInMem() &&
3166 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
3167 EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
3168 return true;
3169 }
3170
3171 return false;
3172}
3173
3174ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
3175 unsigned freeIntRegs) const {
3176 // If this is a scalar LLVM value then assume LLVM will pass it in the right
3177 // place naturally.
3178 //
3179 // This assumption is optimistic, as there could be free registers available
3180 // when we need to pass this argument in memory, and LLVM could try to pass
3181 // the argument in the free register. This does not seem to happen currently,
3182 // but this code would be much safer if we could mark the argument with
3183 // 'onstack'. See PR12193.
3184 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
3185 !Ty->isExtIntType()) {
3186 // Treat an enum type as its underlying type.
3187 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3188 Ty = EnumTy->getDecl()->getIntegerType();
3189
3190 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3191 : ABIArgInfo::getDirect());
3192 }
3193
3194 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3195 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3196
3197 // Compute the byval alignment. We specify the alignment of the byval in all
3198 // cases so that the mid-level optimizer knows the alignment of the byval.
3199 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
3200
3201 // Attempt to avoid passing indirect results using byval when possible. This
3202 // is important for good codegen.
3203 //
3204 // We do this by coercing the value into a scalar type which the backend can
3205 // handle naturally (i.e., without using byval).
3206 //
3207 // For simplicity, we currently only do this when we have exhausted all of the
3208 // free integer registers. Doing this when there are free integer registers
3209 // would require more care, as we would have to ensure that the coerced value
3210   // did not claim the unused register. That would require either reordering the
3211 // arguments to the function (so that any subsequent inreg values came first),
3212 // or only doing this optimization when there were no following arguments that
3213 // might be inreg.
3214 //
3215 // We currently expect it to be rare (particularly in well written code) for
3216 // arguments to be passed on the stack when there are still free integer
3217 // registers available (this would typically imply large structs being passed
3218 // by value), so this seems like a fair tradeoff for now.
3219 //
3220 // We can revisit this if the backend grows support for 'onstack' parameter
3221 // attributes. See PR12193.
3222 if (freeIntRegs == 0) {
3223 uint64_t Size = getContext().getTypeSize(Ty);
3224
3225 // If this type fits in an eightbyte, coerce it into the matching integral
3226 // type, which will end up on the stack (with alignment 8).
3227 if (Align == 8 && Size <= 64)
3228 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3229 Size));
3230 }
3231
3232 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
3233}
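// Illustrative sketch (not part of TargetInfo.cpp): how the freeIntRegs == 0
// coercion above plays out for two hypothetical aggregates.
namespace indirect_examples {
struct Small { int a, b; }; // Size == 64, byval alignment clamps to 8: with
                            // no free GPRs this is coerced to a direct i64
                            // and placed on the stack without byval.
struct Big { long a, b; };  // Size == 128: too big to coerce, so it stays
                            // indirect byval with 8-byte alignment.
} // namespace indirect_examples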
3234
3235/// The ABI specifies that a value should be passed in a full vector XMM/YMM
3236/// register. Pick an LLVM IR type that will be passed as a vector register.
3237llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3238 // Wrapper structs/arrays that only contain vectors are passed just like
3239 // vectors; strip them off if present.
3240 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3241 Ty = QualType(InnerTy, 0);
3242
3243 llvm::Type *IRType = CGT.ConvertType(Ty);
3244 if (isa<llvm::VectorType>(IRType)) {
3245 // Don't pass vXi128 vectors in their native type, the backend can't
3246 // legalize them.
3247 if (passInt128VectorsInMem() &&
3248 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3249 // Use a vXi64 vector.
3250 uint64_t Size = getContext().getTypeSize(Ty);
3251 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3252 Size / 64);
3253 }
3254
3255 return IRType;
3256 }
3257
3258 if (IRType->getTypeID() == llvm::Type::FP128TyID)
3259 return IRType;
3260
3261 // We couldn't find the preferred IR vector type for 'Ty'.
3262 uint64_t Size = getContext().getTypeSize(Ty);
3263   assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3264
3265
3266   // Return an LLVM IR vector type based on the size of 'Ty'.
3267 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3268 Size / 64);
3269}
3270
3271/// BitsContainNoUserData - Return true if the specified [start,end) bit range
3272/// is known to either be off the end of the specified type or being in
3273/// alignment padding. The user type specified is known to be at most 128 bits
3274/// in size, and have passed through X86_64ABIInfo::classify with a successful
3275/// classification that put one of the two halves in the INTEGER class.
3276///
3277/// It is conservatively correct to return false.
3278static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
3279 unsigned EndBit, ASTContext &Context) {
3280 // If the bytes being queried are off the end of the type, there is no user
3281 // data hiding here. This handles analysis of builtins, vectors and other
3282 // types that don't contain interesting padding.
3283 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
3284 if (TySize <= StartBit)
3285 return true;
3286
3287 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3288 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3289 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3290
3291 // Check each element to see if the element overlaps with the queried range.
3292 for (unsigned i = 0; i != NumElts; ++i) {
3293       // If the element is after the span we care about, then we're done.
3294 unsigned EltOffset = i*EltSize;
3295 if (EltOffset >= EndBit) break;
3296
3297 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3298 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
3299 EndBit-EltOffset, Context))
3300 return false;
3301 }
3302 // If it overlaps no elements, then it is safe to process as padding.
3303 return true;
3304 }
3305
3306 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3307 const RecordDecl *RD = RT->getDecl();
3308 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3309
3310 // If this is a C++ record, check the bases first.
3311 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3312 for (const auto &I : CXXRD->bases()) {
3313        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3314               "Unexpected base class!");
3315 const auto *Base =
3316 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3317
3318 // If the base is after the span we care about, ignore it.
3319 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3320 if (BaseOffset >= EndBit) continue;
3321
3322 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3323 if (!BitsContainNoUserData(I.getType(), BaseStart,
3324 EndBit-BaseOffset, Context))
3325 return false;
3326 }
3327 }
3328
3329 // Verify that no field has data that overlaps the region of interest. Yes
3330 // this could be sped up a lot by being smarter about queried fields,
3331 // however we're only looking at structs up to 16 bytes, so we don't care
3332 // much.
3333 unsigned idx = 0;
3334 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3335 i != e; ++i, ++idx) {
3336 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3337
3338 // If we found a field after the region we care about, then we're done.
3339 if (FieldOffset >= EndBit) break;
3340
3341 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3342 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3343 Context))
3344 return false;
3345 }
3346
3347 // If nothing in this record overlapped the area of interest, then we're
3348 // clean.
3349 return true;
3350 }
3351
3352 return false;
3353}
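// Illustrative example (not part of TargetInfo.cpp): DI below is 16 bytes
// with four bytes of tail padding, so a hypothetical query
// BitsContainNoUserData(DI, /*StartBit=*/96, /*EndBit=*/128, Ctx) returns
// true -- the int ends at bit 96 and bits [96,128) are alignment padding.
// That is what later lets GetINTEGERTypeAtOffset() shrink DI's second
// eightbyte to an i32 instead of an i64.
struct DI { double d; int i; };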
3354
3355/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3356/// float member at the specified offset. For example, {int,{float}} has a
3357/// float at offset 4. It is conservatively correct for this routine to return
3358/// false.
3359static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3360 const llvm::DataLayout &TD) {
3361 // Base case if we find a float.
3362 if (IROffset == 0 && IRType->isFloatTy())
3363 return true;
3364
3365 // If this is a struct, recurse into the field at the specified offset.
3366 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3367 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3368 unsigned Elt = SL->getElementContainingOffset(IROffset);
3369 IROffset -= SL->getElementOffset(Elt);
3370 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3371 }
3372
3373 // If this is an array, recurse into the field at the specified offset.
3374 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3375 llvm::Type *EltTy = ATy->getElementType();
3376 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3377 IROffset -= IROffset/EltSize*EltSize;
3378 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3379 }
3380
3381 return false;
3382}
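// Illustrative trace (not part of TargetInfo.cpp) of the walk above on the
// doc comment's {int,{float}} example at IROffset == 4:
//   struct case: the element containing offset 4 is the inner {float} at
//     offset 4, so recurse with IROffset 0;
//   struct case again: element 0 is the float at offset 0, recurse;
//   base case: IROffset == 0 and the type is float -> return true.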
3383
3384
3385/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3386/// low 8 bytes of an XMM register, corresponding to the SSE class.
3387llvm::Type *X86_64ABIInfo::
3388GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3389 QualType SourceTy, unsigned SourceOffset) const {
3390   // The only three choices we have are double, <2 x float>, or float. We
3391   // pass as float if the last 4 bytes are just padding. This happens for
3392 // structs that contain 3 floats.
3393 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3394 SourceOffset*8+64, getContext()))
3395 return llvm::Type::getFloatTy(getVMContext());
3396
3397 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3398 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3399 // case.
3400 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3401 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3402 return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
3403 2);
3404
3405 return llvm::Type::getDoubleTy(getVMContext());
3406}
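// Illustrative example (not part of TargetInfo.cpp): the three-float struct
// mentioned above, walked through both eightbytes.
struct F3 { float x, y, z; }; // 12 bytes
// Eightbyte 0: floats at byte offsets 0 and 4 -> <2 x float>.
// Eightbyte 1: bits [96,128) fall off the end of F3, so the padding test
// above picks plain float. F3 is therefore passed as { <2 x float>, float }
// in two XMM registers.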
3407
3408
3409/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3410/// an 8-byte GPR. This means that we either have a scalar or we are talking
3411/// about the high or low part of an up-to-16-byte struct. This routine picks
3412/// the best LLVM IR type to represent this, which may be i64 or may be anything
3413/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3414/// etc).
3415///
3416/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3417/// the source type. IROffset is an offset in bytes into the LLVM IR type that
3418/// the 8-byte value references. PrefType may be null.
3419///
3420/// SourceTy is the source-level type for the entire argument. SourceOffset is
3421/// an offset into this that we're processing (which is always either 0 or 8).
3422///
3423llvm::Type *X86_64ABIInfo::
3424GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3425 QualType SourceTy, unsigned SourceOffset) const {
3426 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3427 // returning an 8-byte unit starting with it. See if we can safely use it.
3428 if (IROffset == 0) {
3429 // Pointers and int64's always fill the 8-byte unit.
3430 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3431 IRType->isIntegerTy(64))
3432 return IRType;
3433
3434 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3435 // goodness in the source type is just tail padding. This is allowed to
3436 // kick in for struct {double,int} on the int, but not on
3437 // struct{double,int,int} because we wouldn't return the second int. We
3438 // have to do this analysis on the source type because we can't depend on
3439 // unions being lowered a specific way etc.
3440 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3441 IRType->isIntegerTy(32) ||
3442 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3443 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3444 cast<llvm::IntegerType>(IRType)->getBitWidth();
3445
3446 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3447 SourceOffset*8+64, getContext()))
3448 return IRType;
3449 }
3450 }
3451
3452 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3453 // If this is a struct, recurse into the field at the specified offset.
3454 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3455 if (IROffset < SL->getSizeInBytes()) {
3456 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3457 IROffset -= SL->getElementOffset(FieldIdx);
3458
3459 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3460 SourceTy, SourceOffset);
3461 }
3462 }
3463
3464 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3465 llvm::Type *EltTy = ATy->getElementType();
3466 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3467 unsigned EltOffset = IROffset/EltSize*EltSize;
3468 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3469 SourceOffset);
3470 }
3471
3472 // Okay, we don't have any better idea of what to pass, so we pass this in an
3473 // integer register that isn't too big to fit the rest of the struct.
3474 unsigned TySizeInBytes =
3475 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3476
3477   assert(TySizeInBytes != SourceOffset && "Empty field?");
3478
3479 // It is always safe to classify this as an integer type up to i64 that
3480 // isn't larger than the structure.
3481 return llvm::IntegerType::get(getVMContext(),
3482 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3483}
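// Illustrative examples (not part of TargetInfo.cpp) for the paths above:
struct DInt { double d; int i; }; // second eightbyte (IROffset == 8): the
                                  // struct case recurses into the i32 field,
                                  // and bits [96,128) are tail padding, so
                                  // the eightbyte is represented as i32.
struct C11 { char c[11]; };       // second eightbyte reaches the fallback:
                                  // min(11 - 8, 8) * 8 == 24, yielding i24.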
3484
3485
3486/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3487/// be used as elements of a two register pair to pass or return, return a
3488/// first class aggregate to represent them. For example, if the low part of
3489/// a by-value argument should be passed as i32* and the high part as float,
3490/// return {i32*, float}.
3491static llvm::Type *
3492GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3493 const llvm::DataLayout &TD) {
3494   // In order to correctly satisfy the ABI, we need the high part to start
3495 // at offset 8. If the high and low parts we inferred are both 4-byte types
3496 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3497 // the second element at offset 8. Check for this:
3498 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3499 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3500 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3501   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3502
3503 // To handle this, we have to increase the size of the low part so that the
3504 // second element will start at an 8 byte offset. We can't increase the size
3505 // of the second element because it might make us access off the end of the
3506 // struct.
3507 if (HiStart != 8) {
3508 // There are usually two sorts of types the ABI generation code can produce
3509 // for the low part of a pair that aren't 8 bytes in size: float or
3510 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3511 // NaCl).
3512 // Promote these to a larger type.
3513 if (Lo->isFloatTy())
3514 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3515 else {
3516       assert((Lo->isIntegerTy() || Lo->isPointerTy())
3517              && "Invalid/unknown lo type");
3518 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3519 }
3520 }
3521
3522 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3523
3524 // Verify that the second element is at an 8-byte offset.
3525   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3526          "Invalid x86-64 argument pair!");
3527 return Result;
3528}
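// Illustrative example (not part of TargetInfo.cpp), assuming alignas(8)
// pushes the second float into the high eightbyte:
struct FF { float a; alignas(8) float b; };
// Both eightbytes classify as SSE and shrink to float (their upper halves
// are padding), so Lo = float and Hi = float. HiStart == alignTo(4, 4) == 4,
// so the low part is promoted to double and the returned pair is
// { double, float }, which puts the high element at offset 8 as required.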
3529
3530ABIArgInfo X86_64ABIInfo::
3531classifyReturnType(QualType RetTy) const {
3532 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3533 // classification algorithm.
3534 X86_64ABIInfo::Class Lo, Hi;
3535 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3536
3537 // Check some invariants.
3538   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3539   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3540
3541 llvm::Type *ResType = nullptr;
3542 switch (Lo) {
3543 case NoClass:
3544 if (Hi == NoClass)
3545 return ABIArgInfo::getIgnore();
3546 // If the low part is just padding, it takes no register, leave ResType
3547 // null.
3548     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3549            "Unknown missing lo part");
3550 break;
3551
3552 case SSEUp:
3553 case X87Up:
3554     llvm_unreachable("Invalid classification for lo word.");
3555
3556 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3557 // hidden argument.
3558 case Memory:
3559 return getIndirectReturnResult(RetTy);
3560
3561 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3562 // available register of the sequence %rax, %rdx is used.
3563 case Integer:
3564 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3565
3566 // If we have a sign or zero extended integer, make sure to return Extend
3567 // so that the parameter gets the right LLVM IR attributes.
3568 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3569 // Treat an enum type as its underlying type.
3570 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3571 RetTy = EnumTy->getDecl()->getIntegerType();
3572
3573 if (RetTy->isIntegralOrEnumerationType() &&
3574 isPromotableIntegerTypeForABI(RetTy))
3575 return ABIArgInfo::getExtend(RetTy);
3576 }
3577 break;
3578
3579 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3580 // available SSE register of the sequence %xmm0, %xmm1 is used.
3581 case SSE:
3582 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3583 break;
3584
3585 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3586 // returned on the X87 stack in %st0 as 80-bit x87 number.
3587 case X87:
3588 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3589 break;
3590
3591 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3592 // part of the value is returned in %st0 and the imaginary part in
3593 // %st1.
3594 case ComplexX87:
3595     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3596 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3597 llvm::Type::getX86_FP80Ty(getVMContext()));
3598 break;
3599 }
3600
3601 llvm::Type *HighPart = nullptr;
3602 switch (Hi) {
3603 // Memory was handled previously and X87 should
3604 // never occur as a hi class.
3605 case Memory:
3606 case X87:
3607     llvm_unreachable("Invalid classification for hi word.");
3608
3609 case ComplexX87: // Previously handled.
3610 case NoClass:
3611 break;
3612
3613 case Integer:
3614 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3615 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3616 return ABIArgInfo::getDirect(HighPart, 8);
3617 break;
3618 case SSE:
3619 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3620 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3621 return ABIArgInfo::getDirect(HighPart, 8);
3622 break;
3623
3624 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3625   // is passed in the next available eightbyte chunk of the last used
3626 // vector register.
3627 //
3628 // SSEUP should always be preceded by SSE, just widen.
3629 case SSEUp:
3630     assert(Lo == SSE && "Unexpected SSEUp classification.");
3631 ResType = GetByteVectorType(RetTy);
3632 break;
3633
3634 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3635 // returned together with the previous X87 value in %st0.
3636 case X87Up:
3637 // If X87Up is preceded by X87, we don't need to do
3638 // anything. However, in some cases with unions it may not be
3639 // preceded by X87. In such situations we follow gcc and pass the
3640 // extra bits in an SSE reg.
3641 if (Lo != X87) {
3642 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3643 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3644 return ABIArgInfo::getDirect(HighPart, 8);
3645 }
3646 break;
3647 }
3648
3649 // If a high part was specified, merge it together with the low part. It is
3650 // known to pass in the high eightbyte of the result. We do this by forming a
3651 // first class struct aggregate with the high and low part: {low, high}
3652 if (HighPart)
3653 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3654
3655 return ABIArgInfo::getDirect(ResType);
3656}
3657
3658ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3659 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3660 bool isNamedArg)
3661 const
3662{
3663 Ty = useFirstFieldIfTransparentUnion(Ty);
3664
3665 X86_64ABIInfo::Class Lo, Hi;
3666 classify(Ty, 0, Lo, Hi, isNamedArg);
3667
3668 // Check some invariants.
3669 // FIXME: Enforce these by construction.
3670   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3671   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3672
3673 neededInt = 0;
3674 neededSSE = 0;
3675 llvm::Type *ResType = nullptr;
3676 switch (Lo) {
3677 case NoClass:
3678 if (Hi == NoClass)
3679 return ABIArgInfo::getIgnore();
3680 // If the low part is just padding, it takes no register, leave ResType
3681 // null.
3682     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3683            "Unknown missing lo part");
3684 break;
3685
3686 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3687 // on the stack.
3688 case Memory:
3689
3690 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3691 // COMPLEX_X87, it is passed in memory.
3692 case X87:
3693 case ComplexX87:
3694 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3695 ++neededInt;
3696 return getIndirectResult(Ty, freeIntRegs);
3697
3698 case SSEUp:
3699 case X87Up:
3700     llvm_unreachable("Invalid classification for lo word.");
3701
3702 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3703 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3704 // and %r9 is used.
3705 case Integer:
3706 ++neededInt;
3707
3708 // Pick an 8-byte type based on the preferred type.
3709 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3710
3711 // If we have a sign or zero extended integer, make sure to return Extend
3712 // so that the parameter gets the right LLVM IR attributes.
3713 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3714 // Treat an enum type as its underlying type.
3715 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3716 Ty = EnumTy->getDecl()->getIntegerType();
3717
3718 if (Ty->isIntegralOrEnumerationType() &&
3719 isPromotableIntegerTypeForABI(Ty))
3720 return ABIArgInfo::getExtend(Ty);
3721 }
3722
3723 break;
3724
3725 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3726 // available SSE register is used, the registers are taken in the
3727 // order from %xmm0 to %xmm7.
3728 case SSE: {
3729 llvm::Type *IRType = CGT.ConvertType(Ty);
3730 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3731 ++neededSSE;
3732 break;
3733 }
3734 }
3735
3736 llvm::Type *HighPart = nullptr;
3737 switch (Hi) {
3738 // Memory was handled previously, ComplexX87 and X87 should
3739 // never occur as hi classes, and X87Up must be preceded by X87,
3740 // which is passed in memory.
3741 case Memory:
3742 case X87:
3743 case ComplexX87:
3744     llvm_unreachable("Invalid classification for hi word.");
3745
3746 case NoClass: break;
3747
3748 case Integer:
3749 ++neededInt;
3750 // Pick an 8-byte type based on the preferred type.
3751 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3752
3753 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3754 return ABIArgInfo::getDirect(HighPart, 8);
3755 break;
3756
3757 // X87Up generally doesn't occur here (long double is passed in
3758 // memory), except in situations involving unions.
3759 case X87Up:
3760 case SSE:
3761 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3762
3763 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3764 return ABIArgInfo::getDirect(HighPart, 8);
3765
3766 ++neededSSE;
3767 break;
3768
3769 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3770 // eightbyte is passed in the upper half of the last used SSE
3771 // register. This only happens when 128-bit vectors are passed.
3772 case SSEUp:
3773     assert(Lo == SSE && "Unexpected SSEUp classification");
3774 ResType = GetByteVectorType(Ty);
3775 break;
3776 }
3777
3778 // If a high part was specified, merge it together with the low part. It is
3779 // known to pass in the high eightbyte of the result. We do this by forming a
3780 // first class struct aggregate with the high and low part: {low, high}
3781 if (HighPart)
3782 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3783
3784 return ABIArgInfo::getDirect(ResType);
3785}
3786
3787ABIArgInfo
3788X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3789 unsigned &NeededSSE) const {
3790 auto RT = Ty->getAs<RecordType>();
3791   assert(RT && "classifyRegCallStructType only valid with struct types");
3792
3793 if (RT->getDecl()->hasFlexibleArrayMember())
3794 return getIndirectReturnResult(Ty);
3795
3796 // Sum up bases
3797 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3798 if (CXXRD->isDynamicClass()) {
3799 NeededInt = NeededSSE = 0;
3800 return getIndirectReturnResult(Ty);
3801 }
3802
3803 for (const auto &I : CXXRD->bases())
3804 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3805 .isIndirect()) {
3806 NeededInt = NeededSSE = 0;
3807 return getIndirectReturnResult(Ty);
3808 }
3809 }
3810
3811 // Sum up members
3812 for (const auto *FD : RT->getDecl()->fields()) {
3813 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3814 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3815 .isIndirect()) {
3816 NeededInt = NeededSSE = 0;
3817 return getIndirectReturnResult(Ty);
3818 }
3819 } else {
3820 unsigned LocalNeededInt, LocalNeededSSE;
3821       if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3822 LocalNeededSSE, true)
3823 .isIndirect()) {
3824 NeededInt = NeededSSE = 0;
3825 return getIndirectReturnResult(Ty);
3826 }
3827 NeededInt += LocalNeededInt;
3828 NeededSSE += LocalNeededSSE;
3829 }
3830 }
3831
3832 return ABIArgInfo::getDirect();
3833}
3834
3835ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3836 unsigned &NeededInt,
3837 unsigned &NeededSSE) const {
3838
3839 NeededInt = 0;
3840 NeededSSE = 0;
3841
3842 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3843}
3844
3845void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3846
3847 const unsigned CallingConv = FI.getCallingConvention();
3848 // It is possible to force Win64 calling convention on any x86_64 target by
3849   // using __attribute__((ms_abi)). In such a case, to emit Win64-compatible
3850   // code correctly, delegate this call to WinX86_64ABIInfo::computeInfo.
3851 if (CallingConv == llvm::CallingConv::Win64) {
3852 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
3853 Win64ABIInfo.computeInfo(FI);
3854 return;
3855 }
3856
3857 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3858
3859 // Keep track of the number of assigned registers.
3860 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3861 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3862 unsigned NeededInt, NeededSSE;
3863
3864 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3865 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3866 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3867 FI.getReturnInfo() =
3868 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3869 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3870 FreeIntRegs -= NeededInt;
3871 FreeSSERegs -= NeededSSE;
3872 } else {
3873 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3874 }
3875 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
3876 getContext().getCanonicalType(FI.getReturnType()
3877 ->getAs<ComplexType>()
3878 ->getElementType()) ==
3879 getContext().LongDoubleTy)
3880       // A complex long double is returned in memory when the regcall
3881       // calling convention is used.
3882 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3883 else
3884 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3885 }
3886
3887 // If the return value is indirect, then the hidden argument is consuming one
3888 // integer register.
3889 if (FI.getReturnInfo().isIndirect())
3890 --FreeIntRegs;
3891
3892 // The chain argument effectively gives us another free register.
3893 if (FI.isChainCall())
3894 ++FreeIntRegs;
3895
3896 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3897 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3898 // get assigned (in left-to-right order) for passing as follows...
3899 unsigned ArgNo = 0;
3900 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3901 it != ie; ++it, ++ArgNo) {
3902 bool IsNamedArg = ArgNo < NumRequiredArgs;
3903
3904 if (IsRegCall && it->type->isStructureOrClassType())
3905 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3906 else
3907 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3908 NeededSSE, IsNamedArg);
3909
3910 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3911 // eightbyte of an argument, the whole argument is passed on the
3912 // stack. If registers have already been assigned for some
3913 // eightbytes of such an argument, the assignments get reverted.
3914 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3915 FreeIntRegs -= NeededInt;
3916 FreeSSERegs -= NeededSSE;
3917 } else {
3918 it->info = getIndirectResult(it->type, FreeIntRegs);
3919 }
3920 }
3921}
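// Illustrative example (not part of TargetInfo.cpp): with the SysV defaults
// above (6 free GPRs, 8 free XMMs), a hypothetical call
//   void f(long a, long b, long c, long d, long e, long f2, TwoLongs g);
// where TwoLongs is struct { long x, y; } spends all six integer registers
// on the scalars; g then needs NeededInt == 2 with FreeIntRegs == 0, fails
// the check above, and is passed on the stack via getIndirectResult().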
3922
3923static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3924 Address VAListAddr, QualType Ty) {
3925 Address overflow_arg_area_p =
3926 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
3927 llvm::Value *overflow_arg_area =
3928 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3929
3930 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3931 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3932 // It isn't stated explicitly in the standard, but in practice we use
3933 // alignment greater than 16 where necessary.
3934 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3935 if (Align > CharUnits::fromQuantity(8)) {
3936 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3937 Align);
3938 }
3939
3940 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3941 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3942 llvm::Value *Res =
3943 CGF.Builder.CreateBitCast(overflow_arg_area,
3944 llvm::PointerType::getUnqual(LTy));
3945
3946 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3947 // l->overflow_arg_area + sizeof(type).
3948 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3949 // an 8 byte boundary.
3950
3951 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3952 llvm::Value *Offset =
3953 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3954 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3955 "overflow_arg_area.next");
3956 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3957
3958 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3959 return Address(Res, Align);
3960}
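// What the IR above computes, as a C sketch (illustrative only; `va` and
// `T` are hypothetical names for the va_list and the fetched type):
//
//   char *p = va->overflow_arg_area;
//   if (align_of_T > 8)                                      // Step 7
//     p = (char *)(((uintptr_t)p + align_of_T - 1) & ~(uintptr_t)(align_of_T - 1));
//   T *result = (T *)p;                                      // Step 8
//   va->overflow_arg_area = p + ((sizeof(T) + 7) & ~7u);     // Steps 9-10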
3961
3962Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3963 QualType Ty) const {
3964 // Assume that va_list type is correct; should be pointer to LLVM type:
3965 // struct {
3966 // i32 gp_offset;
3967 // i32 fp_offset;
3968 // i8* overflow_arg_area;
3969 // i8* reg_save_area;
3970 // };
3971 unsigned neededInt, neededSSE;
3972
3973 Ty = getContext().getCanonicalType(Ty);
3974 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3975 /*isNamedArg*/false);
3976
3977 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3978 // in the registers. If not go to step 7.
3979 if (!neededInt && !neededSSE)
3980 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3981
3982 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3983 // general purpose registers needed to pass type and num_fp to hold
3984 // the number of floating point registers needed.
3985
3986 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3987 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3988 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3989 //
3990 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
3991 // register save space.
3992
3993 llvm::Value *InRegs = nullptr;
3994 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3995 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3996 if (neededInt) {
3997 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3998 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3999 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
4000 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
4001 }
4002
4003 if (neededSSE) {
4004 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
4005 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
4006 llvm::Value *FitsInFP =
4007 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
4008 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
4009 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
4010 }
4011
4012 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4013 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4014 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4015 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4016
4017 // Emit code to load the value if it was passed in registers.
4018
4019 CGF.EmitBlock(InRegBlock);
4020
4021 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4022 // an offset of l->gp_offset and/or l->fp_offset. This may require
4023 // copying to a temporary location in case the parameter is passed
4024 // in different register classes or requires an alignment greater
4025 // than 8 for general purpose registers and 16 for XMM registers.
4026 //
4027 // FIXME: This really results in shameful code when we end up needing to
4028 // collect arguments from different places; often what should result in a
4029 // simple assembling of a structure from scattered addresses has many more
4030 // loads than necessary. Can we clean this up?
4031 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4032 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
4033 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
4034
4035 Address RegAddr = Address::invalid();
4036 if (neededInt && neededSSE) {
4037 // FIXME: Cleanup.
4038 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4039 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
4040 Address Tmp = CGF.CreateMemTemp(Ty);
4041 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4042 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4043 llvm::Type *TyLo = ST->getElementType(0);
4044 llvm::Type *TyHi = ST->getElementType(1);
4045 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4046 "Unexpected ABI info for mixed regs");
4047 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4048 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4049 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
4050 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
4051 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4052 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4053
4054 // Copy the first element.
4055 // FIXME: Our choice of alignment here and below is probably pessimistic.
4056 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
4057 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4058 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
4059 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4060
4061 // Copy the second element.
4062 V = CGF.Builder.CreateAlignedLoad(
4063 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4064 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
4065 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4066
4067 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4068 } else if (neededInt) {
4069 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
4070 CharUnits::fromQuantity(8));
4071 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4072
4073 // Copy to a temporary if necessary to ensure the appropriate alignment.
4074 auto TInfo = getContext().getTypeInfoInChars(Ty);
4075 uint64_t TySize = TInfo.Width.getQuantity();
4076 CharUnits TyAlign = TInfo.Align;
4077
4078 // Copy into a temporary if the type is more aligned than the
4079 // register save area.
4080 if (TyAlign.getQuantity() > 8) {
4081 Address Tmp = CGF.CreateMemTemp(Ty);
4082 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
4083 RegAddr = Tmp;
4084 }
4085
4086 } else if (neededSSE == 1) {
4087 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4088 CharUnits::fromQuantity(16));
4089 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4090 } else {
4091 assert(neededSSE == 2 && "Invalid number of needed registers!");
4092 // SSE registers are spaced 16 bytes apart in the register save
4093 // area, so we need to collect the two eightbytes together.
4094 // The ABI isn't explicit about this, but it seems reasonable
4095 // to assume that the slots are 16-byte aligned, since the stack is
4096 // naturally 16-byte aligned and the prologue is expected to store
4097 // all the SSE registers to the RSA.
4098 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4099 CharUnits::fromQuantity(16));
4100 Address RegAddrHi =
4101 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
4102 CharUnits::fromQuantity(16));
4103 llvm::Type *ST = AI.canHaveCoerceToType()
4104 ? AI.getCoerceToType()
4105 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
4106 llvm::Value *V;
4107 Address Tmp = CGF.CreateMemTemp(Ty);
4108 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4109 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4110 RegAddrLo, ST->getStructElementType(0)));
4111 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4112 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4113 RegAddrHi, ST->getStructElementType(1)));
4114 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4115
4116 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4117 }
4118
4119 // AMD64-ABI 3.5.7p5: Step 5. Set:
4120 // l->gp_offset = l->gp_offset + num_gp * 8
4121 // l->fp_offset = l->fp_offset + num_fp * 16.
4122 if (neededInt) {
4123 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
4124 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
4125 gp_offset_p);
4126 }
4127 if (neededSSE) {
4128 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
4129 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
4130 fp_offset_p);
4131 }
4132 CGF.EmitBranch(ContBlock);
4133
4134 // Emit code to load the value if it was passed in memory.
4135
4136 CGF.EmitBlock(InMemBlock);
4137 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4138
4139 // Return the appropriate result.
4140
4141 CGF.EmitBlock(ContBlock);
4142 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
4143 "vaarg.addr");
4144 return ResAddr;
4145}
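// The in-register test emitted above reduces to this C sketch (names are
// illustrative):
//
//   bool fits = (!num_gp || va->gp_offset <= 48  - num_gp * 8) &&
//               (!num_fp || va->fp_offset <= 176 - num_fp * 16);
//
// i.e. at most 6 GPRs (6 * 8 == 48 bytes) followed by 8 XMM slots
// (8 * 16 == 128 bytes) in the register save area, hence the 176 limit.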
4146
4147Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4148 QualType Ty) const {
4149 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
4150 CGF.getContext().getTypeInfoInChars(Ty),
4151 CharUnits::fromQuantity(8),
4152 /*allowHigherAlign*/ false);
4153}
4154
4155ABIArgInfo
4156WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
4157 const ABIArgInfo &current) const {
4158 // Assumes vectorCall calling convention.
4159 const Type *Base = nullptr;
4160 uint64_t NumElts = 0;
4161
4162 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
4163 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
4164 FreeSSERegs -= NumElts;
4165 return getDirectX86Hva();
4166 }
4167 return current;
4168}
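// Illustrative effect of this second pass (hypothetical type):
//
//   struct HVA2 { __m128 x, y; };   // homogeneous aggregate, NumElts == 2
//
// On the first vectorcall pass an HVA argument is left indirect; here, if
// two SSE registers are still free, it is upgraded to getDirectX86Hva().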
4169
4170ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
4171 bool IsReturnType, bool IsVectorCall,
4172 bool IsRegCall) const {
4173
4174 if (Ty->isVoidType())
4175 return ABIArgInfo::getIgnore();
4176
4177 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4178 Ty = EnumTy->getDecl()->getIntegerType();
4179
4180 TypeInfo Info = getContext().getTypeInfo(Ty);
4181 uint64_t Width = Info.Width;
4182 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
4183
4184 const RecordType *RT = Ty->getAs<RecordType>();
4185 if (RT) {
4186 if (!IsReturnType) {
4187 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
4188 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4189 }
4190
4191 if (RT->getDecl()->hasFlexibleArrayMember())
4192 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4193
4194 }
4195
4196 const Type *Base = nullptr;
4197 uint64_t NumElts = 0;
4198 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
4199 // other targets.
4200 if ((IsVectorCall || IsRegCall) &&
4201 isHomogeneousAggregate(Ty, Base, NumElts)) {
4202 if (IsRegCall) {
4203 if (FreeSSERegs >= NumElts) {
4204 FreeSSERegs -= NumElts;
4205 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
4206 return ABIArgInfo::getDirect();
4207 return ABIArgInfo::getExpand();
4208 }
4209 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4210 } else if (IsVectorCall) {
4211 if (FreeSSERegs >= NumElts &&
4212 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
4213 FreeSSERegs -= NumElts;
4214 return ABIArgInfo::getDirect();
4215 } else if (IsReturnType) {
4216 return ABIArgInfo::getExpand();
4217 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
4218 // HVAs are delayed and reclassified in the 2nd step.
4219 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4220 }
4221 }
4222 }
4223
4224 if (Ty->isMemberPointerType()) {
4225 // If the member pointer is represented by an LLVM int or ptr, pass it
4226 // directly.
4227 llvm::Type *LLTy = CGT.ConvertType(Ty);
4228 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
4229 return ABIArgInfo::getDirect();
4230 }
4231
4232 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
4233 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4234 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4235 if (Width > 64 || !llvm::isPowerOf2_64(Width))
4236 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4237
4238 // Otherwise, coerce it to a small integer.
4239 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
4240 }
4241
4242 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4243 switch (BT->getKind()) {
4244 case BuiltinType::Bool:
4245 // Bool is always extended per the ABI; other builtin types are not
4246 // extended.
4247 return ABIArgInfo::getExtend(Ty);
4248
4249 case BuiltinType::LongDouble:
4250 // Mingw64 GCC uses the old 80 bit extended precision floating point
4251 // unit. It passes them indirectly through memory.
4252 if (IsMingw64) {
4253 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4254 if (LDF == &llvm::APFloat::x87DoubleExtended())
4255 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4256 }
4257 break;
4258
4259 case BuiltinType::Int128:
4260 case BuiltinType::UInt128:
4261 // If it's a parameter type, the normal ABI rule is that arguments larger
4262 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
4263 // even though it isn't particularly efficient.
4264 if (!IsReturnType)
4265 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4266
4267 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4268 // Clang matches them for compatibility.
4269 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
4270 llvm::Type::getInt64Ty(getVMContext()), 2));
4271
4272 default:
4273 break;
4274 }
4275 }
4276
4277 if (Ty->isExtIntType()) {
4278 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4279 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4280 // However, a non-power-of-two _ExtInt will be passed as 1, 2, 4, or 8
4281 // bytes anyway as long as it fits in them, so we don't have to check for
4282 // a power of 2.
4283 if (Width <= 64)
4284 return ABIArgInfo::getDirect();
4285 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4286 }
4287
4288 return ABIArgInfo::getDirect();
4289}
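// The quoted size rule in practice (illustrative types):
//
//   struct S4  { int a; };           // 4 bytes           -> direct as i32
//   struct S8  { int a, b; };        // 8 bytes           -> direct as i64
//   struct S12 { int a, b, c; };     // 12 bytes, not pow2 -> by reference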
4290
4291void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
4292 unsigned FreeSSERegs,
4293 bool IsVectorCall,
4294 bool IsRegCall) const {
4295 unsigned Count = 0;
4296 for (auto &I : FI.arguments()) {
4297 // Vectorcall on x64 only permits the first 6 arguments to be passed
4298 // in XMM/YMM registers.
4299 if (Count < VectorcallMaxParamNumAsReg)
4300 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4301 else {
4302 // Since these cannot be passed in registers, pretend no registers
4303 // are left.
4304 unsigned ZeroSSERegsAvail = 0;
4305 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
4306 IsVectorCall, IsRegCall);
4307 }
4308 ++Count;
4309 }
4310
4311 for (auto &I : FI.arguments()) {
4312 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4313 }
4314}
4315
4316void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4317 const unsigned CC = FI.getCallingConvention();
4318 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4319 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4320
4321 // If __attribute__((sysv_abi)) is in use, use the SysV argument
4322 // classification rules.
4323 if (CC == llvm::CallingConv::X86_64_SysV) {
4324 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4325 SysVABIInfo.computeInfo(FI);
4326 return;
4327 }
4328
4329 unsigned FreeSSERegs = 0;
4330 if (IsVectorCall) {
4331 // We can use up to 4 SSE return registers with vectorcall.
4332 FreeSSERegs = 4;
4333 } else if (IsRegCall) {
4334 // RegCall gives us 16 SSE registers.
4335 FreeSSERegs = 16;
4336 }
4337
4338 if (!getCXXABI().classifyReturnType(FI))
4339 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4340 IsVectorCall, IsRegCall);
4341
4342 if (IsVectorCall) {
4343 // We can use up to 6 SSE register parameters with vectorcall.
4344 FreeSSERegs = 6;
4345 } else if (IsRegCall) {
4346 // RegCall gives us 16 SSE registers, we can reuse the return registers.
4347 FreeSSERegs = 16;
4348 }
4349
4350 if (IsVectorCall) {
4351 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4352 } else {
4353 for (auto &I : FI.arguments())
4354 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4355 }
4356
4357}
4358
4359Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4360 QualType Ty) const {
4361
4362 bool IsIndirect = false;
4363
4364 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4365 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4366 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4367 uint64_t Width = getContext().getTypeSize(Ty);
4368 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4369 }
4370
4371 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4372 CGF.getContext().getTypeInfoInChars(Ty),
4373 CharUnits::fromQuantity(8),
4374 /*allowHigherAlign*/ false);
4375}
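// Consequence for va_arg (illustrative type): a 16-byte struct occupies a
// single 8-byte slot holding a pointer, so IsIndirect is true and the
// value is loaded through that slot:
//
//   struct Big { long long a, b; };  // Width == 128 -> IsIndirect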
4376
4377static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4378 llvm::Value *Address, bool Is64Bit,
4379 bool IsAIX) {
4380 // This is calculated from the LLVM and GCC tables and verified
4381 // against gcc output. AFAIK all PPC ABIs use the same encoding.
4382
4383 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4384
4385 llvm::IntegerType *i8 = CGF.Int8Ty;
4386 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4387 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4388 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4389
4390 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
4391 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
4392
4393 // 32-63: fp0-31, the 8-byte floating-point registers
4394 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4395
4396 // 64-67 are various 4-byte or 8-byte special-purpose registers:
4397 // 64: mq
4398 // 65: lr
4399 // 66: ctr
4400 // 67: ap
4401 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
4402
4403 // 68-76 are various 4-byte special-purpose registers:
4404 // 68-75 cr0-7
4405 // 76: xer
4406 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4407
4408 // 77-108: v0-31, the 16-byte vector registers
4409 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4410
4411 // 109: vrsave
4412 // 110: vscr
4413 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
4414
4415 // AIX does not utilize the rest of the registers.
4416 if (IsAIX)
4417 return false;
4418
4419 // 111: spe_acc
4420 // 112: spefscr
4421 // 113: sfp
4422 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
4423
4424 if (!Is64Bit)
4425 return false;
4426
4427 // TODO: Verify whether these registers are used on 64-bit AIX with Power8
4428 // or later CPUs.
4429 // 64-bit only registers:
4430 // 114: tfhar
4431 // 115: tfiar
4432 // 116: texasr
4433 AssignToArrayRange(Builder, Address, Eight8, 114, 116);
4434
4435 return false;
4436}
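// Each AssignToArrayRange call above amounts to this loop, sketched in C
// (AssignToArrayRange is a helper defined earlier in this file):
//
//   for (unsigned reg = First; reg <= Last; ++reg)
//     ((uint8_t *)Address)[reg] = SizeInBytes;   // DWARF regnum -> size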
4437
4438// AIX
4439namespace {
4440/// AIXABIInfo - The AIX XCOFF ABI information.
4441class AIXABIInfo : public ABIInfo {
4442 const bool Is64Bit;
4443 const unsigned PtrByteSize;
4444 CharUnits getParamTypeAlignment(QualType Ty) const;
4445
4446public:
4447 AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4448 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
4449
4450 bool isPromotableTypeForABI(QualType Ty) const;
4451
4452 ABIArgInfo classifyReturnType(QualType RetTy) const;
4453 ABIArgInfo classifyArgumentType(QualType Ty) const;
4454
4455 void computeInfo(CGFunctionInfo &FI) const override {
4456 if (!getCXXABI().classifyReturnType(FI))
4457 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4458
4459 for (auto &I : FI.arguments())
4460 I.info = classifyArgumentType(I.type);
4461 }
4462
4463 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4464 QualType Ty) const override;
4465};
4466
4467class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
4468 const bool Is64Bit;
4469
4470public:
4471 AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4472 : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
4473 Is64Bit(Is64Bit) {}
4474 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4475 return 1; // r1 is the dedicated stack pointer
4476 }
4477
4478 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4479 llvm::Value *Address) const override;
4480};
4481} // namespace
4482
4483// Return true if the ABI requires Ty to be passed sign- or zero-
4484// extended to 32/64 bits.
4485bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
4486 // Treat an enum type as its underlying type.
4487 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4488 Ty = EnumTy->getDecl()->getIntegerType();
4489
4490 // Promotable integer types are required to be promoted by the ABI.
4491 if (Ty->isPromotableIntegerType())
4492 return true;
4493
4494 if (!Is64Bit)
4495 return false;
4496
4497 // In 64-bit mode, in addition to the usual promotable integer types, we also
4498 // need to extend all 32-bit types, since the ABI requires promotion to 64
4499 // bits.
4500 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4501 switch (BT->getKind()) {
4502 case BuiltinType::Int:
4503 case BuiltinType::UInt:
4504 return true;
4505 default:
4506 break;
4507 }
4508
4509 return false;
4510}
4511
4512ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
4513 if (RetTy->isAnyComplexType())
4514 return ABIArgInfo::getDirect();
4515
4516 if (RetTy->isVectorType())
4517 llvm::report_fatal_error("vector type is not supported on AIX yet");
4518
4519 if (RetTy->isVoidType())
4520 return ABIArgInfo::getIgnore();
4521
4522 if (isAggregateTypeForABI(RetTy))
4523 return getNaturalAlignIndirect(RetTy);
4524
4525 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4526 : ABIArgInfo::getDirect());
4527}
4528
4529ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
4530 Ty = useFirstFieldIfTransparentUnion(Ty);
4531
4532 if (Ty->isAnyComplexType())
4533 return ABIArgInfo::getDirect();
4534
4535 if (Ty->isVectorType())
4536 llvm::report_fatal_error("vector type is not supported on AIX yet");
4537
4538 if (isAggregateTypeForABI(Ty)) {
4539 // Records with non-trivial destructors/copy-constructors should not be
4540 // passed by value.
4541 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4542 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4543
4544 CharUnits CCAlign = getParamTypeAlignment(Ty);
4545 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
4546
4547 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
4548 /*Realign*/ TyAlign > CCAlign);
4549 }
4550
4551 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4552 : ABIArgInfo::getDirect());
4553}
4554
4555CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
4556 // Complex types are passed just like their elements.
4557 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4558 Ty = CTy->getElementType();
4559
4560 if (Ty->isVectorType())
4561 llvm::report_fatal_error("vector type is not supported on AIX yet");
4562
4563 // If the structure contains a vector type, the alignment is 16.
4564 if (isRecordWithSIMDVectorType(getContext(), Ty))
4565 return CharUnits::fromQuantity(16);
4566
4567 return CharUnits::fromQuantity(PtrByteSize);
4568}
4569
4570Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4571 QualType Ty) const {
4572 if (Ty->isAnyComplexType())
4573 llvm::report_fatal_error("complex type is not supported on AIX yet");
4574
4575 if (Ty->isVectorType())
4576 llvm::report_fatal_error("vector type is not supported on AIX yet");
4577
4578 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4579 TypeInfo.Align = getParamTypeAlignment(Ty);
4580
4581 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
4582
4583 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
4584 SlotSize, /*AllowHigher*/ true);
4585}
4586
4587bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4588 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
4589 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
4590}
4591
4592// PowerPC-32
4593namespace {
4594/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4595class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4596 bool IsSoftFloatABI;
4597 bool IsRetSmallStructInRegABI;
4598
4599 CharUnits getParamTypeAlignment(QualType Ty) const;
4600
4601public:
4602 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
4603 bool RetSmallStructInRegABI)
4604 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4605 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4606
4607 ABIArgInfo classifyReturnType(QualType RetTy) const;
4608
4609 void computeInfo(CGFunctionInfo &FI) const override {
4610 if (!getCXXABI().classifyReturnType(FI))
4611 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4612 for (auto &I : FI.arguments())
4613 I.info = classifyArgumentType(I.type);
4614 }
4615
4616 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4617 QualType Ty) const override;
4618};
4619
4620class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4621public:
4622 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4623 bool RetSmallStructInRegABI)
4624 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
4625 CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4626
4627 static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4628 const CodeGenOptions &Opts);
4629
4630 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4631 // This is recovered from gcc output.
4632 return 1; // r1 is the dedicated stack pointer
4633 }
4634
4635 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4636 llvm::Value *Address) const override;
4637};
4638}
4639
4640CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4641 // Complex types are passed just like their elements.
4642 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4643 Ty = CTy->getElementType();
4644
4645 if (Ty->isVectorType())
4646 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4647 : 4);
4648
4649 // For single-element float/vector structs, we consider the whole type
4650 // to have the same alignment requirements as its single element.
4651 const Type *AlignTy = nullptr;
4652 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4653 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4654 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4655 (BT && BT->isFloatingPoint()))
4656 AlignTy = EltType;
4657 }
4658
4659 if (AlignTy)
4660 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4661 return CharUnits::fromQuantity(4);
4662}
4663
4664ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4665 uint64_t Size;
4666
4667 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
4668 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
4669 (Size = getContext().getTypeSize(RetTy)) <= 64) {
4670 // System V ABI (1995), page 3-22, specified:
4671 // > A structure or union whose size is less than or equal to 8 bytes
4672 // > shall be returned in r3 and r4, as if it were first stored in the
4673 // > 8-byte aligned memory area and then the low addressed word were
4674 // > loaded into r3 and the high-addressed word into r4. Bits beyond
4675 // > the last member of the structure or union are not defined.
4676 //
4677 // GCC for big-endian PPC32 inserts the pad before the first member,
4678 // not "beyond the last member" of the struct. To stay compatible
4679 // with GCC, we coerce the struct to an integer of the same size.
4680 // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
4681 if (Size == 0)
4682 return ABIArgInfo::getIgnore();
4683 else {
4684 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
4685 return ABIArgInfo::getDirect(CoerceTy);
4686 }
4687 }
4688
4689 return DefaultABIInfo::classifyReturnType(RetTy);
4690}
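// Illustrative returns under -msvr4-struct-return (hypothetical types):
//
//   struct A { char c; };            // 8 bits  -> i8, extended into r3
//   struct B { int x; short y; };    // 64 bits (with padding) -> i64 in r3:r4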
4691
4692// TODO: this implementation is now likely redundant with
4693// DefaultABIInfo::EmitVAArg.
4694Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4695 QualType Ty) const {
4696 if (getTarget().getTriple().isOSDarwin()) {
4697 auto TI = getContext().getTypeInfoInChars(Ty);
4698 TI.Align = getParamTypeAlignment(Ty);
4699
4700 CharUnits SlotSize = CharUnits::fromQuantity(4);
4701 return emitVoidPtrVAArg(CGF, VAList, Ty,
4702 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4703 /*AllowHigherAlign=*/true);
4704 }
4705
4706 const unsigned OverflowLimit = 8;
4707 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4708 // TODO: Implement this. For now ignore.
4709 (void)CTy;
4710 return Address::invalid(); // FIXME?
4711 }
4712
4713 // struct __va_list_tag {
4714 // unsigned char gpr;
4715 // unsigned char fpr;
4716 // unsigned short reserved;
4717 // void *overflow_arg_area;
4718 // void *reg_save_area;
4719 // };
4720
4721 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4722 bool isInt =
4723 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4724 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4725
4726 // All aggregates are passed indirectly? That doesn't seem consistent
4727 // with the argument-lowering code.
4728 bool isIndirect = Ty->isAggregateType();
4729
4730 CGBuilderTy &Builder = CGF.Builder;
4731
4732 // The calling convention either uses 1-2 GPRs or 1 FPR.
4733 Address NumRegsAddr = Address::invalid();
4734 if (isInt || IsSoftFloatABI) {
4735 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4736 } else {
4737 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4738 }
4739
4740 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4741
4742 // "Align" the register count when TY is i64.
4743 if (isI64 || (isF64 && IsSoftFloatABI)) {
4744 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4745 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4746 }
4747
4748 llvm::Value *CC =
4749 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4750
4751 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4752 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4753 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4754
4755 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4756
4757 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4758 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4759
4760 // Case 1: consume registers.
4761 Address RegAddr = Address::invalid();
4762 {
4763 CGF.EmitBlock(UsingRegs);
4764
4765 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4766 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4767 CharUnits::fromQuantity(8));
4768 assert(RegAddr.getElementType() == CGF.Int8Ty);
4769
4770 // Floating-point registers start after the general-purpose registers.
4771 if (!(isInt || IsSoftFloatABI)) {
4772 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4773 CharUnits::fromQuantity(32));
4774 }
4775
4776 // Get the address of the saved value by scaling the number of
4777 // registers we've used by the size of each register.
4778 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4779 llvm::Value *RegOffset =
4780 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4781 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4782 RegAddr.getPointer(), RegOffset),
4783 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4784 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4785
4786 // Increase the used-register count.
4787 NumRegs =
4788 Builder.CreateAdd(NumRegs,
4789 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4790 Builder.CreateStore(NumRegs, NumRegsAddr);
4791
4792 CGF.EmitBranch(Cont);
4793 }
4794
4795 // Case 2: consume space in the overflow area.
4796 Address MemAddr = Address::invalid();
4797 {
4798 CGF.EmitBlock(UsingOverflow);
4799
4800 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4801
4802 // Everything in the overflow area is rounded up to a size of at least 4.
4803 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4804
4805 CharUnits Size;
4806 if (!isIndirect) {
4807 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4808 Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
4809 } else {
4810 Size = CGF.getPointerSize();
4811 }
4812
4813 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4814 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4815 OverflowAreaAlign);
4816 // Round up address of argument to alignment
4817 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4818 if (Align > OverflowAreaAlign) {
4819 llvm::Value *Ptr = OverflowArea.getPointer();
4820 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4821 Align);
4822 }
4823
4824 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4825
4826 // Increase the overflow area.
4827 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4828 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4829 CGF.EmitBranch(Cont);
4830 }
4831
4832 CGF.EmitBlock(Cont);
4833
4834 // Merge the cases with a phi.
4835 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4836 "vaarg.addr");
4837
4838 // Load the pointer if the argument was passed indirectly.
4839 if (isIndirect) {
4840 Result = Address(Builder.CreateLoad(Result, "aggr"),
4841 getContext().getTypeAlignInChars(Ty));
4842 }
4843
4844 return Result;
4845}
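// The register accounting above, as a C sketch (names are illustrative):
//
//   if (is_i64 || (is_f64 && soft_float))
//     gpr = (gpr + 1) & ~1;          // start an i64 on an even GPR pair
//   if (gpr < 8) { /* load from reg_save_area */ gpr += needs_pair ? 2 : 1; }
//   else         { /* load from overflow_arg_area */ }   // OverflowLimit == 8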
4846
4847bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4848 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4849 assert(Triple.getArch() == llvm::Triple::ppc);
4850
4851 switch (Opts.getStructReturnConvention()) {
4852 case CodeGenOptions::SRCK_Default:
4853 break;
4854 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
4855 return false;
4856 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
4857 return true;
4858 }
4859
4860 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
4861 return true;
4862
4863 return false;
4864}
4865
4866bool
4867PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4868 llvm::Value *Address) const {
4869 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
4870 /*IsAIX*/ false);
4871}
4872
4873// PowerPC-64
4874
4875namespace {
4876/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4877class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4878public:
4879 enum ABIKind {
4880 ELFv1 = 0,
4881 ELFv2
4882 };
4883
4884private:
4885 static const unsigned GPRBits = 64;
4886 ABIKind Kind;
4887 bool HasQPX;
4888 bool IsSoftFloatABI;
4889
4890 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4891 // will be passed in a QPX register.
4892 bool IsQPXVectorTy(const Type *Ty) const {
4893 if (!HasQPX)
4894 return false;
4895
4896 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4897 unsigned NumElements = VT->getNumElements();
4898 if (NumElements == 1)
4899 return false;
4900
4901 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4902 if (getContext().getTypeSize(Ty) <= 256)
4903 return true;
4904 } else if (VT->getElementType()->
4905 isSpecificBuiltinType(BuiltinType::Float)) {
4906 if (getContext().getTypeSize(Ty) <= 128)
4907 return true;
4908 }
4909 }
4910
4911 return false;
4912 }
4913
4914 bool IsQPXVectorTy(QualType Ty) const {
4915 return IsQPXVectorTy(Ty.getTypePtr());
4916 }
4917
4918public:
4919 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4920 bool SoftFloatABI)
4921 : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4922 IsSoftFloatABI(SoftFloatABI) {}
4923
4924 bool isPromotableTypeForABI(QualType Ty) const;
4925 CharUnits getParamTypeAlignment(QualType Ty) const;
4926
4927 ABIArgInfo classifyReturnType(QualType RetTy) const;
4928 ABIArgInfo classifyArgumentType(QualType Ty) const;
4929
4930 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4931 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4932 uint64_t Members) const override;
4933
4934 // TODO: We can add more logic to computeInfo to improve performance.
4935 // Example: For aggregate arguments that fit in a register, we could
4936 // use getDirectInReg (as is done below for structs containing a single
4937 // floating-point value) to avoid pushing them to memory on function
4938 // entry. This would require changing the logic in PPCISelLowering
4939 // when lowering the parameters in the caller and args in the callee.
4940 void computeInfo(CGFunctionInfo &FI) const override {
4941 if (!getCXXABI().classifyReturnType(FI))
4942 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4943 for (auto &I : FI.arguments()) {
4944 // We rely on the default argument classification for the most part.
4945 // One exception: An aggregate containing a single floating-point
4946 // or vector item must be passed in a register if one is available.
4947 const Type *T = isSingleElementStruct(I.type, getContext());
4948 if (T) {
4949 const BuiltinType *BT = T->getAs<BuiltinType>();
4950 if (IsQPXVectorTy(T) ||
4951 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4952 (BT && BT->isFloatingPoint())) {
4953 QualType QT(T, 0);
4954 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4955 continue;
4956 }
4957 }
4958 I.info = classifyArgumentType(I.type);
4959 }
4960 }
4961
4962 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4963 QualType Ty) const override;
4964
4965 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4966 bool asReturnValue) const override {
4967 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4968 }
4969
4970 bool isSwiftErrorInRegister() const override {
4971 return false;
4972 }
4973};
4974
4975class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4976
4977public:
4978 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4979 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4980 bool SoftFloatABI)
4981 : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
4982 CGT, Kind, HasQPX, SoftFloatABI)) {}
4983
4984 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4985 // This is recovered from gcc output.
4986 return 1; // r1 is the dedicated stack pointer
4987 }
4988
4989 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4990 llvm::Value *Address) const override;
4991};
4992
4993class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4994public:
4995 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4996
4997 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4998 // This is recovered from gcc output.
4999 return 1; // r1 is the dedicated stack pointer
5000 }
5001
5002 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5003 llvm::Value *Address) const override;
5004};
5005
5006}
5007
5008// Return true if the ABI requires Ty to be passed sign- or zero-
5009// extended to 64 bits.
5010bool
5011PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
5012 // Treat an enum type as its underlying type.
5013 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5014 Ty = EnumTy->getDecl()->getIntegerType();
5015
5016 // Promotable integer types are required to be promoted by the ABI.
5017 if (isPromotableIntegerTypeForABI(Ty))
5018 return true;
5019
5020 // In addition to the usual promotable integer types, we also need to
5021 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
5022 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5023 switch (BT->getKind()) {
5024 case BuiltinType::Int:
5025 case BuiltinType::UInt:
5026 return true;
5027 default:
5028 break;
5029 }
5030
5031 if (const auto *EIT = Ty->getAs<ExtIntType>())
5032 if (EIT->getNumBits() < 64)
5033 return true;
5034
5035 return false;
5036}
5037
5038/// isAlignedParamType - Determine whether a type requires 16-byte or
5039/// higher alignment in the parameter area. Always returns at least 8.
5040CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
5041 // Complex types are passed just like their elements.
5042 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
5043 Ty = CTy->getElementType();
5044
5045 // Only vector types of size 16 bytes need alignment (larger types are
5046 // passed via reference, smaller types are not aligned).
5047 if (IsQPXVectorTy(Ty)) {
5048 if (getContext().getTypeSize(Ty) > 128)
5049 return CharUnits::fromQuantity(32);
5050
5051 return CharUnits::fromQuantity(16);
5052 } else if (Ty->isVectorType()) {
5053 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
5054 } else if (Ty->isRealFloatingType() && getContext().getTypeSize(Ty) == 128) {
5055 // IEEE 128-bit floating numbers are also stored in vector registers.
5056 // And both IEEE quad-precision and IBM extended double (ppc_fp128) should
5057 // be quad-word aligned.
5058 return CharUnits::fromQuantity(16);
5059 }
5060
5061 // For single-element float/vector structs, we consider the whole type
5062 // to have the same alignment requirements as its single element.
5063 const Type *AlignAsType = nullptr;
5064 const Type *EltType = isSingleElementStruct(Ty, getContext());
5065 if (EltType) {
5066 const BuiltinType *BT = EltType->getAs<BuiltinType>();
5067 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
5068 getContext().getTypeSize(EltType) == 128) ||
5069 (BT && BT->isFloatingPoint()))
5070 AlignAsType = EltType;
5071 }
5072
5073 // Likewise for ELFv2 homogeneous aggregates.
5074 const Type *Base = nullptr;
5075 uint64_t Members = 0;
5076 if (!AlignAsType && Kind == ELFv2 &&
5077 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
5078 AlignAsType = Base;
5079
5080 // With special case aggregates, only vector base types need alignment.
5081 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
5082 if (getContext().getTypeSize(AlignAsType) > 128)
5083 return CharUnits::fromQuantity(32);
5084
5085 return CharUnits::fromQuantity(16);
5086 } else if (AlignAsType) {
5087 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
5088 }
5089
5090 // Otherwise, we only need alignment for any aggregate type that
5091 // has an alignment requirement of >= 16 bytes.
5092 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
5093 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
5094 return CharUnits::fromQuantity(32);
5095 return CharUnits::fromQuantity(16);
5096 }
5097
5098 return CharUnits::fromQuantity(8);
5099}
5100
5101/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
5102/// aggregate. Base is set to the base element type, and Members is set
5103/// to the number of base elements.
5104bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
5105 uint64_t &Members) const {
5106 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
5107 uint64_t NElements = AT->getSize().getZExtValue();
5108 if (NElements == 0)
5109 return false;
5110 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
5111 return false;
5112 Members *= NElements;
5113 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
5114 const RecordDecl *RD = RT->getDecl();
5115 if (RD->hasFlexibleArrayMember())
5116 return false;
5117
5118 Members = 0;
5119
5120 // If this is a C++ record, check the bases first.
5121 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
5122 for (const auto &I : CXXRD->bases()) {
5123 // Ignore empty records.
5124 if (isEmptyRecord(getContext(), I.getType(), true))
5125 continue;
5126
5127 uint64_t FldMembers;
5128 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
5129 return false;
5130
5131 Members += FldMembers;
5132 }
5133 }
5134
5135 for (const auto *FD : RD->fields()) {
5136 // Ignore (non-zero arrays of) empty records.
5137 QualType FT = FD->getType();
5138 while (const ConstantArrayType *AT =
5139 getContext().getAsConstantArrayType(FT)) {
5140 if (AT->getSize().getZExtValue() == 0)
5141 return false;
5142 FT = AT->getElementType();
5143 }
5144 if (isEmptyRecord(getContext(), FT, true))
5145 continue;
5146
5147 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5148 if (getContext().getLangOpts().CPlusPlus &&
5149 FD->isZeroLengthBitField(getContext()))
5150 continue;
5151
5152 uint64_t FldMembers;
5153 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
5154 return false;
5155
5156 Members = (RD->isUnion() ?
5157 std::max(Members, FldMembers) : Members + FldMembers);
5158 }
5159
5160 if (!Base)
5161 return false;
5162
5163 // Ensure there is no padding.
5164 if (getContext().getTypeSize(Base) * Members !=
5165 getContext().getTypeSize(Ty))
5166 return false;
5167 } else {
5168 Members = 1;
5169 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
5170 Members = 2;
5171 Ty = CT->getElementType();
5172 }
5173
5174 // Most ABIs only support float, double, and some vector type widths.
5175 if (!isHomogeneousAggregateBaseType(Ty))
5176 return false;
5177
5178 // The base type must be the same for all members. Types that
5179 // agree in both total size and mode (float vs. vector) are
5180 // treated as being equivalent here.
5181 const Type *TyPtr = Ty.getTypePtr();
5182 if (!Base) {
5183 Base = TyPtr;
5184 // A vector with a non-power-of-2 element count still occupies a
5185 // power-of-2 size in memory, so explicitly widen the base type to match.
5186 if (const VectorType *VT = Base->getAs<VectorType>()) {
5187 QualType EltTy = VT->getElementType();
5188 unsigned NumElements =
5189 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
5190 Base = getContext()
5191 .getVectorType(EltTy, NumElements, VT->getVectorKind())
5192 .getTypePtr();
5193 }
5194 }
5195
5196 if (Base->isVectorType() != TyPtr->isVectorType() ||
5197 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
5198 return false;
5199 }
5200 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
5201}
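// Worked examples (hypothetical types):
//
//   struct HFA  { double x, y, z; };      // Base = double, Members = 3
//   struct Nest { struct HFA a[2]; };     // Members = 6, still homogeneous
//   struct Mix  { double x; float y; };   // base sizes differ -> not an HA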
5202
5203bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5204 // Homogeneous aggregates for ELFv2 must have base types of float,
5205 // double, long double, or 128-bit vectors.
5206 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5207 if (BT->getKind() == BuiltinType::Float ||
5208 BT->getKind() == BuiltinType::Double ||
5209 BT->getKind() == BuiltinType::LongDouble ||
5210 (getContext().getTargetInfo().hasFloat128Type() &&
5211 (BT->getKind() == BuiltinType::Float128))) {
5212 if (IsSoftFloatABI)
5213 return false;
5214 return true;
5215 }
5216 }
5217 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5218 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
5219 return true;
5220 }
5221 return false;
5222}
5223
5224bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
5225 const Type *Base, uint64_t Members) const {
5226 // Vector and fp128 types require one register; other floating-point
5227 // types require one or two registers depending on their size.
5228 uint32_t NumRegs =
5229 ((getContext().getTargetInfo().hasFloat128Type() &&
5230 Base->isFloat128Type()) ||
5231 Base->isVectorType()) ? 1
5232 : (getContext().getTypeSize(Base) + 63) / 64;
5233
5234 // Homogeneous Aggregates may occupy at most 8 registers.
5235 return Members * NumRegs <= 8;
5236}
5237
5238ABIArgInfo
5239PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
5240 Ty = useFirstFieldIfTransparentUnion(Ty);
5241
5242 if (Ty->isAnyComplexType())
5243 return ABIArgInfo::getDirect();
5244
5245 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
5246 // or via reference (larger than 16 bytes).
5247 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
5248 uint64_t Size = getContext().getTypeSize(Ty);
5249 if (Size > 128)
5250 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5251 else if (Size < 128) {
5252 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5253 return ABIArgInfo::getDirect(CoerceTy);
5254 }
5255 }
5256
5257 if (const auto *EIT = Ty->getAs<ExtIntType>())
5258 if (EIT->getNumBits() > 128)
5259 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
5260
5261 if (isAggregateTypeForABI(Ty)) {
5262 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5263 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5264
5265 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
5266 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5267
5268 // ELFv2 homogeneous aggregates are passed as array types.
5269 const Type *Base = nullptr;
5270 uint64_t Members = 0;
5271 if (Kind == ELFv2 &&
5272 isHomogeneousAggregate(Ty, Base, Members)) {
5273 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5274 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5275 return ABIArgInfo::getDirect(CoerceTy);
5276 }
5277
5278 // If an aggregate may end up fully in registers, we do not
5279 // use the ByVal method, but pass the aggregate as array.
5280 // This is usually beneficial since we avoid forcing the
5281 // back-end to store the argument to memory.
5282 uint64_t Bits = getContext().getTypeSize(Ty);
5283 if (Bits > 0 && Bits <= 8 * GPRBits) {
5284 llvm::Type *CoerceTy;
5285
5286 // Types up to 8 bytes are passed as an integer type (which will be
5287 // properly aligned in the argument save area doubleword).
5288 if (Bits <= GPRBits)
5289 CoerceTy =
5290 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5291 // Larger types are passed as arrays, with the base type selected
5292 // according to the required alignment in the save area.
5293 else {
5294 uint64_t RegBits = ABIAlign * 8;
5295 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
5296 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
5297 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
5298 }
5299
5300 return ABIArgInfo::getDirect(CoerceTy);
5301 }
5302
5303 // All other aggregates are passed ByVal.
5304 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5305 /*ByVal=*/true,
5306 /*Realign=*/TyAlign > ABIAlign);
5307 }
5308
5309 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
5310 : ABIArgInfo::getDirect());
5311}
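// Illustrative ELFv2 classifications (hypothetical types):
//
//   struct HFA { double x, y; };     // homogeneous -> [2 x double]
//   struct S6  { short a, b, c; };   // 48 bits     -> i48
//   struct S12 { int a, b, c; };     // 96 bits     -> [2 x i64]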
5312
5313ABIArgInfo
5314PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
5315 if (RetTy->isVoidType())
5316 return ABIArgInfo::getIgnore();
5317
5318 if (RetTy->isAnyComplexType())
5319 return ABIArgInfo::getDirect();
5320
5321 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
5322 // or via reference (larger than 16 bytes).
5323 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
5324 uint64_t Size = getContext().getTypeSize(RetTy);
5325 if (Size > 128)
5326 return getNaturalAlignIndirect(RetTy);
5327 else if (Size < 128) {
5328 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5329 return ABIArgInfo::getDirect(CoerceTy);
5330 }
5331 }
5332
5333 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5334 if (EIT->getNumBits() > 128)
5335 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
5336
5337 if (isAggregateTypeForABI(RetTy)) {
5338 // ELFv2 homogeneous aggregates are returned as array types.
5339 const Type *Base = nullptr;
5340 uint64_t Members = 0;
5341 if (Kind == ELFv2 &&
5342 isHomogeneousAggregate(RetTy, Base, Members)) {
5343 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5344 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5345 return ABIArgInfo::getDirect(CoerceTy);
5346 }
5347
5348 // ELFv2 small aggregates are returned in up to two registers.
5349 uint64_t Bits = getContext().getTypeSize(RetTy);
5350 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
5351 if (Bits == 0)
5352 return ABIArgInfo::getIgnore();
5353
5354 llvm::Type *CoerceTy;
5355 if (Bits > GPRBits) {
5356 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
5357 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
5358 } else
5359 CoerceTy =
5360 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5361 return ABIArgInfo::getDirect(CoerceTy);
5362 }
5363
5364 // All other aggregates are returned indirectly.
5365 return getNaturalAlignIndirect(RetTy);
5366 }
5367
5368 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
5369 : ABIArgInfo::getDirect());
5370}
5371
5372// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
5373Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5374 QualType Ty) const {
5375 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
5376 TypeInfo.Align = getParamTypeAlignment(Ty);
5377
5378 CharUnits SlotSize = CharUnits::fromQuantity(8);
5379
5380 // If we have a complex type and the base type is smaller than 8 bytes,
5381 // the ABI calls for the real and imaginary parts to be right-adjusted
5382 // in separate doublewords. However, Clang expects us to produce a
5383 // pointer to a structure with the two parts packed tightly. So generate
5384 // loads of the real and imaginary parts relative to the va_list pointer,
5385 // and store them to a temporary structure.
5386 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
5387 CharUnits EltSize = TypeInfo.Width / 2;
5388 if (EltSize < SlotSize) {
5389 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
5390 SlotSize * 2, SlotSize,
5391 SlotSize, /*AllowHigher*/ true);
5392
5393 Address RealAddr = Addr;
5394 Address ImagAddr = RealAddr;
5395 if (CGF.CGM.getDataLayout().isBigEndian()) {
5396 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
5397 SlotSize - EltSize);
5398 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
5399 2 * SlotSize - EltSize);
5400 } else {
5401 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
5402 }
5403
5404 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
5405 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
5406 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
5407 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
5408 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
5409
5410 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
5411 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
5412 /*init*/ true);
5413 return Temp;
5414 }
5415 }
5416
5417 // Otherwise, just use the general rule.
5418 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
5419 TypeInfo, SlotSize, /*AllowHigher*/ true);
5420}
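// Illustration (hedged, not part of the original source): for a va_arg of
// "_Complex float" on big-endian PPC64, the two slots hold
//   doubleword 0: [ 4 bytes pad | real (4 bytes) ]
//   doubleword 1: [ 4 bytes pad | imag (4 bytes) ]
// and the code above loads the right-adjusted parts and repacks them into a
// tight temporary { float, float }, which is what Clang's complex lowering
// expects.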
5421
5422bool
5423PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
5424 CodeGen::CodeGenFunction &CGF,
5425 llvm::Value *Address) const {
5426 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5427 /*IsAIX*/ false);
5428}
5429
5430bool
5431PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5432 llvm::Value *Address) const {
5433 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5434 /*IsAIX*/ false);
5435}
5436
5437//===----------------------------------------------------------------------===//
5438// AArch64 ABI Implementation
5439//===----------------------------------------------------------------------===//
5440
5441namespace {
5442
5443class AArch64ABIInfo : public SwiftABIInfo {
5444public:
5445 enum ABIKind {
5446 AAPCS = 0,
5447 DarwinPCS,
5448 Win64
5449 };
5450
5451private:
5452 ABIKind Kind;
5453
5454public:
5455 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
5456 : SwiftABIInfo(CGT), Kind(Kind) {}
5457
5458private:
5459 ABIKind getABIKind() const { return Kind; }
5460 bool isDarwinPCS() const { return Kind == DarwinPCS; }
5461
5462 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
5463 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5464 ABIArgInfo coerceIllegalVector(QualType Ty) const;
5465 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5466 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5467 uint64_t Members) const override;
5468
5469 bool isIllegalVectorType(QualType Ty) const;
5470
5471 void computeInfo(CGFunctionInfo &FI) const override {
5472 if (!::classifyReturnType(getCXXABI(), FI, *this))
5473 FI.getReturnInfo() =
5474 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5475
5476 for (auto &it : FI.arguments())
5477 it.info = classifyArgumentType(it.type);
5478 }
5479
5480 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5481 CodeGenFunction &CGF) const;
5482
5483 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
5484 CodeGenFunction &CGF) const;
5485
5486 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5487 QualType Ty) const override {
5488 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5489 if (isa<llvm::ScalableVectorType>(BaseTy))
5490 llvm::report_fatal_error("Passing SVE types to variadic functions is "
5491 "currently not supported");
5492
5493 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
5494 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
5495 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
5496 }
5497
5498 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5499 QualType Ty) const override;
5500
5501 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5502 bool asReturnValue) const override {
5503 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5504 }
5505 bool isSwiftErrorInRegister() const override {
5506 return true;
5507 }
5508
5509 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5510 unsigned elts) const override;
5511
5512 bool allowBFloatArgsAndRet() const override {
5513 return getTarget().hasBFloat16Type();
5514 }
5515};
5516
5517class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
5518public:
5519 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5520 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
5521
5522 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5523 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5524 }
5525
5526 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5527 return 31;
5528 }
5529
5530 bool doesReturnSlotInterfereWithArgs() const override { return false; }
5531
5532 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5533 CodeGen::CodeGenModule &CGM) const override {
5534 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5535 if (!FD)
5536 return;
5537
5538 const auto *TA = FD->getAttr<TargetAttr>();
5539 if (TA == nullptr)
5540 return;
5541
5542 ParsedTargetAttr Attr = TA->parse();
5543 if (Attr.BranchProtection.empty())
5544 return;
5545
5546 TargetInfo::BranchProtectionInfo BPI;
5547 StringRef Error;
5548 (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
5549 BPI, Error);
5550 assert(Error.empty());
5551
5552 auto *Fn = cast<llvm::Function>(GV);
5553 static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
5554 Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
5555
5556 if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
5557 Fn->addFnAttr("sign-return-address-key",
5558 BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
5559 ? "a_key"
5560 : "b_key");
5561 }
5562
5563 Fn->addFnAttr("branch-target-enforcement",
5564 BPI.BranchTargetEnforcement ? "true" : "false");
5565 }
5566};
5567
5568class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5569public:
5570 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5571 : AArch64TargetCodeGenInfo(CGT, K) {}
5572
5573 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5574 CodeGen::CodeGenModule &CGM) const override;
5575
5576 void getDependentLibraryOption(llvm::StringRef Lib,
5577 llvm::SmallString<24> &Opt) const override {
5578 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5579 }
5580
5581 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5582 llvm::SmallString<32> &Opt) const override {
5583 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5584 }
5585};
5586
5587void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5588 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5589 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5590 if (GV->isDeclaration())
5591 return;
5592 addStackProbeTargetAttributes(D, GV, CGM);
5593}
5594}
5595
5596ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
5597 assert(Ty->isVectorType() && "expected vector type!");
5598
5599 const auto *VT = Ty->castAs<VectorType>();
5600 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
5601 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5602 assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
5603 BuiltinType::UChar &&
5604 "unexpected builtin type for SVE predicate!");
5605 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
5606 llvm::Type::getInt1Ty(getVMContext()), 16));
5607 }
5608
5609 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
5610 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5611
5612 const auto *BT = VT->getElementType()->castAs<BuiltinType>();
5613 llvm::ScalableVectorType *ResType = nullptr;
5614 switch (BT->getKind()) {
5615 default:
5616 llvm_unreachable("unexpected builtin type for SVE vector!");
5617 case BuiltinType::SChar:
5618 case BuiltinType::UChar:
5619 ResType = llvm::ScalableVectorType::get(
5620 llvm::Type::getInt8Ty(getVMContext()), 16);
5621 break;
5622 case BuiltinType::Short:
5623 case BuiltinType::UShort:
5624 ResType = llvm::ScalableVectorType::get(
5625 llvm::Type::getInt16Ty(getVMContext()), 8);
5626 break;
5627 case BuiltinType::Int:
5628 case BuiltinType::UInt:
5629 ResType = llvm::ScalableVectorType::get(
5630 llvm::Type::getInt32Ty(getVMContext()), 4);
5631 break;
5632 case BuiltinType::Long:
5633 case BuiltinType::ULong:
5634 ResType = llvm::ScalableVectorType::get(
5635 llvm::Type::getInt64Ty(getVMContext()), 2);
5636 break;
5637 case BuiltinType::Half:
5638 ResType = llvm::ScalableVectorType::get(
5639 llvm::Type::getHalfTy(getVMContext()), 8);
5640 break;
5641 case BuiltinType::Float:
5642 ResType = llvm::ScalableVectorType::get(
5643 llvm::Type::getFloatTy(getVMContext()), 4);
5644 break;
5645 case BuiltinType::Double:
5646 ResType = llvm::ScalableVectorType::get(
5647 llvm::Type::getDoubleTy(getVMContext()), 2);
5648 break;
5649 case BuiltinType::BFloat16:
5650 ResType = llvm::ScalableVectorType::get(
5651 llvm::Type::getBFloatTy(getVMContext()), 8);
5652 break;
5653 }
5654 return ABIArgInfo::getDirect(ResType);
5655 }
5656
5657 uint64_t Size = getContext().getTypeSize(Ty);
5658 // Android promotes <2 x i8> to i16, not i32
5659 if (isAndroid() && (Size <= 16)) {
5660 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5661 return ABIArgInfo::getDirect(ResType);
5662 }
5663 if (Size <= 32) {
5664 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5665 return ABIArgInfo::getDirect(ResType);
5666 }
5667 if (Size == 64) {
5668 auto *ResType =
5669 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5670 return ABIArgInfo::getDirect(ResType);
5671 }
5672 if (Size == 128) {
5673 auto *ResType =
5674 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5675 return ABIArgInfo::getDirect(ResType);
5676 }
5677 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5678}
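// Illustration (hedged, not part of the original source): the mappings above
// convert fixed-length SVE types to their scalable equivalents; e.g. assuming
// a file compiled with -msve-vector-bits=512:
//   typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));
//   // passed/returned as <vscale x 4 x i32>
//   typedef svbool_t fixed_pred __attribute__((arm_sve_vector_bits(512)));
//   // passed/returned as <vscale x 16 x i1>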
5679
5680ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
5681 Ty = useFirstFieldIfTransparentUnion(Ty);
5682
5683 // Handle illegal vector types here.
5684 if (isIllegalVectorType(Ty))
5685 return coerceIllegalVector(Ty);
5686
5687 if (!isAggregateTypeForABI(Ty)) {
5688 // Treat an enum type as its underlying type.
5689 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5690 Ty = EnumTy->getDecl()->getIntegerType();
5691
5692 if (const auto *EIT = Ty->getAs<ExtIntType>())
5693 if (EIT->getNumBits() > 128)
5694 return getNaturalAlignIndirect(Ty);
5695
5696 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
5697 ? ABIArgInfo::getExtend(Ty)
5698 : ABIArgInfo::getDirect());
5699 }
5700
5701 // Structures with either a non-trivial destructor or a non-trivial
5702 // copy constructor are always indirect.
5703 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5704 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5705 CGCXXABI::RAA_DirectInMemory);
5706 }
5707
5708 // Empty records are always ignored on Darwin, but are actually passed in C++
5709 // mode elsewhere for GNU compatibility.
5710 uint64_t Size = getContext().getTypeSize(Ty);
5711 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5712 if (IsEmpty || Size == 0) {
5713 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5714 return ABIArgInfo::getIgnore();
5715
5716 // GNU C mode. The only argument that gets ignored is an empty one with size
5717 // 0.
5718 if (IsEmpty && Size == 0)
5719 return ABIArgInfo::getIgnore();
5720 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5721 }
5722
5723 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5724 const Type *Base = nullptr;
5725 uint64_t Members = 0;
5726 if (isHomogeneousAggregate(Ty, Base, Members)) {
5727 return ABIArgInfo::getDirect(
5728 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5729 }
5730
5731 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5732 if (Size <= 128) {
5733 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of the
5734 // same size and alignment.
5735 if (getTarget().isRenderScriptTarget()) {
5736 return coerceToIntArray(Ty, getContext(), getVMContext());
5737 }
5738 unsigned Alignment;
5739 if (Kind == AArch64ABIInfo::AAPCS) {
5740 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5741 Alignment = Alignment < 128 ? 64 : 128;
5742 } else {
5743 Alignment = std::max(getContext().getTypeAlign(Ty),
5744 (unsigned)getTarget().getPointerWidth(0));
5745 }
5746 Size = llvm::alignTo(Size, Alignment);
5747
5748 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5749 // For aggregates with 16-byte alignment, we use i128.
5750 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
5751 return ABIArgInfo::getDirect(
5752 Size == Alignment ? BaseTy
5753 : llvm::ArrayType::get(BaseTy, Size / Alignment));
5754 }
5755
5756 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5757}
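// Illustration (hedged, not part of the original source) of the argument
// rules above:
//   struct HFA { double d[3]; };   // homogeneous aggregate -> [3 x double]
//   struct S16 { long a, b; };     // 16 bytes, 8-byte aligned -> [2 x i64]
//   struct Big { char c[17]; };    // > 16 bytes, not an HFA -> indirect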
5758
5759ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
5760 bool IsVariadic) const {
5761 if (RetTy->isVoidType())
5762 return ABIArgInfo::getIgnore();
5763
5764 if (const auto *VT = RetTy->getAs<VectorType>()) {
5765 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
5766 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
5767 return coerceIllegalVector(RetTy);
5768 }
5769
5770 // Large vector types should be returned via memory.
5771 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
5772 return getNaturalAlignIndirect(RetTy);
5773
5774 if (!isAggregateTypeForABI(RetTy)) {
5775 // Treat an enum type as its underlying type.
5776 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5777 RetTy = EnumTy->getDecl()->getIntegerType();
5778
5779 if (const auto *EIT = RetTy->getAs<ExtIntType>())
5780 if (EIT->getNumBits() > 128)
5781 return getNaturalAlignIndirect(RetTy);
5782
5783 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
5784 ? ABIArgInfo::getExtend(RetTy)
5785 : ABIArgInfo::getDirect());
5786 }
5787
5788 uint64_t Size = getContext().getTypeSize(RetTy);
5789 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
5790 return ABIArgInfo::getIgnore();
5791
5792 const Type *Base = nullptr;
5793 uint64_t Members = 0;
5794 if (isHomogeneousAggregate(RetTy, Base, Members) &&
5795 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
5796 IsVariadic))
5797 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
5798 return ABIArgInfo::getDirect();
5799
5800 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
5801 if (Size <= 128) {
5802 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of the
5803 // same size and alignment.
5804 if (getTarget().isRenderScriptTarget()) {
5805 return coerceToIntArray(RetTy, getContext(), getVMContext());
5806 }
5807 unsigned Alignment = getContext().getTypeAlign(RetTy);
5808 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5809
5810 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5811 // For aggregates with 16-byte alignment, we use i128.
5812 if (Alignment < 128 && Size == 128) {
5813 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5814 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5815 }
5816 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5817 }
5818
5819 return getNaturalAlignIndirect(RetTy);
5820}
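// Note (hedged): the return rules mirror the argument rules; e.g. a 16-byte
// aggregate with 8-byte alignment is returned as [2 x i64], while one
// declared __attribute__((aligned(16))) comes back as a single i128.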
5821
5822/// isIllegalVectorType - check whether the vector type is illegal for AArch64.
5823bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5824 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5825 // Check whether VT is a fixed-length SVE vector. These types are
5826 // represented as scalable vectors in function args/return and must be
5827 // coerced from fixed vectors.
5828 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
5829 VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
5830 return true;
5831
5832 // Check whether VT is legal.
5833 unsigned NumElements = VT->getNumElements();
5834 uint64_t Size = getContext().getTypeSize(VT);
5835 // NumElements should be power of 2.
5836 if (!llvm::isPowerOf2_32(NumElements))
5837 return true;
5838
5839 // arm64_32 has to be compatible with the ARM logic here, which allows huge
5840 // vectors for some reason.
5841 llvm::Triple Triple = getTarget().getTriple();
5842 if (Triple.getArch() == llvm::Triple::aarch64_32 &&
5843 Triple.isOSBinFormatMachO())
5844 return Size <= 32;
5845
5846 return Size != 64 && (Size != 128 || NumElements == 1);
5847 }
5848 return false;
5849}
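// Illustration (hedged, not part of the original source): under the rules
// above,
//   typedef int v3i32 __attribute__((ext_vector_type(3)));
// is illegal (element count is not a power of 2) and is lowered through
// coerceIllegalVector(), while <2 x i32> (64 bits) and <4 x i32> (128 bits)
// are legal and travel in D/Q registers.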
5850
5851bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
5852 llvm::Type *eltTy,
5853 unsigned elts) const {
5854 if (!llvm::isPowerOf2_32(elts))
5855 return false;
5856 if (totalSize.getQuantity() != 8 &&
5857 (totalSize.getQuantity() != 16 || elts == 1))
5858 return false;
5859 return true;
5860}
5861
5862bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5863 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5864 // point type or a short-vector type. This is the same as the 32-bit ABI,
5865 // but with the difference that any floating-point type is allowed,
5866 // including __fp16.
5867 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5868 if (BT->isFloatingPoint())
5869 return true;
5870 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5871 unsigned VecSize = getContext().getTypeSize(VT);
5872 if (VecSize == 64 || VecSize == 128)
5873 return true;
5874 }
5875 return false;
5876}
5877
5878bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5879 uint64_t Members) const {
5880 return Members <= 4;
5881}
5882
5883Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5884 QualType Ty,
5885 CodeGenFunction &CGF) const {
5886 ABIArgInfo AI = classifyArgumentType(Ty);
5887 bool IsIndirect = AI.isIndirect();
5888
5889 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5890 if (IsIndirect)
5891 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5892 else if (AI.getCoerceToType())
5893 BaseTy = AI.getCoerceToType();
5894
5895 unsigned NumRegs = 1;
5896 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5897 BaseTy = ArrTy->getElementType();
5898 NumRegs = ArrTy->getNumElements();
5899 }
5900 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5901
5902 // The AArch64 va_list type and handling are specified in the Procedure Call
5903 // Standard, section B.4:
5904 //
5905 // struct {
5906 // void *__stack;
5907 // void *__gr_top;
5908 // void *__vr_top;
5909 // int __gr_offs;
5910 // int __vr_offs;
5911 // };
5912
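// Note (hedged): per AAPCS64 B.4, __gr_offs/__vr_offs start negative (zero
// minus the bytes of still-unused argument registers) and count up towards
// zero; e.g. with two named integer arguments, __gr_offs starts at -48 and
// each integer va_arg adds 8 until it reaches 0, after which arguments are
// taken from __stack.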
5913 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5914 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5915 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5916 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5917
5918 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
5919 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
5920
5921 Address reg_offs_p = Address::invalid();
5922 llvm::Value *reg_offs = nullptr;
5923 int reg_top_index;
5924 int RegSize = IsIndirect ? 8 : TySize.getQuantity();
5925 if (!IsFPR) {
5926 // 3 is the field number of __gr_offs
5927 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
5928 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5929 reg_top_index = 1; // field number for __gr_top
5930 RegSize = llvm::alignTo(RegSize, 8);
5931 } else {
5932 // 4 is the field number of __vr_offs.
5933 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
5934 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5935 reg_top_index = 2; // field number for __vr_top
5936 RegSize = 16 * NumRegs;
5937 }
5938
5939 //=======================================
5940 // Find out where argument was passed
5941 //=======================================
5942
5943 // If reg_offs >= 0 we're already using the stack for this type of
5944 // argument. We don't want to keep updating reg_offs (in case it overflows,
5945 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5946 // whatever they get).
5947 llvm::Value *UsingStack = nullptr;
5948 UsingStack = CGF.Builder.CreateICmpSGE(
5949 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5950
5951 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5952
5953 // Otherwise, at least some kind of argument could go in these registers, the
5954 // question is whether this particular type is too big.
5955 CGF.EmitBlock(MaybeRegBlock);
5956
5957 // Integer arguments may need their register alignment corrected (for example a
5958 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5959 // align __gr_offs to calculate the potential address.
5960 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5961 int Align = TyAlign.getQuantity();
5962
5963 reg_offs = CGF.Builder.CreateAdd(
5964 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5965 "align_regoffs");
5966 reg_offs = CGF.Builder.CreateAnd(
5967 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5968 "aligned_regoffs");
5969 }
5970
5971 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5972 // The fact that this is done unconditionally reflects the fact that
5973 // allocating an argument to the stack also uses up all the remaining
5974 // registers of the appropriate kind.
5975 llvm::Value *NewOffset = nullptr;
5976 NewOffset = CGF.Builder.CreateAdd(
5977 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5978 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5979
5980 // Now we're in a position to decide whether this argument really was in
5981 // registers or not.
5982 llvm::Value *InRegs = nullptr;
5983 InRegs = CGF.Builder.CreateICmpSLE(
5984 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5985
5986 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5987
5988 //=======================================
5989 // Argument was in registers
5990 //=======================================
5991
5992 // Now we emit the code for if the argument was originally passed in
5993 // registers. First start the appropriate block:
5994 CGF.EmitBlock(InRegBlock);
5995
5996 llvm::Value *reg_top = nullptr;
5997 Address reg_top_p =
5998 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
5999 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
6000 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
6001 CharUnits::fromQuantity(IsFPR ? 16 : 8));
6002 Address RegAddr = Address::invalid();
6003 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
6004
6005 if (IsIndirect) {
6006 // If it's been passed indirectly (actually a struct), whatever we find from
6007 // stored registers or on the stack will actually be a struct **.
6008 MemTy = llvm::PointerType::getUnqual(MemTy);
6009 }
6010
6011 const Type *Base = nullptr;
6012 uint64_t NumMembers = 0;
6013 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
6014 if (IsHFA && NumMembers > 1) {
6015 // Homogeneous aggregates passed in registers will have their elements split
6016 // and stored 16 bytes apart regardless of size (they're notionally in qN,
6017 // qN+1, ...). We reload and store into a temporary local variable
6018 // contiguously.
6019 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
6020 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
6021 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
6022 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
6023 Address Tmp = CGF.CreateTempAlloca(HFATy,
6024 std::max(TyAlign, BaseTyInfo.Align));
6025
6026 // On big-endian platforms, the value will be right-aligned in its slot.
6027 int Offset = 0;
6028 if (CGF.CGM.getDataLayout().isBigEndian() &&
6029 BaseTyInfo.Width.getQuantity() < 16)
6030 Offset = 16 - BaseTyInfo.Width.getQuantity();
6031
6032 for (unsigned i = 0; i < NumMembers; ++i) {
6033 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
6034 Address LoadAddr =
6035 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
6036 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
6037
6038 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
6039
6040 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
6041 CGF.Builder.CreateStore(Elem, StoreAddr);
6042 }
6043
6044 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
6045 } else {
6046 // Otherwise the object is contiguous in memory.
6047
6048 // It might be right-aligned in its slot.
6049 CharUnits SlotSize = BaseAddr.getAlignment();
6050 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
6051 (IsHFA || !isAggregateTypeForABI(Ty)) &&
6052 TySize < SlotSize) {
6053 CharUnits Offset = SlotSize - TySize;
6054 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
6055 }
6056
6057 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
6058 }
6059
6060 CGF.EmitBranch(ContBlock);
6061
6062 //=======================================
6063 // Argument was on the stack
6064 //=======================================
6065 CGF.EmitBlock(OnStackBlock);
6066
6067 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
6068 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
6069
6070 // Again, stack arguments may need realignment. In this case both integer and
6071 // floating-point ones might be affected.
6072 if (!IsIndirect && TyAlign.getQuantity() > 8) {
6073 int Align = TyAlign.getQuantity();
6074
6075 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
6076
6077 OnStackPtr = CGF.Builder.CreateAdd(
6078 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
6079 "align_stack");
6080 OnStackPtr = CGF.Builder.CreateAnd(
6081 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
6082 "align_stack");
6083
6084 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
6085 }
6086 Address OnStackAddr(OnStackPtr,
6087 std::max(CharUnits::fromQuantity(8), TyAlign));
6088
6089 // All stack slots are multiples of 8 bytes.
6090 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
6091 CharUnits StackSize;
6092 if (IsIndirect)
6093 StackSize = StackSlotSize;
6094 else
6095 StackSize = TySize.alignTo(StackSlotSize);
6096
6097 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
6098 llvm::Value *NewStack =
6099 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
6100
6101 // Write the new value of __stack for the next call to va_arg
6102 CGF.Builder.CreateStore(NewStack, stack_p);
6103
6104 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
6105 TySize < StackSlotSize) {
6106 CharUnits Offset = StackSlotSize - TySize;
6107 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
6108 }
6109
6110 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
6111
6112 CGF.EmitBranch(ContBlock);
6113
6114 //=======================================
6115 // Tidy up
6116 //=======================================
6117 CGF.EmitBlock(ContBlock);
6118
6119 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6120 OnStackAddr, OnStackBlock, "vaargs.addr");
6121
6122 if (IsIndirect)
6123 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
6124 TyAlign);
6125
6126 return ResAddr;
6127}
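// Sketch (hedged, not part of the original source) of the control flow the
// code above emits for a single va_arg:
//   entry:     reg_offs = *reg_offs_p; if (reg_offs >= 0) goto on_stack;
//   maybe_reg: *reg_offs_p = align(reg_offs) + RegSize;
//              if (*reg_offs_p <= 0) goto in_reg; else goto on_stack;
//   in_reg:    addr = *reg_top_p + reg_offs;         goto end;
//   on_stack:  addr = *stack_p; *stack_p += size;    goto end;
//   end:       result = phi(in_reg: addr, on_stack: addr);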
6128
6129Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
6130 CodeGenFunction &CGF) const {
6131 // The backend's lowering doesn't support va_arg for aggregates or
6132 // illegal vector types. Lower VAArg here for these cases and use
6133 // the LLVM va_arg instruction for everything else.
6134 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
6135 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
6136
6137 uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
6138 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
6139
6140 // Empty records are ignored for parameter passing purposes.
6141 if (isEmptyRecord(getContext(), Ty, true)) {
6142 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
6143 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6144 return Addr;
6145 }
6146
6147 // The size of the actual thing passed, which might end up just
6148 // being a pointer for indirect types.
6149 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6150
6151 // Arguments bigger than 16 bytes which aren't homogeneous
6152 // aggregates should be passed indirectly.
6153 bool IsIndirect = false;
6154 if (TyInfo.Width.getQuantity() > 16) {
6155 const Type *Base = nullptr;
6156 uint64_t Members = 0;
6157 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
6158 }
6159
6160 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
6161 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
6162}
6163
6164Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
6165 QualType Ty) const {
6166 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6167 CGF.getContext().getTypeInfoInChars(Ty),
6168 CharUnits::fromQuantity(8),
6169 /*allowHigherAlign*/ false);
6170}
6171
6172//===----------------------------------------------------------------------===//
6173// ARM ABI Implementation
6174//===----------------------------------------------------------------------===//
6175
6176namespace {
6177
6178class ARMABIInfo : public SwiftABIInfo {
6179public:
6180 enum ABIKind {
6181 APCS = 0,
6182 AAPCS = 1,
6183 AAPCS_VFP = 2,
6184 AAPCS16_VFP = 3,
6185 };
6186
6187private:
6188 ABIKind Kind;
6189 bool IsFloatABISoftFP;
6190
6191public:
6192 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
6193 : SwiftABIInfo(CGT), Kind(_Kind) {
6194 setCCs();
6195 IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
6196 CGT.getCodeGenOpts().FloatABI == ""; // default
6197 }
6198
6199 bool isEABI() const {
6200 switch (getTarget().getTriple().getEnvironment()) {
6201 case llvm::Triple::Android:
6202 case llvm::Triple::EABI:
6203 case llvm::Triple::EABIHF:
6204 case llvm::Triple::GNUEABI:
6205 case llvm::Triple::GNUEABIHF:
6206 case llvm::Triple::MuslEABI:
6207 case llvm::Triple::MuslEABIHF:
6208 return true;
6209 default:
6210 return false;
6211 }
6212 }
6213
6214 bool isEABIHF() const {
6215 switch (getTarget().getTriple().getEnvironment()) {
6216 case llvm::Triple::EABIHF:
6217 case llvm::Triple::GNUEABIHF:
6218 case llvm::Triple::MuslEABIHF:
6219 return true;
6220 default:
6221 return false;
6222 }
6223 }
6224
6225 ABIKind getABIKind() const { return Kind; }
6226
6227 bool allowBFloatArgsAndRet() const override {
6228 return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
6229 }
6230
6231private:
6232 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
6233 unsigned functionCallConv) const;
6234 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
6235 unsigned functionCallConv) const;
6236 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
6237 uint64_t Members) const;
6238 ABIArgInfo coerceIllegalVector(QualType Ty) const;
6239 bool isIllegalVectorType(QualType Ty) const;
6240 bool containsAnyFP16Vectors(QualType Ty) const;
6241
6242 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
6243 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
6244 uint64_t Members) const override;
6245
6246 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
6247
6248 void computeInfo(CGFunctionInfo &FI) const override;
6249
6250 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6251 QualType Ty) const override;
6252
6253 llvm::CallingConv::ID getLLVMDefaultCC() const;
6254 llvm::CallingConv::ID getABIDefaultCC() const;
6255 void setCCs();
6256
6257 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
6258 bool asReturnValue) const override {
6259 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6260 }
6261 bool isSwiftErrorInRegister() const override {
6262 return true;
6263 }
6264 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
6265 unsigned elts) const override;
6266};
6267
6268class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
6269public:
6270 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
6271 : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}
6272
6273 const ARMABIInfo &getABIInfo() const {
6274 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
6275 }
6276
6277 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6278 return 13;
6279 }
6280
6281 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
6282 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
6283 }
6284
6285 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6286 llvm::Value *Address) const override {
6287 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6288
6289 // 0-15 are the 16 integer registers.
6290 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
6291 return false;
6292 }
6293
6294 unsigned getSizeOfUnwindException() const override {
6295 if (getABIInfo().isEABI()) return 88;
6296 return TargetCodeGenInfo::getSizeOfUnwindException();
6297 }
6298
6299 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6300 CodeGen::CodeGenModule &CGM) const override {
6301 if (GV->isDeclaration())
6302 return;
6303 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6304 if (!FD)
6305 return;
6306
6307 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
6308 if (!Attr)
6309 return;
6310
6311 const char *Kind;
6312 switch (Attr->getInterrupt()) {
6313 case ARMInterruptAttr::Generic: Kind = ""; break;
6314 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
6315 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
6316 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
6317 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
6318 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
6319 }
6320
6321 llvm::Function *Fn = cast<llvm::Function>(GV);
6322
6323 Fn->addFnAttr("interrupt", Kind);
6324
6325 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
6326 if (ABI == ARMABIInfo::APCS)
6327 return;
6328
6329 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
6330 // however this is not necessarily true on taking any interrupt. Instruct
6331 // the backend to perform a realignment as part of the function prologue.
6332 llvm::AttrBuilder B;
6333 B.addStackAlignmentAttr(8);
6334 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
6335 }
6336};
6337
6338class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
6339public:
6340 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
6341 : ARMTargetCodeGenInfo(CGT, K) {}
6342
6343 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6344 CodeGen::CodeGenModule &CGM) const override;
6345
6346 void getDependentLibraryOption(llvm::StringRef Lib,
6347 llvm::SmallString<24> &Opt) const override {
6348 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
6349 }
6350
6351 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
6352 llvm::SmallString<32> &Opt) const override {
6353 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
6354 }
6355};
6356
6357void WindowsARMTargetCodeGenInfo::setTargetAttributes(
6358 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
6359 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
6360 if (GV->isDeclaration())
6361 return;
6362 addStackProbeTargetAttributes(D, GV, CGM);
6363}
6364}
6365
6366void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
6367 if (!::classifyReturnType(getCXXABI(), FI, *this))
6368 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
6369 FI.getCallingConvention());
6370
6371 for (auto &I : FI.arguments())
6372 I.info = classifyArgumentType(I.type, FI.isVariadic(),
6373 FI.getCallingConvention());
6374
6375
6376 // Always honor user-specified calling convention.
6377 if (FI.getCallingConvention() != llvm::CallingConv::C)
6378 return;
6379
6380 llvm::CallingConv::ID cc = getRuntimeCC();
6381 if (cc != llvm::CallingConv::C)
6382 FI.setEffectiveCallingConvention(cc);
6383}
6384
6385/// Return the default calling convention that LLVM will use.
6386llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
6387 // The default calling convention that LLVM will infer.
6388 if (isEABIHF() || getTarget().getTriple().isWatchABI())
6389 return llvm::CallingConv::ARM_AAPCS_VFP;
6390 else if (isEABI())
6391 return llvm::CallingConv::ARM_AAPCS;
6392 else
6393 return llvm::CallingConv::ARM_APCS;
6394}
6395
6396/// Return the calling convention that our ABI would like us to use
6397/// as the C calling convention.
6398llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
6399 switch (getABIKind()) {
6400 case APCS: return llvm::CallingConv::ARM_APCS;
6401 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
6402 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
6403 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
6404 }
6405 llvm_unreachable("bad ABI kind");
6406}
6407
6408void ARMABIInfo::setCCs() {
6409 assert(getRuntimeCC() == llvm::CallingConv::C);
6410
6411 // Don't muddy up the IR with a ton of explicit annotations if
6412 // they'd just match what LLVM will infer from the triple.
6413 llvm::CallingConv::ID abiCC = getABIDefaultCC();
6414 if (abiCC != getLLVMDefaultCC())
6415 RuntimeCC = abiCC;
6416}
6417
6418ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
6419 uint64_t Size = getContext().getTypeSize(Ty);
6420 if (Size <= 32) {
6421 llvm::Type *ResType =
6422 llvm::Type::getInt32Ty(getVMContext());
6423 return ABIArgInfo::getDirect(ResType);
6424 }
6425 if (Size == 64 || Size == 128) {
6426 auto *ResType = llvm::FixedVectorType::get(
6427 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
6428 return ABIArgInfo::getDirect(ResType);
6429 }
6430 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6431}
6432
6433ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
6434 const Type *Base,
6435 uint64_t Members) const {
6436 assert(Base && "Base class should be set for homogeneous aggregate");
6437 // Base can be a floating-point or a vector.
6438 if (const VectorType *VT = Base->getAs<VectorType>()) {
6439 // FP16 vectors should be converted to integer vectors
6440 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
6441 uint64_t Size = getContext().getTypeSize(VT);
6442 auto *NewVecTy = llvm::FixedVectorType::get(
6443 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
6444 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
6445 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
6446 }
6447 }
6448 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
6449}
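// Illustration (hedged, not part of the original source): on a target
// without legal half types, a homogeneous aggregate of two <4 x __fp16>
// members (8 bytes each) is coerced by the code above to [2 x <2 x i32>],
// so the ABI does not depend on half-precision hardware support.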
6450
6451ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
6452 unsigned functionCallConv) const {
6453 // 6.1.2.1 The following argument types are VFP CPRCs:
6454 // A single-precision floating-point type (including promoted
6455 // half-precision types); A double-precision floating-point type;
6456 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
6457 // with a Base Type of a single- or double-precision floating-point type,
6458 // 64-bit containerized vectors or 128-bit containerized vectors with one
6459 // to four Elements.
6460 // Variadic functions should always marshal to the base standard.
6461 bool IsAAPCS_VFP =
6462 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
6463
6464 Ty = useFirstFieldIfTransparentUnion(Ty);
6465
6466 // Handle illegal vector types here.
6467 if (isIllegalVectorType(Ty))
6468 return coerceIllegalVector(Ty);
6469
6470 if (!isAggregateTypeForABI(Ty)) {
6471 // Treat an enum type as its underlying type.
6472 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
6473 Ty = EnumTy->getDecl()->getIntegerType();
6474 }
6475
6476 if (const auto *EIT = Ty->getAs<ExtIntType>())
6477 if (EIT->getNumBits() > 64)
6478 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
6479
6480 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
6481 : ABIArgInfo::getDirect());
6482 }
6483
6484 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6485 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6486 }
6487
6488 // Ignore empty records.
6489 if (isEmptyRecord(getContext(), Ty, true))
6490 return ABIArgInfo::getIgnore();
6491
6492 if (IsAAPCS_VFP) {
6493 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
6494 // into VFP registers.
6495 const Type *Base = nullptr;
6496 uint64_t Members = 0;
6497 if (isHomogeneousAggregate(Ty, Base, Members))
6498 return classifyHomogeneousAggregate(Ty, Base, Members);
6499 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6500 // WatchOS does have homogeneous aggregates. Note that we intentionally use
6501 // this convention even for a variadic function: the backend will use GPRs
6502 // if needed.
6503 const Type *Base = nullptr;
6504 uint64_t Members = 0;
6505 if (isHomogeneousAggregate(Ty, Base, Members)) {
6506 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
6507 llvm::Type *Ty =
6508 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
6509 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
6510 }
6511 }
6512
6513 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6514 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
6515 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
6516 // bigger than 128 bits, they get placed in space allocated by the caller,
6517 // and a pointer is passed.
6518 return ABIArgInfo::getIndirect(
6519 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
6520 }
6521
6522 // Support byval for ARM.
6523 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
6524 // most 8-byte. We realign the indirect argument if type alignment is bigger
6525 // than ABI alignment.
6526 uint64_t ABIAlign = 4;
6527 uint64_t TyAlign;
6528 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6529 getABIKind() == ARMABIInfo::AAPCS) {
6530 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
6531 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
6532 } else {
6533 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
6534 }
6535 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
6536 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
6537 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
6538 /*ByVal=*/true,
6539 /*Realign=*/TyAlign > ABIAlign);
6540 }
6541
6542 // On RenderScript, coerce aggregates <= 64 bytes to an integer array of the
6543 // same size and alignment.
6544 if (getTarget().isRenderScriptTarget()) {
6545 return coerceToIntArray(Ty, getContext(), getVMContext());
6546 }
6547
6548 // Otherwise, pass by coercing to a structure of the appropriate size.
6549 llvm::Type* ElemTy;
6550 unsigned SizeRegs;
6551 // FIXME: Try to match the types of the arguments more accurately where
6552 // we can.
6553 if (TyAlign <= 4) {
6554 ElemTy = llvm::Type::getInt32Ty(getVMContext());
6555 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
6556 } else {
6557 ElemTy = llvm::Type::getInt64Ty(getVMContext());
6558 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
6559 }
6560
6561 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
6562}
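// Illustration (hedged, not part of the original source) of the fallback
// rules above:
//   struct S { int a[4]; };    // 16 bytes, 4-byte aligned -> [4 x i32]
//   struct L { char c[80]; };  // > 64 bytes -> indirect byval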
6563
6564static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
6565 llvm::LLVMContext &VMContext) {
6566 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
6567 // is called integer-like if its size is less than or equal to one word, and
6568 // the offset of each of its addressable sub-fields is zero.
6569
6570 uint64_t Size = Context.getTypeSize(Ty);
6571
6572 // Check that the type fits in a word.
6573 if (Size > 32)
6574 return false;
6575
6576 // FIXME: Handle vector types!
6577 if (Ty->isVectorType())
6578 return false;
6579
6580 // Float types are never treated as "integer like".
6581 if (Ty->isRealFloatingType())
6582 return false;
6583
6584 // If this is a builtin or pointer type then it is ok.
6585 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
6586 return true;
6587
6588 // Small complex integer types are "integer like".
6589 if (const ComplexType *CT = Ty->getAs<ComplexType>())
6590 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
6591
6592 // Single element and zero sized arrays should be allowed, by the definition
6593 // above, but they are not.
6594
6595 // Otherwise, it must be a record type.
6596 const RecordType *RT = Ty->getAs<RecordType>();
6597 if (!RT) return false;
6598
6599 // Ignore records with flexible arrays.
6600 const RecordDecl *RD = RT->getDecl();
6601 if (RD->hasFlexibleArrayMember())
6602 return false;
6603
6604 // Check that all sub-fields are at offset 0, and are themselves "integer
6605 // like".
6606 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
6607
6608 bool HadField = false;
6609 unsigned idx = 0;
6610 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6611 i != e; ++i, ++idx) {
6612 const FieldDecl *FD = *i;
6613
6614 // Bit-fields are not addressable, so we only need to verify they are "integer
6615 // like". We still have to disallow a subsequent non-bitfield, for example:
6616 // struct { int : 0; int x }
6617 // is non-integer like according to gcc.
6618 if (FD->isBitField()) {
6619 if (!RD->isUnion())
6620 HadField = true;
6621
6622 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6623 return false;
6624
6625 continue;
6626 }
6627
6628 // Check if this field is at offset 0.
6629 if (Layout.getFieldOffset(idx) != 0)
6630 return false;
6631
6632 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6633 return false;
6634
6635 // Only allow at most one field in a structure. This doesn't match the
6636 // wording above, but follows gcc in situations with a field following an
6637 // empty structure.
6638 if (!RD->isUnion()) {
6639 if (HadField)
6640 return false;
6641
6642 HadField = true;
6643 }
6644 }
6645
6646 return true;
6647}
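// Illustration (hedged, not part of the original source) of "integer-like"
// under APCS:
//   struct A { short s; };          // single field at offset 0 -> in r0
//   struct B { short s, t; };       // second field at offset 2 -> memory
//   union  U { int i; char c; };    // all fields at offset 0   -> in r0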
6648
6649ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
6650 unsigned functionCallConv) const {
6651
6652 // Variadic functions should always marshal to the base standard.
6653 bool IsAAPCS_VFP =
6654 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
6655
6656 if (RetTy->isVoidType())
6657 return ABIArgInfo::getIgnore();
6658
6659 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
6660 // Large vector types should be returned via memory.
6661 if (getContext().getTypeSize(RetTy) > 128)
6662 return getNaturalAlignIndirect(RetTy);
6663 // TODO: FP16/BF16 vectors should be converted to integer vectors
6664 // This check is similar to isIllegalVectorType - refactor?
6665 if ((!getTarget().hasLegalHalfType() &&
6666 (VT->getElementType()->isFloat16Type() ||
6667 VT->getElementType()->isHalfType())) ||
6668 (IsFloatABISoftFP &&
6669 VT->getElementType()->isBFloat16Type()))
6670 return coerceIllegalVector(RetTy);
6671 }
6672
6673 if (!isAggregateTypeForABI(RetTy)) {
6674 // Treat an enum type as its underlying type.
6675 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6676 RetTy = EnumTy->getDecl()->getIntegerType();
6677
6678 if (const auto *EIT = RetTy->getAs<ExtIntType>())
6679 if (EIT->getNumBits() > 64)
6680 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
6681
6682 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
6683 : ABIArgInfo::getDirect();
6684 }
6685
6686 // Are we following APCS?
6687 if (getABIKind() == APCS) {
6688 if (isEmptyRecord(getContext(), RetTy, false))
6689 return ABIArgInfo::getIgnore();
6690
6691 // Complex types are all returned as packed integers.
6692 //
6693 // FIXME: Consider using 2 x vector types if the back end handles them
6694 // correctly.
6695 if (RetTy->isAnyComplexType())
6696 return ABIArgInfo::getDirect(llvm::IntegerType::get(
6697 getVMContext(), getContext().getTypeSize(RetTy)));
6698
6699 // Integer like structures are returned in r0.
6700 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
6701 // Return in the smallest viable integer type.
6702 uint64_t Size = getContext().getTypeSize(RetTy);
6703 if (Size <= 8)
6704 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6705 if (Size <= 16)
6706 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6707 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6708 }
6709
6710 // Otherwise return in memory.
6711 return getNaturalAlignIndirect(RetTy);
6712 }
6713
6714 // Otherwise this is an AAPCS variant.
6715
6716 if (isEmptyRecord(getContext(), RetTy, true))
6717 return ABIArgInfo::getIgnore();
6718
6719 // Check for homogeneous aggregates with AAPCS-VFP.
6720 if (IsAAPCS_VFP) {
6721 const Type *Base = nullptr;
6722 uint64_t Members = 0;
6723 if (isHomogeneousAggregate(RetTy, Base, Members))
6724 return classifyHomogeneousAggregate(RetTy, Base, Members);
6725 }
6726
6727 // Aggregates <= 4 bytes are returned in r0; other aggregates
6728 // are returned indirectly.
6729 uint64_t Size = getContext().getTypeSize(RetTy);
6730 if (Size <= 32) {
6731 // On RenderScript, coerce aggregates <= 4 bytes to an integer array of the
6732 // same size and alignment.
6733 if (getTarget().isRenderScriptTarget()) {
6734 return coerceToIntArray(RetTy, getContext(), getVMContext());
6735 }
6736 if (getDataLayout().isBigEndian())
6737 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
6738 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6739
6740 // Return in the smallest viable integer type.
6741 if (Size <= 8)
6742 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6743 if (Size <= 16)
6744 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6745 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6746 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6747 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
6748 llvm::Type *CoerceTy =
6749 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6750 return ABIArgInfo::getDirect(CoerceTy);
6751 }
6752
6753 return getNaturalAlignIndirect(RetTy);
6754}
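// Note (hedged): under AAPCS a 3-byte aggregate is therefore returned
// directly, as i32 on big-endian targets (as if loaded by LDR) or as the
// smallest viable integer on little-endian; non-homogeneous aggregates over
// 4 bytes are returned indirectly outside the AAPCS16_VFP case.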
6755
6756/// isIllegalVectorType - check whether Ty is an illegal vector type.
6757bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
6758 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
6759 // On targets that don't support half, fp16 or bfloat, they are expanded
6760 // into float, and we don't want the ABI to depend on whether or not they
6761 // are supported in hardware. Thus return false to coerce vectors of these
6762 // types into integer vectors.
6763 // We do not depend on hasLegalHalfType for bfloat as it is a
6764 // separate IR type.
6765 if ((!getTarget().hasLegalHalfType() &&
6766 (VT->getElementType()->isFloat16Type() ||
6767 VT->getElementType()->isHalfType())) ||
6768 (IsFloatABISoftFP &&
6769 VT->getElementType()->isBFloat16Type()))
6770 return true;
6771 if (isAndroid()) {
6772 // Android shipped using Clang 3.1, which supported a slightly different
6773 // vector ABI. The primary differences were that 3-element vector types
6774 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
6775 // accepts that legacy behavior for Android only.
6776 // Check whether VT is legal.
6777 unsigned NumElements = VT->getNumElements();
6778 // NumElements should be power of 2 or equal to 3.
6779 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6780 return true;
6781 } else {
6782 // Check whether VT is legal.
6783 unsigned NumElements = VT->getNumElements();
6784 uint64_t Size = getContext().getTypeSize(VT);
6785 // NumElements should be power of 2.
6786 if (!llvm::isPowerOf2_32(NumElements))
6787 return true;
6788 // Size should be greater than 32 bits.
6789 return Size <= 32;
6790 }
6791 }
6792 return false;
6793}
6794
6795/// Return true if a type contains any 16-bit floating point vectors
6796bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
6797 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
6798 uint64_t NElements = AT->getSize().getZExtValue();
6799 if (NElements == 0)
6800 return false;
6801 return containsAnyFP16Vectors(AT->getElementType());
6802 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
6803 const RecordDecl *RD = RT->getDecl();
6804
6805 // If this is a C++ record, check the bases first.
6806 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6807 if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
6808 return containsAnyFP16Vectors(B.getType());
6809 }))
6810 return true;
6811
6812 if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
6813 return FD && containsAnyFP16Vectors(FD->getType());
6814 }))
6815 return true;
6816
6817 return false;
6818 } else {
6819 if (const VectorType *VT = Ty->getAs<VectorType>())
6820 return (VT->getElementType()->isFloat16Type() ||
6821 VT->getElementType()->isBFloat16Type() ||
6822 VT->getElementType()->isHalfType());
6823 return false;
6824 }
6825}
6826
6827bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
6828 llvm::Type *eltTy,
6829 unsigned numElts) const {
6830 if (!llvm::isPowerOf2_32(numElts))
6831 return false;
6832 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
6833 if (size > 64)
6834 return false;
6835 if (vectorSize.getQuantity() != 8 &&
6836 (vectorSize.getQuantity() != 16 || numElts == 1))
6837 return false;
6838 return true;
6839}
6840
6841bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6842 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6843 // double, or 64-bit or 128-bit vectors.
6844 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
6845 if (BT->getKind() == BuiltinType::Float ||
6846 BT->getKind() == BuiltinType::Double ||
6847 BT->getKind() == BuiltinType::LongDouble)
6848 return true;
6849 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
6850 unsigned VecSize = getContext().getTypeSize(VT);
6851 if (VecSize == 64 || VecSize == 128)
6852 return true;
6853 }
6854 return false;
6855}
6856
6857bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6858 uint64_t Members) const {
6859 return Members <= 4;
6860}
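For illustration, a sketch of the homogeneous-aggregate predicates above (hypothetical types):

    struct HFA   { float a, b, c, d; };     // one base type, 4 members: homogeneous aggregate
    struct Five  { float a, b, c, d, e; };  // 5 members: fails isHomogeneousAggregateSmallEnough
    struct Mixed { float a; double b; };    // two base types: not homogeneous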
6861
6862bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
6863 bool acceptHalf) const {
6864 // Give precedence to user-specified calling conventions.
6865 if (callConvention != llvm::CallingConv::C)
6866 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6867 else
6868 return (getABIKind() == AAPCS_VFP) ||
6869 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6870}
6871
6872Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6873 QualType Ty) const {
6874 CharUnits SlotSize = CharUnits::fromQuantity(4);
6875
6876 // Empty records are ignored for parameter passing purposes.
6877 if (isEmptyRecord(getContext(), Ty, true)) {
6878 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
6879 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6880 return Addr;
6881 }
6882
6883 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
6884 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
6885
6886 // Use indirect if the size of the illegal vector is bigger than 16 bytes.
6887 bool IsIndirect = false;
6888 const Type *Base = nullptr;
6889 uint64_t Members = 0;
6890 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
6891 IsIndirect = true;
6892
6893 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
6894 // allocated by the caller.
6895 } else if (TySize > CharUnits::fromQuantity(16) &&
6896 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6897 !isHomogeneousAggregate(Ty, Base, Members)) {
6898 IsIndirect = true;
6899
6900 // Otherwise, bound the type's ABI alignment.
6901 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6902 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
6903 // Our callers should be prepared to handle an under-aligned address.
6904 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6905 getABIKind() == ARMABIInfo::AAPCS) {
6906 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6907 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6908 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6909 // ARMv7k allows type alignment up to 16 bytes.
6910 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6911 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
6912 } else {
6913 TyAlignForABI = CharUnits::fromQuantity(4);
6914 }
6915
6916 TypeInfoChars TyInfo(TySize, TyAlignForABI, false);
6917 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
6918 SlotSize, /*AllowHigherAlign*/ true);
6919}
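The C-level pattern this lowering serves, as a self-contained sketch; each int argument occupies one 4-byte slot (SlotSize above):

    #include <stdarg.h>
    int sum(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int s = 0;
      for (int i = 0; i < n; ++i)
        s += va_arg(ap, int);   // advances by the 4-byte slot size
      va_end(ap);
      return s;
    }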
6920
6921//===----------------------------------------------------------------------===//
6922// NVPTX ABI Implementation
6923//===----------------------------------------------------------------------===//
6924
6925namespace {
6926
6927class NVPTXTargetCodeGenInfo;
6928
6929class NVPTXABIInfo : public ABIInfo {
6930 NVPTXTargetCodeGenInfo &CGInfo;
6931
6932public:
6933 NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
6934 : ABIInfo(CGT), CGInfo(Info) {}
6935
6936 ABIArgInfo classifyReturnType(QualType RetTy) const;
6937 ABIArgInfo classifyArgumentType(QualType Ty) const;
6938
6939 void computeInfo(CGFunctionInfo &FI) const override;
6940 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6941 QualType Ty) const override;
6942 bool isUnsupportedType(QualType T) const;
6943 ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
6944};
6945
6946class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
6947public:
6948 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
6949 : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
6950
6951 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6952 CodeGen::CodeGenModule &M) const override;
6953 bool shouldEmitStaticExternCAliases() const override;
6954
6955 llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
6956 // On the device side, surface reference is represented as an object handle
6957 // in 64-bit integer.
6958 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6959 }
6960
6961 llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
6962 // On the device side, texture reference is represented as an object handle
6963 // in 64-bit integer.
6964 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6965 }
6966
6967 bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6968 LValue Src) const override {
6969 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6970 return true;
6971 }
6972
6973 bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6974 LValue Src) const override {
6975 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6976 return true;
6977 }
6978
6979private:
6980 // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
6981 // resulting MDNode to the nvvm.annotations MDNode.
6982 static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
6983 int Operand);
6984
6985 static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6986 LValue Src) {
6987 llvm::Value *Handle = nullptr;
6988 llvm::Constant *C =
6989 llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
6990 // Lookup `addrspacecast` through the constant pointer if any.
6991 if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
6992 C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
6993 if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
6994 // Load the handle from the specific global variable using
6995 // `nvvm.texsurf.handle.internal` intrinsic.
6996 Handle = CGF.EmitRuntimeCall(
6997 CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
6998 {GV->getType()}),
6999 {GV}, "texsurf_handle");
7000 } else
7001 Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
7002 CGF.EmitStoreOfScalar(Handle, Dst);
7003 }
7004};
7005
7006 /// Checks if the type is not directly supported by the current target.
7007bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
7008 ASTContext &Context = getContext();
7009 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
7010 return true;
7011 if (!Context.getTargetInfo().hasFloat128Type() &&
7012 (T->isFloat128Type() ||
7013 (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
7014 return true;
7015 if (const auto *EIT = T->getAs<ExtIntType>())
7016 return EIT->getNumBits() >
7017 (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
7018 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
7019 Context.getTypeSize(T) > 64U)
7020 return true;
7021 if (const auto *AT = T->getAsArrayTypeUnsafe())
7022 return isUnsupportedType(AT->getElementType());
7023 const auto *RT = T->getAs<RecordType>();
7024 if (!RT)
7025 return false;
7026 const RecordDecl *RD = RT->getDecl();
7027
7028 // If this is a C++ record, check the bases first.
7029 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
7030 for (const CXXBaseSpecifier &I : CXXRD->bases())
7031 if (isUnsupportedType(I.getType()))
7032 return true;
7033
7034 for (const FieldDecl *I : RD->fields())
7035 if (isUnsupportedType(I->getType()))
7036 return true;
7037 return false;
7038}
7039
7040 /// Coerce the given type into an integer array whose element size is capped at MaxSize bits.
7041ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
7042 unsigned MaxSize) const {
7043 // Alignment and Size are measured in bits.
7044 const uint64_t Size = getContext().getTypeSize(Ty);
7045 const uint64_t Alignment = getContext().getTypeAlign(Ty);
7046 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
7047 llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
7048 const uint64_t NumElements = (Size + Div - 1) / Div;
7049 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
7050}
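As a worked instance of the arithmetic above: for a 96-bit type with 32-bit alignment and MaxSize = 64, Div = min(64, 32) = 32 and NumElements = (96 + 31) / 32 = 3, so the type is coerced to [3 x i32].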
7051
7052ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
7053 if (RetTy->isVoidType())
7054 return ABIArgInfo::getIgnore();
7055
7056 if (getContext().getLangOpts().OpenMP &&
7057 getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
7058 return coerceToIntArrayWithLimit(RetTy, 64);
7059
7060 // Note: this is different from the default ABI.
7061 if (!RetTy->isScalarType())
7062 return ABIArgInfo::getDirect();
7063
7064 // Treat an enum type as its underlying type.
7065 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7066 RetTy = EnumTy->getDecl()->getIntegerType();
7067
7068 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7069 : ABIArgInfo::getDirect());
7070}
7071
7072ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
7073 // Treat an enum type as its underlying type.
7074 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7075 Ty = EnumTy->getDecl()->getIntegerType();
7076
7077 // Classify aggregate types as indirect, passed by value.
7078 if (isAggregateTypeForABI(Ty)) {
7079 // Under CUDA device compilation, tex/surf builtin types are replaced with
7080 // object types and passed directly.
7081 if (getContext().getLangOpts().CUDAIsDevice) {
7082 if (Ty->isCUDADeviceBuiltinSurfaceType())
7083 return ABIArgInfo::getDirect(
7084 CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
7085 if (Ty->isCUDADeviceBuiltinTextureType())
7086 return ABIArgInfo::getDirect(
7087 CGInfo.getCUDADeviceBuiltinTextureDeviceType());
7088 }
7089 return getNaturalAlignIndirect(Ty, /* byval */ true);
7090 }
7091
7092 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
7093 if ((EIT->getNumBits() > 128) ||
7094 (!getContext().getTargetInfo().hasInt128Type() &&
7095 EIT->getNumBits() > 64))
7096 return getNaturalAlignIndirect(Ty, /* byval */ true);
7097 }
7098
7099 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
7100 : ABIArgInfo::getDirect());
7101}
7102
7103void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
7104 if (!getCXXABI().classifyReturnType(FI))
7105 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7106 for (auto &I : FI.arguments())
7107 I.info = classifyArgumentType(I.type);
7108
7109 // Always honor user-specified calling convention.
7110 if (FI.getCallingConvention() != llvm::CallingConv::C)
7111 return;
7112
7113 FI.setEffectiveCallingConvention(getRuntimeCC());
7114}
7115
7116Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7117 QualType Ty) const {
7118 llvm_unreachable("NVPTX does not support varargs");
7119}
7120
7121void NVPTXTargetCodeGenInfo::setTargetAttributes(
7122 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7123 if (GV->isDeclaration())
7124 return;
7125 const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
7126 if (VD) {
7127 if (M.getLangOpts().CUDA) {
7128 if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
7129 addNVVMMetadata(GV, "surface", 1);
7130 else if (VD->getType()->isCUDADeviceBuiltinTextureType())
7131 addNVVMMetadata(GV, "texture", 1);
7132 return;
7133 }
7134 }
7135
7136 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7137 if (!FD) return;
7138
7139 llvm::Function *F = cast<llvm::Function>(GV);
7140
7141 // Perform special handling in OpenCL mode
7142 if (M.getLangOpts().OpenCL) {
7143 // Use OpenCL function attributes to check for kernel functions
7144 // By default, all functions are device functions
7145 if (FD->hasAttr<OpenCLKernelAttr>()) {
7146 // OpenCL __kernel functions get kernel metadata
7147 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7148 addNVVMMetadata(F, "kernel", 1);
7149 // And kernel functions are not subject to inlining
7150 F->addFnAttr(llvm::Attribute::NoInline);
7151 }
7152 }
7153
7154 // Perform special handling in CUDA mode.
7155 if (M.getLangOpts().CUDA) {
7156 // CUDA __global__ functions get a kernel metadata entry. Since
7157 // __global__ functions cannot be called from the device, we do not
7158 // need to set the noinline attribute.
7159 if (FD->hasAttr<CUDAGlobalAttr>()) {
7160 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7161 addNVVMMetadata(F, "kernel", 1);
7162 }
7163 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
7164 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
7165 llvm::APSInt MaxThreads(32);
7166 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
7167 if (MaxThreads > 0)
7168 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
7169
7170 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
7171 // not specified in __launch_bounds__ or if the user specified a 0 value,
7172 // we don't have to add a PTX directive.
7173 if (Attr->getMinBlocks()) {
7174 llvm::APSInt MinBlocks(32);
7175 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
7176 if (MinBlocks > 0)
7177 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
7178 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
7179 }
7180 }
7181 }
7182}
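For illustration, a CUDA sketch of the attribute consumed above; __launch_bounds__(256, 2) yields "maxntidx" = 256 and "minctasm" = 2 entries in !nvvm.annotations (the kernel name is hypothetical):

    __global__ void __launch_bounds__(256, 2) scale(float *p) {
      p[threadIdx.x] *= 2.0f;   // __global__ also gets the "kernel" annotation
    }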
7183
7184void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
7185 StringRef Name, int Operand) {
7186 llvm::Module *M = GV->getParent();
7187 llvm::LLVMContext &Ctx = M->getContext();
7188
7189 // Get "nvvm.annotations" metadata node
7190 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
7191
7192 llvm::Metadata *MDVals[] = {
7193 llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
7194 llvm::ConstantAsMetadata::get(
7195 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
7196 // Append metadata to nvvm.annotations
7197 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7198}
7199
7200bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
7201 return false;
7202}
7203}
7204
7205//===----------------------------------------------------------------------===//
7206// SystemZ ABI Implementation
7207//===----------------------------------------------------------------------===//
7208
7209namespace {
7210
7211class SystemZABIInfo : public SwiftABIInfo {
7212 bool HasVector;
7213 bool IsSoftFloatABI;
7214
7215public:
7216 SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
7217 : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
7218
7219 bool isPromotableIntegerTypeForABI(QualType Ty) const;
7220 bool isCompoundType(QualType Ty) const;
7221 bool isVectorArgumentType(QualType Ty) const;
7222 bool isFPArgumentType(QualType Ty) const;
7223 QualType GetSingleElementType(QualType Ty) const;
7224
7225 ABIArgInfo classifyReturnType(QualType RetTy) const;
7226 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
7227
7228 void computeInfo(CGFunctionInfo &FI) const override {
7229 if (!getCXXABI().classifyReturnType(FI))
7230 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7231 for (auto &I : FI.arguments())
7232 I.info = classifyArgumentType(I.type);
7233 }
7234
7235 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7236 QualType Ty) const override;
7237
7238 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
7239 bool asReturnValue) const override {
7240 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
7241 }
7242 bool isSwiftErrorInRegister() const override {
7243 return false;
7244 }
7245};
7246
7247class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
7248public:
7249 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
7250 : TargetCodeGenInfo(
7251 std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
7252};
7253
7254}
7255
7256bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
7257 // Treat an enum type as its underlying type.
7258 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7259 Ty = EnumTy->getDecl()->getIntegerType();
7260
7261 // Promotable integer types are required to be promoted by the ABI.
7262 if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
7263 return true;
7264
7265 if (const auto *EIT = Ty->getAs<ExtIntType>())
7266 if (EIT->getNumBits() < 64)
7267 return true;
7268
7269 // 32-bit values must also be promoted.
7270 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7271 switch (BT->getKind()) {
7272 case BuiltinType::Int:
7273 case BuiltinType::UInt:
7274 return true;
7275 default:
7276 return false;
7277 }
7278 return false;
7279}
7280
7281bool SystemZABIInfo::isCompoundType(QualType Ty) const {
7282 return (Ty->isAnyComplexType() ||
7283 Ty->isVectorType() ||
7284 isAggregateTypeForABI(Ty));
7285}
7286
7287bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
7288 return (HasVector &&
7289 Ty->isVectorType() &&
7290 getContext().getTypeSize(Ty) <= 128);
7291}
7292
7293bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
7294 if (IsSoftFloatABI)
7295 return false;
7296
7297 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7298 switch (BT->getKind()) {
7299 case BuiltinType::Float:
7300 case BuiltinType::Double:
7301 return true;
7302 default:
7303 return false;
7304 }
7305
7306 return false;
7307}
7308
7309QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
7310 const RecordType *RT = Ty->getAs<RecordType>();
7311
7312 if (RT && RT->isStructureOrClassType()) {
7313 const RecordDecl *RD = RT->getDecl();
7314 QualType Found;
7315
7316 // If this is a C++ record, check the bases first.
7317 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
7318 for (const auto &I : CXXRD->bases()) {
7319 QualType Base = I.getType();
7320
7321 // Empty bases don't affect things either way.
7322 if (isEmptyRecord(getContext(), Base, true))
7323 continue;
7324
7325 if (!Found.isNull())
7326 return Ty;
7327 Found = GetSingleElementType(Base);
7328 }
7329
7330 // Check the fields.
7331 for (const auto *FD : RD->fields()) {
7332 // For compatibility with GCC, ignore empty bitfields in C++ mode.
7333 // Unlike isSingleElementStruct(), empty structure and array fields
7334 // do count. So do anonymous bitfields that aren't zero-sized.
7335 if (getContext().getLangOpts().CPlusPlus &&
7336 FD->isZeroLengthBitField(getContext()))
7337 continue;
7338 // Like isSingleElementStruct(), ignore C++20 empty data members.
7339 if (FD->hasAttr<NoUniqueAddressAttr>() &&
7340 isEmptyRecord(getContext(), FD->getType(), true))
7341 continue;
7342
7343 // Unlike isSingleElementStruct(), arrays do not count.
7344 // Nested structures still do though.
7345 if (!Found.isNull())
7346 return Ty;
7347 Found = GetSingleElementType(FD->getType());
7348 }
7349
7350 // Unlike isSingleElementStruct(), trailing padding is allowed.
7351 // An 8-byte aligned struct s { float f; } is passed as a double.
7352 if (!Found.isNull())
7353 return Found;
7354 }
7355
7356 return Ty;
7357}
7358
7359Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7360 QualType Ty) const {
7361 // Assume that va_list type is correct; should be pointer to LLVM type:
7362 // struct {
7363 // i64 __gpr;
7364 // i64 __fpr;
7365 // i8 *__overflow_arg_area;
7366 // i8 *__reg_save_area;
7367 // };
7368
7369 // Every non-vector argument occupies 8 bytes and is passed by preference
7370 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
7371 // always passed on the stack.
7372 Ty = getContext().getCanonicalType(Ty);
7373 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7374 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
7375 llvm::Type *DirectTy = ArgTy;
7376 ABIArgInfo AI = classifyArgumentType(Ty);
7377 bool IsIndirect = AI.isIndirect();
7378 bool InFPRs = false;
7379 bool IsVector = false;
7380 CharUnits UnpaddedSize;
7381 CharUnits DirectAlign;
7382 if (IsIndirect) {
7383 DirectTy = llvm::PointerType::getUnqual(DirectTy);
7384 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
7385 } else {
7386 if (AI.getCoerceToType())
7387 ArgTy = AI.getCoerceToType();
7388 InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
7389 IsVector = ArgTy->isVectorTy();
7390 UnpaddedSize = TyInfo.Width;
7391 DirectAlign = TyInfo.Align;
7392 }
7393 CharUnits PaddedSize = CharUnits::fromQuantity(8);
7394 if (IsVector && UnpaddedSize > PaddedSize)
7395 PaddedSize = CharUnits::fromQuantity(16);
7396 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
7397
7398 CharUnits Padding = (PaddedSize - UnpaddedSize);
7399
7400 llvm::Type *IndexTy = CGF.Int64Ty;
7401 llvm::Value *PaddedSizeV =
7402 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
7403
7404 if (IsVector) {
7405 // Work out the address of a vector argument on the stack.
7406 // Vector arguments are always passed in the high bits of a
7407 // single (8 byte) or double (16 byte) stack slot.
7408 Address OverflowArgAreaPtr =
7409 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7410 Address OverflowArgArea =
7411 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7412 TyInfo.Align);
7413 Address MemAddr =
7414 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
7415
7416 // Update overflow_arg_area_ptr pointer
7417 llvm::Value *NewOverflowArgArea =
7418 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
7419 "overflow_arg_area");
7420 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7421
7422 return MemAddr;
7423 }
7424
7425 assert(PaddedSize.getQuantity() == 8);
7426
7427 unsigned MaxRegs, RegCountField, RegSaveIndex;
7428 CharUnits RegPadding;
7429 if (InFPRs) {
7430 MaxRegs = 4; // Maximum of 4 FPR arguments
7431 RegCountField = 1; // __fpr
7432 RegSaveIndex = 16; // save offset for f0
7433 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
7434 } else {
7435 MaxRegs = 5; // Maximum of 5 GPR arguments
7436 RegCountField = 0; // __gpr
7437 RegSaveIndex = 2; // save offset for r2
7438 RegPadding = Padding; // values are passed in the low bits of a GPR
7439 }
7440
7441 Address RegCountPtr =
7442 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
7443 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
7444 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
7445 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
7446 "fits_in_regs");
7447
7448 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
7449 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
7450 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
7451 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
7452
7453 // Emit code to load the value if it was passed in registers.
7454 CGF.EmitBlock(InRegBlock);
7455
7456 // Work out the address of an argument register.
7457 llvm::Value *ScaledRegCount =
7458 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
7459 llvm::Value *RegBase =
7460 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
7461 + RegPadding.getQuantity());
7462 llvm::Value *RegOffset =
7463 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
7464 Address RegSaveAreaPtr =
7465 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
7466 llvm::Value *RegSaveArea =
7467 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
7468 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
7469 "raw_reg_addr"),
7470 PaddedSize);
7471 Address RegAddr =
7472 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
7473
7474 // Update the register count
7475 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
7476 llvm::Value *NewRegCount =
7477 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
7478 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
7479 CGF.EmitBranch(ContBlock);
7480
7481 // Emit code to load the value if it was passed in memory.
7482 CGF.EmitBlock(InMemBlock);
7483
7484 // Work out the address of a stack argument.
7485 Address OverflowArgAreaPtr =
7486 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7487 Address OverflowArgArea =
7488 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7489 PaddedSize);
7490 Address RawMemAddr =
7491 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
7492 Address MemAddr =
7493 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
7494
7495 // Update overflow_arg_area_ptr pointer
7496 llvm::Value *NewOverflowArgArea =
7497 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
7498 "overflow_arg_area");
7499 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7500 CGF.EmitBranch(ContBlock);
7501
7502 // Return the appropriate result.
7503 CGF.EmitBlock(ContBlock);
7504 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
7505 MemAddr, InMemBlock, "va_arg.addr");
7506
7507 if (IsIndirect)
7508 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
7509 TyInfo.Align);
7510
7511 return ResAddr;
7512}
7513
7514ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
7515 if (RetTy->isVoidType())
7516 return ABIArgInfo::getIgnore();
7517 if (isVectorArgumentType(RetTy))
7518 return ABIArgInfo::getDirect();
7519 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
7520 return getNaturalAlignIndirect(RetTy);
7521 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7522 : ABIArgInfo::getDirect());
7523}
7524
7525ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
7526 // Handle the generic C++ ABI.
7527 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7528 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7529
7530 // Integers and enums are extended to full register width.
7531 if (isPromotableIntegerTypeForABI(Ty))
7532 return ABIArgInfo::getExtend(Ty);
7533
7534 // Handle vector types and vector-like structure types. Note that
7535 // as opposed to float-like structure types, we do not allow any
7536 // padding for vector-like structures, so verify the sizes match.
7537 uint64_t Size = getContext().getTypeSize(Ty);
7538 QualType SingleElementTy = GetSingleElementType(Ty);
7539 if (isVectorArgumentType(SingleElementTy) &&
7540 getContext().getTypeSize(SingleElementTy) == Size)
7541 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
7542
7543 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
7544 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
7545 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7546
7547 // Handle small structures.
7548 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7549 // Structures with flexible arrays have variable length, so they really
7550 // do fail the size test above.
7551 const RecordDecl *RD = RT->getDecl();
7552 if (RD->hasFlexibleArrayMember())
7553 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7554
7555 // The structure is passed as an unextended integer, a float, or a double.
7556 llvm::Type *PassTy;
7557 if (isFPArgumentType(SingleElementTy)) {
7558 assert(Size == 32 || Size == 64);
7559 if (Size == 32)
7560 PassTy = llvm::Type::getFloatTy(getVMContext());
7561 else
7562 PassTy = llvm::Type::getDoubleTy(getVMContext());
7563 } else
7564 PassTy = llvm::IntegerType::get(getVMContext(), Size);
7565 return ABIArgInfo::getDirect(PassTy);
7566 }
7567
7568 // Non-structure compounds are passed indirectly.
7569 if (isCompoundType(Ty))
7570 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7571
7572 return ABIArgInfo::getDirect(nullptr);
7573}
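For illustration, a sketch of the classification rules above (hypothetical types):

    struct F1 { float f; };     // single FP element, 32 bits -> passed as float
    struct P4 { short a, b; };  // 32 bits -> passed as an unextended i32
    struct P3 { char c[3]; };   // 24 bits: not 1, 2, 4 or 8 bytes -> passed indirectly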
7574
7575//===----------------------------------------------------------------------===//
7576// MSP430 ABI Implementation
7577//===----------------------------------------------------------------------===//
7578
7579namespace {
7580
7581class MSP430ABIInfo : public DefaultABIInfo {
7582 static ABIArgInfo complexArgInfo() {
7583 ABIArgInfo Info = ABIArgInfo::getDirect();
7584 Info.setCanBeFlattened(false);
7585 return Info;
7586 }
7587
7588public:
7589 MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7590
7591 ABIArgInfo classifyReturnType(QualType RetTy) const {
7592 if (RetTy->isAnyComplexType())
7593 return complexArgInfo();
7594
7595 return DefaultABIInfo::classifyReturnType(RetTy);
7596 }
7597
7598 ABIArgInfo classifyArgumentType(QualType RetTy) const {
7599 if (RetTy->isAnyComplexType())
7600 return complexArgInfo();
7601
7602 return DefaultABIInfo::classifyArgumentType(RetTy);
7603 }
7604
7605 // Just copy the original implementations because
7606 // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
7607 void computeInfo(CGFunctionInfo &FI) const override {
7608 if (!getCXXABI().classifyReturnType(FI))
7609 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7610 for (auto &I : FI.arguments())
7611 I.info = classifyArgumentType(I.type);
7612 }
7613
7614 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7615 QualType Ty) const override {
7616 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
7617 }
7618};
7619
7620class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
7621public:
7622 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
7623 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
7624 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7625 CodeGen::CodeGenModule &M) const override;
7626};
7627
7628}
7629
7630void MSP430TargetCodeGenInfo::setTargetAttributes(
7631 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7632 if (GV->isDeclaration())
7633 return;
7634 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
7635 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
7636 if (!InterruptAttr)
7637 return;
7638
7639 // Handle 'interrupt' attribute:
7640 llvm::Function *F = cast<llvm::Function>(GV);
7641
7642 // Step 1: Set ISR calling convention.
7643 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
7644
7645 // Step 2: Add attributes goodness.
7646 F->addFnAttr(llvm::Attribute::NoInline);
7647 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
7648 }
7649}
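The source-level form of the attribute handled above, as a sketch (the vector number 2 is hypothetical):

    void __attribute__((interrupt(2))) isr(void) {
      // compiled with the MSP430_INTR calling convention, noinline,
      // and the "interrupt"="2" function attribute
    }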
7650
7651//===----------------------------------------------------------------------===//
7652// MIPS ABI Implementation. This works for both little-endian and
7653// big-endian variants.
7654//===----------------------------------------------------------------------===//
7655
7656namespace {
7657class MipsABIInfo : public ABIInfo {
7658 bool IsO32;
7659 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
7660 void CoerceToIntArgs(uint64_t TySize,
7661 SmallVectorImpl<llvm::Type *> &ArgList) const;
7662 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
7663 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
7664 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
7665public:
7666 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
7667 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
7668 StackAlignInBytes(IsO32 ? 8 : 16) {}
7669
7670 ABIArgInfo classifyReturnType(QualType RetTy) const;
7671 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
7672 void computeInfo(CGFunctionInfo &FI) const override;
7673 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7674 QualType Ty) const override;
7675 ABIArgInfo extendType(QualType Ty) const;
7676};
7677
7678class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
7679 unsigned SizeOfUnwindException;
7680public:
7681 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
7682 : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
7683 SizeOfUnwindException(IsO32 ? 24 : 32) {}
7684
7685 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
7686 return 29;
7687 }
7688
7689 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7690 CodeGen::CodeGenModule &CGM) const override {
7691 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7692 if (!FD) return;
7693 llvm::Function *Fn = cast<llvm::Function>(GV);
7694
7695 if (FD->hasAttr<MipsLongCallAttr>())
7696 Fn->addFnAttr("long-call");
7697 else if (FD->hasAttr<MipsShortCallAttr>())
7698 Fn->addFnAttr("short-call");
7699
7700 // Other attributes do not have a meaning for declarations.
7701 if (GV->isDeclaration())
7702 return;
7703
7704 if (FD->hasAttr<Mips16Attr>()) {
7705 Fn->addFnAttr("mips16");
7706 }
7707 else if (FD->hasAttr<NoMips16Attr>()) {
7708 Fn->addFnAttr("nomips16");
7709 }
7710
7711 if (FD->hasAttr<MicroMipsAttr>())
7712 Fn->addFnAttr("micromips");
7713 else if (FD->hasAttr<NoMicroMipsAttr>())
7714 Fn->addFnAttr("nomicromips");
7715
7716 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
7717 if (!Attr)
7718 return;
7719
7720 const char *Kind;
7721 switch (Attr->getInterrupt()) {
7722 case MipsInterruptAttr::eic: Kind = "eic"; break;
7723 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
7724 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
7725 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
7726 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
7727 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
7728 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
7729 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
7730 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
7731 }
7732
7733 Fn->addFnAttr("interrupt", Kind);
7734
7735 }
7736
7737 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7738 llvm::Value *Address) const override;
7739
7740 unsigned getSizeOfUnwindException() const override {
7741 return SizeOfUnwindException;
7742 }
7743};
7744}
7745
7746void MipsABIInfo::CoerceToIntArgs(
7747 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
7748 llvm::IntegerType *IntTy =
7749 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
7750
7751 // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
7752 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
7753 ArgList.push_back(IntTy);
7754
7755 // If necessary, add one more integer type to ArgList.
7756 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
7757
7758 if (R)
7759 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
7760}
7761
7762// In N32/64, an aligned double precision floating point field is passed in
7763// a register.
7764llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
7765 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
7766
7767 if (IsO32) {
7768 CoerceToIntArgs(TySize, ArgList);
7769 return llvm::StructType::get(getVMContext(), ArgList);
7770 }
7771
7772 if (Ty->isComplexType())
7773 return CGT.ConvertType(Ty);
7774
7775 const RecordType *RT = Ty->getAs<RecordType>();
7776
7777 // Unions/vectors are passed in integer registers.
7778 if (!RT || !RT->isStructureOrClassType()) {
7779 CoerceToIntArgs(TySize, ArgList);
7780 return llvm::StructType::get(getVMContext(), ArgList);
7781 }
7782
7783 const RecordDecl *RD = RT->getDecl();
7784 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7785 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
7786
7787 uint64_t LastOffset = 0;
7788 unsigned idx = 0;
7789 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
7790
7791 // Iterate over fields in the struct/class and check if there are any aligned
7792 // double fields.
7793 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
7794 i != e; ++i, ++idx) {
7795 const QualType Ty = i->getType();
7796 const BuiltinType *BT = Ty->getAs<BuiltinType>();
7797
7798 if (!BT || BT->getKind() != BuiltinType::Double)
7799 continue;
7800
7801 uint64_t Offset = Layout.getFieldOffset(idx);
7802 if (Offset % 64) // Ignore doubles that are not aligned.
7803 continue;
7804
7805 // Add ((Offset - LastOffset) / 64) args of type i64.
7806 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
7807 ArgList.push_back(I64);
7808
7809 // Add double type.
7810 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
7811 LastOffset = Offset + 64;
7812 }
7813
7814 CoerceToIntArgs(TySize - LastOffset, IntArgList);
7815 ArgList.append(IntArgList.begin(), IntArgList.end());
7816
7817 return llvm::StructType::get(getVMContext(), ArgList);
7818}
7819
7820llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
7821 uint64_t Offset) const {
7822 if (OrigOffset + MinABIStackAlignInBytes > Offset)
7823 return nullptr;
7824
7825 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
7826}
7827
7828ABIArgInfo
7829MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
7830 Ty = useFirstFieldIfTransparentUnion(Ty);
7831
7832 uint64_t OrigOffset = Offset;
7833 uint64_t TySize = getContext().getTypeSize(Ty);
7834 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
7835
7836 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
7837 (uint64_t)StackAlignInBytes);
7838 unsigned CurrOffset = llvm::alignTo(Offset, Align);
7839 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
7840
7841 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
7842 // Ignore empty aggregates.
7843 if (TySize == 0)
7844 return ABIArgInfo::getIgnore();
7845
7846 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
7847 Offset = OrigOffset + MinABIStackAlignInBytes;
7848 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7849 }
7850
7851 // If we have reached here, aggregates are passed directly by coercing to
7852 // another structure type. Padding is inserted if the offset of the
7853 // aggregate is unaligned.
7854 ABIArgInfo ArgInfo =
7855 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
7856 getPaddingType(OrigOffset, CurrOffset));
7857 ArgInfo.setInReg(true);
7858 return ArgInfo;
7859 }
7860
7861 // Treat an enum type as its underlying type.
7862 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7863 Ty = EnumTy->getDecl()->getIntegerType();
7864
7865 // Make sure we pass indirectly things that are too large.
7866 if (const auto *EIT = Ty->getAs<ExtIntType>())
7867 if (EIT->getNumBits() > 128 ||
7868 (EIT->getNumBits() > 64 &&
7869 !getContext().getTargetInfo().hasInt128Type()))
7870 return getNaturalAlignIndirect(Ty);
7871
7872 // All integral types are promoted to the GPR width.
7873 if (Ty->isIntegralOrEnumerationType())
7874 return extendType(Ty);
7875
7876 return ABIArgInfo::getDirect(
7877 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
7878}
7879
7880llvm::Type*
7881MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
7882 const RecordType *RT = RetTy->getAs<RecordType>();
7883 SmallVector<llvm::Type*, 8> RTList;
7884
7885 if (RT && RT->isStructureOrClassType()) {
7886 const RecordDecl *RD = RT->getDecl();
7887 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7888 unsigned FieldCnt = Layout.getFieldCount();
7889
7890 // N32/64 returns struct/classes in floating point registers if the
7891 // following conditions are met:
7892 // 1. The size of the struct/class is no larger than 128-bit.
7893 // 2. The struct/class has one or two fields all of which are floating
7894 // point types.
7895 // 3. The offset of the first field is zero (this follows what gcc does).
7896 //
7897 // Any other composite results are returned in integer registers.
7898 //
7899 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
7900 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
7901 for (; b != e; ++b) {
7902 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
7903
7904 if (!BT || !BT->isFloatingPoint())
7905 break;
7906
7907 RTList.push_back(CGT.ConvertType(b->getType()));
7908 }
7909
7910 if (b == e)
7911 return llvm::StructType::get(getVMContext(), RTList,
7912 RD->hasAttr<PackedAttr>());
7913
7914 RTList.clear();
7915 }
7916 }
7917
7918 CoerceToIntArgs(Size, RTList);
7919 return llvm::StructType::get(getVMContext(), RTList);
7920}
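For illustration, a sketch of the N32/N64 floating-point return rule above (hypothetical types):

    struct DD { double a, b; };      // <= 128 bits, all FP fields, first at offset 0
                                     //   -> returned as {double, double} in FPRs
    struct DI { double a; int b; };  // mixed field types -> returned in integer registers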
7921
7922ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
7923 uint64_t Size = getContext().getTypeSize(RetTy);
7924
7925 if (RetTy->isVoidType())
7926 return ABIArgInfo::getIgnore();
7927
7928 // O32 doesn't treat zero-sized structs differently from other structs.
7929 // However, N32/N64 ignores zero sized return values.
7930 if (!IsO32 && Size == 0)
7931 return ABIArgInfo::getIgnore();
7932
7933 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
7934 if (Size <= 128) {
7935 if (RetTy->isAnyComplexType())
7936 return ABIArgInfo::getDirect();
7937
7938 // O32 returns integer vectors in registers and N32/N64 returns all small
7939 // aggregates in registers.
7940 if (!IsO32 ||
7941 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
7942 ABIArgInfo ArgInfo =
7943 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
7944 ArgInfo.setInReg(true);
7945 return ArgInfo;
7946 }
7947 }
7948
7949 return getNaturalAlignIndirect(RetTy);
7950 }
7951
7952 // Treat an enum type as its underlying type.
7953 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7954 RetTy = EnumTy->getDecl()->getIntegerType();
7955
7956 // Make sure we pass indirectly things that are too large.
7957 if (const auto *EIT = RetTy->getAs<ExtIntType>())
7958 if (EIT->getNumBits() > 128 ||
7959 (EIT->getNumBits() > 64 &&
7960 !getContext().getTargetInfo().hasInt128Type()))
7961 return getNaturalAlignIndirect(RetTy);
7962
7963 if (isPromotableIntegerTypeForABI(RetTy))
7964 return ABIArgInfo::getExtend(RetTy);
7965
7966 if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
7967 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
7968 return ABIArgInfo::getSignExtend(RetTy);
7969
7970 return ABIArgInfo::getDirect();
7971}
7972
7973void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
7974 ABIArgInfo &RetInfo = FI.getReturnInfo();
7975 if (!getCXXABI().classifyReturnType(FI))
7976 RetInfo = classifyReturnType(FI.getReturnType());
7977
7978 // Check if a pointer to an aggregate is passed as a hidden argument.
7979 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
7980
7981 for (auto &I : FI.arguments())
7982 I.info = classifyArgumentType(I.type, Offset);
7983}
7984
7985Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7986 QualType OrigTy) const {
7987 QualType Ty = OrigTy;
7988
7989 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
7990 // Pointers are also promoted in the same way but this only matters for N32.
7991 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7992 unsigned PtrWidth = getTarget().getPointerWidth(0);
7993 bool DidPromote = false;
7994 if ((Ty->isIntegerType() &&
7995 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
7996 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
7997 DidPromote = true;
7998 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
7999 Ty->isSignedIntegerType());
8000 }
8001
8002 auto TyInfo = getContext().getTypeInfoInChars(Ty);
8003
8004 // The alignment of things in the argument area is never larger than
8005 // StackAlignInBytes.
8006 TyInfo.Align =
8007 std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
8008
8009 // MinABIStackAlignInBytes is the size of argument slots on the stack.
8010 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
8011
8012 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
8013 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
8014
8015
8016 // If there was a promotion, "unpromote" into a temporary.
8017 // TODO: can we just use a pointer into a subset of the original slot?
8018 if (DidPromote) {
8019 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
8020 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
8021
8022 // Truncate down to the right width.
8023 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
8024 : CGF.IntPtrTy);
8025 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
8026 if (OrigTy->isPointerType())
8027 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
8028
8029 CGF.Builder.CreateStore(V, Temp);
8030 Addr = Temp;
8031 }
8032
8033 return Addr;
8034}
8035
8036ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
8037 int TySize = getContext().getTypeSize(Ty);
8038
8039 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
8040 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
8041 return ABIArgInfo::getSignExtend(Ty);
8042
8043 return ABIArgInfo::getExtend(Ty);
8044}
8045
8046bool
8047MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8048 llvm::Value *Address) const {
8049 // This information comes from gcc's implementation, which seems to be
8050 // about as canonical as it gets.
8051
8052 // Everything on MIPS is 4 bytes. Double-precision FP registers
8053 // are aliased to pairs of single-precision FP registers.
8054 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
8055
8056 // 0-31 are the general purpose registers, $0 - $31.
8057 // 32-63 are the floating-point registers, $f0 - $f31.
8058 // 64 and 65 are the multiply/divide registers, $hi and $lo.
8059 // 66 is the (notional, I think) register for signal-handler return.
8060 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
8061
8062 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
8063 // They are one bit wide and ignored here.
8064
8065 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
8066 // (coprocessor 1 is the FP unit)
8067 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
8068 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
8069 // 176-181 are the DSP accumulator registers.
8070 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
8071 return false;
8072}
8073
8074//===----------------------------------------------------------------------===//
8075// AVR ABI Implementation.
8076//===----------------------------------------------------------------------===//
8077
8078namespace {
8079class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
8080public:
8081 AVRTargetCodeGenInfo(CodeGenTypes &CGT)
8082 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
8083
8084 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8085 CodeGen::CodeGenModule &CGM) const override {
8086 if (GV->isDeclaration())
8087 return;
8088 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
8089 if (!FD) return;
8090 auto *Fn = cast<llvm::Function>(GV);
8091
8092 if (FD->getAttr<AVRInterruptAttr>())
8093 Fn->addFnAttr("interrupt");
8094
8095 if (FD->getAttr<AVRSignalAttr>())
8096 Fn->addFnAttr("signal");
8097 }
8098};
8099}
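The AVR attributes mapped to function attributes above, in source form (handler names are hypothetical):

    void __attribute__((interrupt)) int0_handler(void);  // gets the "interrupt" fn attr
    void __attribute__((signal))    timer_isr(void);     // gets the "signal" fn attr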
8100
8101//===----------------------------------------------------------------------===//
8102// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
8103// Currently subclassed only to implement custom OpenCL C function attribute
8104// handling.
8105//===----------------------------------------------------------------------===//
8106
8107namespace {
8108
8109class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
8110public:
8111 TCETargetCodeGenInfo(CodeGenTypes &CGT)
8112 : DefaultTargetCodeGenInfo(CGT) {}
8113
8114 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8115 CodeGen::CodeGenModule &M) const override;
8116};
8117
8118void TCETargetCodeGenInfo::setTargetAttributes(
8119 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8120 if (GV->isDeclaration())
8121 return;
8122 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8123 if (!FD) return;
8124
8125 llvm::Function *F = cast<llvm::Function>(GV);
8126
8127 if (M.getLangOpts().OpenCL) {
8128 if (FD->hasAttr<OpenCLKernelAttr>()) {
8129 // OpenCL C Kernel functions are not subject to inlining
8130 F->addFnAttr(llvm::Attribute::NoInline);
8131 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
8132 if (Attr) {
8133 // Convert the reqd_work_group_size() attributes to metadata.
8134 llvm::LLVMContext &Context = F->getContext();
8135 llvm::NamedMDNode *OpenCLMetadata =
8136 M.getModule().getOrInsertNamedMetadata(
8137 "opencl.kernel_wg_size_info");
8138
8139 SmallVector<llvm::Metadata *, 5> Operands;
8140 Operands.push_back(llvm::ConstantAsMetadata::get(F));
8141
8142 Operands.push_back(
8143 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8144 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
8145 Operands.push_back(
8146 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8147 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
8148 Operands.push_back(
8149 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8150 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
8151
8152 // Add a boolean constant operand for "required" (true) or "hint"
8153 // (false) for implementing the work_group_size_hint attr later.
8154 // Currently always true as the hint is not yet implemented.
8155 Operands.push_back(
8156 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
8157 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
8158 }
8159 }
8160 }
8161}
8162
8163}
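For illustration, an OpenCL C sketch of the kernel attribute converted to opencl.kernel_wg_size_info metadata above (the kernel name is hypothetical):

    __kernel __attribute__((reqd_work_group_size(64, 1, 1)))
    void bump(__global int *p) {
      p[get_global_id(0)] += 1;   // the kernel is also marked noinline
    }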
8164
8165//===----------------------------------------------------------------------===//
8166// Hexagon ABI Implementation
8167//===----------------------------------------------------------------------===//
8168
8169namespace {
8170
8171class HexagonABIInfo : public DefaultABIInfo {
8172public:
8173 HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8174
8175private:
8176 ABIArgInfo classifyReturnType(QualType RetTy) const;
8177 ABIArgInfo classifyArgumentType(QualType RetTy) const;
8178 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
8179
8180 void computeInfo(CGFunctionInfo &FI) const override;
8181
8182 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8183 QualType Ty) const override;
8184 Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
8185 QualType Ty) const;
8186 Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
8187 QualType Ty) const;
8188 Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
8189 QualType Ty) const;
8190};
8191
8192class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
8193public:
8194 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
8195 : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
8196
8197 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
8198 return 29;
8199 }
8200
8201 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8202 CodeGen::CodeGenModule &GCM) const override {
8203 if (GV->isDeclaration())
8204 return;
8205 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8206 if (!FD)
8207 return;
8208 }
8209};
8210
8211} // namespace
8212
8213void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
8214 unsigned RegsLeft = 6;
8215 if (!getCXXABI().classifyReturnType(FI))
8216 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8217 for (auto &I : FI.arguments())
8218 I.info = classifyArgumentType(I.type, &RegsLeft);
8219}
8220
8221static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
8222 assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
8223 " through registers");
8224
8225 if (*RegsLeft == 0)
8226 return false;
8227
8228 if (Size <= 32) {
8229 (*RegsLeft)--;
8230 return true;
8231 }
8232
8233 if (2 <= (*RegsLeft & (~1U))) {
8234 *RegsLeft = (*RegsLeft & (~1U)) - 2;
8235 return true;
8236 }
8237
8238 // The next available register was r5, but the candidate was wider than 32
8239 // bits, so it has to go on the stack. However, we still consume r5.
8240 if (*RegsLeft == 1)
8241 *RegsLeft = 0;
8242
8243 return false;
8244}
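A worked trace of the register accounting above, starting from RegsLeft = 6 (r0-r5):

    // pass i32 -> Size <= 32:          6 -> 5  (uses r0)
    // pass i64 -> pair: (5 & ~1) - 2 = 2       (uses r2:r3; r1 is skipped for alignment)
    // pass i64 -> pair: (2 & ~1) - 2 = 0       (uses r4:r5)
    // pass i32 -> RegsLeft == 0        -> argument goes on the stack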
8245
8246ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
8247 unsigned *RegsLeft) const {
8248 if (!isAggregateTypeForABI(Ty)) {
8249 // Treat an enum type as its underlying type.
8250 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8251 Ty = EnumTy->getDecl()->getIntegerType();
8252
8253 uint64_t Size = getContext().getTypeSize(Ty);
8254 if (Size <= 64)
8255 HexagonAdjustRegsLeft(Size, RegsLeft);
8256
8257 if (Size > 64 && Ty->isExtIntType())
8258 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8259
8260 return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
8261 : ABIArgInfo::getDirect();
8262 }
8263
8264 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8265 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8266
8267 // Ignore empty records.
8268 if (isEmptyRecord(getContext(), Ty, true))
8269 return ABIArgInfo::getIgnore();
8270
8271 uint64_t Size = getContext().getTypeSize(Ty);
8272 unsigned Align = getContext().getTypeAlign(Ty);
8273
8274 if (Size > 64)
8275 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8276
8277 if (HexagonAdjustRegsLeft(Size, RegsLeft))
8278 Align = Size <= 32 ? 32 : 64;
8279 if (Size <= Align) {
8280 // Pass in the smallest viable integer type.
8281 if (!llvm::isPowerOf2_64(Size))
8282 Size = llvm::NextPowerOf2(Size);
8283 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8284 }
8285 return DefaultABIInfo::classifyArgumentType(Ty);
8286}
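// Illustration (sketch): a 6-byte packed struct has Size == 48, which is not
// a power of two; when a register pair is still available, Align becomes 64,
// so the struct is widened to NextPowerOf2(48) == 64 and passed directly as
// an i64.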
8287
8288ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
8289 if (RetTy->isVoidType())
8290 return ABIArgInfo::getIgnore();
8291
8292 const TargetInfo &T = CGT.getTarget();
8293 uint64_t Size = getContext().getTypeSize(RetTy);
8294
8295 if (RetTy->getAs<VectorType>()) {
8296 // HVX vectors are returned in vector registers or register pairs.
8297 if (T.hasFeature("hvx")) {
8298 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
8299 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
8300 if (Size == VecSize || Size == 2*VecSize)
8301 return ABIArgInfo::getDirectInReg();
8302 }
8303 // Large vector types should be returned via memory.
8304 if (Size > 64)
8305 return getNaturalAlignIndirect(RetTy);
8306 }
8307
8308 if (!isAggregateTypeForABI(RetTy)) {
8309 // Treat an enum type as its underlying type.
8310 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
8311 RetTy = EnumTy->getDecl()->getIntegerType();
8312
8313 if (Size > 64 && RetTy->isExtIntType())
8314 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
8315
8316 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
8317 : ABIArgInfo::getDirect();
8318 }
8319
8320 if (isEmptyRecord(getContext(), RetTy, true))
8321 return ABIArgInfo::getIgnore();
8322
8323 // Aggregates <= 8 bytes are returned in registers, other aggregates
8324 // are returned indirectly.
8325 if (Size <= 64) {
8326 // Return in the smallest viable integer type.
8327 if (!llvm::isPowerOf2_64(Size))
8328 Size = llvm::NextPowerOf2(Size);
8329 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8330 }
8331 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
8332}
8333
8334Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
8335 Address VAListAddr,
8336 QualType Ty) const {
8337 // Load the overflow area pointer.
8338 Address __overflow_area_pointer_p =
8339 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8340 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8341 __overflow_area_pointer_p, "__overflow_area_pointer");
8342
8343 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
8344 if (Align > 4) {
8345 // Alignment should be a power of 2.
8346 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
8347
8348 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
8349 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
8350
8351 // Add offset to the current pointer to access the argument.
8352 __overflow_area_pointer =
8353 CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
8354 llvm::Value *AsInt =
8355 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8356
8357 // Create a mask which should be "AND"ed
8358 // with (overflow_arg_area + align - 1)
8359 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
8360 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8361 CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
8362 "__overflow_area_pointer.align");
8363 }
8364
8365 // Get the type of the argument from memory and bitcast
8366 // overflow area pointer to the argument type.
8367 llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
8368 Address AddrTyped = CGF.Builder.CreateBitCast(
8369 Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
8370 llvm::PointerType::getUnqual(PTy));
8371
8372 // Round up to the minimum stack alignment for varargs which is 4 bytes.
8373 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8374
8375 __overflow_area_pointer = CGF.Builder.CreateGEP(
8376 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
8377 "__overflow_area_pointer.next");
8378 CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
8379
8380 return AddrTyped;
8381}
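// Illustration (sketch): the align-up sequence above computes
// (ptr + align - 1) & -align. For example, with the overflow pointer at
// 0x1006 and an 8-byte aligned argument: (0x1006 + 7) & -8 == 0x1008.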
8382
8383Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
8384 Address VAListAddr,
8385 QualType Ty) const {
8386 // FIXME: Need to handle alignment
8387 llvm::Type *BP = CGF.Int8PtrTy;
8388 llvm::Type *BPP = CGF.Int8PtrPtrTy;
8389 CGBuilderTy &Builder = CGF.Builder;
8390 Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
8391 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
8392 // Handle address alignment for type alignment > 32 bits
8393 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
8394 if (TyAlign > 4) {
8395 assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
8396 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
8397 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
8398 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
8399 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
8400 }
8401 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
8402 Address AddrTyped = Builder.CreateBitCast(
8403 Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
8404
8405 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8406 llvm::Value *NextAddr = Builder.CreateGEP(
8407 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
8408 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
8409
8410 return AddrTyped;
8411}
8412
8413Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
8414 Address VAListAddr,
8415 QualType Ty) const {
8416 int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
8417
8418 if (ArgSize > 8)
8419 return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
8420
8421 // Here we have to check whether the argument is in the register area
8422 // or in the overflow area.
8423 // If the saved register area pointer + argsize rounded up to alignment >
8424 // saved register area end pointer, argument is in overflow area.
8425 unsigned RegsLeft = 6;
8426 Ty = CGF.getContext().getCanonicalType(Ty);
8427 (void)classifyArgumentType(Ty, &RegsLeft);
8428
8429 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
8430 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
8431 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
8432 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
8433
8434 // Get the rounded size of the argument. GCC does not allow varargs of
8435 // size < 4 bytes. We follow the same logic here.
8436 ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8437 int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8438
8439 // Argument may be in saved register area
8440 CGF.EmitBlock(MaybeRegBlock);
8441
8442 // Load the current saved register area pointer.
8443 Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
8444 VAListAddr, 0, "__current_saved_reg_area_pointer_p");
8445 llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
8446 __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
8447
8448 // Load the saved register area end pointer.
8449 Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
8450 VAListAddr, 1, "__saved_reg_area_end_pointer_p");
8451 llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
8452 __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
8453
8454 // If the size of the argument is > 4 bytes, check if the stack
8455 // location is aligned to 8 bytes.
8456 if (ArgAlign > 4) {
8457
8458 llvm::Value *__current_saved_reg_area_pointer_int =
8459 CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
8460 CGF.Int32Ty);
8461
8462 __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
8463 __current_saved_reg_area_pointer_int,
8464 llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
8465 "align_current_saved_reg_area_pointer");
8466
8467 __current_saved_reg_area_pointer_int =
8468 CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
8469 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8470 "align_current_saved_reg_area_pointer");
8471
8472 __current_saved_reg_area_pointer =
8473 CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
8474 __current_saved_reg_area_pointer->getType(),
8475 "align_current_saved_reg_area_pointer");
8476 }
8477
8478 llvm::Value *__new_saved_reg_area_pointer =
8479 CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
8480 llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8481 "__new_saved_reg_area_pointer");
8482
8483 llvm::Value *UsingStack = nullptr;
8484 UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
8485 __saved_reg_area_end_pointer);
8486
8487 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
8488
8489 // Argument in saved register area
8490 // Implement the block where argument is in register saved area
8491 CGF.EmitBlock(InRegBlock);
8492
8493 llvm::Type *PTy = CGF.ConvertType(Ty);
8494 llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
8495 __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
8496
8497 CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
8498 __current_saved_reg_area_pointer_p);
8499
8500 CGF.EmitBranch(ContBlock);
8501
8502 // Argument in overflow area
8503 // Implement the block where the argument is in overflow area.
8504 CGF.EmitBlock(OnStackBlock);
8505
8506 // Load the overflow area pointer
8507 Address __overflow_area_pointer_p =
8508 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8509 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8510 __overflow_area_pointer_p, "__overflow_area_pointer");
8511
8512 // Align the overflow area pointer according to the alignment of the argument
8513 if (ArgAlign > 4) {
8514 llvm::Value *__overflow_area_pointer_int =
8515 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8516
8517 __overflow_area_pointer_int =
8518 CGF.Builder.CreateAdd(__overflow_area_pointer_int,
8519 llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
8520 "align_overflow_area_pointer");
8521
8522 __overflow_area_pointer_int =
8523 CGF.Builder.CreateAnd(__overflow_area_pointer_int,
8524 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8525 "align_overflow_area_pointer");
8526
8527 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8528 __overflow_area_pointer_int, __overflow_area_pointer->getType(),
8529 "align_overflow_area_pointer");
8530 }
8531
8532 // Get the pointer for the next argument in the overflow area and store
8533 // it to the overflow area pointer.
8534 llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
8535 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8536 "__overflow_area_pointer.next");
8537
8538 CGF.Builder.CreateStore(__new_overflow_area_pointer,
8539 __overflow_area_pointer_p);
8540
8541 CGF.Builder.CreateStore(__new_overflow_area_pointer,
8542 __current_saved_reg_area_pointer_p);
8543
8544 // Bitcast the overflow area pointer to the type of argument.
8545 llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
8546 llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
8547 __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
8548
8549 CGF.EmitBranch(ContBlock);
8550
8551 // Get the correct pointer to load the variable argument
8552 // Implement the ContBlock
8553 CGF.EmitBlock(ContBlock);
8554
8555 llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
8556 llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
8557 ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
8558 ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
8559
8560 return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
8561}
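// The control flow emitted above, sketched (not part of the original source):
//   vaarg.maybe_reg: new_saved = cur_saved + ArgSize;
//                    if (new_saved > reg_area_end) goto vaarg.on_stack
//   vaarg.in_reg:    addr = cur_saved; cur_saved = new_saved
//   vaarg.on_stack:  addr = aligned overflow ptr; overflow += ArgSize
//   vaarg.end:       result = phi(addr from the two predecessors)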
8562
8563Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8564 QualType Ty) const {
8565
8566 if (getTarget().getTriple().isMusl())
8567 return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
8568
8569 return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
8570}
8571
8572//===----------------------------------------------------------------------===//
8573// Lanai ABI Implementation
8574//===----------------------------------------------------------------------===//
8575
8576namespace {
8577class LanaiABIInfo : public DefaultABIInfo {
8578public:
8579 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8580
8581 bool shouldUseInReg(QualType Ty, CCState &State) const;
8582
8583 void computeInfo(CGFunctionInfo &FI) const override {
8584 CCState State(FI);
8585 // Lanai uses 4 registers to pass arguments unless the function has the
8586 // regparm attribute set.
8587 if (FI.getHasRegParm()) {
8588 State.FreeRegs = FI.getRegParm();
8589 } else {
8590 State.FreeRegs = 4;
8591 }
8592
8593 if (!getCXXABI().classifyReturnType(FI))
8594 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8595 for (auto &I : FI.arguments())
8596 I.info = classifyArgumentType(I.type, State);
8597 }
8598
8599 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
8600 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
8601};
8602} // end anonymous namespace
8603
8604bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
8605 unsigned Size = getContext().getTypeSize(Ty);
8606 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
8607
8608 if (SizeInRegs == 0)
8609 return false;
8610
8611 if (SizeInRegs > State.FreeRegs) {
8612 State.FreeRegs = 0;
8613 return false;
8614 }
8615
8616 State.FreeRegs -= SizeInRegs;
8617
8618 return true;
8619}
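// Illustration (sketch): with the default 4 free registers, two 'long long'
// scalar arguments consume all four (SizeInRegs == 2 each); a third finds
// SizeInRegs > FreeRegs, zeroes FreeRegs, and is passed without the inreg
// attribute.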
8620
8621ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
8622 CCState &State) const {
8623 if (!ByVal) {
8624 if (State.FreeRegs) {
8625 --State.FreeRegs; // Non-byval indirects just use one pointer.
8626 return getNaturalAlignIndirectInReg(Ty);
8627 }
8628 return getNaturalAlignIndirect(Ty, false);
8629 }
8630
8631 // Compute the byval alignment.
8632 const unsigned MinABIStackAlignInBytes = 4;
8633 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
8634 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
8635 /*Realign=*/TypeAlign >
8636 MinABIStackAlignInBytes);
8637}
8638
8639ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
8640 CCState &State) const {
8641 // Check with the C++ ABI first.
8642 const RecordType *RT = Ty->getAs<RecordType>();
8643 if (RT) {
8644 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
8645 if (RAA == CGCXXABI::RAA_Indirect) {
8646 return getIndirectResult(Ty, /*ByVal=*/false, State);
8647 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
8648 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8649 }
8650 }
8651
8652 if (isAggregateTypeForABI(Ty)) {
8653 // Structures with flexible arrays are always indirect.
8654 if (RT && RT->getDecl()->hasFlexibleArrayMember())
8655 return getIndirectResult(Ty, /*ByVal=*/true, State);
8656
8657 // Ignore empty structs/unions.
8658 if (isEmptyRecord(getContext(), Ty, true))
8659 return ABIArgInfo::getIgnore();
8660
8661 llvm::LLVMContext &LLVMContext = getVMContext();
8662 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
8663 if (SizeInRegs <= State.FreeRegs) {
8664 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8665 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
8666 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8667 State.FreeRegs -= SizeInRegs;
8668 return ABIArgInfo::getDirectInReg(Result);
8669 } else {
8670 State.FreeRegs = 0;
8671 }
8672 return getIndirectResult(Ty, true, State);
8673 }
8674
8675 // Treat an enum type as its underlying type.
8676 if (const auto *EnumTy = Ty->getAs<EnumType>())
8677 Ty = EnumTy->getDecl()->getIntegerType();
8678
8679 bool InReg = shouldUseInReg(Ty, State);
8680
8681 // Don't pass >64 bit integers in registers.
8682 if (const auto *EIT = Ty->getAs<ExtIntType>())
8683 if (EIT->getNumBits() > 64)
8684 return getIndirectResult(Ty, /*ByVal=*/true, State);
8685
8686 if (isPromotableIntegerTypeForABI(Ty)) {
8687 if (InReg)
8688 return ABIArgInfo::getDirectInReg();
8689 return ABIArgInfo::getExtend(Ty);
8690 }
8691 if (InReg)
8692 return ABIArgInfo::getDirectInReg();
8693 return ABIArgInfo::getDirect();
8694}
8695
8696namespace {
8697class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
8698public:
8699 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
8700 : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
8701};
8702}
8703
8704//===----------------------------------------------------------------------===//
8705// AMDGPU ABI Implementation
8706//===----------------------------------------------------------------------===//
8707
8708namespace {
8709
8710class AMDGPUABIInfo final : public DefaultABIInfo {
8711private:
8712 static const unsigned MaxNumRegsForArgsRet = 16;
8713
8714 unsigned numRegsForType(QualType Ty) const;
8715
8716 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
8717 bool isHomogeneousAggregateSmallEnough(const Type *Base,
8718 uint64_t Members) const override;
8719
8720 // Coerce HIP scalar pointer arguments from generic pointers to global ones.
8721 llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
8722 unsigned ToAS) const {
8723 // Single value types.
8724 if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
8725 return llvm::PointerType::get(
8726 cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
8727 return Ty;
8728 }
8729
8730public:
8731 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
8732 DefaultABIInfo(CGT) {}
8733
8734 ABIArgInfo classifyReturnType(QualType RetTy) const;
8735 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
8736 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
8737
8738 void computeInfo(CGFunctionInfo &FI) const override;
8739 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8740 QualType Ty) const override;
8741};
8742
8743bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
8744 return true;
8745}
8746
8747bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
8748 const Type *Base, uint64_t Members) const {
8749 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
8750
8751 // Homogeneous Aggregates may occupy at most 16 registers.
8752 return Members * NumRegs <= MaxNumRegsForArgsRet;
8753}
8754
8755/// Estimate number of registers the type will use when passed in registers.
8756unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
8757 unsigned NumRegs = 0;
8758
8759 if (const VectorType *VT = Ty->getAs<VectorType>()) {
8760 // Compute from the number of elements. The reported size is based on the
8761 // in-memory size, which includes the padding 4th element for 3-vectors.
8762 QualType EltTy = VT->getElementType();
8763 unsigned EltSize = getContext().getTypeSize(EltTy);
8764
8765 // 16-bit element vectors should be passed as packed.
8766 if (EltSize == 16)
8767 return (VT->getNumElements() + 1) / 2;
8768
8769 unsigned EltNumRegs = (EltSize + 31) / 32;
8770 return EltNumRegs * VT->getNumElements();
8771 }
8772
8773 if (const RecordType *RT = Ty->getAs<RecordType>()) {
8774 const RecordDecl *RD = RT->getDecl();
8775 assert(!RD->hasFlexibleArrayMember());
8776
8777 for (const FieldDecl *Field : RD->fields()) {
8778 QualType FieldTy = Field->getType();
8779 NumRegs += numRegsForType(FieldTy);
8780 }
8781
8782 return NumRegs;
8783 }
8784
8785 return (getContext().getTypeSize(Ty) + 31) / 32;
8786}
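// Illustration (sketch): a <4 x half> vector packs two 16-bit elements per
// register, so it takes (4 + 1) / 2 == 2 registers; a <3 x float> takes 3
// (one 32-bit register per element, ignoring the in-memory padding element).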
8787
8788void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
8789 llvm::CallingConv::ID CC = FI.getCallingConvention();
8790
8791 if (!getCXXABI().classifyReturnType(FI))
8792 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8793
8794 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
8795 for (auto &Arg : FI.arguments()) {
8796 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
8797 Arg.info = classifyKernelArgumentType(Arg.type);
8798 } else {
8799 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
8800 }
8801 }
8802}
8803
8804Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8805 QualType Ty) const {
8806 llvm_unreachable("AMDGPU does not support varargs");
8807}
8808
8809ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
8810 if (isAggregateTypeForABI(RetTy)) {
8811 // Records with non-trivial destructors/copy-constructors should not be
8812 // returned by value.
8813 if (!getRecordArgABI(RetTy, getCXXABI())) {
8814 // Ignore empty structs/unions.
8815 if (isEmptyRecord(getContext(), RetTy, true))
8816 return ABIArgInfo::getIgnore();
8817
8818 // Lower single-element structs to just return a regular value.
8819 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
8820 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
8821
8822 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
8823 const RecordDecl *RD = RT->getDecl();
8824 if (RD->hasFlexibleArrayMember())
8825 return DefaultABIInfo::classifyReturnType(RetTy);
8826 }
8827
8828 // Pack aggregates <= 8 bytes into a single VGPR or pair.
8829 uint64_t Size = getContext().getTypeSize(RetTy);
8830 if (Size <= 16)
8831 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
8832
8833 if (Size <= 32)
8834 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
8835
8836 if (Size <= 64) {
8837 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
8838 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
8839 }
8840
8841 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
8842 return ABIArgInfo::getDirect();
8843 }
8844 }
8845
8846 // Otherwise just do the default thing.
8847 return DefaultABIInfo::classifyReturnType(RetTy);
8848}
8849
8850/// For kernels all parameters are really passed in a special buffer. It doesn't
8851/// make sense to pass anything byval, so everything must be direct.
8852ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
8853 Ty = useFirstFieldIfTransparentUnion(Ty);
8854
8855 // TODO: Can we omit empty structs?
8856
8857 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
8858 Ty = QualType(SeltTy, 0);
8859
8860 llvm::Type *OrigLTy = CGT.ConvertType(Ty);
8861 llvm::Type *LTy = OrigLTy;
8862 if (getContext().getLangOpts().HIP) {
8863 LTy = coerceKernelArgumentType(
8864 OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
8865 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
8866 }
8867
8868 // FIXME: Should also use this for OpenCL, but it requires addressing the
8869 // problem of kernels being called.
8870 //
8871 // FIXME: This doesn't apply the optimization of coercing pointers in structs
8872 // to global address space when using byref. This would require implementing a
8873 // new kind of coercion of the in-memory type for indirect arguments.
8874 if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
8875 isAggregateTypeForABI(Ty)) {
8876 return ABIArgInfo::getIndirectAliased(
8877 getContext().getTypeAlignInChars(Ty),
8878 getContext().getTargetAddressSpace(LangAS::opencl_constant),
8879 false /*Realign*/, nullptr /*Padding*/);
8880 }
8881
8882 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
8883 // individual elements, which confuses the Clover OpenCL backend; therefore we
8884 // have to set it to false here. Other args of getDirect() are just defaults.
8885 return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
8886}
8887
8888ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
8889 unsigned &NumRegsLeft) const {
8890 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
8891
8892 Ty = useFirstFieldIfTransparentUnion(Ty);
8893
8894 if (isAggregateTypeForABI(Ty)) {
8895 // Records with non-trivial destructors/copy-constructors should not be
8896 // passed by value.
8897 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
8898 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8899
8900 // Ignore empty structs/unions.
8901 if (isEmptyRecord(getContext(), Ty, true))
8902 return ABIArgInfo::getIgnore();
8903
8904 // Lower single-element structs to just pass a regular value. TODO: We
8905 // could do reasonable-size multiple-element structs too, using getExpand(),
8906 // though watch out for things like bitfields.
8907 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
8908 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
8909
8910 if (const RecordType *RT = Ty->getAs<RecordType>()) {
8911 const RecordDecl *RD = RT->getDecl();
8912 if (RD->hasFlexibleArrayMember())
8913 return DefaultABIInfo::classifyArgumentType(Ty);
8914 }
8915
8916 // Pack aggregates <= 8 bytes into single VGPR or pair.
8917 uint64_t Size = getContext().getTypeSize(Ty);
8918 if (Size <= 64) {
8919 unsigned NumRegs = (Size + 31) / 32;
8920 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
8921
8922 if (Size <= 16)
8923 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
8924
8925 if (Size <= 32)
8926 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
8927
8928 // XXX: Should this be i64 instead, and should the limit increase?
8929 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
8930 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
8931 }
8932
8933 if (NumRegsLeft > 0) {
8934 unsigned NumRegs = numRegsForType(Ty);
8935 if (NumRegsLeft >= NumRegs) {
8936 NumRegsLeft -= NumRegs;
8937 return ABIArgInfo::getDirect();
8938 }
8939 }
8940 }
8941
8942 // Otherwise just do the default thing.
8943 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
8944 if (!ArgInfo.isIndirect()) {
8945 unsigned NumRegs = numRegsForType(Ty);
8946 NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
8947 }
8948
8949 return ArgInfo;
8950}
8951
8952class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
8953public:
8954 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
8955 : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
8956 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8957 CodeGen::CodeGenModule &M) const override;
8958 unsigned getOpenCLKernelCallingConv() const override;
8959
8960 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
8961 llvm::PointerType *T, QualType QT) const override;
8962
8963 LangAS getASTAllocaAddressSpace() const override {
8964 return getLangASFromTargetAS(
8965 getABIInfo().getDataLayout().getAllocaAddrSpace());
8966 }
8967 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
8968 const VarDecl *D) const override;
8969 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
8970 SyncScope Scope,
8971 llvm::AtomicOrdering Ordering,
8972 llvm::LLVMContext &Ctx) const override;
8973 llvm::Function *
8974 createEnqueuedBlockKernel(CodeGenFunction &CGF,
8975 llvm::Function *BlockInvokeFunc,
8976 llvm::Value *BlockLiteral) const override;
8977 bool shouldEmitStaticExternCAliases() const override;
8978 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
8979};
8980}
8981
8982static bool requiresAMDGPUProtectedVisibility(const Decl *D,
8983 llvm::GlobalValue *GV) {
8984 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
8985 return false;
8986
8987 return D->hasAttr<OpenCLKernelAttr>() ||
8988 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
8989 (isa<VarDecl>(D) &&
8990 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
8991 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
8992 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
8993}
8994
8995void AMDGPUTargetCodeGenInfo::setTargetAttributes(
8996 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8997 if (requiresAMDGPUProtectedVisibility(D, GV)) {
8998 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
8999 GV->setDSOLocal(true);
9000 }
9001
9002 if (GV->isDeclaration())
9003 return;
9004 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
9005 if (!FD)
9006 return;
9007
9008 llvm::Function *F = cast<llvm::Function>(GV);
9009
9010 const auto *ReqdWGS = M.getLangOpts().OpenCL ?
9011 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
9012
9013
9014 const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
9015 FD->hasAttr<OpenCLKernelAttr>();
9016 const bool IsHIPKernel = M.getLangOpts().HIP &&
9017 FD->hasAttr<CUDAGlobalAttr>();
9018 if ((IsOpenCLKernel || IsHIPKernel) &&
9019 (M.getTriple().getOS() == llvm::Triple::AMDHSA))
9020 F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
9021
9022 if (IsHIPKernel)
9023 F->addFnAttr("uniform-work-group-size", "true");
9024
9025
9026 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
9027 if (ReqdWGS || FlatWGS) {
9028 unsigned Min = 0;
9029 unsigned Max = 0;
9030 if (FlatWGS) {
9031 Min = FlatWGS->getMin()
9032 ->EvaluateKnownConstInt(M.getContext())
9033 .getExtValue();
9034 Max = FlatWGS->getMax()
9035 ->EvaluateKnownConstInt(M.getContext())
9036 .getExtValue();
9037 }
9038 if (ReqdWGS && Min == 0 && Max == 0)
9039 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
9040
9041 if (Min != 0) {
9042 assert(Min <= Max && "Min must be less than or equal Max");
9043
9044 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
9045 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
9046 } else
9047 assert(Max == 0 && "Max must be zero");
9048 } else if (IsOpenCLKernel || IsHIPKernel) {
9049 // By default, restrict the maximum size to a value specified by
9050 // --gpu-max-threads-per-block=n or its default value.
9051 std::string AttrVal =
9052 std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
9053 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
9054 }
9055
9056 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
9057 unsigned Min =
9058 Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
9059 unsigned Max = Attr->getMax() ? Attr->getMax()
9060 ->EvaluateKnownConstInt(M.getContext())
9061 .getExtValue()
9062 : 0;
9063
9064 if (Min != 0) {
9065 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
9066
9067 std::string AttrVal = llvm::utostr(Min);
9068 if (Max != 0)
9069 AttrVal = AttrVal + "," + llvm::utostr(Max);
9070 F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
9071 } else
9072 assert(Max == 0 && "Max must be zero");
9073 }
9074
9075 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
9076 unsigned NumSGPR = Attr->getNumSGPR();
9077
9078 if (NumSGPR != 0)
9079 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
9080 }
9081
9082 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
9083 uint32_t NumVGPR = Attr->getNumVGPR();
9084
9085 if (NumVGPR != 0)
9086 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
9087 }
9088
9089 if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
9090 F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
9091}
9092
9093unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
9094 return llvm::CallingConv::AMDGPU_KERNEL;
9095}
9096
9097// Currently LLVM assumes null pointers always have value 0,
9098// which results in incorrectly transformed IR. Therefore, instead of
9099// emitting null pointers in private and local address spaces, a null
9100// pointer in the generic address space is emitted and then cast to a
9101// pointer in the local or private address space.
9102llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
9103 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
9104 QualType QT) const {
9105 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
9106 return llvm::ConstantPointerNull::get(PT);
9107
9108 auto &Ctx = CGM.getContext();
9109 auto NPT = llvm::PointerType::get(PT->getElementType(),
9110 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
9111 return llvm::ConstantExpr::getAddrSpaceCast(
9112 llvm::ConstantPointerNull::get(NPT), PT);
9113}
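// Illustration (sketch): for a private-address-space pointer whose target
// null value is non-zero, the code above emits roughly
//   addrspacecast (i8* null to i8 addrspace(5)*)
// i.e. a generic null pointer cast into the private address space (assuming
// AMDGPU's usual numbering, where addrspace(5) is private).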
9114
9115LangAS
9116AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
9117 const VarDecl *D) const {
9118 assert(!CGM.getLangOpts().OpenCL &&
9119 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
9120 "Address space agnostic languages only");
9121 LangAS DefaultGlobalAS = getLangASFromTargetAS(
9122 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
9123 if (!D)
9124 return DefaultGlobalAS;
9125
9126 LangAS AddrSpace = D->getType().getAddressSpace();
9127 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
9128 if (AddrSpace != LangAS::Default)
9129 return AddrSpace;
9130
9131 if (CGM.isTypeConstant(D->getType(), false)) {
9132 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
9133 return ConstAS.getValue();
9134 }
9135 return DefaultGlobalAS;
9136}
9137
9138llvm::SyncScope::ID
9139AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
9140 SyncScope Scope,
9141 llvm::AtomicOrdering Ordering,
9142 llvm::LLVMContext &Ctx) const {
9143 std::string Name;
9144 switch (Scope) {
9145 case SyncScope::OpenCLWorkGroup:
9146 Name = "workgroup";
9147 break;
9148 case SyncScope::OpenCLDevice:
9149 Name = "agent";
9150 break;
9151 case SyncScope::OpenCLAllSVMDevices:
9152 Name = "";
9153 break;
9154 case SyncScope::OpenCLSubGroup:
9155 Name = "wavefront";
9156 }
9157
9158 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
9159 if (!Name.empty())
9160 Name = Twine(Twine(Name) + Twine("-")).str();
9161
9162 Name = Twine(Twine(Name) + Twine("one-as")).str();
9163 }
9164
9165 return Ctx.getOrInsertSyncScopeID(Name);
9166}
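// Illustration (sketch): an OpenCL atomic at memory_scope_device maps to the
// sync scope "agent" when sequentially consistent, and to "agent-one-as" for
// any weaker ordering.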
9167
9168bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
9169 return false;
9170}
9171
9172void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
9173 const FunctionType *&FT) const {
9174 FT = getABIInfo().getContext().adjustFunctionType(
9175 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
9176}
9177
9178//===----------------------------------------------------------------------===//
9179// SPARC v8 ABI Implementation.
9180// Based on the SPARC Compliance Definition version 2.4.1.
9181//
9182// Ensures that complex values are passed in registers.
9183//
9184namespace {
9185class SparcV8ABIInfo : public DefaultABIInfo {
9186public:
9187 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
9188
9189private:
9190 ABIArgInfo classifyReturnType(QualType RetTy) const;
9191 void computeInfo(CGFunctionInfo &FI) const override;
9192};
9193} // end anonymous namespace
9194
9195
9196ABIArgInfo
9197SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
9198 if (Ty->isAnyComplexType()) {
9199 return ABIArgInfo::getDirect();
9200 }
9201 else {
9202 return DefaultABIInfo::classifyReturnType(Ty);
9203 }
9204}
9205
9206void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
9207
9208 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
9209 for (auto &Arg : FI.arguments())
9210 Arg.info = classifyArgumentType(Arg.type);
9211}
9212
9213namespace {
9214class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
9215public:
9216 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
9217 : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
9218};
9219} // end anonymous namespace
9220
9221//===----------------------------------------------------------------------===//
9222// SPARC v9 ABI Implementation.
9223// Based on the SPARC Compliance Definition version 2.4.1.
9224//
9225// Function arguments are mapped to a nominal "parameter array" and promoted
9226// to registers depending on their type. Each argument occupies 8 or 16 bytes
9227// in the array; structs larger than 16 bytes are passed indirectly.
9228//
9229// One case requires special care:
9230//
9231// struct mixed {
9232// int i;
9233// float f;
9234// };
9235//
9236// When a struct mixed is passed by value, it only occupies 8 bytes in the
9237// parameter array, but the int is passed in an integer register, and the float
9238// is passed in a floating point register. This is represented as two arguments
9239// with the LLVM IR inreg attribute:
9240//
9241// declare void @f(i32 inreg %i, float inreg %f)
9242//
9243// The code generator will only allocate 4 bytes from the parameter array for
9244// the inreg arguments. All other arguments are allocated a multiple of 8
9245// bytes.
9246//
9247namespace {
9248class SparcV9ABIInfo : public ABIInfo {
9249public:
9250 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
9251
9252private:
9253 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
9254 void computeInfo(CGFunctionInfo &FI) const override;
9255 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9256 QualType Ty) const override;
9257
9258 // Coercion type builder for structs passed in registers. The coercion type
9259 // serves two purposes:
9260 //
9261 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
9262 // in registers.
9263 // 2. Expose aligned floating point elements as first-level elements, so the
9264 // code generator knows to pass them in floating point registers.
9265 //
9266 // We also compute the InReg flag which indicates that the struct contains
9267 // aligned 32-bit floats.
9268 //
9269 struct CoerceBuilder {
9270 llvm::LLVMContext &Context;
9271 const llvm::DataLayout &DL;
9272 SmallVector<llvm::Type*, 8> Elems;
9273 uint64_t Size;
9274 bool InReg;
9275
9276 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
9277 : Context(c), DL(dl), Size(0), InReg(false) {}
9278
9279 // Pad Elems with integers until Size is ToSize.
9280 void pad(uint64_t ToSize) {
9281 assert(ToSize >= Size && "Cannot remove elements");
9282 if (ToSize == Size)
9283 return;
9284
9285 // Finish the current 64-bit word.
9286 uint64_t Aligned = llvm::alignTo(Size, 64);
9287 if (Aligned > Size && Aligned <= ToSize) {
9288 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
9289 Size = Aligned;
9290 }
9291
9292 // Add whole 64-bit words.
9293 while (Size + 64 <= ToSize) {
9294 Elems.push_back(llvm::Type::getInt64Ty(Context));
9295 Size += 64;
9296 }
9297
9298 // Final in-word padding.
9299 if (Size < ToSize) {
9300 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
9301 Size = ToSize;
9302 }
9303 }
9304
9305 // Add a floating point element at Offset.
9306 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
9307 // Unaligned floats are treated as integers.
9308 if (Offset % Bits)
9309 return;
9310 // The InReg flag is only required if there are any floats < 64 bits.
9311 if (Bits < 64)
9312 InReg = true;
9313 pad(Offset);
9314 Elems.push_back(Ty);
9315 Size = Offset + Bits;
9316 }
9317
9318 // Add a struct type to the coercion type, starting at Offset (in bits).
9319 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
9320 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
9321 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
9322 llvm::Type *ElemTy = StrTy->getElementType(i);
9323 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
9324 switch (ElemTy->getTypeID()) {
9325 case llvm::Type::StructTyID:
9326 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
9327 break;
9328 case llvm::Type::FloatTyID:
9329 addFloat(ElemOffset, ElemTy, 32);
9330 break;
9331 case llvm::Type::DoubleTyID:
9332 addFloat(ElemOffset, ElemTy, 64);
9333 break;
9334 case llvm::Type::FP128TyID:
9335 addFloat(ElemOffset, ElemTy, 128);
9336 break;
9337 case llvm::Type::PointerTyID:
9338 if (ElemOffset % 64 == 0) {
9339 pad(ElemOffset);
9340 Elems.push_back(ElemTy);
9341 Size += 64;
9342 }
9343 break;
9344 default:
9345 break;
9346 }
9347 }
9348 }
9349
9350 // Check if Ty is a usable substitute for the coercion type.
9351 bool isUsableType(llvm::StructType *Ty) const {
9352 return llvm::makeArrayRef(Elems) == Ty->elements();
9353 }
9354
9355 // Get the coercion type as a literal struct type.
9356 llvm::Type *getType() const {
9357 if (Elems.size() == 1)
9358 return Elems.front();
9359 else
9360 return llvm::StructType::get(Context, Elems);
9361 }
9362 };
9363};
9364} // end anonymous namespace
9365
9366ABIArgInfo
9367SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
9368 if (Ty->isVoidType())
9369 return ABIArgInfo::getIgnore();
9370
9371 uint64_t Size = getContext().getTypeSize(Ty);
9372
9373 // Anything too big to fit in registers is passed with an explicit indirect
9374 // pointer / sret pointer.
9375 if (Size > SizeLimit)
9376 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
9377
9378 // Treat an enum type as its underlying type.
9379 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
9380 Ty = EnumTy->getDecl()->getIntegerType();
9381
9382 // Integer types smaller than a register are extended.
9383 if (Size < 64 && Ty->isIntegerType())
9384 return ABIArgInfo::getExtend(Ty);
9385
9386 if (const auto *EIT = Ty->getAs<ExtIntType>())
9387 if (EIT->getNumBits() < 64)
9388 return ABIArgInfo::getExtend(Ty);
9389
9390 // Other non-aggregates go in registers.
9391 if (!isAggregateTypeForABI(Ty))
9392 return ABIArgInfo::getDirect();
9393
9394 // If a C++ object has either a non-trivial copy constructor or a non-trivial
9395 // destructor, it is passed with an explicit indirect pointer / sret pointer.
9396 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
9397 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
9398
9399 // This is a small aggregate type that should be passed in registers.
9400 // Build a coercion type from the LLVM struct type.
9401 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
9402 if (!StrTy)
9403 return ABIArgInfo::getDirect();
9404
9405 CoerceBuilder CB(getVMContext(), getDataLayout());
9406 CB.addStruct(0, StrTy);
9407 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
9408
9409 // Try to use the original type for coercion.
9410 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
9411
9412 if (CB.InReg)
9413 return ABIArgInfo::getDirectInReg(CoerceTy);
9414 else
9415 return ABIArgInfo::getDirect(CoerceTy);
9416}
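// A worked example (sketch), continuing 'struct mixed' from the comment
// above: CGT converts it to { i32, float }. CoerceBuilder skips the integer
// field in addStruct, re-creates it as 32 bits of padding when addFloat
// calls pad(32), then appends the float and sets InReg (a float < 64 bits).
// Since { i32, float } matches the original struct type, the result is
// ABIArgInfo::getDirectInReg({ i32, float }).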
9417
9418Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9419 QualType Ty) const {
9420 ABIArgInfo AI = classifyType(Ty, 16 * 8);
9421 llvm::Type *ArgTy = CGT.ConvertType(Ty);
9422 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
9423 AI.setCoerceToType(ArgTy);
9424
9425 CharUnits SlotSize = CharUnits::fromQuantity(8);
9426
9427 CGBuilderTy &Builder = CGF.Builder;
9428 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
9429 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
9430
9431 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
9432
9433 Address ArgAddr = Address::invalid();
9434 CharUnits Stride;
9435 switch (AI.getKind()) {
9436 case ABIArgInfo::Expand:
9437 case ABIArgInfo::CoerceAndExpand:
9438 case ABIArgInfo::InAlloca:
9439 llvm_unreachable("Unsupported ABI kind for va_arg");
9440
9441 case ABIArgInfo::Extend: {
9442 Stride = SlotSize;
9443 CharUnits Offset = SlotSize - TypeInfo.Width;
9444 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
9445 break;
9446 }
9447
9448 case ABIArgInfo::Direct: {
9449 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
9450 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
9451 ArgAddr = Addr;
9452 break;
9453 }
9454
9455 case ABIArgInfo::Indirect:
9456 case ABIArgInfo::IndirectAliased:
9457 Stride = SlotSize;
9458 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
9459 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
9460 TypeInfo.Align);
9461 break;
9462
9463 case ABIArgInfo::Ignore:
9464 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
9465 }
9466
9467 // Update VAList.
9468 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
9469 Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
9470
9471 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
9472}
9473
9474void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
9475 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
9476 for (auto &I : FI.arguments())
9477 I.info = classifyType(I.type, 16 * 8);
9478}
9479
9480namespace {
9481class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
9482public:
9483 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
9484 : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
9485
9486 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
9487 return 14;
9488 }
9489
9490 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
9491 llvm::Value *Address) const override;
9492};
9493} // end anonymous namespace
9494
9495bool
9496SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
9497 llvm::Value *Address) const {
9498 // This is calculated from the LLVM and GCC tables and verified
9499 // against gcc output. AFAIK all ABIs use the same encoding.
9500
9501 CodeGen::CGBuilderTy &Builder = CGF.Builder;
9502
9503 llvm::IntegerType *i8 = CGF.Int8Ty;
9504 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
9505 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
9506
9507 // 0-31: the 8-byte general-purpose registers
9508 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
9509
9510 // 32-63: f0-31, the 4-byte floating-point registers
9511 AssignToArrayRange(Builder, Address, Four8, 32, 63);
9512
9513 // Y = 64
9514 // PSR = 65
9515 // WIM = 66
9516 // TBR = 67
9517 // PC = 68
9518 // NPC = 69
9519 // FSR = 70
9520 // CSR = 71
9521 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
9522
9523 // 72-87: d0-15, the 8-byte floating-point registers
9524 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
9525
9526 return false;
9527}
9528
9529// ARC ABI implementation.
9530namespace {
9531
9532class ARCABIInfo : public DefaultABIInfo {
9533public:
9534 using DefaultABIInfo::DefaultABIInfo;
9535
9536private:
9537 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9538 QualType Ty) const override;
9539
9540 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
9541 if (!State.FreeRegs)
9542 return;
9543 if (Info.isIndirect() && Info.getInReg())
9544 State.FreeRegs--;
9545 else if (Info.isDirect() && Info.getInReg()) {
9546 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
9547 if (sz < State.FreeRegs)
9548 State.FreeRegs -= sz;
9549 else
9550 State.FreeRegs = 0;
9551 }
9552 }
9553
9554 void computeInfo(CGFunctionInfo &FI) const override {
9555 CCState State(FI);
9556 // ARC uses 8 registers to pass arguments.
9557 State.FreeRegs = 8;
9558
9559 if (!getCXXABI().classifyReturnType(FI))
9560 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
9561 updateState(FI.getReturnInfo(), FI.getReturnType(), State);
9562 for (auto &I : FI.arguments()) {
9563 I.info = classifyArgumentType(I.type, State.FreeRegs);
9564 updateState(I.info, I.type, State);
9565 }
9566 }
9567
9568 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
9569 ABIArgInfo getIndirectByValue(QualType Ty) const;
9570 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
9571 ABIArgInfo classifyReturnType(QualType RetTy) const;
9572};
9573
9574class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
9575public:
9576 ARCTargetCodeGenInfo(CodeGenTypes &CGT)
9577 : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
9578};
9579
9580
9581ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
9582 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
9583 getNaturalAlignIndirect(Ty, false);
9584}
9585
9586ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
9587 // Compute the byval alignment.
9588 const unsigned MinABIStackAlignInBytes = 4;
9589 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
9590 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
9591 TypeAlign > MinABIStackAlignInBytes);
9592}
9593
9594Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9595 QualType Ty) const {
9596 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
9597 getContext().getTypeInfoInChars(Ty),
9598 CharUnits::fromQuantity(4), true);
9599}
9600
9601ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
9602 uint8_t FreeRegs) const {
9603 // Handle the generic C++ ABI.
9604 const RecordType *RT = Ty->getAs<RecordType>();
9605 if (RT) {
9606 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
9607 if (RAA == CGCXXABI::RAA_Indirect)
9608 return getIndirectByRef(Ty, FreeRegs > 0);
9609
9610 if (RAA == CGCXXABI::RAA_DirectInMemory)
9611 return getIndirectByValue(Ty);
9612 }
9613
9614 // Treat an enum type as its underlying type.
9615 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
9616 Ty = EnumTy->getDecl()->getIntegerType();
9617
9618 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
9619
9620 if (isAggregateTypeForABI(Ty)) {
9621 // Structures with flexible arrays are always indirect.
9622 if (RT && RT->getDecl()->hasFlexibleArrayMember())
9623 return getIndirectByValue(Ty);
9624
9625 // Ignore empty structs/unions.
9626 if (isEmptyRecord(getContext(), Ty, true))
9627 return ABIArgInfo::getIgnore();
9628
9629 llvm::LLVMContext &LLVMContext = getVMContext();
9630
9631 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
9632 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
9633 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
9634
9635 return FreeRegs >= SizeInRegs ?
9636 ABIArgInfo::getDirectInReg(Result) :
9637 ABIArgInfo::getDirect(Result, 0, nullptr, false);
9638 }
9639
9640 if (const auto *EIT = Ty->getAs<ExtIntType>())
9641 if (EIT->getNumBits() > 64)
9642 return getIndirectByValue(Ty);
9643
9644 return isPromotableIntegerTypeForABI(Ty)
9645 ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
9646 : ABIArgInfo::getExtend(Ty))
9647 : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
9648 : ABIArgInfo::getDirect());
9649}
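// Illustration (sketch): while free registers remain, a 'short' argument is
// extended in a register (getExtendInReg); once the 8 ARC argument registers
// are exhausted it becomes a plain getExtend on the stack.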
9650
9651ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
9652 if (RetTy->isAnyComplexType())
9653 return ABIArgInfo::getDirectInReg();
9654
9655 // Return values larger than 4 registers are returned indirectly.
9656 auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
9657 if (RetSize > 4)
9658 return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
9659
9660 return DefaultABIInfo::classifyReturnType(RetTy);
9661}
9662
9663} // End anonymous namespace.
9664
9665//===----------------------------------------------------------------------===//
9666// XCore ABI Implementation
9667//===----------------------------------------------------------------------===//
9668
9669namespace {
9670
9671/// A SmallStringEnc instance is used to build up the TypeString by passing
9672/// it by reference between functions that append to it.
9673typedef llvm::SmallString<128> SmallStringEnc;
9674
9675/// TypeStringCache caches the meta encodings of Types.
9676///
9677/// The reason for caching TypeStrings is two fold:
9678/// 1. To cache a type's encoding for later uses;
9679/// 2. As a means to break recursive member type inclusion.
9680///
9681/// A cache Entry can have a Status of:
9682/// NonRecursive: The type encoding is not recursive;
9683/// Recursive: The type encoding is recursive;
9684/// Incomplete: An incomplete TypeString;
9685/// IncompleteUsed: An incomplete TypeString that has been used in a
9686/// Recursive type encoding.
9687///
9688/// A NonRecursive entry will have all of its sub-members expanded as fully
9689/// as possible. Whilst it may contain types which are recursive, the type
9690/// itself is not recursive and thus its encoding may be safely used whenever
9691/// the type is encountered.
9692///
9693/// A Recursive entry will have all of its sub-members expanded as fully as
9694/// possible. The type itself is recursive and it may contain other types which
9695/// are recursive. The Recursive encoding must not be used during the expansion
9696/// of a recursive type's recursive branch. For simplicity the code uses
9697/// IncompleteCount to reject all usage of Recursive encodings for member types.
9698///
9699/// An Incomplete entry is always a RecordType and only encodes its
9700/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
9701/// are placed into the cache during type expansion as a means to identify and
9702/// handle recursive inclusion of types as sub-members. If there is recursion
9703/// the entry becomes IncompleteUsed.
9704///
9705/// During the expansion of a RecordType's members:
9706///
9707/// If the cache contains a NonRecursive encoding for the member type, the
9708/// cached encoding is used;
9709///
9710/// If the cache contains a Recursive encoding for the member type, the
9711/// cached encoding is 'Swapped' out, as it may be incorrect, and...
9712///
9713/// If the member is a RecordType, an Incomplete encoding is placed into the
9714/// cache to break potential recursive inclusion of itself as a sub-member;
9715///
9716/// Once a member RecordType has been expanded, its temporary incomplete
9717/// entry is removed from the cache. If a Recursive encoding was swapped out
9718/// it is swapped back in;
9719///
9720/// If an incomplete entry is used to expand a sub-member, the incomplete
9721/// entry is marked as IncompleteUsed. The cache keeps count of how many
9722/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
9723///
9724 /// If a member's encoding is found to be NonRecursive or Recursive (viz:
9725 /// IncompleteUsedCount==0), the member's encoding is added to the cache.
9726/// Else the member is part of a recursive type and thus the recursion has
9727/// been exited too soon for the encoding to be correct for the member.
9728///
9729class TypeStringCache {
9730 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
9731 struct Entry {
9732 std::string Str; // The encoded TypeString for the type.
9733 enum Status State; // Information about the encoding in 'Str'.
9734 std::string Swapped; // A temporary place holder for a Recursive encoding
9735 // during the expansion of RecordType's members.
9736 };
9737 std::map<const IdentifierInfo *, struct Entry> Map;
9738 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
9739 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
9740public:
9741 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
9742 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
9743 bool removeIncomplete(const IdentifierInfo *ID);
9744 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
9745 bool IsRecursive);
9746 StringRef lookupStr(const IdentifierInfo *ID);
9747};
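
For illustration, a minimal sketch (not part of TargetInfo.cpp) of the call
sequence that appendRecordType() below follows when expanding a RecordType
member; 'TSC', 'ID' and 'FullEnc' stand in for the real locals:

  // Reuse a cached encoding when one is valid in the current context.
  StringRef Cached = TSC.lookupStr(ID);
  if (Cached.empty()) {
    // Park an incomplete stub such as "s(S){}" to break recursive
    // inclusion of the record inside its own expansion.
    TSC.addIncomplete(ID, "s(S){}");
    // ... expand the record's members, appending to FullEnc ...
    bool IsRecursive = TSC.removeIncomplete(ID);
    // Cache the finished encoding; addIfComplete() itself refuses to add
    // while an IncompleteUsed entry is outstanding.
    TSC.addIfComplete(ID, FullEnc, IsRecursive);
  }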
9748
9749 /// TypeString encodings for enum & union fields must be ordered.
9750/// FieldEncoding is a helper for this ordering process.
9751class FieldEncoding {
9752 bool HasName;
9753 std::string Enc;
9754public:
9755 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
9756 StringRef str() { return Enc; }
9757 bool operator<(const FieldEncoding &rhs) const {
9758 if (HasName != rhs.HasName) return HasName;
9759 return Enc < rhs.Enc;
9760 }
9761};
9762
9763class XCoreABIInfo : public DefaultABIInfo {
9764public:
9765 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
9766 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9767 QualType Ty) const override;
9768};
9769
9770class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
9771 mutable TypeStringCache TSC;
9772 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
9773 const CodeGen::CodeGenModule &M) const;
9774
9775public:
9776 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
9777 : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
9778 void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
9779 const llvm::MapVector<GlobalDecl, StringRef>
9780 &MangledDeclNames) const override;
9781};
9782
9783} // End anonymous namespace.
9784
9785// TODO: this implementation is likely now redundant with the default
9786// EmitVAArg.
9787Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9788 QualType Ty) const {
9789 CGBuilderTy &Builder = CGF.Builder;
9790
9791 // Get the VAList.
9792 CharUnits SlotSize = CharUnits::fromQuantity(4);
9793 Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
9794
9795 // Handle the argument.
9796 ABIArgInfo AI = classifyArgumentType(Ty);
9797 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
9798 llvm::Type *ArgTy = CGT.ConvertType(Ty);
9799 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
9800 AI.setCoerceToType(ArgTy);
9801 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
9802
9803 Address Val = Address::invalid();
9804 CharUnits ArgSize = CharUnits::Zero();
9805 switch (AI.getKind()) {
9806 case ABIArgInfo::Expand:
9807 case ABIArgInfo::CoerceAndExpand:
9808 case ABIArgInfo::InAlloca:
9809 llvm_unreachable("Unsupported ABI kind for va_arg");
9810 case ABIArgInfo::Ignore:
9811 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
9812 ArgSize = CharUnits::Zero();
9813 break;
9814 case ABIArgInfo::Extend:
9815 case ABIArgInfo::Direct:
9816 Val = Builder.CreateBitCast(AP, ArgPtrTy);
9817 ArgSize = CharUnits::fromQuantity(
9818 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
9819 ArgSize = ArgSize.alignTo(SlotSize);
9820 break;
9821 case ABIArgInfo::Indirect:
9822 case ABIArgInfo::IndirectAliased:
9823 Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
9824 Val = Address(Builder.CreateLoad(Val), TypeAlign);
9825 ArgSize = SlotSize;
9826 break;
9827 }
9828
9829 // Increment the VAList.
9830 if (!ArgSize.isZero()) {
9831 Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
9832 Builder.CreateStore(APN.getPointer(), VAListAddr);
9833 }
9834
9835 return Val;
9836}
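
A worked example of the slot arithmetic above, assuming the XCore 4-byte
slots (illustrative, not analyzer output):

  // va_arg(ap, int):        Direct;   ArgSize = alignTo(4, 4) = 4,
  //                         so AP advances one slot.
  // va_arg(ap, long long):  Direct;   ArgSize = alignTo(8, 4) = 8,
  //                         so AP advances two slots.
  // va_arg(ap, BigStruct):  Indirect; the slot holds a pointer to the
  //                         value, so AP advances by SlotSize (4).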
9837
9838/// During the expansion of a RecordType, an incomplete TypeString is placed
9839/// into the cache as a means to identify and break recursion.
9840/// If there is a Recursive encoding in the cache, it is swapped out and will
9841/// be reinserted by removeIncomplete().
9842/// All other types of encoding should have been used rather than arriving here.
9843void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
9844 std::string StubEnc) {
9845 if (!ID)
9846 return;
9847 Entry &E = Map[ID];
9848 assert( (E.Str.empty() || E.State == Recursive) &&
9849         "Incorrectly use of addIncomplete");
9850 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
9851 E.Swapped.swap(E.Str); // swap out the Recursive
9852 E.Str.swap(StubEnc);
9853 E.State = Incomplete;
9854 ++IncompleteCount;
9855}
9856
9857/// Once the RecordType has been expanded, the temporary incomplete TypeString
9858/// must be removed from the cache.
9859/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
9860/// Returns true if the RecordType was defined recursively.
9861bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
9862 if (!ID)
9863 return false;
9864 auto I = Map.find(ID);
9865 assert(I != Map.end() && "Entry not present");
9866 Entry &E = I->second;
9867 assert( (E.State == Incomplete ||
9868          E.State == IncompleteUsed) &&
9869         "Entry must be an incomplete type");
9870 bool IsRecursive = false;
9871 if (E.State == IncompleteUsed) {
9872 // We made use of our Incomplete encoding, thus we are recursive.
9873 IsRecursive = true;
9874 --IncompleteUsedCount;
9875 }
9876 if (E.Swapped.empty())
9877 Map.erase(I);
9878 else {
9879 // Swap the Recursive back.
9880 E.Swapped.swap(E.Str);
9881 E.Swapped.clear();
9882 E.State = Recursive;
9883 }
9884 --IncompleteCount;
9885 return IsRecursive;
9886}
9887
9888/// Add the encoded TypeString to the cache only if it is NonRecursive or
9889/// Recursive (viz: all sub-members were expanded as fully as possible).
9890void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
9891 bool IsRecursive) {
9892 if (!ID || IncompleteUsedCount)
9893 return; // No key, or it is an incomplete sub-type, so don't add.
9894 Entry &E = Map[ID];
9895 if (IsRecursive && !E.Str.empty()) {
9896 assert(E.State==Recursive && E.Str.size() == Str.size() &&
9897        "This is not the same Recursive entry");
9898 // The parent container was not recursive after all, so we could have used
9899 // this Recursive sub-member entry after all, but we assumed the worst when
9900 // we started (viz: IncompleteCount != 0).
9901 return;
9902 }
9903 assert(E.Str.empty() && "Entry already present");
9904 E.Str = Str.str();
9905 E.State = IsRecursive? Recursive : NonRecursive;
9906}
9907
9908/// Return a cached TypeString encoding for the ID. If there isn't one, or we
9909/// are recursively expanding a type (IncompleteCount != 0) and the cached
9910/// encoding is Recursive, return an empty StringRef.
9911StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
9912 if (!ID)
9913 return StringRef(); // We have no key.
9914 auto I = Map.find(ID);
9915 if (I == Map.end())
9916 return StringRef(); // We have no encoding.
9917 Entry &E = I->second;
9918 if (E.State == Recursive && IncompleteCount)
9919 return StringRef(); // We don't use Recursive encodings for member types.
9920
9921 if (E.State == Incomplete) {
9922 // The incomplete type is being used to break out of recursion.
9923 E.State = IncompleteUsed;
9924 ++IncompleteUsedCount;
9925 }
9926 return E.Str;
9927}
9928
9929/// The XCore ABI includes a type information section that communicates symbol
9930/// type information to the linker. The linker uses this information to verify
9931 /// safety/correctness of things such as array bounds and pointers et al.
9932/// The ABI only requires C (and XC) language modules to emit TypeStrings.
9933/// This type information (TypeString) is emitted into meta data for all global
9934/// symbols: definitions, declarations, functions & variables.
9935///
9936/// The TypeString carries type, qualifier, name, size & value details.
9937/// Please see 'Tools Development Guide' section 2.16.2 for format details:
9938/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
9939/// The output is tested by test/CodeGen/xcore-stringtype.c.
9940///
9941static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
9942 const CodeGen::CodeGenModule &CGM,
9943 TypeStringCache &TSC);
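
Worked examples of the resulting TypeStrings, derived from the encoders
later in this file (illustrative; see the XMOS guide above for the
normative format):

  int g;                  /* "si"              */
  struct S { int x; } s;  /* "s(S){m(x){si}}"  */
  int f(unsigned u);      /* "f{si}(ui)"       */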
9944
9945/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
9946void XCoreTargetCodeGenInfo::emitTargetMD(
9947 const Decl *D, llvm::GlobalValue *GV,
9948 const CodeGen::CodeGenModule &CGM) const {
9949 SmallStringEnc Enc;
9950 if (getTypeString(Enc, D, CGM, TSC)) {
9951 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
9952 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
9953 llvm::MDString::get(Ctx, Enc.str())};
9954 llvm::NamedMDNode *MD =
9955 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
9956 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
9957 }
9958}
9959
9960void XCoreTargetCodeGenInfo::emitTargetMetadata(
9961 CodeGen::CodeGenModule &CGM,
9962 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
9963 // Warning: new MangledDeclNames may be appended within this loop.
9964 // We rely on MapVector insertions adding new elements to the end
9965 // of the container.
9966 for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
9967 auto Val = *(MangledDeclNames.begin() + I);
9968 llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
9969 if (GV) {
9970 const Decl *D = Val.first.getDecl()->getMostRecentDecl();
9971 emitTargetMD(D, GV, CGM);
9972 }
9973 }
9974}
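
The emitted module-level metadata then has roughly this shape (a
hand-written approximation for a global 'int g' with C linkage):

  !xcore.typestrings = !{!0}
  !0 = !{i32* @g, !"si"}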
9975//===----------------------------------------------------------------------===//
9976// SPIR ABI Implementation
9977//===----------------------------------------------------------------------===//
9978
9979namespace {
9980class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
9981public:
9982 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
9983 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
9984 unsigned getOpenCLKernelCallingConv() const override;
9985};
9986
9987} // End anonymous namespace.
9988
9989namespace clang {
9990namespace CodeGen {
9991void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
9992 DefaultABIInfo SPIRABI(CGM.getTypes());
9993 SPIRABI.computeInfo(FI);
9994}
9995}
9996}
9997
9998unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
9999 return llvm::CallingConv::SPIR_KERNEL;
10000}
10001
10002static bool appendType(SmallStringEnc &Enc, QualType QType,
10003 const CodeGen::CodeGenModule &CGM,
10004 TypeStringCache &TSC);
10005
10006/// Helper function for appendRecordType().
10007/// Builds a SmallVector containing the encoded field types in declaration
10008/// order.
10009static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
10010 const RecordDecl *RD,
10011 const CodeGen::CodeGenModule &CGM,
10012 TypeStringCache &TSC) {
10013 for (const auto *Field : RD->fields()) {
10014 SmallStringEnc Enc;
10015 Enc += "m(";
10016 Enc += Field->getName();
10017 Enc += "){";
10018 if (Field->isBitField()) {
10019 Enc += "b(";
10020 llvm::raw_svector_ostream OS(Enc);
10021 OS << Field->getBitWidthValue(CGM.getContext());
10022 Enc += ':';
10023 }
10024 if (!appendType(Enc, Field->getType(), CGM, TSC))
10025 return false;
10026 if (Field->isBitField())
10027 Enc += ')';
10028 Enc += '}';
10029 FE.emplace_back(!Field->getName().empty(), Enc);
10030 }
10031 return true;
10032}
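
For example, the loop above encodes a bit-field member as follows (a
sketch derived from the code, not quoted from the XMOS guide):

  struct T { unsigned x : 3; };   /* field encoding: "m(x){b(3:ui)}" */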
10033
10034/// Appends structure and union types to Enc and adds encoding to cache.
10035/// Recursively calls appendType (via extractFieldType) for each field.
10036/// Union types have their fields ordered according to the ABI.
10037static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
10038 const CodeGen::CodeGenModule &CGM,
10039 TypeStringCache &TSC, const IdentifierInfo *ID) {
10040 // Append the cached TypeString if we have one.
10041 StringRef TypeString = TSC.lookupStr(ID);
10042 if (!TypeString.empty()) {
10043 Enc += TypeString;
10044 return true;
10045 }
10046
10047 // Start to emit an incomplete TypeString.
10048 size_t Start = Enc.size();
10049 Enc += (RT->isUnionType()? 'u' : 's');
10050 Enc += '(';
10051 if (ID)
10052 Enc += ID->getName();
10053 Enc += "){";
10054
10055 // We collect all encoded fields and order as necessary.
10056 bool IsRecursive = false;
10057 const RecordDecl *RD = RT->getDecl()->getDefinition();
10058 if (RD && !RD->field_empty()) {
10059 // An incomplete TypeString stub is placed in the cache for this RecordType
10060 // so that recursive calls to this RecordType will use it whilst building a
10061 // complete TypeString for this RecordType.
10062 SmallVector<FieldEncoding, 16> FE;
10063 std::string StubEnc(Enc.substr(Start).str());
10064 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
10065 TSC.addIncomplete(ID, std::move(StubEnc));
10066 if (!extractFieldType(FE, RD, CGM, TSC)) {
10067 (void) TSC.removeIncomplete(ID);
10068 return false;
10069 }
10070 IsRecursive = TSC.removeIncomplete(ID);
10071 // The ABI requires unions to be sorted but not structures.
10072 // See FieldEncoding::operator< for sort algorithm.
10073 if (RT->isUnionType())
10074 llvm::sort(FE);
10075 // We can now complete the TypeString.
10076 unsigned E = FE.size();
10077 for (unsigned I = 0; I != E; ++I) {
10078 if (I)
10079 Enc += ',';
10080 Enc += FE[I].str();
10081 }
10082 }
10083 Enc += '}';
10084 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
10085 return true;
10086}
10087
10088/// Appends enum types to Enc and adds the encoding to the cache.
10089static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
10090 TypeStringCache &TSC,
10091 const IdentifierInfo *ID) {
10092 // Append the cached TypeString if we have one.
10093 StringRef TypeString = TSC.lookupStr(ID);
10094 if (!TypeString.empty()) {
10095 Enc += TypeString;
10096 return true;
10097 }
10098
10099 size_t Start = Enc.size();
10100 Enc += "e(";
10101 if (ID)
10102 Enc += ID->getName();
10103 Enc += "){";
10104
10105 // We collect all encoded enumerations and order them alphanumerically.
10106 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
10107 SmallVector<FieldEncoding, 16> FE;
10108 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
10109 ++I) {
10110 SmallStringEnc EnumEnc;
10111 EnumEnc += "m(";
10112 EnumEnc += I->getName();
10113 EnumEnc += "){";
10114 I->getInitVal().toString(EnumEnc);
10115 EnumEnc += '}';
10116 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
10117 }
10118 llvm::sort(FE);
10119 unsigned E = FE.size();
10120 for (unsigned I = 0; I != E; ++I) {
10121 if (I)
10122 Enc += ',';
10123 Enc += FE[I].str();
10124 }
10125 }
10126 Enc += '}';
10127 TSC.addIfComplete(ID, Enc.substr(Start), false);
10128 return true;
10129}
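
For example (a sketch; enumerators are sorted by their encoded form):

  enum E { B = 2, A = 1 };   /* "e(E){m(A){1},m(B){2}}" */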
10130
10131/// Appends type's qualifier to Enc.
10132/// This is done prior to appending the type's encoding.
10133static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
10134 // Qualifiers are emitted in alphabetical order.
10135 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
10136 int Lookup = 0;
10137 if (QT.isConstQualified())
10138 Lookup += 1<<0;
10139 if (QT.isRestrictQualified())
10140 Lookup += 1<<1;
10141 if (QT.isVolatileQualified())
10142 Lookup += 1<<2;
10143 Enc += Table[Lookup];
10144}
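
A minimal usage sketch, assuming an ASTContext 'Ctx' is in scope:

  SmallStringEnc Enc;
  appendQualifier(Enc, Ctx.IntTy.withConst().withVolatile());
  // Lookup = 1 (const) + 4 (volatile) = 5, so Enc now holds "cv:",
  // and the full encoding of 'const volatile int' becomes "cv:si".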
10145
10146/// Appends built-in types to Enc.
10147static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
10148 const char *EncType;
10149 switch (BT->getKind()) {
10150 case BuiltinType::Void:
10151 EncType = "0";
10152 break;
10153 case BuiltinType::Bool:
10154 EncType = "b";
10155 break;
10156 case BuiltinType::Char_U:
10157 EncType = "uc";
10158 break;
10159 case BuiltinType::UChar:
10160 EncType = "uc";
10161 break;
10162 case BuiltinType::SChar:
10163 EncType = "sc";
10164 break;
10165 case BuiltinType::UShort:
10166 EncType = "us";
10167 break;
10168 case BuiltinType::Short:
10169 EncType = "ss";
10170 break;
10171 case BuiltinType::UInt:
10172 EncType = "ui";
10173 break;
10174 case BuiltinType::Int:
10175 EncType = "si";
10176 break;
10177 case BuiltinType::ULong:
10178 EncType = "ul";
10179 break;
10180 case BuiltinType::Long:
10181 EncType = "sl";
10182 break;
10183 case BuiltinType::ULongLong:
10184 EncType = "ull";
10185 break;
10186 case BuiltinType::LongLong:
10187 EncType = "sll";
10188 break;
10189 case BuiltinType::Float:
10190 EncType = "ft";
10191 break;
10192 case BuiltinType::Double:
10193 EncType = "d";
10194 break;
10195 case BuiltinType::LongDouble:
10196 EncType = "ld";
10197 break;
10198 default:
10199 return false;
10200 }
10201 Enc += EncType;
10202 return true;
10203}
10204
10205/// Appends a pointer encoding to Enc before calling appendType for the pointee.
10206static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
10207 const CodeGen::CodeGenModule &CGM,
10208 TypeStringCache &TSC) {
10209 Enc += "p(";
10210 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
10211 return false;
10212 Enc += ')';
10213 return true;
10214}
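
Example encodings (illustrative):

  int *p;         /* "p(si)"                                          */
  const int *q;   /* "p(c:si)" -- the qualifier binds to the pointee  */
  int *const r;   /* "c:p(si)" -- the qualifier binds to the pointer  */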
10215
10216/// Appends array encoding to Enc before calling appendType for the element.
10217static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
10218 const ArrayType *AT,
10219 const CodeGen::CodeGenModule &CGM,
10220 TypeStringCache &TSC, StringRef NoSizeEnc) {
10221 if (AT->getSizeModifier() != ArrayType::Normal)
10222 return false;
10223 Enc += "a(";
10224 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
10225 CAT->getSize().toStringUnsigned(Enc);
10226 else
10227 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
10228 Enc += ':';
10229 // The Qualifiers should be attached to the type rather than the array.
10230 appendQualifier(Enc, QT);
10231 if (!appendType(Enc, AT->getElementType(), CGM, TSC))
10232 return false;
10233 Enc += ')';
10234 return true;
10235}
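
Example encodings (illustrative):

  int a[4];        /* "a(4:si)"                                          */
  extern int b[];  /* "a(*:si)" at global scope, where NoSizeEnc == "*"  */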
10236
10237/// Appends a function encoding to Enc, calling appendType for the return type
10238/// and the arguments.
10239static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
10240 const CodeGen::CodeGenModule &CGM,
10241 TypeStringCache &TSC) {
10242 Enc += "f{";
10243 if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
10244 return false;
10245 Enc += "}(";
10246 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
10247 // N.B. we are only interested in the adjusted param types.
10248 auto I = FPT->param_type_begin();
10249 auto E = FPT->param_type_end();
10250 if (I != E) {
10251 do {
10252 if (!appendType(Enc, *I, CGM, TSC))
10253 return false;
10254 ++I;
10255 if (I != E)
10256 Enc += ',';
10257 } while (I != E);
10258 if (FPT->isVariadic())
10259 Enc += ",va";
10260 } else {
10261 if (FPT->isVariadic())
10262 Enc += "va";
10263 else
10264 Enc += '0';
10265 }
10266 }
10267 Enc += ')';
10268 return true;
10269}
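
Example encodings (illustrative):

  int f(void);             /* "f{si}(0)"     */
  int g(unsigned u, ...);  /* "f{si}(ui,va)" */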
10270
10271/// Handles the type's qualifier before dispatching a call to handle specific
10272/// type encodings.
10273static bool appendType(SmallStringEnc &Enc, QualType QType,
10274 const CodeGen::CodeGenModule &CGM,
10275 TypeStringCache &TSC) {
10276
10277 QualType QT = QType.getCanonicalType();
10278
10279 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
10280 // The Qualifiers should be attached to the type rather than the array.
10281 // Thus we don't call appendQualifier() here.
10282 return appendArrayType(Enc, QT, AT, CGM, TSC, "");
10283
10284 appendQualifier(Enc, QT);
10285
10286 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
10287 return appendBuiltinType(Enc, BT);
10288
10289 if (const PointerType *PT = QT->getAs<PointerType>())
10290 return appendPointerType(Enc, PT, CGM, TSC);
10291
10292 if (const EnumType *ET = QT->getAs<EnumType>())
10293 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
10294
10295 if (const RecordType *RT = QT->getAsStructureType())
10296 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
10297
10298 if (const RecordType *RT = QT->getAsUnionType())
10299 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
10300
10301 if (const FunctionType *FT = QT->getAs<FunctionType>())
10302 return appendFunctionType(Enc, FT, CGM, TSC);
10303
10304 return false;
10305}
10306
10307static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
10308 const CodeGen::CodeGenModule &CGM,
10309 TypeStringCache &TSC) {
10310 if (!D)
10311 return false;
10312
10313 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
10314 if (FD->getLanguageLinkage() != CLanguageLinkage)
10315 return false;
10316 return appendType(Enc, FD->getType(), CGM, TSC);
10317 }
10318
10319 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
10320 if (VD->getLanguageLinkage() != CLanguageLinkage)
10321 return false;
10322 QualType QT = VD->getType().getCanonicalType();
10323 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
10324 // Global ArrayTypes are given a size of '*' if the size is unknown.
10325 // The Qualifiers should be attached to the type rather than the array.
10326 // Thus we don't call appendQualifier() here.
10327 return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
10328 }
10329 return appendType(Enc, QT, CGM, TSC);
10330 }
10331 return false;
10332}
10333
10334//===----------------------------------------------------------------------===//
10335// RISCV ABI Implementation
10336//===----------------------------------------------------------------------===//
10337
10338namespace {
10339class RISCVABIInfo : public DefaultABIInfo {
10340private:
10341 // Size of the integer ('x') registers in bits.
10342 unsigned XLen;
10343 // Size of the floating point ('f') registers in bits. Note that the target
10344 // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
10345 // with soft float ABI has FLen==0).
10346 unsigned FLen;
10347 static const int NumArgGPRs = 8;
10348 static const int NumArgFPRs = 8;
10349 bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
10350 llvm::Type *&Field1Ty,
10351 CharUnits &Field1Off,
10352 llvm::Type *&Field2Ty,
10353 CharUnits &Field2Off) const;
10354
10355public:
10356 RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
10357 : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
10358
10359 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
10360 // non-virtual, but computeInfo is virtual, so we override it.
10361 void computeInfo(CGFunctionInfo &FI) const override;
10362
10363 ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
10364 int &ArgFPRsLeft) const;
10365 ABIArgInfo classifyReturnType(QualType RetTy) const;
10366
10367 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
10368 QualType Ty) const override;
10369
10370 ABIArgInfo extendType(QualType Ty) const;
10371
10372 bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
10373 CharUnits &Field1Off, llvm::Type *&Field2Ty,
10374 CharUnits &Field2Off, int &NeededArgGPRs,
10375 int &NeededArgFPRs) const;
10376 ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
10377 CharUnits Field1Off,
10378 llvm::Type *Field2Ty,
10379 CharUnits Field2Off) const;
10380};
10381} // end anonymous namespace
10382
10383void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
10384 QualType RetTy = FI.getReturnType();
10385 if (!getCXXABI().classifyReturnType(FI))
      [1] Assuming the condition is false
      [2] Taking false branch
10386 FI.getReturnInfo() = classifyReturnType(RetTy);
10387
10388 // IsRetIndirect is true if classifyArgumentType indicated the value should
10389 // be passed indirect, or if the type is a scalar larger than 2*XLen and
10390 // not a complex type with elements <= FLen, e.g. fp128 is passed direct
10391 // in LLVM IR, relying on the backend lowering code to rewrite the argument
10392 // list and pass indirectly on RV32.
10393 bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
      [3] Assuming the condition is false
10394 if (!IsRetIndirect && RetTy->isScalarType() &&
      [3.1] 'IsRetIndirect' is false
      [4] Calling 'Type::isScalarType'
      [19] Returning from 'Type::isScalarType'
      [21] Taking true branch
10395     getContext().getTypeSize(RetTy) > (2 * XLen)) {
      [20] Assuming the condition is true
10396   if (RetTy->isComplexType() && FLen) {
      [22] Assuming the condition is true
      [23] Assuming field 'FLen' is not equal to 0
      [24] Taking true branch
10397     QualType EltTy = RetTy->getAs<ComplexType>()->getElementType();
      [25] Assuming the object is not a 'ComplexType'
      [26] Called C++ object pointer is null
10398 IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
10399 } else {
10400 // This is a normal scalar > 2*XLen, such as fp128 on RV32.
10401 IsRetIndirect = true;
10402 }
10403 }
10404
10405 // We must track the number of GPRs used in order to conform to the RISC-V
10406 // ABI, as integer scalars passed in registers should have signext/zeroext
10407 // when promoted, but are anyext if passed on the stack. As GPR usage is
10408 // different for variadic arguments, we must also track whether we are
10409 // examining a vararg or not.
10410 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
10411 int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
10412 int NumFixedArgs = FI.getNumRequiredArgs();
10413
10414 int ArgNum = 0;
10415 for (auto &ArgInfo : FI.arguments()) {
10416 bool IsFixed = ArgNum < NumFixedArgs;
10417 ArgInfo.info =
10418 classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
10419 ArgNum++;
10420 }
10421}
10422
10423// Returns true if the struct is a potential candidate for the floating point
10424// calling convention. If this function returns true, the caller is
10425// responsible for checking that if there is only a single field then that
10426// field is a float.
10427bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
10428 llvm::Type *&Field1Ty,
10429 CharUnits &Field1Off,
10430 llvm::Type *&Field2Ty,
10431 CharUnits &Field2Off) const {
10432 bool IsInt = Ty->isIntegralOrEnumerationType();
10433 bool IsFloat = Ty->isRealFloatingType();
10434
10435 if (IsInt || IsFloat) {
10436 uint64_t Size = getContext().getTypeSize(Ty);
10437 if (IsInt && Size > XLen)
10438 return false;
10439 // Can't be eligible if larger than the FP registers. Half precision isn't
10440 // currently supported on RISC-V and the ABI hasn't been confirmed, so
10441 // default to the integer ABI in that case.
10442 if (IsFloat && (Size > FLen || Size < 32))
10443 return false;
10444 // Can't be eligible if an integer type was already found (int+int pairs
10445 // are not eligible).
10446 if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
10447 return false;
10448 if (!Field1Ty) {
10449 Field1Ty = CGT.ConvertType(Ty);
10450 Field1Off = CurOff;
10451 return true;
10452 }
10453 if (!Field2Ty) {
10454 Field2Ty = CGT.ConvertType(Ty);
10455 Field2Off = CurOff;
10456 return true;
10457 }
10458 return false;
10459 }
10460
10461 if (auto CTy = Ty->getAs<ComplexType>()) {
10462 if (Field1Ty)
10463 return false;
10464 QualType EltTy = CTy->getElementType();
10465 if (getContext().getTypeSize(EltTy) > FLen)
10466 return false;
10467 Field1Ty = CGT.ConvertType(EltTy);
10468 Field1Off = CurOff;
10469 assert(CurOff.isZero() && "Unexpected offset for first field");
10470 Field2Ty = Field1Ty;
10471 Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
10472 return true;
10473 }
10474
10475 if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
10476 uint64_t ArraySize = ATy->getSize().getZExtValue();
10477 QualType EltTy = ATy->getElementType();
10478 CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
10479 for (uint64_t i = 0; i < ArraySize; ++i) {
10480 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
10481 Field1Off, Field2Ty, Field2Off);
10482 if (!Ret)
10483 return false;
10484 CurOff += EltSize;
10485 }
10486 return true;
10487 }
10488
10489 if (const auto *RTy = Ty->getAs<RecordType>()) {
10490 // Structures with either a non-trivial destructor or a non-trivial
10491 // copy constructor are not eligible for the FP calling convention.
10492 if (getRecordArgABI(Ty, CGT.getCXXABI()))
10493 return false;
10494 if (isEmptyRecord(getContext(), Ty, true))
10495 return true;
10496 const RecordDecl *RD = RTy->getDecl();
10497 // Unions aren't eligible unless they're empty (which is caught above).
10498 if (RD->isUnion())
10499 return false;
10500 int ZeroWidthBitFieldCount = 0;
10501 for (const FieldDecl *FD : RD->fields()) {
10502 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
10503 uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
10504 QualType QTy = FD->getType();
10505 if (FD->isBitField()) {
10506 unsigned BitWidth = FD->getBitWidthValue(getContext());
10507 // Allow a bitfield with a type greater than XLen as long as the
10508 // bitwidth is XLen or less.
10509 if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
10510 QTy = getContext().getIntTypeForBitwidth(XLen, false);
10511 if (BitWidth == 0) {
10512 ZeroWidthBitFieldCount++;
10513 continue;
10514 }
10515 }
10516
10517 bool Ret = detectFPCCEligibleStructHelper(
10518 QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
10519 Field1Ty, Field1Off, Field2Ty, Field2Off);
10520 if (!Ret)
10521 return false;
10522
10523 // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
10524 // or int+fp structs, but are ignored for a struct with an fp field and
10525 // any number of zero-width bitfields.
10526 if (Field2Ty && ZeroWidthBitFieldCount > 0)
10527 return false;
10528 }
10529 return Field1Ty != nullptr;
10530 }
10531
10532 return false;
10533}
10534
10535// Determine if a struct is eligible for passing according to the floating
10536// point calling convention (i.e., when flattened it contains a single fp
10537// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
10538// NeededArgGPRs are incremented appropriately.
10539bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
10540 CharUnits &Field1Off,
10541 llvm::Type *&Field2Ty,
10542 CharUnits &Field2Off,
10543 int &NeededArgGPRs,
10544 int &NeededArgFPRs) const {
10545 Field1Ty = nullptr;
10546 Field2Ty = nullptr;
10547 NeededArgGPRs = 0;
10548 NeededArgFPRs = 0;
10549 bool IsCandidate = detectFPCCEligibleStructHelper(
10550 Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
10551 // Not really a candidate if we have a single int but no float.
10552 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
10553 return false;
10554 if (!IsCandidate)
10555 return false;
10556 if (Field1Ty && Field1Ty->isFloatingPointTy())
10557 NeededArgFPRs++;
10558 else if (Field1Ty)
10559 NeededArgGPRs++;
10560 if (Field2Ty && Field2Ty->isFloatingPointTy())
10561 NeededArgFPRs++;
10562 else if (Field2Ty)
10563 NeededArgGPRs++;
10564 return IsCandidate;
10565}
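
Illustrative C examples of these rules, assuming XLen >= 32 and
FLen >= 32:

  struct A { float f; float g; };  /* eligible: fp+fp,  2 FPRs           */
  struct B { int i; float f; };    /* eligible: int+fp, 1 GPR + 1 FPR    */
  struct C { int i; int j; };      /* rejected: int+int pairs            */
  struct D { int i; };             /* rejected: single int with no float */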
10566
10567// Call getCoerceAndExpand for the two-element flattened struct described by
10568// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
10569// appropriate coerceToType and unpaddedCoerceToType.
10570ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
10571 llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
10572 CharUnits Field2Off) const {
10573 SmallVector<llvm::Type *, 3> CoerceElts;
10574 SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
10575 if (!Field1Off.isZero())
10576 CoerceElts.push_back(llvm::ArrayType::get(
10577 llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
10578
10579 CoerceElts.push_back(Field1Ty);
10580 UnpaddedCoerceElts.push_back(Field1Ty);
10581
10582 if (!Field2Ty) {
10583 return ABIArgInfo::getCoerceAndExpand(
10584 llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
10585 UnpaddedCoerceElts[0]);
10586 }
10587
10588 CharUnits Field2Align =
10589 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
10590 CharUnits Field1Size =
10591 CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
10592 CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);
10593
10594 CharUnits Padding = CharUnits::Zero();
10595 if (Field2Off > Field2OffNoPadNoPack)
10596 Padding = Field2Off - Field2OffNoPadNoPack;
10597 else if (Field2Off != Field2Align && Field2Off > Field1Size)
10598 Padding = Field2Off - Field1Size;
10599
10600 bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
10601
10602 if (!Padding.isZero())
10603 CoerceElts.push_back(llvm::ArrayType::get(
10604 llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
10605
10606 CoerceElts.push_back(Field2Ty);
10607 UnpaddedCoerceElts.push_back(Field2Ty);
10608
10609 auto CoerceToType =
10610 llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
10611 auto UnpaddedCoerceToType =
10612 llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
10613
10614 return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
10615}
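
A sketch of the resulting coerce type for an eligible struct, assuming
RV64 with the lp64d ABI (XLen == 64, FLen == 64):

  // struct { float f; double d; }: f at offset 0, d at offset 8.
  //   Field2OffNoPadNoPack = alignTo(4, 8) = 8 == Field2Off,
  //   so Padding == 0 and coerceToType is { float, double }.
  // If 'd' were over-aligned to offset 16, Padding = 16 - 8 = 8 and
  //   coerceToType would be { float, [8 x i8], double }.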
10616
10617ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
10618 int &ArgGPRsLeft,
10619 int &ArgFPRsLeft) const {
10620 assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
10621 Ty = useFirstFieldIfTransparentUnion(Ty);
10622
10623 // Structures with either a non-trivial destructor or a non-trivial
10624 // copy constructor are always passed indirectly.
10625 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
10626 if (ArgGPRsLeft)
10627 ArgGPRsLeft -= 1;
10628 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
10629 CGCXXABI::RAA_DirectInMemory);
10630 }
10631
10632 // Ignore empty structs/unions.
10633 if (isEmptyRecord(getContext(), Ty, true))
10634 return ABIArgInfo::getIgnore();
10635
10636 uint64_t Size = getContext().getTypeSize(Ty);
10637
10638 // Pass floating point values via FPRs if possible.
10639 if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
10640 FLen >= Size && ArgFPRsLeft) {
10641 ArgFPRsLeft--;
10642 return ABIArgInfo::getDirect();
10643 }
10644
10645 // Complex types for the hard float ABI must be passed direct rather than
10646 // using CoerceAndExpand.
10647 if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
10648 QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
10649 if (getContext().getTypeSize(EltTy) <= FLen) {
10650 ArgFPRsLeft -= 2;
10651 return ABIArgInfo::getDirect();
10652 }
10653 }
10654
10655 if (IsFixed && FLen && Ty->isStructureOrClassType()) {
10656 llvm::Type *Field1Ty = nullptr;
10657 llvm::Type *Field2Ty = nullptr;
10658 CharUnits Field1Off = CharUnits::Zero();
10659 CharUnits Field2Off = CharUnits::Zero();
10660 int NeededArgGPRs;
10661 int NeededArgFPRs;
10662 bool IsCandidate =
10663 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
10664 NeededArgGPRs, NeededArgFPRs);
10665 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
10666 NeededArgFPRs <= ArgFPRsLeft) {
10667 ArgGPRsLeft -= NeededArgGPRs;
10668 ArgFPRsLeft -= NeededArgFPRs;
10669 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
10670 Field2Off);
10671 }
10672 }
10673
10674 uint64_t NeededAlign = getContext().getTypeAlign(Ty);
10675 bool MustUseStack = false;
10676 // Determine the number of GPRs needed to pass the current argument
10677 // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
10678 // register pairs, so may consume 3 registers.
10679 int NeededArgGPRs = 1;
10680 if (!IsFixed && NeededAlign == 2 * XLen)
10681 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
10682 else if (Size > XLen && Size <= 2 * XLen)
10683 NeededArgGPRs = 2;
10684
10685 if (NeededArgGPRs > ArgGPRsLeft) {
10686 MustUseStack = true;
10687 NeededArgGPRs = ArgGPRsLeft;
10688 }
10689
10690 ArgGPRsLeft -= NeededArgGPRs;
10691
10692 if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
10693 // Treat an enum type as its underlying type.
10694 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
10695 Ty = EnumTy->getDecl()->getIntegerType();
10696
10697 // All integral types are promoted to XLen width, unless passed on the
10698 // stack.
10699 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
10700 return extendType(Ty);
10701 }
10702
10703 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
10704 if (EIT->getNumBits() < XLen && !MustUseStack)
10705 return extendType(Ty);
10706 if (EIT->getNumBits() > 128 ||
10707 (!getContext().getTargetInfo().hasInt128Type() &&
10708 EIT->getNumBits() > 64))
10709 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
10710 }
10711
10712 return ABIArgInfo::getDirect();
10713 }
10714
10715 // Aggregates which are <= 2*XLen will be passed in registers if possible,
10716 // so coerce to integers.
10717 if (Size <= 2 * XLen) {
10718 unsigned Alignment = getContext().getTypeAlign(Ty);
10719
10720 // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
10721 // required, and a 2-element XLen array if only XLen alignment is required.
10722 if (Size <= XLen) {
10723 return ABIArgInfo::getDirect(
10724 llvm::IntegerType::get(getVMContext(), XLen));
10725 } else if (Alignment == 2 * XLen) {
10726 return ABIArgInfo::getDirect(
10727 llvm::IntegerType::get(getVMContext(), 2 * XLen));
10728 } else {
10729 return ABIArgInfo::getDirect(llvm::ArrayType::get(
10730 llvm::IntegerType::get(getVMContext(), XLen), 2));
10731 }
10732 }
10733 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
10734}
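
For example, with the RV32 ilp32 soft-float ABI (XLen == 32, FLen == 0),
the aggregate rules above give (a sketch of the expected coercions):

  struct P { short a, b; };  /* 4 bytes           -> i32                 */
  struct Q { int a, b; };    /* 8 bytes, align 4  -> [2 x i32]           */
  struct R { double d; };    /* 8 bytes, align 8  -> i64                 */
  struct S { int a[3]; };    /* 12 bytes          -> indirect, not byval */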
10735
10736ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
10737 if (RetTy->isVoidType())
10738 return ABIArgInfo::getIgnore();
10739
10740 int ArgGPRsLeft = 2;
10741 int ArgFPRsLeft = FLen ? 2 : 0;
10742
10743 // The rules for return and argument types are the same, so defer to
10744 // classifyArgumentType.
10745 return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
10746 ArgFPRsLeft);
10747}
10748
10749Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
10750 QualType Ty) const {
10751 CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
10752
10753 // Empty records are ignored for parameter passing purposes.
10754 if (isEmptyRecord(getContext(), Ty, true)) {
10755 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
10756 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
10757 return Addr;
10758 }
10759
10760 auto TInfo = getContext().getTypeInfoInChars(Ty);
10761
10762 // Arguments bigger than 2*XLen bytes are passed indirectly.
10763 bool IsIndirect = TInfo.Width > 2 * SlotSize;
10764
10765 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
10766 SlotSize, /*AllowHigherAlign=*/true);
10767}
10768
10769ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
10770 int TySize = getContext().getTypeSize(Ty);
10771 // The RV64 ABI requires unsigned 32-bit integers to be sign-extended.
10772 if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
10773 return ABIArgInfo::getSignExtend(Ty);
10774 return ABIArgInfo::getExtend(Ty);
10775}
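
For instance, on RV64 (XLen == 64) this rule plays out as follows
(illustrative):

  // uint32_t arg: unsigned, TySize == 32 -> getSignExtend(Ty), matching
  //               the RV64 convention of keeping 32-bit values
  //               sign-extended in 64-bit registers.
  // uint16_t arg: falls through to getExtend(Ty), which zero-extends
  //               because the type is unsigned.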
10776
10777namespace {
10778class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
10779public:
10780 RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
10781 unsigned FLen)
10782 : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
10783
10784 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
10785 CodeGen::CodeGenModule &CGM) const override {
10786 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
10787 if (!FD) return;
10788
10789 const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
10790 if (!Attr)
10791 return;
10792
10793 const char *Kind;
10794 switch (Attr->getInterrupt()) {
10795 case RISCVInterruptAttr::user: Kind = "user"; break;
10796 case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
10797 case RISCVInterruptAttr::machine: Kind = "machine"; break;
10798 }
10799
10800 auto *Fn = cast<llvm::Function>(GV);
10801
10802 Fn->addFnAttr("interrupt", Kind);
10803 }
10804};
10805} // namespace
10806
10807//===----------------------------------------------------------------------===//
10808// VE ABI Implementation.
10809//
10810namespace {
10811class VEABIInfo : public DefaultABIInfo {
10812public:
10813 VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
10814
10815private:
10816 ABIArgInfo classifyReturnType(QualType RetTy) const;
10817 ABIArgInfo classifyArgumentType(QualType RetTy) const;
10818 void computeInfo(CGFunctionInfo &FI) const override;
10819};
10820} // end anonymous namespace
10821
10822ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
10823 if (Ty->isAnyComplexType())
10824 return ABIArgInfo::getDirect();
10825 uint64_t Size = getContext().getTypeSize(Ty);
10826 if (Size < 64 && Ty->isIntegerType())
10827 return ABIArgInfo::getExtend(Ty);
10828 return DefaultABIInfo::classifyReturnType(Ty);
10829}
10830
10831ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
10832 if (Ty->isAnyComplexType())
10833 return ABIArgInfo::getDirect();
10834 uint64_t Size = getContext().getTypeSize(Ty);
10835 if (Size < 64 && Ty->isIntegerType())
10836 return ABIArgInfo::getExtend(Ty);
10837 return DefaultABIInfo::classifyArgumentType(Ty);
10838}
10839
10840void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
10841 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
10842 for (auto &Arg : FI.arguments())
10843 Arg.info = classifyArgumentType(Arg.type);
10844}
10845
10846namespace {
10847class VETargetCodeGenInfo : public TargetCodeGenInfo {
10848public:
10849 VETargetCodeGenInfo(CodeGenTypes &CGT)
10850 : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
10851 // The VE ABI requires that the arguments of variadic and prototype-less
10852 // functions be passed in both registers and memory.
10853 bool isNoProtoCallVariadic(const CallArgList &args,
10854 const FunctionNoProtoType *fnType) const override {
10855 return true;
10856 }
10857};
10858} // end anonymous namespace
10859
10860//===----------------------------------------------------------------------===//
10861// Driver code
10862//===----------------------------------------------------------------------===//
10863
10864bool CodeGenModule::supportsCOMDAT() const {
10865 return getTriple().supportsCOMDAT();
10866}
10867
10868const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
10869 if (TheTargetCodeGenInfo)
10870 return *TheTargetCodeGenInfo;
10871
10872 // Helper to set the unique_ptr while still keeping the return value.
10873 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
10874 this->TheTargetCodeGenInfo.reset(P);
10875 return *P;
10876 };
10877
10878 const llvm::Triple &Triple = getTarget().getTriple();
10879 switch (Triple.getArch()) {
10880 default:
10881 return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
10882
10883 case llvm::Triple::le32:
10884 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
10885 case llvm::Triple::mips:
10886 case llvm::Triple::mipsel:
10887 if (Triple.getOS() == llvm::Triple::NaCl)
10888 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
10889 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
10890
10891 case llvm::Triple::mips64:
10892 case llvm::Triple::mips64el:
10893 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
10894
10895 case llvm::Triple::avr:
10896 return SetCGInfo(new AVRTargetCodeGenInfo(Types));
10897
10898 case llvm::Triple::aarch64:
10899 case llvm::Triple::aarch64_32:
10900 case llvm::Triple::aarch64_be: {
10901 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
10902 if (getTarget().getABI() == "darwinpcs")
10903 Kind = AArch64ABIInfo::DarwinPCS;
10904 else if (Triple.isOSWindows())
10905 return SetCGInfo(
10906 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
10907
10908 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
10909 }
10910
10911 case llvm::Triple::wasm32:
10912 case llvm::Triple::wasm64: {
10913 WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
10914 if (getTarget().getABI() == "experimental-mv")
10915 Kind = WebAssemblyABIInfo::ExperimentalMV;
10916 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
10917 }
10918
10919 case llvm::Triple::arm:
10920 case llvm::Triple::armeb:
10921 case llvm::Triple::thumb:
10922 case llvm::Triple::thumbeb: {
10923 if (Triple.getOS() == llvm::Triple::Win32) {
10924 return SetCGInfo(
10925 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
10926 }
10927
10928 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
10929 StringRef ABIStr = getTarget().getABI();
10930 if (ABIStr == "apcs-gnu")
10931 Kind = ARMABIInfo::APCS;
10932 else if (ABIStr == "aapcs16")
10933 Kind = ARMABIInfo::AAPCS16_VFP;
10934 else if (CodeGenOpts.FloatABI == "hard" ||
10935 (CodeGenOpts.FloatABI != "soft" &&
10936 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
10937 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
10938 Triple.getEnvironment() == llvm::Triple::EABIHF)))
10939 Kind = ARMABIInfo::AAPCS_VFP;
10940
10941 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
10942 }
10943
10944 case llvm::Triple::ppc: {
10945 if (Triple.isOSAIX())
10946 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));
10947
10948 bool IsSoftFloat =
10949 CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
10950 bool RetSmallStructInRegABI =
10951 PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
10952 return SetCGInfo(
10953 new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
10954 }
10955 case llvm::Triple::ppc64:
10956 if (Triple.isOSAIX())
10957 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
10958
10959 if (Triple.isOSBinFormatELF()) {
10960 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
10961 if (getTarget().getABI() == "elfv2")
10962 Kind = PPC64_SVR4_ABIInfo::ELFv2;
10963 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
10964 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
10965
10966 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
10967 IsSoftFloat));
10968 }
10969 return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
10970 case llvm::Triple::ppc64le: {
10971 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
10972 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
10973 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
10974 Kind = PPC64_SVR4_ABIInfo::ELFv1;
10975 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
10976 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
10977
10978 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
10979 IsSoftFloat));
10980 }
10981
10982 case llvm::Triple::nvptx:
10983 case llvm::Triple::nvptx64:
10984 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
10985
10986 case llvm::Triple::msp430:
10987 return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
10988
10989 case llvm::Triple::riscv32:
10990 case llvm::Triple::riscv64: {
10991 StringRef ABIStr = getTarget().getABI();
10992 unsigned XLen = getTarget().getPointerWidth(0);
10993 unsigned ABIFLen = 0;
10994 if (ABIStr.endswith("f"))
10995 ABIFLen = 32;
10996 else if (ABIStr.endswith("d"))
10997 ABIFLen = 64;
10998 return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
10999 }
11000
11001 case llvm::Triple::systemz: {
11002 bool SoftFloat = CodeGenOpts.FloatABI == "soft";
11003 bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
11004 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
11005 }
11006
11007 case llvm::Triple::tce:
11008 case llvm::Triple::tcele:
11009 return SetCGInfo(new TCETargetCodeGenInfo(Types));
11010
11011 case llvm::Triple::x86: {
11012 bool IsDarwinVectorABI = Triple.isOSDarwin();
11013 bool RetSmallStructInRegABI =
11014 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
11015 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
11016
11017 if (Triple.getOS() == llvm::Triple::Win32) {
11018 return SetCGInfo(new WinX86_32TargetCodeGenInfo(
11019 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
11020 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
11021 } else {
11022 return SetCGInfo(new X86_32TargetCodeGenInfo(
11023 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
11024 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
11025 CodeGenOpts.FloatABI == "soft"));
11026 }
11027 }
11028
11029 case llvm::Triple::x86_64: {
11030 StringRef ABI = getTarget().getABI();
11031 X86AVXABILevel AVXLevel =
11032 (ABI == "avx512"
11033 ? X86AVXABILevel::AVX512
11034 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
11035
11036 switch (Triple.getOS()) {
11037 case llvm::Triple::Win32:
11038 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
11039 default:
11040 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
11041 }
11042 }
11043 case llvm::Triple::hexagon:
11044 return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
11045 case llvm::Triple::lanai:
11046 return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
11047 case llvm::Triple::r600:
11048 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
11049 case llvm::Triple::amdgcn:
11050 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
11051 case llvm::Triple::sparc:
11052 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
11053 case llvm::Triple::sparcv9:
11054 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
11055 case llvm::Triple::xcore:
11056 return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
11057 case llvm::Triple::arc:
11058 return SetCGInfo(new ARCTargetCodeGenInfo(Types));
11059 case llvm::Triple::spir:
11060 case llvm::Triple::spir64:
11061 return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
11062 case llvm::Triple::ve:
11063 return SetCGInfo(new VETargetCodeGenInfo(Types));
11064 }
11065}
11066
11067/// Create an OpenCL kernel for an enqueued block.
11068///
11069/// The kernel has the same function type as the block invoke function. Its
11070/// name is the name of the block invoke function postfixed with "_kernel".
11071/// It simply calls the block invoke function then returns.
11072llvm::Function *
11073TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
11074 llvm::Function *Invoke,
11075 llvm::Value *BlockLiteral) const {
11076 auto *InvokeFT = Invoke->getFunctionType();
11077 llvm::SmallVector<llvm::Type *, 2> ArgTys;
11078 for (auto &P : InvokeFT->params())
11079 ArgTys.push_back(P);
11080 auto &C = CGF.getLLVMContext();
11081 std::string Name = Invoke->getName().str() + "_kernel";
11082 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
11083 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
11084 &CGF.CGM.getModule());
11085 auto IP = CGF.Builder.saveIP();
11086 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
11087 auto &Builder = CGF.Builder;
11088 Builder.SetInsertPoint(BB);
11089 llvm::SmallVector<llvm::Value *, 2> Args;
11090 for (auto &A : F->args())
11091 Args.push_back(&A);
11092 Builder.CreateCall(Invoke, Args);
11093 Builder.CreateRetVoid();
11094 Builder.restoreIP(IP);
11095 return F;
11096}
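For orientation, the wrapper built above has the following C++-level shape; the names are hypothetical, and the real routine emits LLVM IR through CGF.Builder rather than C++ source:

extern "C" void my_block_invoke(void *block, int extra) { // hypothetical invoke
  (void)block; (void)extra;
}

// The generated kernel: same parameter list as the invoke function, void
// return, internal linkage; it forwards every argument and returns.
extern "C" void my_block_invoke_kernel(void *block, int extra) {
  my_block_invoke(block, extra);
}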
11097
11098/// Create an OpenCL kernel for an enqueued block.
11099///
11100/// The type of the first argument (the block literal) is the struct type
11101/// of the block literal instead of a pointer type. The first argument
11102/// (block literal) is passed directly by value to the kernel. The kernel
11103 /// allocates the same type of struct on the stack, stores the block literal
11104 /// to it, and passes its pointer to the block invoke function. The kernel
11105/// has "enqueued-block" function attribute and kernel argument metadata.
11106llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
11107 CodeGenFunction &CGF, llvm::Function *Invoke,
11108 llvm::Value *BlockLiteral) const {
11109 auto &Builder = CGF.Builder;
11110 auto &C = CGF.getLLVMContext();
11111
11112 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
11113 auto *InvokeFT = Invoke->getFunctionType();
11114 llvm::SmallVector<llvm::Type *, 2> ArgTys;
11115 llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
11116 llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
11117 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
11118 llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
11119 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
11120 llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
11121
11122 ArgTys.push_back(BlockTy);
11123 ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
11124 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
11125 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
11126 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
11127 AccessQuals.push_back(llvm::MDString::get(C, "none"));
11128 ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
11129 for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
11130 ArgTys.push_back(InvokeFT->getParamType(I));
11131 ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
11132 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
11133 AccessQuals.push_back(llvm::MDString::get(C, "none"));
11134 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
11135 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
11136 ArgNames.push_back(
11137 llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
11138 }
11139 std::string Name = Invoke->getName().str() + "_kernel";
11140 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
11141 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
11142 &CGF.CGM.getModule());
11143 F->addFnAttr("enqueued-block");
11144 auto IP = CGF.Builder.saveIP();
11145 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
11146 Builder.SetInsertPoint(BB);
11147 const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
11148 auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
11149 BlockPtr->setAlignment(BlockAlign);
11150 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
11151 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
11152 llvm::SmallVector<llvm::Value *, 2> Args;
11153 Args.push_back(Cast);
11154 for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
11155 Args.push_back(I);
11156 Builder.CreateCall(Invoke, Args);
11157 Builder.CreateRetVoid();
11158 Builder.restoreIP(IP);
11159
11160 F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
11161 F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
11162 F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
11163 F->setMetadata("kernel_arg_base_type",
11164 llvm::MDNode::get(C, ArgBaseTypeNames));
11165 F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
11166 if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
11167 F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
11168
11169 return F;
11170}
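The AMDGPU variant differs mainly in its first parameter: the block literal arrives by value, is spilled to a stack slot, and the slot's address is what the invoke function sees. A minimal C++-level sketch with invented names (BlockLit, my_invoke), omitting the "enqueued-block" attribute and the kernel-argument metadata attached above:

struct BlockLit { int Size; int Align; void *InvokeFn; }; // hypothetical layout

extern "C" void my_invoke(BlockLit *block) { (void)block; } // hypothetical invoke

extern "C" void my_invoke_kernel(BlockLit block) { // literal passed by value
  BlockLit Local = block;  // corresponds to the alloca + aligned store above
  my_invoke(&Local);       // the invoke function still receives a pointer
}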

/build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/clang/include/clang/AST/Type.h

1//===- Type.h - C Language Family Type Representation -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// C Language Family Type Representation
11///
12/// This file defines the clang::Type interface and subclasses, used to
13/// represent types for languages in the C family.
14//
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_CLANG_AST_TYPE_H
18#define LLVM_CLANG_AST_TYPE_H
19
20#include "clang/AST/DependenceFlags.h"
21#include "clang/AST/NestedNameSpecifier.h"
22#include "clang/AST/TemplateName.h"
23#include "clang/Basic/AddressSpaces.h"
24#include "clang/Basic/AttrKinds.h"
25#include "clang/Basic/Diagnostic.h"
26#include "clang/Basic/ExceptionSpecificationType.h"
27#include "clang/Basic/LLVM.h"
28#include "clang/Basic/Linkage.h"
29#include "clang/Basic/PartialDiagnostic.h"
30#include "clang/Basic/SourceLocation.h"
31#include "clang/Basic/Specifiers.h"
32#include "clang/Basic/Visibility.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/ADT/APSInt.h"
35#include "llvm/ADT/ArrayRef.h"
36#include "llvm/ADT/FoldingSet.h"
37#include "llvm/ADT/None.h"
38#include "llvm/ADT/Optional.h"
39#include "llvm/ADT/PointerIntPair.h"
40#include "llvm/ADT/PointerUnion.h"
41#include "llvm/ADT/StringRef.h"
42#include "llvm/ADT/Twine.h"
43#include "llvm/ADT/iterator_range.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/Compiler.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/PointerLikeTypeTraits.h"
48#include "llvm/Support/TrailingObjects.h"
49#include "llvm/Support/type_traits.h"
50#include <cassert>
51#include <cstddef>
52#include <cstdint>
53#include <cstring>
54#include <string>
55#include <type_traits>
56#include <utility>
57
58namespace clang {
59
60class ExtQuals;
61class QualType;
62class ConceptDecl;
63class TagDecl;
64class TemplateParameterList;
65class Type;
66
67enum {
68 TypeAlignmentInBits = 4,
69 TypeAlignment = 1 << TypeAlignmentInBits
70};
71
72namespace serialization {
73 template <class T> class AbstractTypeReader;
74 template <class T> class AbstractTypeWriter;
75}
76
77} // namespace clang
78
79namespace llvm {
80
81 template <typename T>
82 struct PointerLikeTypeTraits;
83 template<>
84 struct PointerLikeTypeTraits< ::clang::Type*> {
85 static inline void *getAsVoidPointer(::clang::Type *P) { return P; }
86
87 static inline ::clang::Type *getFromVoidPointer(void *P) {
88 return static_cast< ::clang::Type*>(P);
89 }
90
91 static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
92 };
93
94 template<>
95 struct PointerLikeTypeTraits< ::clang::ExtQuals*> {
96 static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; }
97
98 static inline ::clang::ExtQuals *getFromVoidPointer(void *P) {
99 return static_cast< ::clang::ExtQuals*>(P);
100 }
101
102 static constexpr int NumLowBitsAvailable = clang::TypeAlignmentInBits;
103 };
104
105} // namespace llvm
106
107namespace clang {
108
109class ASTContext;
110template <typename> class CanQual;
111class CXXRecordDecl;
112class DeclContext;
113class EnumDecl;
114class Expr;
115class ExtQualsTypeCommonBase;
116class FunctionDecl;
117class IdentifierInfo;
118class NamedDecl;
119class ObjCInterfaceDecl;
120class ObjCProtocolDecl;
121class ObjCTypeParamDecl;
122struct PrintingPolicy;
123class RecordDecl;
124class Stmt;
125class TagDecl;
126class TemplateArgument;
127class TemplateArgumentListInfo;
128class TemplateArgumentLoc;
129class TemplateTypeParmDecl;
130class TypedefNameDecl;
131class UnresolvedUsingTypenameDecl;
132
133using CanQualType = CanQual<Type>;
134
135// Provide forward declarations for all of the *Type classes.
136#define TYPE(Class, Base) class Class##Type;
137#include "clang/AST/TypeNodes.inc"
138
139/// The collection of all-type qualifiers we support.
140/// Clang supports five independent qualifiers:
141/// * C99: const, volatile, and restrict
142/// * MS: __unaligned
143/// * Embedded C (TR18037): address spaces
144/// * Objective C: the GC attributes (none, weak, or strong)
145class Qualifiers {
146public:
147 enum TQ { // NOTE: These flags must be kept in sync with DeclSpec::TQ.
148 Const = 0x1,
149 Restrict = 0x2,
150 Volatile = 0x4,
151 CVRMask = Const | Volatile | Restrict
152 };
153
154 enum GC {
155 GCNone = 0,
156 Weak,
157 Strong
158 };
159
160 enum ObjCLifetime {
161 /// There is no lifetime qualification on this type.
162 OCL_None,
163
164 /// This object can be modified without requiring retains or
165 /// releases.
166 OCL_ExplicitNone,
167
168 /// Assigning into this object requires the old value to be
169 /// released and the new value to be retained. The timing of the
170 /// release of the old value is inexact: it may be moved to
171 /// immediately after the last known point where the value is
172 /// live.
173 OCL_Strong,
174
175 /// Reading or writing from this object requires a barrier call.
176 OCL_Weak,
177
178 /// Assigning into this object requires a lifetime extension.
179 OCL_Autoreleasing
180 };
181
182 enum {
183 /// The maximum supported address space number.
184 /// 23 bits should be enough for anyone.
185 MaxAddressSpace = 0x7fffffu,
186
187 /// The width of the "fast" qualifier mask.
188 FastWidth = 3,
189
190 /// The fast qualifier mask.
191 FastMask = (1 << FastWidth) - 1
192 };
193
194 /// Returns the common set of qualifiers while removing them from
195 /// the given sets.
196 static Qualifiers removeCommonQualifiers(Qualifiers &L, Qualifiers &R) {
197 // If both are only CVR-qualified, bit operations are sufficient.
198 if (!(L.Mask & ~CVRMask) && !(R.Mask & ~CVRMask)) {
199 Qualifiers Q;
200 Q.Mask = L.Mask & R.Mask;
201 L.Mask &= ~Q.Mask;
202 R.Mask &= ~Q.Mask;
203 return Q;
204 }
205
206 Qualifiers Q;
207 unsigned CommonCRV = L.getCVRQualifiers() & R.getCVRQualifiers();
208 Q.addCVRQualifiers(CommonCRV);
209 L.removeCVRQualifiers(CommonCRV);
210 R.removeCVRQualifiers(CommonCRV);
211
212 if (L.getObjCGCAttr() == R.getObjCGCAttr()) {
213 Q.setObjCGCAttr(L.getObjCGCAttr());
214 L.removeObjCGCAttr();
215 R.removeObjCGCAttr();
216 }
217
218 if (L.getObjCLifetime() == R.getObjCLifetime()) {
219 Q.setObjCLifetime(L.getObjCLifetime());
220 L.removeObjCLifetime();
221 R.removeObjCLifetime();
222 }
223
224 if (L.getAddressSpace() == R.getAddressSpace()) {
225 Q.setAddressSpace(L.getAddressSpace());
226 L.removeAddressSpace();
227 R.removeAddressSpace();
228 }
229 return Q;
230 }
231
232 static Qualifiers fromFastMask(unsigned Mask) {
233 Qualifiers Qs;
234 Qs.addFastQualifiers(Mask);
235 return Qs;
236 }
237
238 static Qualifiers fromCVRMask(unsigned CVR) {
239 Qualifiers Qs;
240 Qs.addCVRQualifiers(CVR);
241 return Qs;
242 }
243
244 static Qualifiers fromCVRUMask(unsigned CVRU) {
245 Qualifiers Qs;
246 Qs.addCVRUQualifiers(CVRU);
247 return Qs;
248 }
249
250 // Deserialize qualifiers from an opaque representation.
251 static Qualifiers fromOpaqueValue(unsigned opaque) {
252 Qualifiers Qs;
253 Qs.Mask = opaque;
254 return Qs;
255 }
256
257 // Serialize these qualifiers into an opaque representation.
258 unsigned getAsOpaqueValue() const {
259 return Mask;
260 }
261
262 bool hasConst() const { return Mask & Const; }
263 bool hasOnlyConst() const { return Mask == Const; }
264 void removeConst() { Mask &= ~Const; }
265 void addConst() { Mask |= Const; }
266
267 bool hasVolatile() const { return Mask & Volatile; }
268 bool hasOnlyVolatile() const { return Mask == Volatile; }
269 void removeVolatile() { Mask &= ~Volatile; }
270 void addVolatile() { Mask |= Volatile; }
271
272 bool hasRestrict() const { return Mask & Restrict; }
273 bool hasOnlyRestrict() const { return Mask == Restrict; }
274 void removeRestrict() { Mask &= ~Restrict; }
275 void addRestrict() { Mask |= Restrict; }
276
277 bool hasCVRQualifiers() const { return getCVRQualifiers(); }
278 unsigned getCVRQualifiers() const { return Mask & CVRMask; }
279 unsigned getCVRUQualifiers() const { return Mask & (CVRMask | UMask); }
280
281 void setCVRQualifiers(unsigned mask) {
282 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
283 Mask = (Mask & ~CVRMask) | mask;
284 }
285 void removeCVRQualifiers(unsigned mask) {
286 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
287 Mask &= ~mask;
288 }
289 void removeCVRQualifiers() {
290 removeCVRQualifiers(CVRMask);
291 }
292 void addCVRQualifiers(unsigned mask) {
293 assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
294 Mask |= mask;
295 }
296 void addCVRUQualifiers(unsigned mask) {
297 assert(!(mask & ~CVRMask & ~UMask) && "bitmask contains non-CVRU bits");
298 Mask |= mask;
299 }
300
301 bool hasUnaligned() const { return Mask & UMask; }
302 void setUnaligned(bool flag) {
303 Mask = (Mask & ~UMask) | (flag ? UMask : 0);
304 }
305 void removeUnaligned() { Mask &= ~UMask; }
306 void addUnaligned() { Mask |= UMask; }
307
308 bool hasObjCGCAttr() const { return Mask & GCAttrMask; }
309 GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); }
310 void setObjCGCAttr(GC type) {
311 Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift);
312 }
313 void removeObjCGCAttr() { setObjCGCAttr(GCNone); }
314 void addObjCGCAttr(GC type) {
315 assert(type);
316 setObjCGCAttr(type);
317 }
318 Qualifiers withoutObjCGCAttr() const {
319 Qualifiers qs = *this;
320 qs.removeObjCGCAttr();
321 return qs;
322 }
323 Qualifiers withoutObjCLifetime() const {
324 Qualifiers qs = *this;
325 qs.removeObjCLifetime();
326 return qs;
327 }
328 Qualifiers withoutAddressSpace() const {
329 Qualifiers qs = *this;
330 qs.removeAddressSpace();
331 return qs;
332 }
333
334 bool hasObjCLifetime() const { return Mask & LifetimeMask; }
335 ObjCLifetime getObjCLifetime() const {
336 return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift);
337 }
338 void setObjCLifetime(ObjCLifetime type) {
339 Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift);
340 }
341 void removeObjCLifetime() { setObjCLifetime(OCL_None); }
342 void addObjCLifetime(ObjCLifetime type) {
343 assert(type);
344 assert(!hasObjCLifetime());
345 Mask |= (type << LifetimeShift);
346 }
347
348 /// True if the lifetime is neither None nor ExplicitNone.
349 bool hasNonTrivialObjCLifetime() const {
350 ObjCLifetime lifetime = getObjCLifetime();
351 return (lifetime > OCL_ExplicitNone);
352 }
353
354 /// True if the lifetime is either strong or weak.
355 bool hasStrongOrWeakObjCLifetime() const {
356 ObjCLifetime lifetime = getObjCLifetime();
357 return (lifetime == OCL_Strong || lifetime == OCL_Weak);
358 }
359
360 bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
361 LangAS getAddressSpace() const {
362 return static_cast<LangAS>(Mask >> AddressSpaceShift);
363 }
364 bool hasTargetSpecificAddressSpace() const {
365 return isTargetAddressSpace(getAddressSpace());
366 }
367 /// Get the address space attribute value to be printed by diagnostics.
368 unsigned getAddressSpaceAttributePrintValue() const {
369 auto Addr = getAddressSpace();
370 // This function is not supposed to be used with language specific
371 // address spaces. If that happens, the diagnostic message should consider
372 // printing the QualType instead of the address space value.
373 assert(Addr == LangAS::Default || hasTargetSpecificAddressSpace());
374 if (Addr != LangAS::Default)
375 return toTargetAddressSpace(Addr);
376 // TODO: The diagnostic messages where Addr may be 0 should be fixed
377 // since it cannot differentiate the situation where 0 denotes the default
378 // address space or user specified __attribute__((address_space(0))).
379 return 0;
380 }
381 void setAddressSpace(LangAS space) {
382 assert((unsigned)space <= MaxAddressSpace);
383 Mask = (Mask & ~AddressSpaceMask)
384 | (((uint32_t) space) << AddressSpaceShift);
385 }
386 void removeAddressSpace() { setAddressSpace(LangAS::Default); }
387 void addAddressSpace(LangAS space) {
388 assert(space != LangAS::Default);
389 setAddressSpace(space);
390 }
391
392 // Fast qualifiers are those that can be allocated directly
393 // on a QualType object.
394 bool hasFastQualifiers() const { return getFastQualifiers(); }
395 unsigned getFastQualifiers() const { return Mask & FastMask; }
396 void setFastQualifiers(unsigned mask) {
397 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
398 Mask = (Mask & ~FastMask) | mask;
399 }
400 void removeFastQualifiers(unsigned mask) {
401 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
402 Mask &= ~mask;
403 }
404 void removeFastQualifiers() {
405 removeFastQualifiers(FastMask);
406 }
407 void addFastQualifiers(unsigned mask) {
408 assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
409 Mask |= mask;
410 }
411
412 /// Return true if the set contains any qualifiers which require an ExtQuals
413 /// node to be allocated.
414 bool hasNonFastQualifiers() const { return Mask & ~FastMask; }
415 Qualifiers getNonFastQualifiers() const {
416 Qualifiers Quals = *this;
417 Quals.setFastQualifiers(0);
418 return Quals;
419 }
420
421 /// Return true if the set contains any qualifiers.
422 bool hasQualifiers() const { return Mask; }
423 bool empty() const { return !Mask; }
424
425 /// Add the qualifiers from the given set to this set.
426 void addQualifiers(Qualifiers Q) {
427 // If the other set doesn't have any non-boolean qualifiers, just
428 // bit-or it in.
429 if (!(Q.Mask & ~CVRMask))
430 Mask |= Q.Mask;
431 else {
432 Mask |= (Q.Mask & CVRMask);
433 if (Q.hasAddressSpace())
434 addAddressSpace(Q.getAddressSpace());
435 if (Q.hasObjCGCAttr())
436 addObjCGCAttr(Q.getObjCGCAttr());
437 if (Q.hasObjCLifetime())
438 addObjCLifetime(Q.getObjCLifetime());
439 }
440 }
441
442 /// Remove the qualifiers from the given set from this set.
443 void removeQualifiers(Qualifiers Q) {
444 // If the other set doesn't have any non-boolean qualifiers, just
445 // bit-and the inverse in.
446 if (!(Q.Mask & ~CVRMask))
447 Mask &= ~Q.Mask;
448 else {
449 Mask &= ~(Q.Mask & CVRMask);
450 if (getObjCGCAttr() == Q.getObjCGCAttr())
451 removeObjCGCAttr();
452 if (getObjCLifetime() == Q.getObjCLifetime())
453 removeObjCLifetime();
454 if (getAddressSpace() == Q.getAddressSpace())
455 removeAddressSpace();
456 }
457 }
458
459 /// Add the qualifiers from the given set to this set, given that
460 /// they don't conflict.
461 void addConsistentQualifiers(Qualifiers qs) {
462 assert(getAddressSpace() == qs.getAddressSpace() ||
463 !hasAddressSpace() || !qs.hasAddressSpace());
464 assert(getObjCGCAttr() == qs.getObjCGCAttr() ||
465 !hasObjCGCAttr() || !qs.hasObjCGCAttr());
466 assert(getObjCLifetime() == qs.getObjCLifetime() ||
467 !hasObjCLifetime() || !qs.hasObjCLifetime());
468 Mask |= qs.Mask;
469 }
470
471 /// Returns true if address space A is equal to or a superset of B.
472 /// OpenCL v2.0 defines conversion rules (OpenCLC v2.0 s6.5.5) and the notion of
473 /// overlapping address spaces.
474 /// CL1.1 or CL1.2:
475 /// every address space is a superset of itself.
476 /// CL2.0 adds:
477 /// __generic is a superset of any address space except for __constant.
478 static bool isAddressSpaceSupersetOf(LangAS A, LangAS B) {
479 // Address spaces must match exactly.
480 return A == B ||
481 // Otherwise in OpenCLC v2.0 s6.5.5: every address space except
482 // for __constant can be used as __generic.
483 (A == LangAS::opencl_generic && B != LangAS::opencl_constant) ||
484 // We also define global_device and global_host address spaces,
485 // to distinguish global pointers allocated on host from pointers
486 // allocated on device, which are a subset of __global.
487 (A == LangAS::opencl_global && (B == LangAS::opencl_global_device ||
488 B == LangAS::opencl_global_host)) ||
489 // Consider pointer size address spaces to be equivalent to default.
490 ((isPtrSizeAddressSpace(A) || A == LangAS::Default) &&
491 (isPtrSizeAddressSpace(B) || B == LangAS::Default));
492 }
493
494 /// Returns true if the address space in these qualifiers is equal to or
495 /// a superset of the address space in the argument qualifiers.
496 bool isAddressSpaceSupersetOf(Qualifiers other) const {
497 return isAddressSpaceSupersetOf(getAddressSpace(), other.getAddressSpace());
498 }
499
500 /// Determines if these qualifiers compatibly include another set.
501 /// Generally this answers the question of whether an object with the other
502 /// qualifiers can be safely used as an object with these qualifiers.
503 bool compatiblyIncludes(Qualifiers other) const {
504 return isAddressSpaceSupersetOf(other) &&
505 // ObjC GC qualifiers can match, be added, or be removed, but can't
506 // be changed.
507 (getObjCGCAttr() == other.getObjCGCAttr() || !hasObjCGCAttr() ||
508 !other.hasObjCGCAttr()) &&
509 // ObjC lifetime qualifiers must match exactly.
510 getObjCLifetime() == other.getObjCLifetime() &&
511 // CVR qualifiers may subset.
512 (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask)) &&
513 // U qualifier may superset.
514 (!other.hasUnaligned() || hasUnaligned());
515 }
516
517 /// Determines if these qualifiers compatibly include another set of
518 /// qualifiers from the narrow perspective of Objective-C ARC lifetime.
519 ///
520 /// One set of Objective-C lifetime qualifiers compatibly includes the other
521 /// if the lifetime qualifiers match, or if both are non-__weak and the
522 /// including set also contains the 'const' qualifier, or both are non-__weak
523 /// and one is None (which can only happen in non-ARC modes).
524 bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
525 if (getObjCLifetime() == other.getObjCLifetime())
526 return true;
527
528 if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
529 return false;
530
531 if (getObjCLifetime() == OCL_None || other.getObjCLifetime() == OCL_None)
532 return true;
533
534 return hasConst();
535 }
536
537 /// Determine whether this set of qualifiers is a strict superset of
538 /// another set of qualifiers, not considering qualifier compatibility.
539 bool isStrictSupersetOf(Qualifiers Other) const;
540
541 bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
542 bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
543
544 explicit operator bool() const { return hasQualifiers(); }
545
546 Qualifiers &operator+=(Qualifiers R) {
547 addQualifiers(R);
548 return *this;
549 }
550
551 // Union two qualifier sets. If an enumerated qualifier appears
552 // in both sets, use the one from the right.
553 friend Qualifiers operator+(Qualifiers L, Qualifiers R) {
554 L += R;
555 return L;
556 }
557
558 Qualifiers &operator-=(Qualifiers R) {
559 removeQualifiers(R);
560 return *this;
561 }
562
563 /// Compute the difference between two qualifier sets.
564 friend Qualifiers operator-(Qualifiers L, Qualifiers R) {
565 L -= R;
566 return L;
567 }
568
569 std::string getAsString() const;
570 std::string getAsString(const PrintingPolicy &Policy) const;
571
572 static std::string getAddrSpaceAsString(LangAS AS);
573
574 bool isEmptyWhenPrinted(const PrintingPolicy &Policy) const;
575 void print(raw_ostream &OS, const PrintingPolicy &Policy,
576 bool appendSpaceIfNonEmpty = false) const;
577
578 void Profile(llvm::FoldingSetNodeID &ID) const {
579 ID.AddInteger(Mask);
580 }
581
582private:
583 // bits: |0 1 2|3|4 .. 5|6 .. 8|9 ... 31|
584 // |C R V|U|GCAttr|Lifetime|AddressSpace|
585 uint32_t Mask = 0;
586
587 static const uint32_t UMask = 0x8;
588 static const uint32_t UShift = 3;
589 static const uint32_t GCAttrMask = 0x30;
590 static const uint32_t GCAttrShift = 4;
591 static const uint32_t LifetimeMask = 0x1C0;
592 static const uint32_t LifetimeShift = 6;
593 static const uint32_t AddressSpaceMask =
594 ~(CVRMask | UMask | GCAttrMask | LifetimeMask);
595 static const uint32_t AddressSpaceShift = 9;
596};
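A small usage sketch of the accessors above (illustrative only, not part of Type.h):

#include "clang/AST/Type.h"
#include <cassert>

static void qualifiersDemo() {
  clang::Qualifiers Quals =
      clang::Qualifiers::fromCVRMask(clang::Qualifiers::Const);
  Quals.addVolatile();                        // Mask now holds Const | Volatile
  assert(Quals.hasConst() && Quals.hasVolatile());

  // "const volatile" compatibly includes plain "const": CVR may subset.
  clang::Qualifiers ConstOnly =
      clang::Qualifiers::fromCVRMask(clang::Qualifiers::Const);
  assert(Quals.compatiblyIncludes(ConstOnly));
}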
597
598/// A std::pair-like structure for storing a qualified type split
599/// into its local qualifiers and its locally-unqualified type.
600struct SplitQualType {
601 /// The locally-unqualified type.
602 const Type *Ty = nullptr;
603
604 /// The local qualifiers.
605 Qualifiers Quals;
606
607 SplitQualType() = default;
608 SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
609
610 SplitQualType getSingleStepDesugaredType() const; // end of this file
611
612 // Make std::tie work.
613 std::pair<const Type *,Qualifiers> asPair() const {
614 return std::pair<const Type *, Qualifiers>(Ty, Quals);
615 }
616
617 friend bool operator==(SplitQualType a, SplitQualType b) {
618 return a.Ty == b.Ty && a.Quals == b.Quals;
619 }
620 friend bool operator!=(SplitQualType a, SplitQualType b) {
621 return a.Ty != b.Ty || a.Quals != b.Quals;
622 }
623};
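The asPair() hook exists so a SplitQualType can bind through std::tie; a minimal sketch (assuming a non-null QualType T):

#include "clang/AST/Type.h"
#include <tuple>

static void splitDemo(clang::QualType T) {
  const clang::Type *Ty = nullptr;
  clang::Qualifiers Quals;
  std::tie(Ty, Quals) = T.split().asPair(); // unqualified type + local quals
}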
624
625/// The kind of type we are substituting Objective-C type arguments into.
626///
627/// The kind of substitution affects the replacement of type parameters when
628/// no concrete type information is provided, e.g., when dealing with an
629/// unspecialized type.
630enum class ObjCSubstitutionContext {
631 /// An ordinary type.
632 Ordinary,
633
634 /// The result type of a method or function.
635 Result,
636
637 /// The parameter type of a method or function.
638 Parameter,
639
640 /// The type of a property.
641 Property,
642
643 /// The superclass of a type.
644 Superclass,
645};
646
647/// A (possibly-)qualified type.
648///
649/// For efficiency, we don't store CV-qualified types as nodes on their
650/// own: instead each reference to a type stores the qualifiers. This
651/// greatly reduces the number of nodes we need to allocate for types (for
652/// example we only need one for 'int', 'const int', 'volatile int',
653/// 'const volatile int', etc).
654///
655/// As an added efficiency bonus, instead of making this a pair, we
656/// just store the two bits we care about in the low bits of the
657/// pointer. To handle the packing/unpacking, we make QualType be a
658/// simple wrapper class that acts like a smart pointer. A third bit
659/// indicates whether there are extended qualifiers present, in which
660/// case the pointer points to a special structure.
661class QualType {
662 friend class QualifierCollector;
663
664 // Thankfully, these are efficiently composable.
665 llvm::PointerIntPair<llvm::PointerUnion<const Type *, const ExtQuals *>,
666 Qualifiers::FastWidth> Value;
667
668 const ExtQuals *getExtQualsUnsafe() const {
669 return Value.getPointer().get<const ExtQuals*>();
670 }
671
672 const Type *getTypePtrUnsafe() const {
673 return Value.getPointer().get<const Type*>();
674 }
675
676 const ExtQualsTypeCommonBase *getCommonPtr() const {
677 assert(!isNull() && "Cannot retrieve a NULL type pointer");
678 auto CommonPtrVal = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
679 CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
680 return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
681 }
682
683public:
684 QualType() = default;
685 QualType(const Type *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
686 QualType(const ExtQuals *Ptr, unsigned Quals) : Value(Ptr, Quals) {}
687
688 unsigned getLocalFastQualifiers() const { return Value.getInt(); }
689 void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
690
691 /// Retrieves a pointer to the underlying (unqualified) type.
692 ///
693 /// This function requires that the type not be NULL. If the type might be
694 /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
695 const Type *getTypePtr() const;
696
697 const Type *getTypePtrOrNull() const;
698
699 /// Retrieves a pointer to the name of the base type.
700 const IdentifierInfo *getBaseTypeIdentifier() const;
701
702 /// Divides a QualType into its unqualified type and a set of local
703 /// qualifiers.
704 SplitQualType split() const;
705
706 void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
707
708 static QualType getFromOpaquePtr(const void *Ptr) {
709 QualType T;
710 T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
711 return T;
712 }
713
714 const Type &operator*() const {
715 return *getTypePtr();
716 }
717
718 const Type *operator->() const {
719 return getTypePtr();
720 }
721
722 bool isCanonical() const;
723 bool isCanonicalAsParam() const;
724
725 /// Return true if this QualType doesn't point to a type yet.
726 bool isNull() const {
727 return Value.getPointer().isNull();
728 }
729
730 /// Determine whether this particular QualType instance has the
731 /// "const" qualifier set, without looking through typedefs that may have
732 /// added "const" at a different level.
733 bool isLocalConstQualified() const {
734 return (getLocalFastQualifiers() & Qualifiers::Const);
735 }
736
737 /// Determine whether this type is const-qualified.
738 bool isConstQualified() const;
739
740 /// Determine whether this particular QualType instance has the
741 /// "restrict" qualifier set, without looking through typedefs that may have
742 /// added "restrict" at a different level.
743 bool isLocalRestrictQualified() const {
744 return (getLocalFastQualifiers() & Qualifiers::Restrict);
745 }
746
747 /// Determine whether this type is restrict-qualified.
748 bool isRestrictQualified() const;
749
750 /// Determine whether this particular QualType instance has the
751 /// "volatile" qualifier set, without looking through typedefs that may have
752 /// added "volatile" at a different level.
753 bool isLocalVolatileQualified() const {
754 return (getLocalFastQualifiers() & Qualifiers::Volatile);
755 }
756
757 /// Determine whether this type is volatile-qualified.
758 bool isVolatileQualified() const;
759
760 /// Determine whether this particular QualType instance has any
761 /// qualifiers, without looking through any typedefs that might add
762 /// qualifiers at a different level.
763 bool hasLocalQualifiers() const {
764 return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
765 }
766
767 /// Determine whether this type has any qualifiers.
768 bool hasQualifiers() const;
769
770 /// Determine whether this particular QualType instance has any
771 /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
772 /// instance.
773 bool hasLocalNonFastQualifiers() const {
774 return Value.getPointer().is<const ExtQuals*>();
775 }
776
777 /// Retrieve the set of qualifiers local to this particular QualType
778 /// instance, not including any qualifiers acquired through typedefs or
779 /// other sugar.
780 Qualifiers getLocalQualifiers() const;
781
782 /// Retrieve the set of qualifiers applied to this type.
783 Qualifiers getQualifiers() const;
784
785 /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
786 /// local to this particular QualType instance, not including any qualifiers
787 /// acquired through typedefs or other sugar.
788 unsigned getLocalCVRQualifiers() const {
789 return getLocalFastQualifiers();
790 }
791
792 /// Retrieve the set of CVR (const-volatile-restrict) qualifiers
793 /// applied to this type.
794 unsigned getCVRQualifiers() const;
795
796 bool isConstant(const ASTContext& Ctx) const {
797 return QualType::isConstant(*this, Ctx);
798 }
799
800 /// Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
801 bool isPODType(const ASTContext &Context) const;
802
803 /// Return true if this is a POD type according to the rules of the C++98
804 /// standard, regardless of the current compilation's language.
805 bool isCXX98PODType(const ASTContext &Context) const;
806
807 /// Return true if this is a POD type according to the more relaxed rules
808 /// of the C++11 standard, regardless of the current compilation's language.
809 /// (C++0x [basic.types]p9). Note that, unlike
810 /// CXXRecordDecl::isCXX11StandardLayout, this takes DRs into account.
811 bool isCXX11PODType(const ASTContext &Context) const;
812
813 /// Return true if this is a trivial type per (C++0x [basic.types]p9)
814 bool isTrivialType(const ASTContext &Context) const;
815
816 /// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
817 bool isTriviallyCopyableType(const ASTContext &Context) const;
818
819
820 /// Returns true if it is a class and it might be dynamic.
821 bool mayBeDynamicClass() const;
822
823 /// Returns true if it is not a class or if the class might not be dynamic.
824 bool mayBeNotDynamicClass() const;
825
826 // Don't promise in the API that anything besides 'const' can be
827 // easily added.
828
829 /// Add the `const` type qualifier to this QualType.
830 void addConst() {
831 addFastQualifiers(Qualifiers::Const);
832 }
833 QualType withConst() const {
834 return withFastQualifiers(Qualifiers::Const);
835 }
836
837 /// Add the `volatile` type qualifier to this QualType.
838 void addVolatile() {
839 addFastQualifiers(Qualifiers::Volatile);
840 }
841 QualType withVolatile() const {
842 return withFastQualifiers(Qualifiers::Volatile);
843 }
844
845 /// Add the `restrict` qualifier to this QualType.
846 void addRestrict() {
847 addFastQualifiers(Qualifiers::Restrict);
848 }
849 QualType withRestrict() const {
850 return withFastQualifiers(Qualifiers::Restrict);
851 }
852
853 QualType withCVRQualifiers(unsigned CVR) const {
854 return withFastQualifiers(CVR);
855 }
856
857 void addFastQualifiers(unsigned TQs) {
858 assert(!(TQs & ~Qualifiers::FastMask)
859 && "non-fast qualifier bits set in mask!");
860 Value.setInt(Value.getInt() | TQs);
861 }
862
863 void removeLocalConst();
864 void removeLocalVolatile();
865 void removeLocalRestrict();
866 void removeLocalCVRQualifiers(unsigned Mask);
867
868 void removeLocalFastQualifiers() { Value.setInt(0); }
869 void removeLocalFastQualifiers(unsigned Mask) {
870 assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers");
871 Value.setInt(Value.getInt() & ~Mask);
872 }
873
874 // Creates a type with the given qualifiers in addition to any
875 // qualifiers already on this type.
876 QualType withFastQualifiers(unsigned TQs) const {
877 QualType T = *this;
878 T.addFastQualifiers(TQs);
879 return T;
880 }
881
882 // Creates a type with exactly the given fast qualifiers, removing
883 // any existing fast qualifiers.
884 QualType withExactLocalFastQualifiers(unsigned TQs) const {
885 return withoutLocalFastQualifiers().withFastQualifiers(TQs);
886 }
887
888 // Removes fast qualifiers, but leaves any extended qualifiers in place.
889 QualType withoutLocalFastQualifiers() const {
890 QualType T = *this;
891 T.removeLocalFastQualifiers();
892 return T;
893 }
894
895 QualType getCanonicalType() const;
896
897 /// Return this type with all of the instance-specific qualifiers
898 /// removed, but without removing any qualifiers that may have been applied
899 /// through typedefs.
900 QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
901
902 /// Retrieve the unqualified variant of the given type,
903 /// removing as little sugar as possible.
904 ///
905 /// This routine looks through various kinds of sugar to find the
906 /// least-desugared type that is unqualified. For example, given:
907 ///
908 /// \code
909 /// typedef int Integer;
910 /// typedef const Integer CInteger;
911 /// typedef CInteger DifferenceType;
912 /// \endcode
913 ///
914 /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
915 /// desugar until we hit the type \c Integer, which has no qualifiers on it.
916 ///
917 /// The resulting type might still be qualified if it's sugar for an array
918 /// type. To strip qualifiers even from within a sugared array type, use
919 /// ASTContext::getUnqualifiedArrayType.
920 inline QualType getUnqualifiedType() const;
921
922 /// Retrieve the unqualified variant of the given type, removing as little
923 /// sugar as possible.
924 ///
925 /// Like getUnqualifiedType(), but also returns the set of
926 /// qualifiers that were built up.
927 ///
928 /// The resulting type might still be qualified if it's sugar for an array
929 /// type. To strip qualifiers even from within a sugared array type, use
930 /// ASTContext::getUnqualifiedArrayType.
931 inline SplitQualType getSplitUnqualifiedType() const;
932
933 /// Determine whether this type is more qualified than the other
934 /// given type, requiring exact equality for non-CVR qualifiers.
935 bool isMoreQualifiedThan(QualType Other) const;
936
937 /// Determine whether this type is at least as qualified as the other
938 /// given type, requiring exact equality for non-CVR qualifiers.
939 bool isAtLeastAsQualifiedAs(QualType Other) const;
940
941 QualType getNonReferenceType() const;
942
943 /// Determine the type of a (typically non-lvalue) expression with the
944 /// specified result type.
945 ///
946 /// This routine should be used for expressions for which the return type is
947 /// explicitly specified (e.g., in a cast or call) and isn't necessarily
948 /// an lvalue. It removes a top-level reference (since there are no
949 /// expressions of reference type) and deletes top-level cvr-qualifiers
950 /// from non-class types (in C++) or all types (in C).
951 QualType getNonLValueExprType(const ASTContext &Context) const;
952
953 /// Remove an outer pack expansion type (if any) from this type. Used as part
954 /// of converting the type of a declaration to the type of an expression that
955 /// references that expression. It's meaningless for an expression to have a
956 /// pack expansion type.
957 QualType getNonPackExpansionType() const;
958
959 /// Return the specified type with any "sugar" removed from
960 /// the type. This takes off typedefs, typeof's etc. If the outer level of
961 /// the type is already concrete, it returns it unmodified. This is similar
962 /// to getting the canonical type, but it doesn't remove *all* typedefs. For
963 /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
964 /// concrete.
965 ///
966 /// Qualifiers are left in place.
967 QualType getDesugaredType(const ASTContext &Context) const {
968 return getDesugaredType(*this, Context);
969 }
970
971 SplitQualType getSplitDesugaredType() const {
972 return getSplitDesugaredType(*this);
973 }
974
975 /// Return the specified type with one level of "sugar" removed from
976 /// the type.
977 ///
978 /// This routine takes off the first typedef, typeof, etc. If the outer level
979 /// of the type is already concrete, it returns it unmodified.
980 QualType getSingleStepDesugaredType(const ASTContext &Context) const {
981 return getSingleStepDesugaredTypeImpl(*this, Context);
982 }
983
984 /// Returns the specified type after dropping any
985 /// outer-level parentheses.
986 QualType IgnoreParens() const {
987 if (isa<ParenType>(*this))
988 return QualType::IgnoreParens(*this);
989 return *this;
990 }
991
992 /// Indicate whether the specified types and qualifiers are identical.
993 friend bool operator==(const QualType &LHS, const QualType &RHS) {
994 return LHS.Value == RHS.Value;
995 }
996 friend bool operator!=(const QualType &LHS, const QualType &RHS) {
997 return LHS.Value != RHS.Value;
998 }
999 friend bool operator<(const QualType &LHS, const QualType &RHS) {
1000 return LHS.Value < RHS.Value;
1001 }
1002
1003 static std::string getAsString(SplitQualType split,
1004 const PrintingPolicy &Policy) {
1005 return getAsString(split.Ty, split.Quals, Policy);
1006 }
1007 static std::string getAsString(const Type *ty, Qualifiers qs,
1008 const PrintingPolicy &Policy);
1009
1010 std::string getAsString() const;
1011 std::string getAsString(const PrintingPolicy &Policy) const;
1012
1013 void print(raw_ostream &OS, const PrintingPolicy &Policy,
1014 const Twine &PlaceHolder = Twine(),
1015 unsigned Indentation = 0) const;
1016
1017 static void print(SplitQualType split, raw_ostream &OS,
1018 const PrintingPolicy &policy, const Twine &PlaceHolder,
1019 unsigned Indentation = 0) {
1020 return print(split.Ty, split.Quals, OS, policy, PlaceHolder, Indentation);
1021 }
1022
1023 static void print(const Type *ty, Qualifiers qs,
1024 raw_ostream &OS, const PrintingPolicy &policy,
1025 const Twine &PlaceHolder,
1026 unsigned Indentation = 0);
1027
1028 void getAsStringInternal(std::string &Str,
1029 const PrintingPolicy &Policy) const;
1030
1031 static void getAsStringInternal(SplitQualType split, std::string &out,
1032 const PrintingPolicy &policy) {
1033 return getAsStringInternal(split.Ty, split.Quals, out, policy);
1034 }
1035
1036 static void getAsStringInternal(const Type *ty, Qualifiers qs,
1037 std::string &out,
1038 const PrintingPolicy &policy);
1039
1040 class StreamedQualTypeHelper {
1041 const QualType &T;
1042 const PrintingPolicy &Policy;
1043 const Twine &PlaceHolder;
1044 unsigned Indentation;
1045
1046 public:
1047 StreamedQualTypeHelper(const QualType &T, const PrintingPolicy &Policy,
1048 const Twine &PlaceHolder, unsigned Indentation)
1049 : T(T), Policy(Policy), PlaceHolder(PlaceHolder),
1050 Indentation(Indentation) {}
1051
1052 friend raw_ostream &operator<<(raw_ostream &OS,
1053 const StreamedQualTypeHelper &SQT) {
1054 SQT.T.print(OS, SQT.Policy, SQT.PlaceHolder, SQT.Indentation);
1055 return OS;
1056 }
1057 };
1058
1059 StreamedQualTypeHelper stream(const PrintingPolicy &Policy,
1060 const Twine &PlaceHolder = Twine(),
1061 unsigned Indentation = 0) const {
1062 return StreamedQualTypeHelper(*this, Policy, PlaceHolder, Indentation);
1063 }
1064
1065 void dump(const char *s) const;
1066 void dump() const;
1067 void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
1068
1069 void Profile(llvm::FoldingSetNodeID &ID) const {
1070 ID.AddPointer(getAsOpaquePtr());
1071 }
1072
1073 /// Check if this type has any address space qualifier.
1074 inline bool hasAddressSpace() const;
1075
1076 /// Return the address space of this type.
1077 inline LangAS getAddressSpace() const;
1078
1079 /// Returns true if address space qualifiers overlap with T address space
1080 /// qualifiers.
1081 /// OpenCL C defines conversion rules for pointers to different address spaces
1082 /// and a notion of overlapping address spaces.
1083 /// CL1.1 or CL1.2:
1084 /// address spaces overlap iff they are the same.
1085 /// OpenCL C v2.0 s6.5.5 adds:
1086 /// __generic overlaps with any address space except for __constant.
1087 bool isAddressSpaceOverlapping(QualType T) const {
1088 Qualifiers Q = getQualifiers();
1089 Qualifiers TQ = T.getQualifiers();
1090 // Address spaces overlap if at least one of them is a superset of another
1091 return Q.isAddressSpaceSupersetOf(TQ) || TQ.isAddressSpaceSupersetOf(Q);
1092 }
1093
1094 /// Returns gc attribute of this type.
1095 inline Qualifiers::GC getObjCGCAttr() const;
1096
1097 /// true when Type is objc's weak.
1098 bool isObjCGCWeak() const {
1099 return getObjCGCAttr() == Qualifiers::Weak;
1100 }
1101
1102 /// true when Type is objc's strong.
1103 bool isObjCGCStrong() const {
1104 return getObjCGCAttr() == Qualifiers::Strong;
1105 }
1106
1107 /// Returns lifetime attribute of this type.
1108 Qualifiers::ObjCLifetime getObjCLifetime() const {
1109 return getQualifiers().getObjCLifetime();
1110 }
1111
1112 bool hasNonTrivialObjCLifetime() const {
1113 return getQualifiers().hasNonTrivialObjCLifetime();
1114 }
1115
1116 bool hasStrongOrWeakObjCLifetime() const {
1117 return getQualifiers().hasStrongOrWeakObjCLifetime();
1118 }
1119
1120 // true when Type is objc's weak and weak is enabled but ARC isn't.
1121 bool isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const;
1122
1123 enum PrimitiveDefaultInitializeKind {
1124 /// The type does not fall into any of the following categories. Note that
1125 /// this case is zero-valued so that values of this enum can be used as a
1126 /// boolean condition for non-triviality.
1127 PDIK_Trivial,
1128
1129 /// The type is an Objective-C retainable pointer type that is qualified
1130 /// with the ARC __strong qualifier.
1131 PDIK_ARCStrong,
1132
1133 /// The type is an Objective-C retainable pointer type that is qualified
1134 /// with the ARC __weak qualifier.
1135 PDIK_ARCWeak,
1136
1137 /// The type is a struct containing a field whose type is not PCK_Trivial.
1138 PDIK_Struct
1139 };
1140
1141 /// Functions to query basic properties of non-trivial C struct types.
1142
1143 /// Check if this is a non-trivial type that would cause a C struct
1144 /// transitively containing this type to be non-trivial to default initialize
1145 /// and return the kind.
1146 PrimitiveDefaultInitializeKind
1147 isNonTrivialToPrimitiveDefaultInitialize() const;
1148
1149 enum PrimitiveCopyKind {
1150 /// The type does not fall into any of the following categories. Note that
1151 /// this case is zero-valued so that values of this enum can be used as a
1152 /// boolean condition for non-triviality.
1153 PCK_Trivial,
1154
1155 /// The type would be trivial except that it is volatile-qualified. Types
1156 /// that fall into one of the other non-trivial cases may additionally be
1157 /// volatile-qualified.
1158 PCK_VolatileTrivial,
1159
1160 /// The type is an Objective-C retainable pointer type that is qualified
1161 /// with the ARC __strong qualifier.
1162 PCK_ARCStrong,
1163
1164 /// The type is an Objective-C retainable pointer type that is qualified
1165 /// with the ARC __weak qualifier.
1166 PCK_ARCWeak,
1167
1168 /// The type is a struct containing a field whose type is neither
1169 /// PCK_Trivial nor PCK_VolatileTrivial.
1170 /// Note that a C++ struct type does not necessarily match this; C++ copying
1171 /// semantics are too complex to express here, in part because they depend
1172 /// on the exact constructor or assignment operator that is chosen by
1173 /// overload resolution to do the copy.
1174 PCK_Struct
1175 };
1176
1177 /// Check if this is a non-trivial type that would cause a C struct
1178 /// transitively containing this type to be non-trivial to copy and return the
1179 /// kind.
1180 PrimitiveCopyKind isNonTrivialToPrimitiveCopy() const;
1181
1182 /// Check if this is a non-trivial type that would cause a C struct
1183 /// transitively containing this type to be non-trivial to destructively
1184 /// move and return the kind. Destructive move in this context is a C++-style
1185 /// move in which the source object is placed in a valid but unspecified state
1186 /// after it is moved, as opposed to a truly destructive move in which the
1187 /// source object is placed in an uninitialized state.
1188 PrimitiveCopyKind isNonTrivialToPrimitiveDestructiveMove() const;
1189
1190 enum DestructionKind {
1191 DK_none,
1192 DK_cxx_destructor,
1193 DK_objc_strong_lifetime,
1194 DK_objc_weak_lifetime,
1195 DK_nontrivial_c_struct
1196 };
1197
1198 /// Returns a nonzero value if objects of this type require
1199 /// non-trivial work to clean up after. Non-zero because it's
1200 /// conceivable that qualifiers (objc_gc(weak)?) could make
1201 /// something require destruction.
1202 DestructionKind isDestructedType() const {
1203 return isDestructedTypeImpl(*this);
1204 }
1205
1206 /// Check if this is or contains a C union that is non-trivial to
1207 /// default-initialize, which is a union that has a member that is non-trivial
1208 /// to default-initialize. If this returns true,
1209 /// isNonTrivialToPrimitiveDefaultInitialize returns PDIK_Struct.
1210 bool hasNonTrivialToPrimitiveDefaultInitializeCUnion() const;
1211
1212 /// Check if this is or contains a C union that is non-trivial to destruct,
1213 /// which is a union that has a member that is non-trivial to destruct. If
1214 /// this returns true, isDestructedType returns DK_nontrivial_c_struct.
1215 bool hasNonTrivialToPrimitiveDestructCUnion() const;
1216
1217 /// Check if this is or contains a C union that is non-trivial to copy, which
1218 /// is a union that has a member that is non-trivial to copy. If this returns
1219 /// true, isNonTrivialToPrimitiveCopy returns PCK_Struct.
1220 bool hasNonTrivialToPrimitiveCopyCUnion() const;
1221
1222 /// Determine whether expressions of the given type are forbidden
1223 /// from being lvalues in C.
1224 ///
1225 /// The expression types that are forbidden to be lvalues are:
1226 /// - 'void', but not qualified void
1227 /// - function types
1228 ///
1229 /// The exact rule here is C99 6.3.2.1:
1230 /// An lvalue is an expression with an object type or an incomplete
1231 /// type other than void.
1232 bool isCForbiddenLValueType() const;
1233
1234 /// Substitute type arguments for the Objective-C type parameters used in the
1235 /// subject type.
1236 ///
1237 /// \param ctx ASTContext in which the type exists.
1238 ///
1239 /// \param typeArgs The type arguments that will be substituted for the
1240 /// Objective-C type parameters in the subject type, which are generally
1241 /// computed via \c Type::getObjCSubstitutions. If empty, the type
1242 /// parameters will be replaced with their bounds or id/Class, as appropriate
1243 /// for the context.
1244 ///
1245 /// \param context The context in which the subject type was written.
1246 ///
1247 /// \returns the resulting type.
1248 QualType substObjCTypeArgs(ASTContext &ctx,
1249 ArrayRef<QualType> typeArgs,
1250 ObjCSubstitutionContext context) const;
1251
1252 /// Substitute type arguments from an object type for the Objective-C type
1253 /// parameters used in the subject type.
1254 ///
1255 /// This operation combines the computation of type arguments for
1256 /// substitution (\c Type::getObjCSubstitutions) with the actual process of
1257 /// substitution (\c QualType::substObjCTypeArgs) for the convenience of
1258 /// callers that need to perform a single substitution in isolation.
1259 ///
1260 /// \param objectType The type of the object whose member type we're
1261 /// substituting into. For example, this might be the receiver of a message
1262 /// or the base of a property access.
1263 ///
1264 /// \param dc The declaration context from which the subject type was
1265 /// retrieved, which indicates (for example) which type parameters should
1266 /// be substituted.
1267 ///
1268 /// \param context The context in which the subject type was written.
1269 ///
1270 /// \returns the subject type after replacing all of the Objective-C type
1271 /// parameters with their corresponding arguments.
1272 QualType substObjCMemberType(QualType objectType,
1273 const DeclContext *dc,
1274 ObjCSubstitutionContext context) const;
1275
1276 /// Strip Objective-C "__kindof" types from the given type.
1277 QualType stripObjCKindOfType(const ASTContext &ctx) const;
1278
1279 /// Remove all qualifiers including _Atomic.
1280 QualType getAtomicUnqualifiedType() const;
1281
1282private:
1283 // These methods are implemented in a separate translation unit;
1284 // "static"-ize them to avoid creating temporary QualTypes in the
1285 // caller.
1286 static bool isConstant(QualType T, const ASTContext& Ctx);
1287 static QualType getDesugaredType(QualType T, const ASTContext &Context);
1288 static SplitQualType getSplitDesugaredType(QualType T);
1289 static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
1290 static QualType getSingleStepDesugaredTypeImpl(QualType type,
1291 const ASTContext &C);
1292 static QualType IgnoreParens(QualType T);
1293 static DestructionKind isDestructedTypeImpl(QualType type);
1294
1295 /// Check if \p RD is or contains a non-trivial C union.
1296 static bool hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD);
1297 static bool hasNonTrivialToPrimitiveDestructCUnion(const RecordDecl *RD);
1298 static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD);
1299};
1300
1301} // namespace clang
1302
1303namespace llvm {
1304
1305/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
1306/// to a specific Type class.
1307template<> struct simplify_type< ::clang::QualType> {
1308 using SimpleType = const ::clang::Type *;
1309
1310 static SimpleType getSimplifiedValue(::clang::QualType Val) {
1311 return Val.getTypePtr();
1312 }
1313};
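
The practical effect, as a small sketch (inspect is a hypothetical helper; QT must be non-null): the cast machinery first simplifies a QualType to its bare Type pointer, so isa/dyn_cast apply to QualType directly.

#include "clang/AST/Type.h"
#include "llvm/Support/Casting.h"

void inspect(clang::QualType QT) {
  // dyn_cast sees through QualType to the underlying Type*.
  if (const auto *PT = llvm::dyn_cast<clang::PointerType>(QT))
    (void)PT->getPointeeType(); // QT was structurally a PointerType
}
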
1314
1315// Teach SmallPtrSet that QualType is "basically a pointer".
1316template<>
1317struct PointerLikeTypeTraits<clang::QualType> {
1318 static inline void *getAsVoidPointer(clang::QualType P) {
1319 return P.getAsOpaquePtr();
1320 }
1321
1322 static inline clang::QualType getFromVoidPointer(void *P) {
1323 return clang::QualType::getFromOpaquePtr(P);
1324 }
1325
1326 // Various qualifiers go in low bits.
1327 static constexpr int NumLowBitsAvailable = 0;
1328};
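
A sketch of what this enables (recordVisit is a hypothetical helper): QualType can be used as the element of pointer-keyed containers such as llvm::SmallPtrSet. NumLowBitsAvailable is 0 because QualType already spends its own low bits on qualifiers, so containers may not steal any.

#include "clang/AST/Type.h"
#include "llvm/ADT/SmallPtrSet.h"

bool recordVisit(llvm::SmallPtrSet<clang::QualType, 8> &Visited,
                 clang::QualType QT) {
  // Returns true only the first time a given QualType is seen.
  return Visited.insert(QT).second;
}
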
1329
1330} // namespace llvm
1331
1332namespace clang {
1333
1334/// Base class that is common to both the \c ExtQuals and \c Type
1335/// classes, which allows \c QualType to access the common fields between the
1336/// two.
1337class ExtQualsTypeCommonBase {
1338 friend class ExtQuals;
1339 friend class QualType;
1340 friend class Type;
1341
1342 /// The "base" type of an extended qualifiers type (\c ExtQuals) or
1343 /// a self-referential pointer (for \c Type).
1344 ///
1345 /// This pointer allows an efficient mapping from a QualType to its
1346 /// underlying type pointer.
1347 const Type *const BaseType;
1348
1349 /// The canonical type of this type. A QualType.
1350 QualType CanonicalType;
1351
1352 ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
1353 : BaseType(baseType), CanonicalType(canon) {}
1354};
1355
1356/// We can encode up to four bits in the low bits of a
1357/// type pointer, but there are many more type qualifiers that we want
1358/// to be able to apply to an arbitrary type. Therefore we have this
1359/// struct, intended to be heap-allocated and used by QualType to
1360/// store qualifiers.
1361///
1362/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
1363/// in three low bits on the QualType pointer; a fourth bit records whether
1364/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
1365/// Objective-C GC attributes) are much more rare.
1366class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
1367 // NOTE: changing the fast qualifiers should be straightforward as
1368 // long as you don't make 'const' non-fast.
1369 // 1. Qualifiers:
1370 // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
1371 // Fast qualifiers must occupy the low-order bits.
1372 // b) Update Qualifiers::FastWidth and FastMask.
1373 // 2. QualType:
1374 // a) Update is{Volatile,Restrict}Qualified(), defined inline.
1375 // b) Update remove{Volatile,Restrict}, defined near the end of
1376 // this header.
1377 // 3. ASTContext:
1378 // a) Update get{Volatile,Restrict}Type.
1379
1380 /// The immutable set of qualifiers applied by this node. Always contains
1381 /// extended qualifiers.
1382 Qualifiers Quals;
1383
1384 ExtQuals *this_() { return this; }
1385
1386public:
1387 ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
1388 : ExtQualsTypeCommonBase(baseType,
1389 canon.isNull() ? QualType(this_(), 0) : canon),
1390 Quals(quals) {
1391    assert(Quals.hasNonFastQualifiers()
1392           && "ExtQuals created with no fast qualifiers");
1393    assert(!Quals.hasFastQualifiers()
1394           && "ExtQuals created with fast qualifiers");
1395 }
1396
1397 Qualifiers getQualifiers() const { return Quals; }
1398
1399 bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
1400 Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
1401
1402 bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); }
1403 Qualifiers::ObjCLifetime getObjCLifetime() const {
1404 return Quals.getObjCLifetime();
1405 }
1406
1407 bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
1408 LangAS getAddressSpace() const { return Quals.getAddressSpace(); }
1409
1410 const Type *getBaseType() const { return BaseType; }
1411
1412public:
1413 void Profile(llvm::FoldingSetNodeID &ID) const {
1414 Profile(ID, getBaseType(), Quals);
1415 }
1416
1417 static void Profile(llvm::FoldingSetNodeID &ID,
1418 const Type *BaseType,
1419 Qualifiers Quals) {
1420    assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!");
1421 ID.AddPointer(BaseType);
1422 Quals.Profile(ID);
1423 }
1424};
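
A sketch of the fast/extended split described above (demo is a hypothetical function assuming an ASTContext &Ctx): 'const' travels in the QualType's low bits and allocates nothing, while an address space forces an out-of-line ExtQuals node.

#include "clang/AST/ASTContext.h"
#include <cassert>

void demo(clang::ASTContext &Ctx) {
  clang::QualType Int = Ctx.IntTy;
  clang::QualType ConstInt = Int.withConst(); // fast bits only
  clang::QualType GlobalInt =
      Ctx.getAddrSpaceQualType(Int, clang::LangAS::opencl_global);
  assert(!ConstInt.hasLocalNonFastQualifiers()); // no ExtQuals needed
  assert(GlobalInt.hasLocalNonFastQualifiers()); // carried by an ExtQuals
}
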
1425
1426/// The kind of C++11 ref-qualifier associated with a function type.
1427/// This determines whether a member function's "this" object can be an
1428/// lvalue, rvalue, or neither.
1429enum RefQualifierKind {
1430 /// No ref-qualifier was provided.
1431 RQ_None = 0,
1432
1433 /// An lvalue ref-qualifier was provided (\c &).
1434 RQ_LValue,
1435
1436 /// An rvalue ref-qualifier was provided (\c &&).
1437 RQ_RValue
1438};
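
The three enumerators correspond to these member-function declarations:

struct S {
  void none();      // RQ_None: no ref-qualifier
  void lvalue() &;  // RQ_LValue: callable only on lvalue objects
  void rvalue() &&; // RQ_RValue: callable only on rvalue objects
};
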
1439
1440/// Which keyword(s) were used to create an AutoType.
1441enum class AutoTypeKeyword {
1442 /// auto
1443 Auto,
1444
1445 /// decltype(auto)
1446 DecltypeAuto,
1447
1448 /// __auto_type (GNU extension)
1449 GNUAutoType
1450};
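
The corresponding source spellings:

auto a = 42;            // AutoTypeKeyword::Auto
decltype(auto) b = (a); // AutoTypeKeyword::DecltypeAuto ('b' is int&)
__auto_type c = 42;     // AutoTypeKeyword::GNUAutoType (GNU extension)
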
1451
1452/// The base class of the type hierarchy.
1453///
1454/// A central concept with types is that each type always has a canonical
1455/// type. A canonical type is the type with any typedef names stripped out
1456/// of it or the types it references. For example, consider:
1457///
1458/// typedef int foo;
1459/// typedef foo* bar;
1460/// 'int *' 'foo *' 'bar'
1461///
1462/// There will be a Type object created for 'int'. Since int is canonical, its
1463/// CanonicalType pointer points to itself. There is also a Type for 'foo' (a
1464/// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next
1465/// there is a PointerType that represents 'int*', which, like 'int', is
1466/// canonical. Finally, there is a PointerType type for 'foo*' whose canonical
1467/// type is 'int*', and there is a TypedefType for 'bar', whose canonical type
1468/// is also 'int*'.
1469///
1470/// Non-canonical types are useful for emitting diagnostics, without losing
1471/// information about typedefs being used. Canonical types are useful for type
1472/// comparisons (they allow by-pointer equality tests) and useful for reasoning
1473/// about whether something has a particular form (e.g. is a function type),
1474/// because they implicitly, recursively, strip all typedefs out of a type.
1475///
1476/// Types, once created, are immutable.
1477///
1478class alignas(8) Type : public ExtQualsTypeCommonBase {
1479public:
1480 enum TypeClass {
1481#define TYPE(Class, Base) Class,
1482#define LAST_TYPE(Class) TypeLast = Class
1483#define ABSTRACT_TYPE(Class, Base)
1484#include "clang/AST/TypeNodes.inc"
1485 };
1486
1487private:
1488 /// Bitfields required by the Type class.
1489 class TypeBitfields {
1490 friend class Type;
1491 template <class T> friend class TypePropertyCache;
1492
1493 /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
1494 unsigned TC : 8;
1495
1496 /// Store information on the type dependency.
1497 unsigned Dependence : llvm::BitWidth<TypeDependence>;
1498
1499 /// True if the cache (i.e. the bitfields here starting with
1500 /// 'Cache') is valid.
1501 mutable unsigned CacheValid : 1;
1502
1503 /// Linkage of this type.
1504 mutable unsigned CachedLinkage : 3;
1505
1506 /// Whether this type involves any local or unnamed types.
1507 mutable unsigned CachedLocalOrUnnamed : 1;
1508
1509 /// Whether this type comes from an AST file.
1510 mutable unsigned FromAST : 1;
1511
1512 bool isCacheValid() const {
1513 return CacheValid;
1514 }
1515
1516 Linkage getLinkage() const {
1517      assert(isCacheValid() && "getting linkage from invalid cache");
1518 return static_cast<Linkage>(CachedLinkage);
1519 }
1520
1521 bool hasLocalOrUnnamedType() const {
1522      assert(isCacheValid() && "getting linkage from invalid cache");
1523 return CachedLocalOrUnnamed;
1524 }
1525 };
1526 enum { NumTypeBits = 8 + llvm::BitWidth<TypeDependence> + 6 };
1527
1528protected:
1529 // These classes allow subclasses to somewhat cleanly pack bitfields
1530 // into Type.
1531
1532 class ArrayTypeBitfields {
1533 friend class ArrayType;
1534
1535 unsigned : NumTypeBits;
1536
1537 /// CVR qualifiers from declarations like
1538 /// 'int X[static restrict 4]'. For function parameters only.
1539 unsigned IndexTypeQuals : 3;
1540
1541 /// Storage class qualifiers from declarations like
1542 /// 'int X[static restrict 4]'. For function parameters only.
1543 /// Actually an ArrayType::ArraySizeModifier.
1544 unsigned SizeModifier : 3;
1545 };
1546
1547 class ConstantArrayTypeBitfields {
1548 friend class ConstantArrayType;
1549
1550 unsigned : NumTypeBits + 3 + 3;
1551
1552 /// Whether we have a stored size expression.
1553 unsigned HasStoredSizeExpr : 1;
1554 };
1555
1556 class BuiltinTypeBitfields {
1557 friend class BuiltinType;
1558
1559 unsigned : NumTypeBits;
1560
1561 /// The kind (BuiltinType::Kind) of builtin type this is.
1562 unsigned Kind : 8;
1563 };
1564
1565 /// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
1566 /// Only common bits are stored here. Additional uncommon bits are stored
1567 /// in a trailing object after FunctionProtoType.
1568 class FunctionTypeBitfields {
1569 friend class FunctionProtoType;
1570 friend class FunctionType;
1571
1572 unsigned : NumTypeBits;
1573
1574 /// Extra information which affects how the function is called, like
1575 /// regparm and the calling convention.
1576 unsigned ExtInfo : 13;
1577
1578 /// The ref-qualifier associated with a \c FunctionProtoType.
1579 ///
1580 /// This is a value of type \c RefQualifierKind.
1581 unsigned RefQualifier : 2;
1582
1583 /// Used only by FunctionProtoType, put here to pack with the
1584 /// other bitfields.
1585 /// The qualifiers are part of FunctionProtoType because...
1586 ///
1587 /// C++ 8.3.5p4: The return type, the parameter type list and the
1588 /// cv-qualifier-seq, [...], are part of the function type.
1589 unsigned FastTypeQuals : Qualifiers::FastWidth;
1590 /// Whether this function has extended Qualifiers.
1591 unsigned HasExtQuals : 1;
1592
1593 /// The number of parameters this function has, not counting '...'.
1594 /// According to [implimits] 8 bits should be enough here but this is
1595 /// somewhat easy to exceed with metaprogramming and so we would like to
1596 /// keep NumParams as wide as reasonably possible.
1597 unsigned NumParams : 16;
1598
1599 /// The type of exception specification this function has.
1600 unsigned ExceptionSpecType : 4;
1601
1602 /// Whether this function has extended parameter information.
1603 unsigned HasExtParameterInfos : 1;
1604
1605 /// Whether the function is variadic.
1606 unsigned Variadic : 1;
1607
1608 /// Whether this function has a trailing return type.
1609 unsigned HasTrailingReturn : 1;
1610 };
1611
1612 class ObjCObjectTypeBitfields {
1613 friend class ObjCObjectType;
1614
1615 unsigned : NumTypeBits;
1616
1617 /// The number of type arguments stored directly on this object type.
1618 unsigned NumTypeArgs : 7;
1619
1620 /// The number of protocols stored directly on this object type.
1621 unsigned NumProtocols : 6;
1622
1623 /// Whether this is a "kindof" type.
1624 unsigned IsKindOf : 1;
1625 };
1626
1627 class ReferenceTypeBitfields {
1628 friend class ReferenceType;
1629
1630 unsigned : NumTypeBits;
1631
1632 /// True if the type was originally spelled with an lvalue sigil.
1633 /// This is never true of rvalue references but can also be false
1634 /// on lvalue references because of C++0x [dcl.typedef]p9,
1635 /// as follows:
1636 ///
1637 /// typedef int &ref; // lvalue, spelled lvalue
1638 /// typedef int &&rvref; // rvalue
1639 /// ref &a; // lvalue, inner ref, spelled lvalue
1640 /// ref &&a; // lvalue, inner ref
1641 /// rvref &a; // lvalue, inner ref, spelled lvalue
1642 /// rvref &&a; // rvalue, inner ref
1643 unsigned SpelledAsLValue : 1;
1644
1645 /// True if the inner type is a reference type. This only happens
1646 /// in non-canonical forms.
1647 unsigned InnerRef : 1;
1648 };
1649
1650 class TypeWithKeywordBitfields {
1651 friend class TypeWithKeyword;
1652
1653 unsigned : NumTypeBits;
1654
1655 /// An ElaboratedTypeKeyword. 8 bits for efficient access.
1656 unsigned Keyword : 8;
1657 };
1658
1659 enum { NumTypeWithKeywordBits = 8 };
1660
1661 class ElaboratedTypeBitfields {
1662 friend class ElaboratedType;
1663
1664 unsigned : NumTypeBits;
1665 unsigned : NumTypeWithKeywordBits;
1666
1667 /// Whether the ElaboratedType has a trailing OwnedTagDecl.
1668 unsigned HasOwnedTagDecl : 1;
1669 };
1670
1671 class VectorTypeBitfields {
1672 friend class VectorType;
1673 friend class DependentVectorType;
1674
1675 unsigned : NumTypeBits;
1676
1677 /// The kind of vector, either a generic vector type or some
1678 /// target-specific vector type such as for AltiVec or Neon.
1679 unsigned VecKind : 3;
1680 /// The number of elements in the vector.
1681 uint32_t NumElements;
1682 };
1683
1684 class AttributedTypeBitfields {
1685 friend class AttributedType;
1686
1687 unsigned : NumTypeBits;
1688
1689 /// An AttributedType::Kind
1690 unsigned AttrKind : 32 - NumTypeBits;
1691 };
1692
1693 class AutoTypeBitfields {
1694 friend class AutoType;
1695
1696 unsigned : NumTypeBits;
1697
1698 /// Was this placeholder type spelled as 'auto', 'decltype(auto)',
1699 /// or '__auto_type'? AutoTypeKeyword value.
1700 unsigned Keyword : 2;
1701
1702 /// The number of template arguments in the type-constraints, which is
1703 /// expected to be able to hold at least 1024 according to [implimits].
1704 /// However as this limit is somewhat easy to hit with template
1705 /// metaprogramming we'd prefer to keep it as large as possible.
1706 /// At the moment it has been left as a non-bitfield since this type
1707 /// safely fits in 64 bits as an unsigned, so there is no reason to
1708 /// introduce the performance impact of a bitfield.
1709 unsigned NumArgs;
1710 };
1711
1712 class SubstTemplateTypeParmPackTypeBitfields {
1713 friend class SubstTemplateTypeParmPackType;
1714
1715 unsigned : NumTypeBits;
1716
1717 /// The number of template arguments in \c Arguments, which is
1718 /// expected to be able to hold at least 1024 according to [implimits].
1719 /// However as this limit is somewhat easy to hit with template
1720 /// metaprogramming we'd prefer to keep it as large as possible.
1721 /// At the moment it has been left as a non-bitfield since this type
1722 /// safely fits in 64 bits as an unsigned, so there is no reason to
1723 /// introduce the performance impact of a bitfield.
1724 unsigned NumArgs;
1725 };
1726
1727 class TemplateSpecializationTypeBitfields {
1728 friend class TemplateSpecializationType;
1729
1730 unsigned : NumTypeBits;
1731
1732 /// Whether this template specialization type is a substituted type alias.
1733 unsigned TypeAlias : 1;
1734
1735 /// The number of template arguments named in this class template
1736 /// specialization, which is expected to be able to hold at least 1024
1737 /// according to [implimits]. However, as this limit is somewhat easy to
1738 /// hit with template metaprogramming we'd prefer to keep it as large
1739 /// as possible. At the moment it has been left as a non-bitfield since
1740 /// this type safely fits in 64 bits as an unsigned, so there is no reason
1741 /// to introduce the performance impact of a bitfield.
1742 unsigned NumArgs;
1743 };
1744
1745 class DependentTemplateSpecializationTypeBitfields {
1746 friend class DependentTemplateSpecializationType;
1747
1748 unsigned : NumTypeBits;
1749 unsigned : NumTypeWithKeywordBits;
1750
1751 /// The number of template arguments named in this class template
1752 /// specialization, which is expected to be able to hold at least 1024
1753 /// according to [implimits]. However, as this limit is somewhat easy to
1754 /// hit with template metaprogramming we'd prefer to keep it as large
1755 /// as possible. At the moment it has been left as a non-bitfield since
1756 /// this type safely fits in 64 bits as an unsigned, so there is no reason
1757 /// to introduce the performance impact of a bitfield.
1758 unsigned NumArgs;
1759 };
1760
1761 class PackExpansionTypeBitfields {
1762 friend class PackExpansionType;
1763
1764 unsigned : NumTypeBits;
1765
1766 /// The number of expansions that this pack expansion will
1767 /// generate when substituted (+1), which is expected to be able to
1768 /// hold at least 1024 according to [implimits]. However, as this limit
1769 /// is somewhat easy to hit with template metaprogramming we'd prefer to
1770 /// keep it as large as possible. At the moment it has been left as a
1771 /// non-bitfield since this type safely fits in 64 bits as an unsigned, so
1772 /// there is no reason to introduce the performance impact of a bitfield.
1773 ///
1774 /// This field will only have a non-zero value when some of the parameter
1775 /// packs that occur within the pattern have been substituted but others
1776 /// have not.
1777 unsigned NumExpansions;
1778 };
1779
1780 union {
1781 TypeBitfields TypeBits;
1782 ArrayTypeBitfields ArrayTypeBits;
1783 ConstantArrayTypeBitfields ConstantArrayTypeBits;
1784 AttributedTypeBitfields AttributedTypeBits;
1785 AutoTypeBitfields AutoTypeBits;
1786 BuiltinTypeBitfields BuiltinTypeBits;
1787 FunctionTypeBitfields FunctionTypeBits;
1788 ObjCObjectTypeBitfields ObjCObjectTypeBits;
1789 ReferenceTypeBitfields ReferenceTypeBits;
1790 TypeWithKeywordBitfields TypeWithKeywordBits;
1791 ElaboratedTypeBitfields ElaboratedTypeBits;
1792 VectorTypeBitfields VectorTypeBits;
1793 SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits;
1794 TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
1795 DependentTemplateSpecializationTypeBitfields
1796 DependentTemplateSpecializationTypeBits;
1797 PackExpansionTypeBitfields PackExpansionTypeBits;
1798 };
1799
1800private:
1801 template <class T> friend class TypePropertyCache;
1802
1803 /// Set whether this type comes from an AST file.
1804 void setFromAST(bool V = true) const {
1805 TypeBits.FromAST = V;
1806 }
1807
1808protected:
1809 friend class ASTContext;
1810
1811 Type(TypeClass tc, QualType canon, TypeDependence Dependence)
1812 : ExtQualsTypeCommonBase(this,
1813 canon.isNull() ? QualType(this_(), 0) : canon) {
1814 static_assert(sizeof(*this) <= 8 + sizeof(ExtQualsTypeCommonBase),
1815 "changing bitfields changed sizeof(Type)!");
1816 static_assert(alignof(decltype(*this)) % sizeof(void *) == 0,
1817 "Insufficient alignment!");
1818 TypeBits.TC = tc;
1819 TypeBits.Dependence = static_cast<unsigned>(Dependence);
1820 TypeBits.CacheValid = false;
1821 TypeBits.CachedLocalOrUnnamed = false;
1822 TypeBits.CachedLinkage = NoLinkage;
1823 TypeBits.FromAST = false;
1824 }
1825
1826 // silence VC++ warning C4355: 'this' : used in base member initializer list
1827 Type *this_() { return this; }
1828
1829 void setDependence(TypeDependence D) {
1830 TypeBits.Dependence = static_cast<unsigned>(D);
1831 }
1832
1833 void addDependence(TypeDependence D) { setDependence(getDependence() | D); }
1834
1835public:
1836 friend class ASTReader;
1837 friend class ASTWriter;
1838 template <class T> friend class serialization::AbstractTypeReader;
1839 template <class T> friend class serialization::AbstractTypeWriter;
1840
1841 Type(const Type &) = delete;
1842 Type(Type &&) = delete;
1843 Type &operator=(const Type &) = delete;
1844 Type &operator=(Type &&) = delete;
1845
1846 TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
1847
1848 /// Whether this type comes from an AST file.
1849 bool isFromAST() const { return TypeBits.FromAST; }
1850
1851 /// Whether this type is or contains an unexpanded parameter
1852 /// pack, used to support C++0x variadic templates.
1853 ///
1854 /// A type that contains a parameter pack shall be expanded by the
1855 /// ellipsis operator at some point. For example, the typedef in the
1856 /// following example contains an unexpanded parameter pack 'T':
1857 ///
1858 /// \code
1859 /// template<typename ...T>
1860 /// struct X {
1861 /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
1862 /// };
1863 /// \endcode
1864 ///
1865 /// Note that this routine does not specify which parameter packs are
1866 /// unexpanded; it only reports whether any exist.
1866 bool containsUnexpandedParameterPack() const {
1867 return getDependence() & TypeDependence::UnexpandedPack;
1868 }
1869
1870 /// Determines if this type would be canonical if it had no further
1871 /// qualification.
1872 bool isCanonicalUnqualified() const {
1873 return CanonicalType == QualType(this, 0);
1874 }
1875
1876 /// Pull a single level of sugar off of this locally-unqualified type.
1877 /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
1878 /// or QualType::getSingleStepDesugaredType(const ASTContext&).
1879 QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
1880
1881 /// As an extension, we classify types as one of "sized" or "sizeless";
1882 /// every type is one or the other. Standard types are all sized;
1883 /// sizeless types are purely an extension.
1884 ///
1885 /// Sizeless types contain data with no specified size, alignment,
1886 /// or layout.
1887 bool isSizelessType() const;
1888 bool isSizelessBuiltinType() const;
1889
1890 /// Determines if this is a sizeless type supported by the
1891 /// 'arm_sve_vector_bits' type attribute, which can be applied to a single
1892 /// SVE vector or predicate, excluding tuple types such as svint32x4_t.
1893 bool isVLSTBuiltinType() const;
1894
1895 /// Returns the representative type for the element of an SVE builtin type.
1896 /// This is used to represent fixed-length SVE vectors created with the
1897 /// 'arm_sve_vector_bits' type attribute as VectorType.
1898 QualType getSveEltType(const ASTContext &Ctx) const;
1899
1900 /// Types are partitioned into 3 broad categories (C99 6.2.5p1):
1901 /// object types, function types, and incomplete types.
1902
1903 /// Return true if this is an incomplete type.
1904 /// A type that can describe objects, but which lacks information needed to
1905 /// determine its size (e.g. void, or a fwd declared struct). Clients of this
1906 /// routine will need to determine if the size is actually required.
1907 ///
1908 /// \param Def If non-null, and the type refers to some kind of declaration
1909 /// that can be completed (such as a C struct, C++ class, or Objective-C
1910 /// class), will be set to the declaration.
1911 bool isIncompleteType(NamedDecl **Def = nullptr) const;
1912
1913 /// Return true if this is an incomplete or object
1914 /// type, in other words, not a function type.
1915 bool isIncompleteOrObjectType() const {
1916 return !isFunctionType();
1917 }
1918
1919 /// Determine whether this type is an object type.
1920 bool isObjectType() const {
1921 // C++ [basic.types]p8:
1922 // An object type is a (possibly cv-qualified) type that is not a
1923 // function type, not a reference type, and not a void type.
1924 return !isReferenceType() && !isFunctionType() && !isVoidType();
1925 }
1926
1927 /// Return true if this is a literal type
1928 /// (C++11 [basic.types]p10)
1929 bool isLiteralType(const ASTContext &Ctx) const;
1930
1931 /// Determine if this type is a structural type, per C++20 [temp.param]p7.
1932 bool isStructuralType() const;
1933
1934 /// Test if this type is a standard-layout type.
1935 /// (C++0x [basic.type]p9)
1936 bool isStandardLayoutType() const;
1937
1938 /// Helper methods to distinguish type categories. All type predicates
1939 /// operate on the canonical type, ignoring typedefs and qualifiers.
1940
1941 /// Returns true if the type is a builtin type.
1942 bool isBuiltinType() const;
1943
1944 /// Test for a particular builtin type.
1945 bool isSpecificBuiltinType(unsigned K) const;
1946
1947 /// Test for a type which does not represent an actual type-system type but
1948 /// is instead used as a placeholder for various convenient purposes within
1949 /// Clang. All such types are BuiltinTypes.
1950 bool isPlaceholderType() const;
1951 const BuiltinType *getAsPlaceholderType() const;
1952
1953 /// Test for a specific placeholder type.
1954 bool isSpecificPlaceholderType(unsigned K) const;
1955
1956 /// Test for a placeholder type other than Overload; see
1957 /// BuiltinType::isNonOverloadPlaceholderType.
1958 bool isNonOverloadPlaceholderType() const;
1959
1960 /// isIntegerType() does *not* include complex integers (a GCC extension).
1961 /// isComplexIntegerType() can be used to test for complex integers.
1962 bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
1963 bool isEnumeralType() const;
1964
1965 /// Determine whether this type is a scoped enumeration type.
1966 bool isScopedEnumeralType() const;
1967 bool isBooleanType() const;
1968 bool isCharType() const;
1969 bool isWideCharType() const;
1970 bool isChar8Type() const;
1971 bool isChar16Type() const;
1972 bool isChar32Type() const;
1973 bool isAnyCharacterType() const;
1974 bool isIntegralType(const ASTContext &Ctx) const;
1975
1976 /// Determine whether this type is an integral or enumeration type.
1977 bool isIntegralOrEnumerationType() const;
1978
1979 /// Determine whether this type is an integral or unscoped enumeration type.
1980 bool isIntegralOrUnscopedEnumerationType() const;
1981 bool isUnscopedEnumerationType() const;
1982
1983 /// Floating point categories.
1984 bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
1985 /// isComplexType() does *not* include complex integers (a GCC extension).
1986 /// isComplexIntegerType() can be used to test for complex integers.
1987 bool isComplexType() const; // C99 6.2.5p11 (complex)
1988 bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int.
1989 bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
1990 bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
1991 bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
1992 bool isBFloat16Type() const;
1993 bool isFloat128Type() const;
1994 bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
1995 bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
1996 bool isVoidType() const; // C99 6.2.5p19
1997 bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers)
1998 bool isAggregateType() const;
1999 bool isFundamentalType() const;
2000 bool isCompoundType() const;
2001
2002 // Type Predicates: Check to see if this type is structurally the specified
2003 // type, ignoring typedefs and qualifiers.
2004 bool isFunctionType() const;
2005 bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
2006 bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
2007 bool isPointerType() const;
2008 bool isAnyPointerType() const; // Any C pointer or ObjC object pointer
2009 bool isBlockPointerType() const;
2010 bool isVoidPointerType() const;
2011 bool isReferenceType() const;
2012 bool isLValueReferenceType() const;
2013 bool isRValueReferenceType() const;
2014 bool isObjectPointerType() const;
2015 bool isFunctionPointerType() const;
2016 bool isFunctionReferenceType() const;
2017 bool isMemberPointerType() const;
2018 bool isMemberFunctionPointerType() const;
2019 bool isMemberDataPointerType() const;
2020 bool isArrayType() const;
2021 bool isConstantArrayType() const;
2022 bool isIncompleteArrayType() const;
2023 bool isVariableArrayType() const;
2024 bool isDependentSizedArrayType() const;
2025 bool isRecordType() const;
2026 bool isClassType() const;
2027 bool isStructureType() const;
2028 bool isObjCBoxableRecordType() const;
2029 bool isInterfaceType() const;
2030 bool isStructureOrClassType() const;
2031 bool isUnionType() const;
2032 bool isComplexIntegerType() const; // GCC _Complex integer type.
2033 bool isVectorType() const; // GCC vector type.
2034 bool isExtVectorType() const; // Extended vector type.
2035 bool isMatrixType() const; // Matrix type.
2036 bool isConstantMatrixType() const; // Constant matrix type.
2037 bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
2038 bool isObjCObjectPointerType() const; // pointer to ObjC object
2039 bool isObjCRetainableType() const; // ObjC object or block pointer
2040 bool isObjCLifetimeType() const; // (array of)* retainable type
2041 bool isObjCIndirectLifetimeType() const; // (pointer to)* lifetime type
2042 bool isObjCNSObjectType() const; // __attribute__((NSObject))
2043 bool isObjCIndependentClassType() const; // __attribute__((objc_independent_class))
2044 // FIXME: change this to 'raw' interface type, so we can use 'interface' type
2045 // for the common case.
2046 bool isObjCObjectType() const; // NSString or typeof(*(id)0)
2047 bool isObjCQualifiedInterfaceType() const; // NSString<foo>
2048 bool isObjCQualifiedIdType() const; // id<foo>
2049 bool isObjCQualifiedClassType() const; // Class<foo>
2050 bool isObjCObjectOrInterfaceType() const;
2051 bool isObjCIdType() const; // id
2052 bool isDecltypeType() const;
2053 /// Was this type written with the special inert-in-ARC __unsafe_unretained
2054 /// qualifier?
2055 ///
2056 /// This approximates the answer to the following question: if this
2057 /// translation unit were compiled in ARC, would this type be qualified
2058 /// with __unsafe_unretained?
2059 bool isObjCInertUnsafeUnretainedType() const {
2060 return hasAttr(attr::ObjCInertUnsafeUnretained);
2061 }
2062
2063 /// Whether the type is Objective-C 'id' or a __kindof type of an
2064 /// object type, e.g., __kindof NSView * or __kindof id
2065 /// <NSCopying>.
2066 ///
2067 /// \param bound Will be set to the bound on non-id subtype types,
2068 /// which will be a (possibly specialized) Objective-C class type, or
2069 /// null for 'id'.
2070 bool isObjCIdOrObjectKindOfType(const ASTContext &ctx,
2071 const ObjCObjectType *&bound) const;
2072
2073 bool isObjCClassType() const; // Class
2074
2075 /// Whether the type is Objective-C 'Class' or a __kindof type of an
2076 /// Class type, e.g., __kindof Class <NSCopying>.
2077 ///
2078 /// Unlike \c isObjCIdOrObjectKindOfType, there is no relevant bound
2079 /// here because Objective-C's type system cannot express "a class
2080 /// object for a subclass of NSFoo".
2081 bool isObjCClassOrClassKindOfType() const;
2082
2083 bool isBlockCompatibleObjCPointerType(ASTContext &ctx) const;
2084 bool isObjCSelType() const; // SEL
2085 bool isObjCBuiltinType() const; // 'id' or 'Class'
2086 bool isObjCARCBridgableType() const;
2087 bool isCARCBridgableType() const;
2088 bool isTemplateTypeParmType() const; // C++ template type parameter
2089 bool isNullPtrType() const; // C++11 std::nullptr_t
2090 bool isNothrowT() const; // C++ std::nothrow_t
2091 bool isAlignValT() const; // C++17 std::align_val_t
2092 bool isStdByteType() const; // C++17 std::byte
2093 bool isAtomicType() const; // C11 _Atomic()
2094 bool isUndeducedAutoType() const; // C++11 auto or
2095 // C++14 decltype(auto)
2096
2097#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2098 bool is##Id##Type() const;
2099#include "clang/Basic/OpenCLImageTypes.def"
2100
2101 bool isImageType() const; // Any OpenCL image type
2102
2103 bool isSamplerT() const; // OpenCL sampler_t
2104 bool isEventT() const; // OpenCL event_t
2105 bool isClkEventT() const; // OpenCL clk_event_t
2106 bool isQueueT() const; // OpenCL queue_t
2107 bool isReserveIDT() const; // OpenCL reserve_id_t
2108
2109#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2110 bool is##Id##Type() const;
2111#include "clang/Basic/OpenCLExtensionTypes.def"
2112 // Type defined in cl_intel_device_side_avc_motion_estimation OpenCL extension
2113 bool isOCLIntelSubgroupAVCType() const;
2114 bool isOCLExtOpaqueType() const; // Any OpenCL extension type
2115
2116 bool isPipeType() const; // OpenCL pipe type
2117 bool isExtIntType() const; // Extended Int Type
2118 bool isOpenCLSpecificType() const; // Any OpenCL specific type
2119
2120 /// Determines if this type, which must satisfy
2121 /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
2122 /// than implicitly __strong.
2123 bool isObjCARCImplicitlyUnretainedType() const;
2124
2125 /// Check if the type is the CUDA device builtin surface type.
2126 bool isCUDADeviceBuiltinSurfaceType() const;
2127 /// Check if the type is the CUDA device builtin texture type.
2128 bool isCUDADeviceBuiltinTextureType() const;
2129
2130 /// Return the implicit lifetime for this type, which must not be dependent.
2131 Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
2132
2133 enum ScalarTypeKind {
2134 STK_CPointer,
2135 STK_BlockPointer,
2136 STK_ObjCObjectPointer,
2137 STK_MemberPointer,
2138 STK_Bool,
2139 STK_Integral,
2140 STK_Floating,
2141 STK_IntegralComplex,
2142 STK_FloatingComplex,
2143 STK_FixedPoint
2144 };
2145
2146 /// Given that this is a scalar type, classify it.
2147 ScalarTypeKind getScalarTypeKind() const;
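
For example, following the enumerators above:

// int            -> STK_Integral        bool  -> STK_Bool
// float          -> STK_Floating        int * -> STK_CPointer
// _Complex float -> STK_FloatingComplex
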
2148
2149 TypeDependence getDependence() const {
2150 return static_cast<TypeDependence>(TypeBits.Dependence);
2151 }
2152
2153 /// Whether this type is an error type.
2154 bool containsErrors() const {
2155 return getDependence() & TypeDependence::Error;
2156 }
2157
2158 /// Whether this type is a dependent type, meaning that its definition
2159 /// somehow depends on a template parameter (C++ [temp.dep.type]).
2160 bool isDependentType() const {
2161 return getDependence() & TypeDependence::Dependent;
2162 }
2163
2164 /// Determine whether this type is an instantiation-dependent type,
2165 /// meaning that the type involves a template parameter (even if the
2166 /// definition does not actually depend on the type substituted for that
2167 /// template parameter).
2168 bool isInstantiationDependentType() const {
2169 return getDependence() & TypeDependence::Instantiation;
2170 }
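
The classic example separating the two notions: a type can mention a template parameter without actually depending on it.

template <typename T> struct Example {
  // 'decltype(sizeof(T))' is always std::size_t, so it is not a dependent
  // type, but it is instantiation-dependent because T appears within it.
  using size_like = decltype(sizeof(T));
};
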
2171
2172 /// Determine whether this type is an undeduced type, meaning that
2173 /// it somehow involves a C++11 'auto' type or similar which has not yet been
2174 /// deduced.
2175 bool isUndeducedType() const;
2176
2177 /// Whether this type is a variably-modified type (C99 6.7.5).
2178 bool isVariablyModifiedType() const {
2179 return getDependence() & TypeDependence::VariablyModified;
2180 }
2181
2182 /// Whether this type involves a variable-length array type
2183 /// with a definite size.
2184 bool hasSizedVLAType() const;
2185
2186 /// Whether this type is or contains a local or unnamed type.
2187 bool hasUnnamedOrLocalType() const;
2188
2189 bool isOverloadableType() const;
2190
2191 /// Determine whether this type is a C++ elaborated-type-specifier.
2192 bool isElaboratedTypeSpecifier() const;
2193
2194 bool canDecayToPointerType() const;
2195
2196 /// Whether this type is represented natively as a pointer. This includes
2197 /// pointers, references, block pointers, and Objective-C interface,
2198 /// qualified id, and qualified interface types, as well as nullptr_t.
2199 bool hasPointerRepresentation() const;
2200
2201 /// Whether this type can represent an objective pointer type for the
2202 /// purpose of GC'ability
2203 bool hasObjCPointerRepresentation() const;
2204
2205 /// Determine whether this type has an integer representation
2206 /// of some sort, e.g., it is an integer type or a vector.
2207 bool hasIntegerRepresentation() const;
2208
2209 /// Determine whether this type has a signed integer representation
2210 /// of some sort, e.g., it is a signed integer type or a vector.
2211 bool hasSignedIntegerRepresentation() const;
2212
2213 /// Determine whether this type has an unsigned integer representation
2214 /// of some sort, e.g., it is an unsigned integer type or a vector.
2215 bool hasUnsignedIntegerRepresentation() const;
2216
2217 /// Determine whether this type has a floating-point representation
2218 /// of some sort, e.g., it is a floating-point type or a vector thereof.
2219 bool hasFloatingRepresentation() const;
2220
2221 // Type Checking Functions: Check to see if this type is structurally the
2222 // specified type, ignoring typedefs and qualifiers, and return a pointer to
2223 // the best type we can.
2224 const RecordType *getAsStructureType() const;
2225 /// NOTE: getAs*ArrayType are methods on ASTContext.
2226 const RecordType *getAsUnionType() const;
2227 const ComplexType *getAsComplexIntegerType() const; // GCC complex int type.
2228 const ObjCObjectType *getAsObjCInterfaceType() const;
2229
2230 // The following is a convenience method that returns an ObjCObjectPointerType
2231 // for an object declared using an interface.
2232 const ObjCObjectPointerType *getAsObjCInterfacePointerType() const;
2233 const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
2234 const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
2235 const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
2236
2237 /// Retrieves the CXXRecordDecl that this type refers to, either
2238 /// because the type is a RecordType or because it is the injected-class-name
2239 /// type of a class template or class template partial specialization.
2240 CXXRecordDecl *getAsCXXRecordDecl() const;
2241
2242 /// Retrieves the RecordDecl this type refers to.
2243 RecordDecl *getAsRecordDecl() const;
2244
2245 /// Retrieves the TagDecl that this type refers to, either
2246 /// because the type is a TagType or because it is the injected-class-name
2247 /// type of a class template or class template partial specialization.
2248 TagDecl *getAsTagDecl() const;
2249
2250 /// If this is a pointer or reference to a RecordType, return the
2251 /// CXXRecordDecl that the type refers to.
2252 ///
2253 /// If this is not a pointer or reference, or the type being pointed to does
2254 /// not refer to a CXXRecordDecl, returns NULL.
2255 const CXXRecordDecl *getPointeeCXXRecordDecl() const;
2256
2257 /// Get the DeducedType whose type will be deduced for a variable with
2258 /// an initializer of this type. This looks through declarators like pointer
2259 /// types, but not through decltype or typedefs.
2260 DeducedType *getContainedDeducedType() const;
2261
2262 /// Get the AutoType whose type will be deduced for a variable with
2263 /// an initializer of this type. This looks through declarators like pointer
2264 /// types, but not through decltype or typedefs.
2265 AutoType *getContainedAutoType() const {
2266 return dyn_cast_or_null<AutoType>(getContainedDeducedType());
2267 }
2268
2269 /// Determine whether this type was written with a leading 'auto'
2270 /// corresponding to a trailing return type (possibly for a nested
2271 /// function type within a pointer to function type or similar).
2272 bool hasAutoForTrailingReturnType() const;
2273
2274 /// Member-template getAs<specific type>. Look through sugar for
2275 /// an instance of \<specific type>. This scheme will eventually
2276 /// replace the specific getAsXXXX methods above.
2277 ///
2278 /// There are some specializations of this member template listed
2279 /// immediately following this class.
2280 template <typename T> const T *getAs() const;
2281
2282 /// Member-template getAsAdjusted<specific type>. Look through specific kinds
2283 /// of sugar (parens, attributes, etc) for an instance of \<specific type>.
2284 /// This is used when you need to walk over sugar nodes that represent some
2285 /// kind of type adjustment from a type that was written as a \<specific type>
2286 /// to another type that is still canonically a \<specific type>.
2287 template <typename T> const T *getAsAdjusted() const;
2288
2289 /// A variant of getAs<> for array types which silently discards
2290 /// qualifiers from the outermost type.
2291 const ArrayType *getAsArrayTypeUnsafe() const;
2292
2293 /// Member-template castAs<specific type>. Look through sugar for
2294 /// the underlying instance of \<specific type>.
2295 ///
2296 /// This method has the same relationship to getAs<T> as cast<T> has
2297 /// to dyn_cast<T>; which is to say, the underlying type *must*
2298 /// have the intended type, and this method will never return null.
2299 template <typename T> const T *castAs() const;
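
A sketch of the getAs/castAs contract (useFunctionType is a hypothetical helper; T must be non-null):

void useFunctionType(const clang::Type *T) {
  // getAs looks through sugar (typedefs, parens, ...) and may return null.
  if (const auto *FPT = T->getAs<clang::FunctionProtoType>())
    (void)FPT->getNumParams();
  // castAs asserts that the underlying type really is a FunctionProtoType,
  // so it never returns null; only call it when that is already known.
  const auto *Known = T->castAs<clang::FunctionProtoType>();
  (void)Known;
}
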
2300
2301 /// A variant of castAs<> for array type which silently discards
2302 /// qualifiers from the outermost type.
2303 const ArrayType *castAsArrayTypeUnsafe() const;
2304
2305 /// Determine whether this type had the specified attribute applied to it
2306 /// (looking through top-level type sugar).
2307 bool hasAttr(attr::Kind AK) const;
2308
2309 /// Get the base element type of this type, potentially discarding type
2310 /// qualifiers. This should never be used when type qualifiers
2311 /// are meaningful.
2312 const Type *getBaseElementTypeUnsafe() const;
2313
2314 /// If this is an array type, return the element type of the array,
2315 /// potentially with type qualifiers missing.
2316 /// This should never be used when type qualifiers are meaningful.
2317 const Type *getArrayElementTypeNoTypeQual() const;
2318
2319 /// If this is a pointer type, return the pointee type.
2320 /// If this is an array type, return the array element type.
2321 /// This should never be used when type qualifiers are meaningful.
2322 const Type *getPointeeOrArrayElementType() const;
2323
2324 /// If this is a pointer, ObjC object pointer, or block
2325 /// pointer, this returns the respective pointee.
2326 QualType getPointeeType() const;
2327
2328 /// Return the specified type with any "sugar" removed from the type,
2329 /// removing any typedefs, typeofs, etc., as well as any qualifiers.
2330 const Type *getUnqualifiedDesugaredType() const;
2331
2332 /// More type predicates useful for type checking/promotion
2333 bool isPromotableIntegerType() const; // C99 6.3.1.1p2
2334
2335 /// Return true if this is an integer type that is
2336 /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
2337 /// or an enum decl which has a signed representation.
2338 bool isSignedIntegerType() const;
2339
2340 /// Return true if this is an integer type that is
2341 /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
2342 /// or an enum decl which has an unsigned representation.
2343 bool isUnsignedIntegerType() const;
2344
2345 /// Determines whether this is an integer type that is signed or an
2346 /// enumeration type whose underlying type is a signed integer type.
2347 bool isSignedIntegerOrEnumerationType() const;
2348
2349 /// Determines whether this is an integer type that is unsigned or an
2350 /// enumeration type whose underlying type is an unsigned integer type.
2351 bool isUnsignedIntegerOrEnumerationType() const;
2352
2353 /// Return true if this is a fixed point type according to
2354 /// ISO/IEC JTC1 SC22 WG14 N1169.
2355 bool isFixedPointType() const;
2356
2357 /// Return true if this is a fixed point or integer type.
2358 bool isFixedPointOrIntegerType() const;
2359
2360 /// Return true if this is a saturated fixed point type according to
2361 /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
2362 bool isSaturatedFixedPointType() const;
2363
2364 /// Return true if this is an unsaturated fixed point type according to
2365 /// ISO/IEC JTC1 SC22 WG14 N1169. This type can be signed or unsigned.
2366 bool isUnsaturatedFixedPointType() const;
2367
2368 /// Return true if this is a fixed point type that is signed according
2369 /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
2370 bool isSignedFixedPointType() const;
2371
2372 /// Return true if this is a fixed point type that is unsigned according
2373 /// to ISO/IEC JTC1 SC22 WG14 N1169. This type can also be saturated.
2374 bool isUnsignedFixedPointType() const;
2375
2376 /// Return true if this is not a variable sized type,
2377 /// according to the rules of C99 6.7.5p3. It is not legal to call this on
2378 /// incomplete types.
2379 bool isConstantSizeType() const;
2380
2381 /// Returns true if this type can be represented by some
2382 /// set of type specifiers.
2383 bool isSpecifierType() const;
2384
2385 /// Determine the linkage of this type.
2386 Linkage getLinkage() const;
2387
2388 /// Determine the visibility of this type.
2389 Visibility getVisibility() const {
2390 return getLinkageAndVisibility().getVisibility();
2391 }
2392
2393 /// Return true if the visibility was explicitly set in the code.
2394 bool isVisibilityExplicit() const {
2395 return getLinkageAndVisibility().isVisibilityExplicit();
2396 }
2397
2398 /// Determine the linkage and visibility of this type.
2399 LinkageInfo getLinkageAndVisibility() const;
2400
2401 /// True if the computed linkage is valid. Used for consistency
2402 /// checking. Should always return true.
2403 bool isLinkageValid() const;
2404
2405 /// Determine the nullability of the given type.
2406 ///
2407 /// Note that nullability is only captured as sugar within the type
2408 /// system, not as part of the canonical type, so nullability will
2409 /// be lost by canonicalization and desugaring.
2410 Optional<NullabilityKind> getNullability(const ASTContext &context) const;
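
A comment-only illustration of the sugar caveat:

// For a variable declared as:
//   int *_Nonnull p;
// getNullability() on the written type yields NullabilityKind::NonNull,
// while the canonical type is plain 'int *', where the nullability is lost.
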
2411
2412 /// Determine whether the given type can have a nullability
2413 /// specifier applied to it, i.e., if it is any kind of pointer type.
2414 ///
2415 /// \param ResultIfUnknown The value to return if we don't yet know whether
2416 /// this type can have nullability because it is dependent.
2417 bool canHaveNullability(bool ResultIfUnknown = true) const;
2418
2419 /// Retrieve the set of substitutions required when accessing a member
2420 /// of the Objective-C receiver type that is declared in the given context.
2421 ///
2422 /// \c *this is the type of the object we're operating on, e.g., the
2423 /// receiver for a message send or the base of a property access, and is
2424 /// expected to be of some object or object pointer type.
2425 ///
2426 /// \param dc The declaration context for which we are building up a
2427 /// substitution mapping, which should be an Objective-C class, extension,
2428 /// category, or method within.
2429 ///
2430 /// \returns an array of type arguments that can be substituted for
2431 /// the type parameters of the given declaration context in any type described
2432 /// within that context, or an empty optional to indicate that no
2433 /// substitution is required.
2434 Optional<ArrayRef<QualType>>
2435 getObjCSubstitutions(const DeclContext *dc) const;
2436
2437 /// Determines if this is an ObjC interface type that may accept type
2438 /// parameters.
2439 bool acceptsObjCTypeParams() const;
2440
2441 const char *getTypeClassName() const;
2442
2443 QualType getCanonicalTypeInternal() const {
2444 return CanonicalType;
2445 }
2446
2447 CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
2448 void dump() const;
2449 void dump(llvm::raw_ostream &OS, const ASTContext &Context) const;
2450};
2451
2452/// This will check for a TypedefType by removing any existing sugar
2453/// until it reaches a TypedefType or a non-sugared type.
2454template <> const TypedefType *Type::getAs() const;
2455
2456/// This will check for a TemplateSpecializationType by removing any
2457/// existing sugar until it reaches a TemplateSpecializationType or a
2458/// non-sugared type.
2459template <> const TemplateSpecializationType *Type::getAs() const;
2460
2461/// This will check for an AttributedType by removing any existing sugar
2462/// until it reaches an AttributedType or a non-sugared type.
2463template <> const AttributedType *Type::getAs() const;
2464
2465// We can do canonical leaf types faster, because we don't have to
2466// worry about preserving child type decoration.
2467#define TYPE(Class, Base)
2468#define LEAF_TYPE(Class) \
2469template <> inline const Class##Type *Type::getAs() const { \
2470 return dyn_cast<Class##Type>(CanonicalType); \
2471} \
2472template <> inline const Class##Type *Type::castAs() const { \
2473 return cast<Class##Type>(CanonicalType); \
2474}
2475#include "clang/AST/TypeNodes.inc"
2476
2477/// This class is used for builtin types like 'int'. Builtin
2478/// types are always canonical and have a literal name field.
2479class BuiltinType : public Type {
2480public:
2481 enum Kind {
2482// OpenCL image types
2483#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) Id,
2484#include "clang/Basic/OpenCLImageTypes.def"
2485// OpenCL extension types
2486#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) Id,
2487#include "clang/Basic/OpenCLExtensionTypes.def"
2488// SVE Types
2489#define SVE_TYPE(Name, Id, SingletonId) Id,
2490#include "clang/Basic/AArch64SVEACLETypes.def"
2491// PPC MMA Types
2492#define PPC_MMA_VECTOR_TYPE(Name, Id, Size) Id,
2493#include "clang/Basic/PPCTypes.def"
2494// All other builtin types
2495#define BUILTIN_TYPE(Id, SingletonId) Id,
2496#define LAST_BUILTIN_TYPE(Id) LastKind = Id
2497#include "clang/AST/BuiltinTypes.def"
2498 };
2499
2500private:
2501 friend class ASTContext; // ASTContext creates these.
2502
2503 BuiltinType(Kind K)
2504 : Type(Builtin, QualType(),
2505 K == Dependent ? TypeDependence::DependentInstantiation
2506 : TypeDependence::None) {
2507 BuiltinTypeBits.Kind = K;
2508 }
2509
2510public:
2511 Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
2512 StringRef getName(const PrintingPolicy &Policy) const;
2513
2514 const char *getNameAsCString(const PrintingPolicy &Policy) const {
2515 // The StringRef is null-terminated.
2516 StringRef str = getName(Policy);
2517    assert(!str.empty() && str.data()[str.size()] == '\0');
2518 return str.data();
2519 }
2520
2521 bool isSugared() const { return false; }
2522 QualType desugar() const { return QualType(this, 0); }
2523
2524 bool isInteger() const {
2525 return getKind() >= Bool && getKind() <= Int128;
2526 }
2527
2528 bool isSignedInteger() const {
2529 return getKind() >= Char_S && getKind() <= Int128;
2530 }
2531
2532 bool isUnsignedInteger() const {
2533 return getKind() >= Bool && getKind() <= UInt128;
2534 }
2535
2536 bool isFloatingPoint() const {
2537 return getKind() >= Half && getKind() <= Float128;
2538 }
2539
2540 /// Determines whether the given kind corresponds to a placeholder type.
2541 static bool isPlaceholderTypeKind(Kind K) {
2542 return K >= Overload;
2543 }
2544
2545 /// Determines whether this type is a placeholder type, i.e. a type
2546 /// which cannot appear in arbitrary positions in a fully-formed
2547 /// expression.
2548 bool isPlaceholderType() const {
2549 return isPlaceholderTypeKind(getKind());
2550 }
2551
2552 /// Determines whether this type is a placeholder type other than
2553 /// Overload. Most placeholder types require only syntactic
2554 /// information about their context in order to be resolved (e.g.
2555 /// whether it is a call expression), which means they can (and
2556 /// should) be resolved in an earlier "phase" of analysis.
2557 /// Overload expressions sometimes pick up further information
2558 /// from their context, like whether the context expects a
2559 /// specific function-pointer type, and so frequently need
2560 /// special treatment.
2561 bool isNonOverloadPlaceholderType() const {
2562 return getKind() > Overload;
2563 }
2564
2565 static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
2566};
2567
2568/// Complex values, per C99 6.2.5p11. This supports the C99 complex
2569/// types (_Complex float etc) as well as the GCC integer complex extensions.
2570class ComplexType : public Type, public llvm::FoldingSetNode {
2571 friend class ASTContext; // ASTContext creates these.
2572
2573 QualType ElementType;
2574
2575 ComplexType(QualType Element, QualType CanonicalPtr)
2576 : Type(Complex, CanonicalPtr, Element->getDependence()),
2577 ElementType(Element) {}
2578
2579public:
2580 QualType getElementType() const { return ElementType; }
2581
2582 bool isSugared() const { return false; }
2583 QualType desugar() const { return QualType(this, 0); }
2584
2585 void Profile(llvm::FoldingSetNodeID &ID) {
2586 Profile(ID, getElementType());
2587 }
2588
2589 static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) {
2590 ID.AddPointer(Element.getAsOpaquePtr());
2591 }
2592
2593 static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
2594};
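// The Profile pair above is the llvm::FoldingSetNode uniquing hook. A sketch
// of the lookup side, where 'ComplexTypes' and 'Element' stand in for
// ASTContext's own folding set and the requested element type:
//
//   llvm::FoldingSetNodeID ID;
//   clang::ComplexType::Profile(ID, Element);
//   void *InsertPos = nullptr;
//   if (clang::ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
//     return clang::QualType(CT, 0);  // reuse the existing unique node
//   // ...otherwise build a node and call ComplexTypes.InsertNode(New, InsertPos).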
2595
2596/// Sugar for parentheses used when specifying types.
2597class ParenType : public Type, public llvm::FoldingSetNode {
2598 friend class ASTContext; // ASTContext creates these.
2599
2600 QualType Inner;
2601
2602 ParenType(QualType InnerType, QualType CanonType)
2603 : Type(Paren, CanonType, InnerType->getDependence()), Inner(InnerType) {}
2604
2605public:
2606 QualType getInnerType() const { return Inner; }
2607
2608 bool isSugared() const { return true; }
2609 QualType desugar() const { return getInnerType(); }
2610
2611 void Profile(llvm::FoldingSetNodeID &ID) {
2612 Profile(ID, getInnerType());
2613 }
2614
2615 static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
2616 Inner.Profile(ID);
2617 }
2618
2619 static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
2620};
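// Sketch of the one-level desugaring contract (assuming a clang::QualType
// 'QT'): each sugar node's desugar() strips exactly one layer, so a loop
// peels parentheses written in a declarator such as 'int (x)':
//
//   const clang::Type *Ty = QT.getTypePtr();
//   while (const auto *PT = llvm::dyn_cast<clang::ParenType>(Ty))
//     Ty = PT->desugar().getTypePtr();  // peels one layer of parentheses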
2621
2622/// PointerType - C99 6.7.5.1 - Pointer Declarators.
2623class PointerType : public Type, public llvm::FoldingSetNode {
2624 friend class ASTContext; // ASTContext creates these.
2625
2626 QualType PointeeType;
2627
2628 PointerType(QualType Pointee, QualType CanonicalPtr)
2629 : Type(Pointer, CanonicalPtr, Pointee->getDependence()),
2630 PointeeType(Pointee) {}
2631
2632public:
2633 QualType getPointeeType() const { return PointeeType; }
2634
2635 bool isSugared() const { return false; }
2636 QualType desugar() const { return QualType(this, 0); }
2637
2638 void Profile(llvm::FoldingSetNodeID &ID) {
2639 Profile(ID, getPointeeType());
2640 }
2641
2642 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
2643 ID.AddPointer(Pointee.getAsOpaquePtr());
2644 }
2645
2646 static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
2647};
2648
2649/// Represents a type which was implicitly adjusted by the semantic
2650/// engine for arbitrary reasons. For example, array and function types can
2651/// decay, and function types can have their calling conventions adjusted.
2652class AdjustedType : public Type, public llvm::FoldingSetNode {
2653 QualType OriginalTy;
2654 QualType AdjustedTy;
2655
2656protected:
2657 friend class ASTContext; // ASTContext creates these.
2658
2659 AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
2660 QualType CanonicalPtr)
2661 : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
2662 OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}
2663
2664public:
2665 QualType getOriginalType() const { return OriginalTy; }
2666 QualType getAdjustedType() const { return AdjustedTy; }
2667
2668 bool isSugared() const { return true; }
2669 QualType desugar() const { return AdjustedTy; }
2670
2671 void Profile(llvm::FoldingSetNodeID &ID) {
2672 Profile(ID, OriginalTy, AdjustedTy);
2673 }
2674
2675 static void Profile(llvm::FoldingSetNodeID &ID, QualType Orig, QualType New) {
2676 ID.AddPointer(Orig.getAsOpaquePtr());
2677 ID.AddPointer(New.getAsOpaquePtr());
2678 }
2679
2680 static bool classof(const Type *T) {
2681 return T->getTypeClass() == Adjusted || T->getTypeClass() == Decayed;
2682 }
2683};
2684
2685/// Represents a pointer type decayed from an array or function type.
2686class DecayedType : public AdjustedType {
2687 friend class ASTContext; // ASTContext creates these.
2688
2689 inline
2690 DecayedType(QualType OriginalType, QualType Decayed, QualType Canonical);
2691
2692public:
2693 QualType getDecayedType() const { return getAdjustedType(); }
2694
2695 inline QualType getPointeeType() const;
2696
2697 static bool classof(const Type *T) { return T->getTypeClass() == Decayed; }
2698};
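// Worked example: a parameter declared 'int arr[4]' is adjusted to 'int *',
// so the resulting DecayedType answers getOriginalType() == 'int[4]',
// getDecayedType() == 'int *', and getPointeeType() == 'int'.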
2699
2700/// Pointer to a block type.
2701/// This type represents types syntactically written as
2702/// "void (^)(int)", etc. Pointee is required to always be a function type.
2703class BlockPointerType : public Type, public llvm::FoldingSetNode {
2704 friend class ASTContext; // ASTContext creates these.
2705
2706 // Block is some kind of pointer type
2707 QualType PointeeType;
2708
2709 BlockPointerType(QualType Pointee, QualType CanonicalCls)
2710 : Type(BlockPointer, CanonicalCls, Pointee->getDependence()),
2711 PointeeType(Pointee) {}
2712
2713public:
2714 // Get the pointee type. Pointee is required to always be a function type.
2715 QualType getPointeeType() const { return PointeeType; }
2716
2717 bool isSugared() const { return false; }
2718 QualType desugar() const { return QualType(this, 0); }
2719
2720 void Profile(llvm::FoldingSetNodeID &ID) {
2721 Profile(ID, getPointeeType());
2722 }
2723
2724 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
2725 ID.AddPointer(Pointee.getAsOpaquePtr());
2726 }
2727
2728 static bool classof(const Type *T) {
2729 return T->getTypeClass() == BlockPointer;
2730 }
2731};
2732
2733/// Base for LValueReferenceType and RValueReferenceType
2734class ReferenceType : public Type, public llvm::FoldingSetNode {
2735 QualType PointeeType;
2736
2737protected:
2738 ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
2739 bool SpelledAsLValue)
2740 : Type(tc, CanonicalRef, Referencee->getDependence()),
2741 PointeeType(Referencee) {
2742 ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
2743 ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
2744 }
2745
2746public:
2747 bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
2748 bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
2749
2750 QualType getPointeeTypeAsWritten() const { return PointeeType; }
2751
2752 QualType getPointeeType() const {
2753 // FIXME: this might strip inner qualifiers; okay?
2754 const ReferenceType *T = this;
2755 while (T->isInnerRef())
2756 T = T->PointeeType->castAs<ReferenceType>();
2757 return T->PointeeType;
2758 }
2759
2760 void Profile(llvm::FoldingSetNodeID &ID) {
2761 Profile(ID, PointeeType, isSpelledAsLValue());
2762 }
2763
2764 static void Profile(llvm::FoldingSetNodeID &ID,
2765 QualType Referencee,
2766 bool SpelledAsLValue) {
2767 ID.AddPointer(Referencee.getAsOpaquePtr());
2768 ID.AddBoolean(SpelledAsLValue);
2769 }
2770
2771 static bool classof(const Type *T) {
2772 return T->getTypeClass() == LValueReference ||
2773 T->getTypeClass() == RValueReference;
2774 }
2775};
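// The getPointeeType() loop above, restated as a free-standing sketch using
// only the public accessors (not a declaration in this header):
//
//   clang::QualType ultimatePointee(const clang::ReferenceType *RT) {
//     while (RT->isInnerRef())
//       RT = RT->getPointeeTypeAsWritten()->castAs<clang::ReferenceType>();
//     return RT->getPointeeTypeAsWritten();
//   }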
2776
2777/// An lvalue reference type, per C++11 [dcl.ref].
2778class LValueReferenceType : public ReferenceType {
2779 friend class ASTContext; // ASTContext creates these
2780
2781 LValueReferenceType(QualType Referencee, QualType CanonicalRef,
2782 bool SpelledAsLValue)
2783 : ReferenceType(LValueReference, Referencee, CanonicalRef,
2784 SpelledAsLValue) {}
2785
2786public:
2787 bool isSugared() const { return false; }
2788 QualType desugar() const { return QualType(this, 0); }
2789
2790 static bool classof(const Type *T) {
2791 return T->getTypeClass() == LValueReference;
2792 }
2793};
2794
2795/// An rvalue reference type, per C++11 [dcl.ref].
2796class RValueReferenceType : public ReferenceType {
2797 friend class ASTContext; // ASTContext creates these
2798
2799 RValueReferenceType(QualType Referencee, QualType CanonicalRef)
2800 : ReferenceType(RValueReference, Referencee, CanonicalRef, false) {}
2801
2802public:
2803 bool isSugared() const { return false; }
2804 QualType desugar() const { return QualType(this, 0); }
2805
2806 static bool classof(const Type *T) {
2807 return T->getTypeClass() == RValueReference;
2808 }
2809};
2810
2811/// A pointer to member type per C++ 8.3.3 - Pointers to members.
2812///
2813/// This includes both pointers to data members and pointer to member functions.
2814class MemberPointerType : public Type, public llvm::FoldingSetNode {
2815 friend class ASTContext; // ASTContext creates these.
2816
2817 QualType PointeeType;
2818
2819 /// The class of which the pointee is a member. Must ultimately be a
2820 /// RecordType, but could be a typedef or a template parameter too.
2821 const Type *Class;
2822
2823 MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr)
2824 : Type(MemberPointer, CanonicalPtr,
2825 (Cls->getDependence() & ~TypeDependence::VariablyModified) |
2826 Pointee->getDependence()),
2827 PointeeType(Pointee), Class(Cls) {}
2828
2829public:
2830 QualType getPointeeType() const { return PointeeType; }
2831
2832 /// Returns true if the member type (i.e. the pointee type) is a
2833 /// function type rather than a data-member type.
2834 bool isMemberFunctionPointer() const {
2835 return PointeeType->isFunctionProtoType();
2836 }
2837
2838 /// Returns true if the member type (i.e. the pointee type) is a
2839 /// data type rather than a function type.
2840 bool isMemberDataPointer() const {
2841 return !PointeeType->isFunctionProtoType();
2842 }
2843
2844 const Type *getClass() const { return Class; }
2845 CXXRecordDecl *getMostRecentCXXRecordDecl() const;
2846
2847 bool isSugared() const { return false; }
2848 QualType desugar() const { return QualType(this, 0); }
2849
2850 void Profile(llvm::FoldingSetNodeID &ID) {
2851 Profile(ID, getPointeeType(), getClass());
2852 }
2853
2854 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
2855 const Type *Class) {
2856 ID.AddPointer(Pointee.getAsOpaquePtr());
2857 ID.AddPointer(Class);
2858 }
2859
2860 static bool classof(const Type *T) {
2861 return T->getTypeClass() == MemberPointer;
2862 }
2863};
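// Worked example, using a hypothetical record name 'Widget':
//
//   int Widget::*pd;           // pointee 'int'        -> isMemberDataPointer()
//   int (Widget::*pf)(float);  // pointee 'int(float)' -> isMemberFunctionPointer()
//
// In both cases getClass() is the 'Widget' type as written, which may be a
// typedef or a template parameter rather than the RecordType itself.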
2864
2865/// Represents an array type, per C99 6.7.5.2 - Array Declarators.
2866class ArrayType : public Type, public llvm::FoldingSetNode {
2867public:
2868  /// Capture whether this is a normal array (e.g. int X[4]),
2869 /// an array with a static size (e.g. int X[static 4]), or an array
2870 /// with a star size (e.g. int X[*]).
2871 /// 'static' is only allowed on function parameters.
2872 enum ArraySizeModifier {
2873 Normal, Static, Star
2874 };
2875
2876private:
2877 /// The element type of the array.
2878 QualType ElementType;
2879
2880protected:
2881 friend class ASTContext; // ASTContext creates these.
2882
2883 ArrayType(TypeClass tc, QualType et, QualType can, ArraySizeModifier sm,
2884 unsigned tq, const Expr *sz = nullptr);
2885
2886public:
2887 QualType getElementType() const { return ElementType; }
2888
2889 ArraySizeModifier getSizeModifier() const {
2890 return ArraySizeModifier(ArrayTypeBits.SizeModifier);
2891 }
2892
2893 Qualifiers getIndexTypeQualifiers() const {
2894 return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
2895 }
2896
2897 unsigned getIndexTypeCVRQualifiers() const {
2898 return ArrayTypeBits.IndexTypeQuals;
2899 }
2900
2901 static bool classof(const Type *T) {
2902 return T->getTypeClass() == ConstantArray ||
2903 T->getTypeClass() == VariableArray ||
2904 T->getTypeClass() == IncompleteArray ||
2905 T->getTypeClass() == DependentSizedArray;
2906 }
2907};
2908
2909/// Represents the canonical version of C arrays with a specified constant size.
2910/// For example, the canonical type for 'int A[4 + 4*100]' is a
2911/// ConstantArrayType where the element type is 'int' and the size is 404.
2912class ConstantArrayType final
2913 : public ArrayType,
2914 private llvm::TrailingObjects<ConstantArrayType, const Expr *> {
2915 friend class ASTContext; // ASTContext creates these.
2916 friend TrailingObjects;
2917
2918 llvm::APInt Size; // Allows us to unique the type.
2919
2920 ConstantArrayType(QualType et, QualType can, const llvm::APInt &size,
2921 const Expr *sz, ArraySizeModifier sm, unsigned tq)
2922 : ArrayType(ConstantArray, et, can, sm, tq, sz), Size(size) {
2923 ConstantArrayTypeBits.HasStoredSizeExpr = sz != nullptr;
2924 if (ConstantArrayTypeBits.HasStoredSizeExpr) {
2925      assert(!can.isNull() && "canonical constant array should not have size");
2926 *getTrailingObjects<const Expr*>() = sz;
2927 }
2928 }
2929
2930 unsigned numTrailingObjects(OverloadToken<const Expr*>) const {
2931 return ConstantArrayTypeBits.HasStoredSizeExpr;
2932 }
2933
2934public:
2935 const llvm::APInt &getSize() const { return Size; }
2936 const Expr *getSizeExpr() const {
2937 return ConstantArrayTypeBits.HasStoredSizeExpr
2938 ? *getTrailingObjects<const Expr *>()
2939 : nullptr;
2940 }
2941 bool isSugared() const { return false; }
2942 QualType desugar() const { return QualType(this, 0); }
2943
2944 /// Determine the number of bits required to address a member of
2945  /// an array with the given element type and number of elements.
2946 static unsigned getNumAddressingBits(const ASTContext &Context,
2947 QualType ElementType,
2948 const llvm::APInt &NumElements);
2949
2950 /// Determine the maximum number of active bits that an array's size
2951 /// can require, which limits the maximum size of the array.
2952 static unsigned getMaxSizeBits(const ASTContext &Context);
2953
2954 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
2955 Profile(ID, Ctx, getElementType(), getSize(), getSizeExpr(),
2956 getSizeModifier(), getIndexTypeCVRQualifiers());
2957 }
2958
2959 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx,
2960 QualType ET, const llvm::APInt &ArraySize,
2961 const Expr *SizeExpr, ArraySizeModifier SizeMod,
2962 unsigned TypeQuals);
2963
2964 static bool classof(const Type *T) {
2965 return T->getTypeClass() == ConstantArray;
2966 }
2967};
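// Worked example: for 'int A[4 + 4*100]' (the case cited above), getSize()
// is the folded APInt 404, while the written '4 + 4*100' expression is kept
// as a trailing object only on non-canonical nodes; the assert in the
// constructor enforces that the canonical array never stores one.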
2968
2969/// Represents a C array with an unspecified size. For example 'int A[]' has
2970/// an IncompleteArrayType where the element type is 'int' and the size is
2971/// unspecified.
2972class IncompleteArrayType : public ArrayType {
2973 friend class ASTContext; // ASTContext creates these.
2974
2975 IncompleteArrayType(QualType et, QualType can,
2976 ArraySizeModifier sm, unsigned tq)
2977 : ArrayType(IncompleteArray, et, can, sm, tq) {}
2978
2979public:
2980 friend class StmtIteratorBase;
2981
2982 bool isSugared() const { return false; }
2983 QualType desugar() const { return QualType(this, 0); }
2984
2985 static bool classof(const Type *T) {
2986 return T->getTypeClass() == IncompleteArray;
2987 }
2988
2989 void Profile(llvm::FoldingSetNodeID &ID) {
2990 Profile(ID, getElementType(), getSizeModifier(),
2991 getIndexTypeCVRQualifiers());
2992 }
2993
2994 static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
2995 ArraySizeModifier SizeMod, unsigned TypeQuals) {
2996 ID.AddPointer(ET.getAsOpaquePtr());
2997 ID.AddInteger(SizeMod);
2998 ID.AddInteger(TypeQuals);
2999 }
3000};
3001
3002/// Represents a C array with a specified size that is not an
3003/// integer-constant-expression. For example, 'int s[x+foo()]'.
3004/// Since the size expression is an arbitrary expression, we store it as such.
3005///
3006/// Note: VariableArrayTypes aren't uniqued (since the expressions aren't) and
3007/// should not be: two lexically equivalent variable array types could mean
3008/// different things, for example, these variables do not have the same type
3009/// dynamically:
3010///
3011/// void foo(int x) {
3012/// int Y[x];
3013/// ++x;
3014/// int Z[x];
3015/// }
3016class VariableArrayType : public ArrayType {
3017 friend class ASTContext; // ASTContext creates these.
3018
3019 /// An assignment-expression. VLA's are only permitted within
3020 /// a function block.
3021 Stmt *SizeExpr;
3022
3023 /// The range spanned by the left and right array brackets.
3024 SourceRange Brackets;
3025
3026 VariableArrayType(QualType et, QualType can, Expr *e,
3027 ArraySizeModifier sm, unsigned tq,
3028 SourceRange brackets)
3029 : ArrayType(VariableArray, et, can, sm, tq, e),
3030 SizeExpr((Stmt*) e), Brackets(brackets) {}
3031
3032public:
3033 friend class StmtIteratorBase;
3034
3035 Expr *getSizeExpr() const {
3036 // We use C-style casts instead of cast<> here because we do not wish
3037 // to have a dependency of Type.h on Stmt.h/Expr.h.
3038 return (Expr*) SizeExpr;
3039 }
3040
3041 SourceRange getBracketsRange() const { return Brackets; }
3042 SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
3043 SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }
3044
3045 bool isSugared() const { return false; }
3046 QualType desugar() const { return QualType(this, 0); }
3047
3048 static bool classof(const Type *T) {
3049 return T->getTypeClass() == VariableArray;
3050 }
3051
3052 void Profile(llvm::FoldingSetNodeID &ID) {
3053    llvm_unreachable("Cannot unique VariableArrayTypes.");
3054 }
3055};
3056
3057/// Represents an array type in C++ whose size is a value-dependent expression.
3058///
3059/// For example:
3060/// \code
3061/// template<typename T, int Size>
3062/// class array {
3063/// T data[Size];
3064/// };
3065/// \endcode
3066///
3067/// For these types, we won't actually know what the array bound is
3068/// until template instantiation occurs, at which point this will
3069/// become either a ConstantArrayType or a VariableArrayType.
3070class DependentSizedArrayType : public ArrayType {
3071 friend class ASTContext; // ASTContext creates these.
3072
3073 const ASTContext &Context;
3074
3075 /// An assignment expression that will instantiate to the
3076 /// size of the array.
3077 ///
3078 /// The expression itself might be null, in which case the array
3079 /// type will have its size deduced from an initializer.
3080 Stmt *SizeExpr;
3081
3082 /// The range spanned by the left and right array brackets.
3083 SourceRange Brackets;
3084
3085 DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can,
3086 Expr *e, ArraySizeModifier sm, unsigned tq,
3087 SourceRange brackets);
3088
3089public:
3090 friend class StmtIteratorBase;
3091
3092 Expr *getSizeExpr() const {
3093 // We use C-style casts instead of cast<> here because we do not wish
3094 // to have a dependency of Type.h on Stmt.h/Expr.h.
3095 return (Expr*) SizeExpr;
3096 }
3097
3098 SourceRange getBracketsRange() const { return Brackets; }
3099 SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
3100 SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }
3101
3102 bool isSugared() const { return false; }
3103 QualType desugar() const { return QualType(this, 0); }
3104
3105 static bool classof(const Type *T) {
3106 return T->getTypeClass() == DependentSizedArray;
3107 }
3108
3109 void Profile(llvm::FoldingSetNodeID &ID) {
3110 Profile(ID, Context, getElementType(),
3111 getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
3112 }
3113
3114 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3115 QualType ET, ArraySizeModifier SizeMod,
3116 unsigned TypeQuals, Expr *E);
3117};
3118
3119/// Represents an extended address space qualifier where the input address space
3120/// value is dependent. Non-dependent address spaces are not represented with a
3121/// special Type subclass; they are stored on an ExtQuals node as part of a QualType.
3122///
3123/// For example:
3124/// \code
3125/// template<typename T, int AddrSpace>
3126/// class AddressSpace {
3127/// typedef T __attribute__((address_space(AddrSpace))) type;
3128/// }
3129/// \endcode
3130class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode {
3131 friend class ASTContext;
3132
3133 const ASTContext &Context;
3134 Expr *AddrSpaceExpr;
3135 QualType PointeeType;
3136 SourceLocation loc;
3137
3138 DependentAddressSpaceType(const ASTContext &Context, QualType PointeeType,
3139 QualType can, Expr *AddrSpaceExpr,
3140 SourceLocation loc);
3141
3142public:
3143 Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; }
3144 QualType getPointeeType() const { return PointeeType; }
3145 SourceLocation getAttributeLoc() const { return loc; }
3146
3147 bool isSugared() const { return false; }
3148 QualType desugar() const { return QualType(this, 0); }
3149
3150 static bool classof(const Type *T) {
3151 return T->getTypeClass() == DependentAddressSpace;
3152 }
3153
3154 void Profile(llvm::FoldingSetNodeID &ID) {
3155 Profile(ID, Context, getPointeeType(), getAddrSpaceExpr());
3156 }
3157
3158 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3159 QualType PointeeType, Expr *AddrSpaceExpr);
3160};
3161
3162/// Represents an extended vector type where either the type or size is
3163/// dependent.
3164///
3165/// For example:
3166/// \code
3167/// template<typename T, int Size>
3168/// class vector {
3169/// typedef T __attribute__((ext_vector_type(Size))) type;
3170/// }
3171/// \endcode
3172class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
3173 friend class ASTContext;
3174
3175 const ASTContext &Context;
3176 Expr *SizeExpr;
3177
3178 /// The element type of the array.
3179 QualType ElementType;
3180
3181 SourceLocation loc;
3182
3183 DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType,
3184 QualType can, Expr *SizeExpr, SourceLocation loc);
3185
3186public:
3187 Expr *getSizeExpr() const { return SizeExpr; }
3188 QualType getElementType() const { return ElementType; }
3189 SourceLocation getAttributeLoc() const { return loc; }
3190
3191 bool isSugared() const { return false; }
3192 QualType desugar() const { return QualType(this, 0); }
3193
3194 static bool classof(const Type *T) {
3195 return T->getTypeClass() == DependentSizedExtVector;
3196 }
3197
3198 void Profile(llvm::FoldingSetNodeID &ID) {
3199 Profile(ID, Context, getElementType(), getSizeExpr());
3200 }
3201
3202 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3203 QualType ElementType, Expr *SizeExpr);
3204};
3205
3206
3207/// Represents a GCC generic vector type. This type is created using
3208/// __attribute__((vector_size(n))), where "n" specifies the vector size in
3209/// bytes; or from an Altivec __vector or vector declaration.
3210/// Since the constructor takes the number of vector elements, the
3211/// client is responsible for converting the size into the number of elements.
3212class VectorType : public Type, public llvm::FoldingSetNode {
3213public:
3214 enum VectorKind {
3215 /// not a target-specific vector type
3216 GenericVector,
3217
3218 /// is AltiVec vector
3219 AltiVecVector,
3220
3221 /// is AltiVec 'vector Pixel'
3222 AltiVecPixel,
3223
3224 /// is AltiVec 'vector bool ...'
3225 AltiVecBool,
3226
3227 /// is ARM Neon vector
3228 NeonVector,
3229
3230 /// is ARM Neon polynomial vector
3231 NeonPolyVector,
3232
3233 /// is AArch64 SVE fixed-length data vector
3234 SveFixedLengthDataVector,
3235
3236 /// is AArch64 SVE fixed-length predicate vector
3237 SveFixedLengthPredicateVector
3238 };
3239
3240protected:
3241 friend class ASTContext; // ASTContext creates these.
3242
3243 /// The element type of the vector.
3244 QualType ElementType;
3245
3246 VectorType(QualType vecType, unsigned nElements, QualType canonType,
3247 VectorKind vecKind);
3248
3249 VectorType(TypeClass tc, QualType vecType, unsigned nElements,
3250 QualType canonType, VectorKind vecKind);
3251
3252public:
3253 QualType getElementType() const { return ElementType; }
3254 unsigned getNumElements() const { return VectorTypeBits.NumElements; }
3255
3256 bool isSugared() const { return false; }
3257 QualType desugar() const { return QualType(this, 0); }
3258
3259 VectorKind getVectorKind() const {
3260 return VectorKind(VectorTypeBits.VecKind);
3261 }
3262
3263 void Profile(llvm::FoldingSetNodeID &ID) {
3264 Profile(ID, getElementType(), getNumElements(),
3265 getTypeClass(), getVectorKind());
3266 }
3267
3268 static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
3269 unsigned NumElements, TypeClass TypeClass,
3270 VectorKind VecKind) {
3271 ID.AddPointer(ElementType.getAsOpaquePtr());
3272 ID.AddInteger(NumElements);
3273 ID.AddInteger(TypeClass);
3274 ID.AddInteger(VecKind);
3275 }
3276
3277 static bool classof(const Type *T) {
3278 return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
3279 }
3280};
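// Worked example of the byte-to-element conversion the comment above leaves
// to the client: 'typedef float f4 __attribute__((vector_size(16)));' names
// a 16-byte vector of 4-byte floats, so the node is created with
// getNumElements() == 16 / sizeof(float) == 4 and GenericVector kind.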
3281
3282/// Represents a vector type where either the type or size is dependent.
3283///
3284/// For example:
3285/// \code
3286/// template<typename T, int Size>
3287/// class vector {
3288/// typedef T __attribute__((vector_size(Size))) type;
3289/// }
3290/// \endcode
3291class DependentVectorType : public Type, public llvm::FoldingSetNode {
3292 friend class ASTContext;
3293
3294 const ASTContext &Context;
3295 QualType ElementType;
3296 Expr *SizeExpr;
3297 SourceLocation Loc;
3298
3299 DependentVectorType(const ASTContext &Context, QualType ElementType,
3300 QualType CanonType, Expr *SizeExpr,
3301 SourceLocation Loc, VectorType::VectorKind vecKind);
3302
3303public:
3304 Expr *getSizeExpr() const { return SizeExpr; }
3305 QualType getElementType() const { return ElementType; }
3306 SourceLocation getAttributeLoc() const { return Loc; }
3307 VectorType::VectorKind getVectorKind() const {
3308 return VectorType::VectorKind(VectorTypeBits.VecKind);
3309 }
3310
3311 bool isSugared() const { return false; }
3312 QualType desugar() const { return QualType(this, 0); }
3313
3314 static bool classof(const Type *T) {
3315 return T->getTypeClass() == DependentVector;
3316 }
3317
3318 void Profile(llvm::FoldingSetNodeID &ID) {
3319 Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind());
3320 }
3321
3322 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3323 QualType ElementType, const Expr *SizeExpr,
3324 VectorType::VectorKind VecKind);
3325};
3326
3327/// ExtVectorType - Extended vector type. This type is created using
3328/// __attribute__((ext_vector_type(n))), where "n" is the number of elements.
3329/// Unlike vector_size, ext_vector_type is only allowed on typedefs. This
3330/// class enables syntactic extensions, like Vector Components for accessing
3331/// points (as .xyzw), colors (as .rgba), and textures (modeled after OpenGL
3332/// Shading Language).
3333class ExtVectorType : public VectorType {
3334 friend class ASTContext; // ASTContext creates these.
3335
3336 ExtVectorType(QualType vecType, unsigned nElements, QualType canonType)
3337 : VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {}
3338
3339public:
3340 static int getPointAccessorIdx(char c) {
3341 switch (c) {
3342 default: return -1;
3343 case 'x': case 'r': return 0;
3344 case 'y': case 'g': return 1;
3345 case 'z': case 'b': return 2;
3346 case 'w': case 'a': return 3;
3347 }
3348 }
3349
3350 static int getNumericAccessorIdx(char c) {
3351 switch (c) {
3352 default: return -1;
3353 case '0': return 0;
3354 case '1': return 1;
3355 case '2': return 2;
3356 case '3': return 3;
3357 case '4': return 4;
3358 case '5': return 5;
3359 case '6': return 6;
3360 case '7': return 7;
3361 case '8': return 8;
3362 case '9': return 9;
3363 case 'A':
3364 case 'a': return 10;
3365 case 'B':
3366 case 'b': return 11;
3367 case 'C':
3368 case 'c': return 12;
3369 case 'D':
3370 case 'd': return 13;
3371 case 'E':
3372 case 'e': return 14;
3373 case 'F':
3374 case 'f': return 15;
3375 }
3376 }
3377
3378 static int getAccessorIdx(char c, bool isNumericAccessor) {
3379 if (isNumericAccessor)
3380 return getNumericAccessorIdx(c);
3381 else
3382 return getPointAccessorIdx(c);
3383 }
3384
3385 bool isAccessorWithinNumElements(char c, bool isNumericAccessor) const {
3386 if (int idx = getAccessorIdx(c, isNumericAccessor)+1)
3387 return unsigned(idx-1) < getNumElements();
3388 return false;
3389 }
3390
3391 bool isSugared() const { return false; }
3392 QualType desugar() const { return QualType(this, 0); }
3393
3394 static bool classof(const Type *T) {
3395 return T->getTypeClass() == ExtVector;
3396 }
3397};
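// Worked example of the accessor tables above, assuming
// 'typedef float float4 __attribute__((ext_vector_type(4)));':
//
//   float4 v;
//   v.x; v.r;  // getPointAccessorIdx('x') == getPointAccessorIdx('r') == 0
//   v.w; v.a;  // index 3; isAccessorWithinNumElements('w', false) holds
//              // only for vectors of at least four elements
//
// The numeric accessors cover 0-9 and a-f/A-F, so a 16-element vector can
// reach its last lane through getNumericAccessorIdx('f') == 15.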
3398
3399/// Represents a matrix type, as defined in the Matrix Types clang extensions.
3400/// __attribute__((matrix_type(rows, columns))), where "rows" specifies the
3401/// number of rows and "columns" specifies the number of columns.
3402class MatrixType : public Type, public llvm::FoldingSetNode {
3403protected:
3404 friend class ASTContext;
3405
3406 /// The element type of the matrix.
3407 QualType ElementType;
3408
3409 MatrixType(QualType ElementTy, QualType CanonElementTy);
3410
3411 MatrixType(TypeClass TypeClass, QualType ElementTy, QualType CanonElementTy,
3412 const Expr *RowExpr = nullptr, const Expr *ColumnExpr = nullptr);
3413
3414public:
3415  /// Returns the type of the elements being stored in the matrix.
3416 QualType getElementType() const { return ElementType; }
3417
3418  /// Valid element types are the following:
3419 /// * an integer type (as in C2x 6.2.5p19), but excluding enumerated types
3420 /// and _Bool
3421 /// * the standard floating types float or double
3422 /// * a half-precision floating point type, if one is supported on the target
3423 static bool isValidElementType(QualType T) {
3424 return T->isDependentType() ||
3425 (T->isRealType() && !T->isBooleanType() && !T->isEnumeralType());
3426 }
3427
3428 bool isSugared() const { return false; }
3429 QualType desugar() const { return QualType(this, 0); }
3430
3431 static bool classof(const Type *T) {
3432 return T->getTypeClass() == ConstantMatrix ||
3433 T->getTypeClass() == DependentSizedMatrix;
3434 }
3435};
3436
3437/// Represents a concrete matrix type with constant number of rows and columns
3438class ConstantMatrixType final : public MatrixType {
3439protected:
3440 friend class ASTContext;
3441
3442 /// The element type of the matrix.
3443 // FIXME: Appears to be unused? There is also MatrixType::ElementType...
3444 QualType ElementType;
3445
3446 /// Number of rows and columns.
3447 unsigned NumRows;
3448 unsigned NumColumns;
3449
3450 static constexpr unsigned MaxElementsPerDimension = (1 << 20) - 1;
3451
3452 ConstantMatrixType(QualType MatrixElementType, unsigned NRows,
3453 unsigned NColumns, QualType CanonElementType);
3454
3455 ConstantMatrixType(TypeClass typeClass, QualType MatrixType, unsigned NRows,
3456 unsigned NColumns, QualType CanonElementType);
3457
3458public:
3459 /// Returns the number of rows in the matrix.
3460 unsigned getNumRows() const { return NumRows; }
3461
3462 /// Returns the number of columns in the matrix.
3463 unsigned getNumColumns() const { return NumColumns; }
3464
3465 /// Returns the number of elements required to embed the matrix into a vector.
3466 unsigned getNumElementsFlattened() const {
3467 return getNumRows() * getNumColumns();
3468 }
3469
3470 /// Returns true if \p NumElements is a valid matrix dimension.
3471 static constexpr bool isDimensionValid(size_t NumElements) {
3472 return NumElements > 0 && NumElements <= MaxElementsPerDimension;
3473 }
3474
3475 /// Returns the maximum number of elements per dimension.
3476 static constexpr unsigned getMaxElementsPerDimension() {
3477 return MaxElementsPerDimension;
3478 }
3479
3480 void Profile(llvm::FoldingSetNodeID &ID) {
3481 Profile(ID, getElementType(), getNumRows(), getNumColumns(),
3482 getTypeClass());
3483 }
3484
3485 static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
3486 unsigned NumRows, unsigned NumColumns,
3487 TypeClass TypeClass) {
3488 ID.AddPointer(ElementType.getAsOpaquePtr());
3489 ID.AddInteger(NumRows);
3490 ID.AddInteger(NumColumns);
3491 ID.AddInteger(TypeClass);
3492 }
3493
3494 static bool classof(const Type *T) {
3495 return T->getTypeClass() == ConstantMatrix;
3496 }
3497};
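// Worked example: 'using m4x3 = float __attribute__((matrix_type(4, 3)));'
// gives getNumRows() == 4, getNumColumns() == 3, and
// getNumElementsFlattened() == 12, the length of the flat vector used to
// lower the matrix; both dimensions satisfy isDimensionValid().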
3498
3499/// Represents a matrix type where the type and the number of rows and columns
3500/// is dependent on a template.
3501class DependentSizedMatrixType final : public MatrixType {
3502 friend class ASTContext;
3503
3504 const ASTContext &Context;
3505 Expr *RowExpr;
3506 Expr *ColumnExpr;
3507
3508 SourceLocation loc;
3509
3510 DependentSizedMatrixType(const ASTContext &Context, QualType ElementType,
3511 QualType CanonicalType, Expr *RowExpr,
3512 Expr *ColumnExpr, SourceLocation loc);
3513
3514public:
3515 QualType getElementType() const { return ElementType; }
3516 Expr *getRowExpr() const { return RowExpr; }
3517 Expr *getColumnExpr() const { return ColumnExpr; }
3518 SourceLocation getAttributeLoc() const { return loc; }
3519
3520 bool isSugared() const { return false; }
3521 QualType desugar() const { return QualType(this, 0); }
3522
3523 static bool classof(const Type *T) {
3524 return T->getTypeClass() == DependentSizedMatrix;
3525 }
3526
3527 void Profile(llvm::FoldingSetNodeID &ID) {
3528 Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
3529 }
3530
3531 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
3532 QualType ElementType, Expr *RowExpr, Expr *ColumnExpr);
3533};
3534
3535/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
3536/// class of FunctionNoProtoType and FunctionProtoType.
3537class FunctionType : public Type {
3538 // The type returned by the function.
3539 QualType ResultType;
3540
3541public:
3542 /// Interesting information about a specific parameter that can't simply
3543  /// be reflected in the parameter's type. This is only used by FunctionProtoType
3544 /// but is in FunctionType to make this class available during the
3545 /// specification of the bases of FunctionProtoType.
3546 ///
3547 /// It makes sense to model language features this way when there's some
3548 /// sort of parameter-specific override (such as an attribute) that
3549 /// affects how the function is called. For example, the ARC ns_consumed
3550 /// attribute changes whether a parameter is passed at +0 (the default)
3551 /// or +1 (ns_consumed). This must be reflected in the function type,
3552 /// but isn't really a change to the parameter type.
3553 ///
3554 /// One serious disadvantage of modelling language features this way is
3555 /// that they generally do not work with language features that attempt
3556 /// to destructure types. For example, template argument deduction will
3557 /// not be able to match a parameter declared as
3558 /// T (*)(U)
3559 /// against an argument of type
3560 /// void (*)(__attribute__((ns_consumed)) id)
3561 /// because the substitution of T=void, U=id into the former will
3562 /// not produce the latter.
3563 class ExtParameterInfo {
3564 enum {
3565 ABIMask = 0x0F,
3566 IsConsumed = 0x10,
3567 HasPassObjSize = 0x20,
3568 IsNoEscape = 0x40,
3569 };
3570 unsigned char Data = 0;
3571
3572 public:
3573 ExtParameterInfo() = default;
3574
3575 /// Return the ABI treatment of this parameter.
3576 ParameterABI getABI() const { return ParameterABI(Data & ABIMask); }
3577 ExtParameterInfo withABI(ParameterABI kind) const {
3578 ExtParameterInfo copy = *this;
3579 copy.Data = (copy.Data & ~ABIMask) | unsigned(kind);
3580 return copy;
3581 }
3582
3583 /// Is this parameter considered "consumed" by Objective-C ARC?
3584 /// Consumed parameters must have retainable object type.
3585 bool isConsumed() const { return (Data & IsConsumed); }
3586 ExtParameterInfo withIsConsumed(bool consumed) const {
3587 ExtParameterInfo copy = *this;
3588 if (consumed)
3589 copy.Data |= IsConsumed;
3590 else
3591 copy.Data &= ~IsConsumed;
3592 return copy;
3593 }
3594
3595 bool hasPassObjectSize() const { return Data & HasPassObjSize; }
3596 ExtParameterInfo withHasPassObjectSize() const {
3597 ExtParameterInfo Copy = *this;
3598 Copy.Data |= HasPassObjSize;
3599 return Copy;
3600 }
3601
3602 bool isNoEscape() const { return Data & IsNoEscape; }
3603 ExtParameterInfo withIsNoEscape(bool NoEscape) const {
3604 ExtParameterInfo Copy = *this;
3605 if (NoEscape)
3606 Copy.Data |= IsNoEscape;
3607 else
3608 Copy.Data &= ~IsNoEscape;
3609 return Copy;
3610 }
3611
3612 unsigned char getOpaqueValue() const { return Data; }
3613 static ExtParameterInfo getFromOpaqueValue(unsigned char data) {
3614 ExtParameterInfo result;
3615 result.Data = data;
3616 return result;
3617 }
3618
3619 friend bool operator==(ExtParameterInfo lhs, ExtParameterInfo rhs) {
3620 return lhs.Data == rhs.Data;
3621 }
3622
3623 friend bool operator!=(ExtParameterInfo lhs, ExtParameterInfo rhs) {
3624 return lhs.Data != rhs.Data;
3625 }
3626 };
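  // Sketch of the immutable 'with' style this class (and ExtInfo below)
  // uses; each call copies the byte and flips one flag:
  //
  //   FunctionType::ExtParameterInfo EPI;                    // Data == 0
  //   EPI = EPI.withIsConsumed(true).withIsNoEscape(true);   // 0x10 | 0x40
  //   assert(EPI.isConsumed() && EPI.isNoEscape());
  //   assert(EPI.getOpaqueValue() == 0x50);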
3627
3628 /// A class which abstracts out some details necessary for
3629 /// making a call.
3630 ///
3631 /// It is not actually used directly for storing this information in
3632 /// a FunctionType, although FunctionType does currently use the
3633 /// same bit-pattern.
3634 ///
3635  // If you add a field (say Foo), other than the obvious places (both
3636  // constructors, compile failures), what you need to update is:
3637  // * operator==
3638 // * getFoo
3639 // * withFoo
3640 // * functionType. Add Foo, getFoo.
3641 // * ASTContext::getFooType
3642 // * ASTContext::mergeFunctionTypes
3643 // * FunctionNoProtoType::Profile
3644 // * FunctionProtoType::Profile
3645 // * TypePrinter::PrintFunctionProto
3646 // * AST read and write
3647 // * Codegen
3648 class ExtInfo {
3649 friend class FunctionType;
3650
3651 // Feel free to rearrange or add bits, but if you go over 16, you'll need to
3652 // adjust the Bits field below, and if you add bits, you'll need to adjust
3653 // Type::FunctionTypeBitfields::ExtInfo as well.
3654
3655 // | CC |noreturn|produces|nocallersavedregs|regparm|nocfcheck|cmsenscall|
3656 // |0 .. 4| 5 | 6 | 7 |8 .. 10| 11 | 12 |
3657 //
3658 // regparm is either 0 (no regparm attribute) or the regparm value+1.
3659 enum { CallConvMask = 0x1F };
3660 enum { NoReturnMask = 0x20 };
3661 enum { ProducesResultMask = 0x40 };
3662 enum { NoCallerSavedRegsMask = 0x80 };
3663 enum {
3664 RegParmMask = 0x700,
3665 RegParmOffset = 8
3666 };
3667 enum { NoCfCheckMask = 0x800 };
3668 enum { CmseNSCallMask = 0x1000 };
3669 uint16_t Bits = CC_C;
3670
3671 ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}
3672
3673 public:
3674 // Constructor with no defaults. Use this when you know that you
3675 // have all the elements (when reading an AST file for example).
3676 ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
3677 bool producesResult, bool noCallerSavedRegs, bool NoCfCheck,
3678 bool cmseNSCall) {
3679      assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
3680 Bits = ((unsigned)cc) | (noReturn ? NoReturnMask : 0) |
3681 (producesResult ? ProducesResultMask : 0) |
3682 (noCallerSavedRegs ? NoCallerSavedRegsMask : 0) |
3683 (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0) |
3684 (NoCfCheck ? NoCfCheckMask : 0) |
3685 (cmseNSCall ? CmseNSCallMask : 0);
3686 }
3687
3688    // Constructor with all defaults. Use when, for example, creating a
3689 // function known to use defaults.
3690 ExtInfo() = default;
3691
3692 // Constructor with just the calling convention, which is an important part
3693 // of the canonical type.
3694 ExtInfo(CallingConv CC) : Bits(CC) {}
3695
3696 bool getNoReturn() const { return Bits & NoReturnMask; }
3697 bool getProducesResult() const { return Bits & ProducesResultMask; }
3698 bool getCmseNSCall() const { return Bits & CmseNSCallMask; }
3699 bool getNoCallerSavedRegs() const { return Bits & NoCallerSavedRegsMask; }
3700 bool getNoCfCheck() const { return Bits & NoCfCheckMask; }
3701 bool getHasRegParm() const { return ((Bits & RegParmMask) >> RegParmOffset) != 0; }
3702
3703 unsigned getRegParm() const {
3704 unsigned RegParm = (Bits & RegParmMask) >> RegParmOffset;
3705 if (RegParm > 0)
3706 --RegParm;
3707 return RegParm;
3708 }
3709
3710 CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }
3711
3712 bool operator==(ExtInfo Other) const {
3713 return Bits == Other.Bits;
3714 }
3715 bool operator!=(ExtInfo Other) const {
3716 return Bits != Other.Bits;
3717 }
3718
3719    // Note that we don't have setters. That is by design; use
3720    // the following 'with' methods instead of mutating these objects.
3721
3722 ExtInfo withNoReturn(bool noReturn) const {
3723 if (noReturn)
3724 return ExtInfo(Bits | NoReturnMask);
3725 else
3726 return ExtInfo(Bits & ~NoReturnMask);
3727 }
3728
3729 ExtInfo withProducesResult(bool producesResult) const {
3730 if (producesResult)
3731 return ExtInfo(Bits | ProducesResultMask);
3732 else
3733 return ExtInfo(Bits & ~ProducesResultMask);
3734 }
3735
3736 ExtInfo withCmseNSCall(bool cmseNSCall) const {
3737 if (cmseNSCall)
3738 return ExtInfo(Bits | CmseNSCallMask);
3739 else
3740 return ExtInfo(Bits & ~CmseNSCallMask);
3741 }
3742
3743 ExtInfo withNoCallerSavedRegs(bool noCallerSavedRegs) const {
3744 if (noCallerSavedRegs)
3745 return ExtInfo(Bits | NoCallerSavedRegsMask);
3746 else
3747 return ExtInfo(Bits & ~NoCallerSavedRegsMask);
3748 }
3749
3750 ExtInfo withNoCfCheck(bool noCfCheck) const {
3751 if (noCfCheck)
3752 return ExtInfo(Bits | NoCfCheckMask);
3753 else
3754 return ExtInfo(Bits & ~NoCfCheckMask);
3755 }
3756
3757 ExtInfo withRegParm(unsigned RegParm) const {
3758      assert(RegParm < 7 && "Invalid regparm value");
3759 return ExtInfo((Bits & ~RegParmMask) |
3760 ((RegParm + 1) << RegParmOffset));
3761 }
3762
3763 ExtInfo withCallingConv(CallingConv cc) const {
3764 return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
3765 }
3766
3767 void Profile(llvm::FoldingSetNodeID &ID) const {
3768 ID.AddInteger(Bits);
3769 }
3770 };
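  // Worked example of the regparm encoding documented above (the field holds
  // 0 for "no regparm attribute", otherwise the value plus one):
  //
  //   ExtInfo EI = ExtInfo().withRegParm(2);  // stores 3 in bits 8..10
  //   EI.getHasRegParm();                     // true: the field is nonzero
  //   EI.getRegParm();                        // 2: the stored 3, minus one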
3771
3772 /// A simple holder for a QualType representing a type in an
3773 /// exception specification. Unfortunately needed by FunctionProtoType
3774 /// because TrailingObjects cannot handle repeated types.
3775 struct ExceptionType { QualType Type; };
3776
3777 /// A simple holder for various uncommon bits which do not fit in
3778 /// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the
3779 /// alignment of subsequent objects in TrailingObjects. You must update
3780 /// hasExtraBitfields in FunctionProtoType after adding extra data here.
3781 struct alignas(void *) FunctionTypeExtraBitfields {
3782 /// The number of types in the exception specification.
3783    /// A whole unsigned is not needed here; according to
3784    /// [implimits] 8 bits would be enough.
3785 unsigned NumExceptionType;
3786 };
3787
3788protected:
3789 FunctionType(TypeClass tc, QualType res, QualType Canonical,
3790 TypeDependence Dependence, ExtInfo Info)
3791 : Type(tc, Canonical, Dependence), ResultType(res) {
3792 FunctionTypeBits.ExtInfo = Info.Bits;
3793 }
3794
3795 Qualifiers getFastTypeQuals() const {
3796 return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
3797 }
3798
3799public:
3800 QualType getReturnType() const { return ResultType; }
3801
3802 bool getHasRegParm() const { return getExtInfo().getHasRegParm(); }
3803 unsigned getRegParmType() const { return getExtInfo().getRegParm(); }
3804
3805 /// Determine whether this function type includes the GNU noreturn
3806 /// attribute. The C++11 [[noreturn]] attribute does not affect the function
3807 /// type.
3808 bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
3809
3810 bool getCmseNSCallAttr() const { return getExtInfo().getCmseNSCall(); }
3811 CallingConv getCallConv() const { return getExtInfo().getCC(); }
3812 ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
3813
3814 static_assert((~Qualifiers::FastMask & Qualifiers::CVRMask) == 0,
3815 "Const, volatile and restrict are assumed to be a subset of "
3816 "the fast qualifiers.");
3817
3818 bool isConst() const { return getFastTypeQuals().hasConst(); }
3819 bool isVolatile() const { return getFastTypeQuals().hasVolatile(); }
3820 bool isRestrict() const { return getFastTypeQuals().hasRestrict(); }
3821
3822 /// Determine the type of an expression that calls a function of
3823 /// this type.
3824 QualType getCallResultType(const ASTContext &Context) const {
3825 return getReturnType().getNonLValueExprType(Context);
3826 }
3827
3828 static StringRef getNameForCallConv(CallingConv CC);
3829
3830 static bool classof(const Type *T) {
3831 return T->getTypeClass() == FunctionNoProto ||
3832 T->getTypeClass() == FunctionProto;
3833 }
3834};
3835
3836/// Represents a K&R-style 'int foo()' function, which has
3837/// no information available about its arguments.
3838class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
3839 friend class ASTContext; // ASTContext creates these.
3840
3841 FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
3842 : FunctionType(FunctionNoProto, Result, Canonical,
3843 Result->getDependence() &
3844 ~(TypeDependence::DependentInstantiation |
3845 TypeDependence::UnexpandedPack),
3846 Info) {}
3847
3848public:
3849 // No additional state past what FunctionType provides.
3850
3851 bool isSugared() const { return false; }
3852 QualType desugar() const { return QualType(this, 0); }
3853
3854 void Profile(llvm::FoldingSetNodeID &ID) {
3855 Profile(ID, getReturnType(), getExtInfo());
3856 }
3857
3858 static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
3859 ExtInfo Info) {
3860 Info.Profile(ID);
3861 ID.AddPointer(ResultType.getAsOpaquePtr());
3862 }
3863
3864 static bool classof(const Type *T) {
3865 return T->getTypeClass() == FunctionNoProto;
3866 }
3867};
3868
3869/// Represents a prototype with parameter type info, e.g.
3870/// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no
3871/// parameters, not as having a single void parameter. Such a type can have
3872/// an exception specification, but this specification is not part of the
3873/// canonical type. FunctionProtoType has several trailing objects, some of
3874/// which are optional. For more information about the trailing objects see
3875/// the first comment inside FunctionProtoType.
3876class FunctionProtoType final
3877 : public FunctionType,
3878 public llvm::FoldingSetNode,
3879 private llvm::TrailingObjects<
3880 FunctionProtoType, QualType, SourceLocation,
3881 FunctionType::FunctionTypeExtraBitfields, FunctionType::ExceptionType,
3882 Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers> {
3883 friend class ASTContext; // ASTContext creates these.
3884 friend TrailingObjects;
3885
3886 // FunctionProtoType is followed by several trailing objects, some of
3887  // which are optional. They are in order:
3888 //
3889 // * An array of getNumParams() QualType holding the parameter types.
3890 // Always present. Note that for the vast majority of FunctionProtoType,
3891 // these will be the only trailing objects.
3892 //
3893 // * Optionally if the function is variadic, the SourceLocation of the
3894 // ellipsis.
3895 //
3896 // * Optionally if some extra data is stored in FunctionTypeExtraBitfields
3897 // (see FunctionTypeExtraBitfields and FunctionTypeBitfields):
3898 // a single FunctionTypeExtraBitfields. Present if and only if
3899 // hasExtraBitfields() is true.
3900 //
3901 // * Optionally exactly one of:
3902 // * an array of getNumExceptions() ExceptionType,
3903 // * a single Expr *,
3904 // * a pair of FunctionDecl *,
3905 // * a single FunctionDecl *
3906 // used to store information about the various types of exception
3907 // specification. See getExceptionSpecSize for the details.
3908 //
3909 // * Optionally an array of getNumParams() ExtParameterInfo holding
3910 // an ExtParameterInfo for each of the parameters. Present if and
3911 // only if hasExtParameterInfos() is true.
3912 //
3913 // * Optionally a Qualifiers object to represent extra qualifiers that can't
3914 // be represented by FunctionTypeBitfields.FastTypeQuals. Present if and only
3915 // if hasExtQualifiers() is true.
3916 //
3917 // The optional FunctionTypeExtraBitfields has to be before the data
3918 // related to the exception specification since it contains the number
3919 // of exception types.
3920 //
3921 // We put the ExtParameterInfos last. If all were equal, it would make
3922 // more sense to put these before the exception specification, because
3923 // it's much easier to skip past them compared to the elaborate switch
3924 // required to skip the exception specification. However, all is not
3925 // equal; ExtParameterInfos are used to model very uncommon features,
3926 // and it's better not to burden the more common paths.
3927
3928public:
3929 /// Holds information about the various types of exception specification.
3930 /// ExceptionSpecInfo is not stored as such in FunctionProtoType but is
3931 /// used to group together the various bits of information about the
3932 /// exception specification.
3933 struct ExceptionSpecInfo {
3934 /// The kind of exception specification this is.
3935 ExceptionSpecificationType Type = EST_None;
3936
3937 /// Explicitly-specified list of exception types.
3938 ArrayRef<QualType> Exceptions;
3939
3940 /// Noexcept expression, if this is a computed noexcept specification.
3941 Expr *NoexceptExpr = nullptr;
3942
3943 /// The function whose exception specification this is, for
3944 /// EST_Unevaluated and EST_Uninstantiated.
3945 FunctionDecl *SourceDecl = nullptr;
3946
3947 /// The function template whose exception specification this is instantiated
3948 /// from, for EST_Uninstantiated.
3949 FunctionDecl *SourceTemplate = nullptr;
3950
3951 ExceptionSpecInfo() = default;
3952
3953 ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {}
3954 };
3955
3956 /// Extra information about a function prototype. ExtProtoInfo is not
3957 /// stored as such in FunctionProtoType but is used to group together
3958 /// the various bits of extra information about a function prototype.
3959 struct ExtProtoInfo {
3960 FunctionType::ExtInfo ExtInfo;
3961 bool Variadic : 1;
3962 bool HasTrailingReturn : 1;
3963 Qualifiers TypeQuals;
3964 RefQualifierKind RefQualifier = RQ_None;
3965 ExceptionSpecInfo ExceptionSpec;
3966 const ExtParameterInfo *ExtParameterInfos = nullptr;
3967 SourceLocation EllipsisLoc;
3968
3969 ExtProtoInfo() : Variadic(false), HasTrailingReturn(false) {}
3970
3971 ExtProtoInfo(CallingConv CC)
3972 : ExtInfo(CC), Variadic(false), HasTrailingReturn(false) {}
3973
3974 ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) {
3975 ExtProtoInfo Result(*this);
3976 Result.ExceptionSpec = ESI;
3977 return Result;
3978 }
3979 };
3980
3981private:
3982 unsigned numTrailingObjects(OverloadToken<QualType>) const {
3983 return getNumParams();
3984 }
3985
3986 unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
3987 return isVariadic();
3988 }
3989
3990 unsigned numTrailingObjects(OverloadToken<FunctionTypeExtraBitfields>) const {
3991 return hasExtraBitfields();
3992 }
3993
3994 unsigned numTrailingObjects(OverloadToken<ExceptionType>) const {
3995 return getExceptionSpecSize().NumExceptionType;
3996 }
3997
3998 unsigned numTrailingObjects(OverloadToken<Expr *>) const {
3999 return getExceptionSpecSize().NumExprPtr;
4000 }
4001
4002 unsigned numTrailingObjects(OverloadToken<FunctionDecl *>) const {
4003 return getExceptionSpecSize().NumFunctionDeclPtr;
4004 }
4005
4006 unsigned numTrailingObjects(OverloadToken<ExtParameterInfo>) const {
4007 return hasExtParameterInfos() ? getNumParams() : 0;
4008 }
4009
4010 /// Determine whether there are any argument types that
4011 /// contain an unexpanded parameter pack.
4012 static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
4013 unsigned numArgs) {
4014 for (unsigned Idx = 0; Idx < numArgs; ++Idx)
4015 if (ArgArray[Idx]->containsUnexpandedParameterPack())
4016 return true;
4017
4018 return false;
4019 }
4020
4021 FunctionProtoType(QualType result, ArrayRef<QualType> params,
4022 QualType canonical, const ExtProtoInfo &epi);
4023
4024 /// This struct is returned by getExceptionSpecSize and is used to
4025 /// translate an ExceptionSpecificationType to the number and kind
4026 /// of trailing objects related to the exception specification.
4027 struct ExceptionSpecSizeHolder {
4028 unsigned NumExceptionType;
4029 unsigned NumExprPtr;
4030 unsigned NumFunctionDeclPtr;
4031 };
4032
4033 /// Return the number and kind of trailing objects
4034 /// related to the exception specification.
4035 static ExceptionSpecSizeHolder
4036 getExceptionSpecSize(ExceptionSpecificationType EST, unsigned NumExceptions) {
4037 switch (EST) {
4038 case EST_None:
4039 case EST_DynamicNone:
4040 case EST_MSAny:
4041 case EST_BasicNoexcept:
4042 case EST_Unparsed:
4043 case EST_NoThrow:
4044 return {0, 0, 0};
4045
4046 case EST_Dynamic:
4047 return {NumExceptions, 0, 0};
4048
4049 case EST_DependentNoexcept:
4050 case EST_NoexceptFalse:
4051 case EST_NoexceptTrue:
4052 return {0, 1, 0};
4053
4054 case EST_Uninstantiated:
4055 return {0, 0, 2};
4056
4057 case EST_Unevaluated:
4058 return {0, 0, 1};
4059 }
4060    llvm_unreachable("bad exception specification kind");
4061 }
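  // Worked example: 'void f() throw(int, float)' is EST_Dynamic with two
  // exception types, so the switch above yields {2, 0, 0}; any computed
  // 'noexcept(expr)' form yields {0, 1, 0} (one stored Expr *), and
  // EST_Uninstantiated yields {0, 0, 2} (SourceDecl plus SourceTemplate).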
4062
4063 /// Return the number and kind of trailing objects
4064 /// related to the exception specification.
4065 ExceptionSpecSizeHolder getExceptionSpecSize() const {
4066 return getExceptionSpecSize(getExceptionSpecType(), getNumExceptions());
4067 }
4068
4069 /// Whether the trailing FunctionTypeExtraBitfields is present.
4070 static bool hasExtraBitfields(ExceptionSpecificationType EST) {
4071 // If the exception spec type is EST_Dynamic then we have > 0 exception
4072 // types and the exact number is stored in FunctionTypeExtraBitfields.
4073 return EST == EST_Dynamic;
4074 }
4075
4076 /// Whether the trailing FunctionTypeExtraBitfields is present.
4077 bool hasExtraBitfields() const {
4078 return hasExtraBitfields(getExceptionSpecType());
4079 }
4080
4081 bool hasExtQualifiers() const {
4082 return FunctionTypeBits.HasExtQuals;
4083 }
4084
4085public:
4086 unsigned getNumParams() const { return FunctionTypeBits.NumParams; }
4087
4088 QualType getParamType(unsigned i) const {
4089    assert(i < getNumParams() && "invalid parameter index");
4090 return param_type_begin()[i];
4091 }
4092
4093 ArrayRef<QualType> getParamTypes() const {
4094 return llvm::makeArrayRef(param_type_begin(), param_type_end());
4095 }
4096
4097 ExtProtoInfo getExtProtoInfo() const {
4098 ExtProtoInfo EPI;
4099 EPI.ExtInfo = getExtInfo();
4100 EPI.Variadic = isVariadic();
4101 EPI.EllipsisLoc = getEllipsisLoc();
4102 EPI.HasTrailingReturn = hasTrailingReturn();
4103 EPI.ExceptionSpec = getExceptionSpecInfo();
4104 EPI.TypeQuals = getMethodQuals();
4105 EPI.RefQualifier = getRefQualifier();
4106 EPI.ExtParameterInfos = getExtParameterInfosOrNull();
4107 return EPI;
4108 }
4109
4110 /// Get the kind of exception specification on this function.
4111 ExceptionSpecificationType getExceptionSpecType() const {
4112 return static_cast<ExceptionSpecificationType>(
4113 FunctionTypeBits.ExceptionSpecType);
4114 }
4115
4116 /// Return whether this function has any kind of exception spec.
4117 bool hasExceptionSpec() const { return getExceptionSpecType() != EST_None; }
4118
4119 /// Return whether this function has a dynamic (throw) exception spec.
4120 bool hasDynamicExceptionSpec() const {
4121 return isDynamicExceptionSpec(getExceptionSpecType());
4122 }
4123
4124 /// Return whether this function has a noexcept exception spec.
4125 bool hasNoexceptExceptionSpec() const {
4126 return isNoexceptExceptionSpec(getExceptionSpecType());
4127 }
4128
4129 /// Return whether this function has a dependent exception spec.
4130 bool hasDependentExceptionSpec() const;
4131
4132 /// Return whether this function has an instantiation-dependent exception
4133 /// spec.
4134 bool hasInstantiationDependentExceptionSpec() const;
4135
4136 /// Return all the available information about this type's exception spec.
4137 ExceptionSpecInfo getExceptionSpecInfo() const {
4138 ExceptionSpecInfo Result;
4139 Result.Type = getExceptionSpecType();
4140 if (Result.Type == EST_Dynamic) {
4141 Result.Exceptions = exceptions();
4142 } else if (isComputedNoexcept(Result.Type)) {
4143 Result.NoexceptExpr = getNoexceptExpr();
4144 } else if (Result.Type == EST_Uninstantiated) {
4145 Result.SourceDecl = getExceptionSpecDecl();
4146 Result.SourceTemplate = getExceptionSpecTemplate();
4147 } else if (Result.Type == EST_Unevaluated) {
4148 Result.SourceDecl = getExceptionSpecDecl();
4149 }
4150 return Result;
4151 }
4152
4153 /// Return the number of types in the exception specification.
4154 unsigned getNumExceptions() const {
4155 return getExceptionSpecType() == EST_Dynamic
4156 ? getTrailingObjects<FunctionTypeExtraBitfields>()
4157 ->NumExceptionType
4158 : 0;
4159 }
4160
4161 /// Return the ith exception type, where 0 <= i < getNumExceptions().
4162 QualType getExceptionType(unsigned i) const {
4163 assert(i < getNumExceptions() && "Invalid exception number!");
4164 return exception_begin()[i];
4165 }
4166
4167 /// Return the expression inside noexcept(expression), or a null pointer
4168 /// if there is none (because the exception spec is not of this form).
4169 Expr *getNoexceptExpr() const {
4170 if (!isComputedNoexcept(getExceptionSpecType()))
4171 return nullptr;
4172 return *getTrailingObjects<Expr *>();
4173 }
4174
4175 /// If this function type has an exception specification which hasn't
4176 /// been determined yet (either because it has not been evaluated or because
4177 /// it has not been instantiated), this is the function whose exception
4178 /// specification is represented by this type.
4179 FunctionDecl *getExceptionSpecDecl() const {
4180 if (getExceptionSpecType() != EST_Uninstantiated &&
4181 getExceptionSpecType() != EST_Unevaluated)
4182 return nullptr;
4183 return getTrailingObjects<FunctionDecl *>()[0];
4184 }
4185
4186 /// If this function type has an uninstantiated exception
4187 /// specification, this is the function whose exception specification
4188 /// should be instantiated to find the exception specification for
4189 /// this type.
4190 FunctionDecl *getExceptionSpecTemplate() const {
4191 if (getExceptionSpecType() != EST_Uninstantiated)
4192 return nullptr;
4193 return getTrailingObjects<FunctionDecl *>()[1];
4194 }
4195
4196 /// Determine whether this function type has a non-throwing exception
4197 /// specification.
4198 CanThrowResult canThrow() const;
4199
4200 /// Determine whether this function type has a non-throwing exception
4201 /// specification. If this depends on template arguments, returns
4202 /// \c ResultIfDependent.
4203 bool isNothrow(bool ResultIfDependent = false) const {
4204 return ResultIfDependent ? canThrow() != CT_Can : canThrow() == CT_Cannot;
4205 }
4206
4207 /// Whether this function prototype is variadic.
4208 bool isVariadic() const { return FunctionTypeBits.Variadic; }
4209
4210 SourceLocation getEllipsisLoc() const {
4211 return isVariadic() ? *getTrailingObjects<SourceLocation>()
4212 : SourceLocation();
4213 }
4214
4215 /// Determines whether this function prototype contains a
4216 /// parameter pack at the end.
4217 ///
4218 /// A function template whose last parameter is a parameter pack can be
4219 /// called with an arbitrary number of arguments, much like a variadic
4220 /// function.
4221 bool isTemplateVariadic() const;
4222
4223 /// Whether this function prototype has a trailing return type.
4224 bool hasTrailingReturn() const { return FunctionTypeBits.HasTrailingReturn; }
4225
4226 Qualifiers getMethodQuals() const {
4227 if (hasExtQualifiers())
4228 return *getTrailingObjects<Qualifiers>();
4229 else
4230 return getFastTypeQuals();
4231 }
4232
4233 /// Retrieve the ref-qualifier associated with this function type.
4234 RefQualifierKind getRefQualifier() const {
4235 return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
4236 }
4237
4238 using param_type_iterator = const QualType *;
4239 using param_type_range = llvm::iterator_range<param_type_iterator>;
4240
4241 param_type_range param_types() const {
4242 return param_type_range(param_type_begin(), param_type_end());
4243 }
4244
4245 param_type_iterator param_type_begin() const {
4246 return getTrailingObjects<QualType>();
4247 }
4248
4249 param_type_iterator param_type_end() const {
4250 return param_type_begin() + getNumParams();
4251 }
4252
4253 using exception_iterator = const QualType *;
4254
4255 ArrayRef<QualType> exceptions() const {
4256 return llvm::makeArrayRef(exception_begin(), exception_end());
4257 }
4258
4259 exception_iterator exception_begin() const {
4260 return reinterpret_cast<exception_iterator>(
4261 getTrailingObjects<ExceptionType>());
4262 }
4263
4264 exception_iterator exception_end() const {
4265 return exception_begin() + getNumExceptions();
4266 }
4267
4268 /// Is there any interesting extra information for any of the parameters
4269 /// of this function type?
4270 bool hasExtParameterInfos() const {
4271 return FunctionTypeBits.HasExtParameterInfos;
4272 }
4273
4274 ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
4275 assert(hasExtParameterInfos());
4276 return ArrayRef<ExtParameterInfo>(getTrailingObjects<ExtParameterInfo>(),
4277 getNumParams());
4278 }
4279
4280 /// Return a pointer to the beginning of the array of extra parameter
4281 /// information, if present, or else null if none of the parameters
4282 /// carry it. This is equivalent to getExtProtoInfo().ExtParameterInfos.
4283 const ExtParameterInfo *getExtParameterInfosOrNull() const {
4284 if (!hasExtParameterInfos())
4285 return nullptr;
4286 return getTrailingObjects<ExtParameterInfo>();
4287 }
4288
4289 ExtParameterInfo getExtParameterInfo(unsigned I) const {
4290 assert(I < getNumParams() && "parameter index out of range");
4291 if (hasExtParameterInfos())
4292 return getTrailingObjects<ExtParameterInfo>()[I];
4293 return ExtParameterInfo();
4294 }
4295
4296 ParameterABI getParameterABI(unsigned I) const {
4297 assert(I < getNumParams() && "parameter index out of range");
4298 if (hasExtParameterInfos())
4299 return getTrailingObjects<ExtParameterInfo>()[I].getABI();
4300 return ParameterABI::Ordinary;
4301 }
4302
4303 bool isParamConsumed(unsigned I) const {
4304 assert(I < getNumParams() && "parameter index out of range");
4305 if (hasExtParameterInfos())
4306 return getTrailingObjects<ExtParameterInfo>()[I].isConsumed();
4307 return false;
4308 }
4309
4310 bool isSugared() const { return false; }
4311 QualType desugar() const { return QualType(this, 0); }
4312
4313 void printExceptionSpecification(raw_ostream &OS,
4314 const PrintingPolicy &Policy) const;
4315
4316 static bool classof(const Type *T) {
4317 return T->getTypeClass() == FunctionProto;
4318 }
4319
4320 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
4321 static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
4322 param_type_iterator ArgTys, unsigned NumArgs,
4323 const ExtProtoInfo &EPI, const ASTContext &Context,
4324 bool Canonical);
4325};
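Taken together, the accessors above make EST_Dynamic the only case where the trailing FunctionTypeExtraBitfields (and hence a nonzero exception count) exists. A minimal caller sketch, not part of this file (helper name made up; assumes the usual clang/AST headers):

    // Sketch: print the dynamic (throw(...)) exception spec, if any.
    void dumpExceptionSpec(const clang::FunctionProtoType *FPT,
                           llvm::raw_ostream &OS) {
      if (FPT->getExceptionSpecType() != clang::EST_Dynamic) {
        OS << "no dynamic exception spec\n";
        return;
      }
      // exceptions() reads NumExceptionType from the trailing bitfields,
      // which are only allocated for EST_Dynamic (see hasExtraBitfields).
      for (clang::QualType E : FPT->exceptions())
        OS << E.getAsString() << "\n";
    }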
4326
4327/// Represents the dependent type named by a dependently-scoped
4328/// typename using declaration, e.g.
4329/// using typename Base<T>::foo;
4330///
4331/// Template instantiation turns these into the underlying type.
4332class UnresolvedUsingType : public Type {
4333 friend class ASTContext; // ASTContext creates these.
4334
4335 UnresolvedUsingTypenameDecl *Decl;
4336
4337 UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
4338 : Type(UnresolvedUsing, QualType(),
4339 TypeDependence::DependentInstantiation),
4340 Decl(const_cast<UnresolvedUsingTypenameDecl *>(D)) {}
4341
4342public:
4343 UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
4344
4345 bool isSugared() const { return false; }
4346 QualType desugar() const { return QualType(this, 0); }
4347
4348 static bool classof(const Type *T) {
4349 return T->getTypeClass() == UnresolvedUsing;
4350 }
4351
4352 void Profile(llvm::FoldingSetNodeID &ID) {
4353 return Profile(ID, Decl);
4354 }
4355
4356 static void Profile(llvm::FoldingSetNodeID &ID,
4357 UnresolvedUsingTypenameDecl *D) {
4358 ID.AddPointer(D);
4359 }
4360};
4361
4362class TypedefType : public Type {
4363 TypedefNameDecl *Decl;
4364
4365protected:
4366 friend class ASTContext; // ASTContext creates these.
4367
4368 TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can);
4369
4370public:
4371 TypedefNameDecl *getDecl() const { return Decl; }
4372
4373 bool isSugared() const { return true; }
4374 QualType desugar() const;
4375
4376 static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
4377};
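TypedefType is pure sugar: isSugared() is true and desugar() peels exactly one level. Callers that want the fully-desugared type usually go through QualType rather than looping on the per-class pair. A minimal sketch, not part of this file (helper name made up):

    // Sketch: remove all sugar (typedefs, elaboration, ...) in one call.
    clang::QualType stripAllSugar(clang::QualType QT,
                                  const clang::ASTContext &Ctx) {
      return QT.getDesugaredType(Ctx);
    }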
4378
4379/// Sugar type that represents a type that was qualified by a qualifier written
4380/// as a macro invocation.
4381class MacroQualifiedType : public Type {
4382 friend class ASTContext; // ASTContext creates these.
4383
4384 QualType UnderlyingTy;
4385 const IdentifierInfo *MacroII;
4386
4387 MacroQualifiedType(QualType UnderlyingTy, QualType CanonTy,
4388 const IdentifierInfo *MacroII)
4389 : Type(MacroQualified, CanonTy, UnderlyingTy->getDependence()),
4390 UnderlyingTy(UnderlyingTy), MacroII(MacroII) {
4391 assert(isa<AttributedType>(UnderlyingTy) &&
4392        "Expected a macro qualified type to only wrap attributed types.");
4393 }
4394
4395public:
4396 const IdentifierInfo *getMacroIdentifier() const { return MacroII; }
4397 QualType getUnderlyingType() const { return UnderlyingTy; }
4398
4399 /// Return this attributed type's modified type with no qualifiers attached to
4400 /// it.
4401 QualType getModifiedType() const;
4402
4403 bool isSugared() const { return true; }
4404 QualType desugar() const;
4405
4406 static bool classof(const Type *T) {
4407 return T->getTypeClass() == MacroQualified;
4408 }
4409};
4410
4411/// Represents a `typeof` (or __typeof__) expression (a GCC extension).
4412class TypeOfExprType : public Type {
4413 Expr *TOExpr;
4414
4415protected:
4416 friend class ASTContext; // ASTContext creates these.
4417
4418 TypeOfExprType(Expr *E, QualType can = QualType());
4419
4420public:
4421 Expr *getUnderlyingExpr() const { return TOExpr; }
4422
4423 /// Remove a single level of sugar.
4424 QualType desugar() const;
4425
4426 /// Returns whether this type directly provides sugar.
4427 bool isSugared() const;
4428
4429 static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
4430};
4431
4432/// Internal representation of canonical, dependent
4433/// `typeof(expr)` types.
4434///
4435/// This class is used internally by the ASTContext to manage
4436/// canonical, dependent types only. Clients will only see instances
4437/// of this class via TypeOfExprType nodes.
4438class DependentTypeOfExprType
4439 : public TypeOfExprType, public llvm::FoldingSetNode {
4440 const ASTContext &Context;
4441
4442public:
4443 DependentTypeOfExprType(const ASTContext &Context, Expr *E)
4444 : TypeOfExprType(E), Context(Context) {}
4445
4446 void Profile(llvm::FoldingSetNodeID &ID) {
4447 Profile(ID, Context, getUnderlyingExpr());
4448 }
4449
4450 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
4451 Expr *E);
4452};
4453
4454/// Represents `typeof(type)`, a GCC extension.
4455class TypeOfType : public Type {
4456 friend class ASTContext; // ASTContext creates these.
4457
4458 QualType TOType;
4459
4460 TypeOfType(QualType T, QualType can)
4461 : Type(TypeOf, can, T->getDependence()), TOType(T) {
4462 assert(!isa<TypedefType>(can) && "Invalid canonical type");
4463 }
4464
4465public:
4466 QualType getUnderlyingType() const { return TOType; }
4467
4468 /// Remove a single level of sugar.
4469 QualType desugar() const { return getUnderlyingType(); }
4470
4471 /// Returns whether this type directly provides sugar.
4472 bool isSugared() const { return true; }
4473
4474 static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
4475};
4476
4477/// Represents the type `decltype(expr)` (C++11).
4478class DecltypeType : public Type {
4479 Expr *E;
4480 QualType UnderlyingType;
4481
4482protected:
4483 friend class ASTContext; // ASTContext creates these.
4484
4485 DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType());
4486
4487public:
4488 Expr *getUnderlyingExpr() const { return E; }
4489 QualType getUnderlyingType() const { return UnderlyingType; }
4490
4491 /// Remove a single level of sugar.
4492 QualType desugar() const;
4493
4494 /// Returns whether this type directly provides sugar.
4495 bool isSugared() const;
4496
4497 static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
4498};
4499
4500/// Internal representation of canonical, dependent
4501/// decltype(expr) types.
4502///
4503/// This class is used internally by the ASTContext to manage
4504/// canonical, dependent types only. Clients will only see instances
4505/// of this class via DecltypeType nodes.
4506class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
4507 const ASTContext &Context;
4508
4509public:
4510 DependentDecltypeType(const ASTContext &Context, Expr *E);
4511
4512 void Profile(llvm::FoldingSetNodeID &ID) {
4513 Profile(ID, Context, getUnderlyingExpr());
4514 }
4515
4516 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
4517 Expr *E);
4518};
4519
4520/// A unary type transform, which is a type constructed from another.
4521class UnaryTransformType : public Type {
4522public:
4523 enum UTTKind {
4524 EnumUnderlyingType
4525 };
4526
4527private:
4528 /// The untransformed type.
4529 QualType BaseType;
4530
4531 /// The transformed type if not dependent, otherwise the same as BaseType.
4532 QualType UnderlyingType;
4533
4534 UTTKind UKind;
4535
4536protected:
4537 friend class ASTContext;
4538
4539 UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind,
4540 QualType CanonicalTy);
4541
4542public:
4543 bool isSugared() const { return !isDependentType(); }
4544 QualType desugar() const { return UnderlyingType; }
4545
4546 QualType getUnderlyingType() const { return UnderlyingType; }
4547 QualType getBaseType() const { return BaseType; }
4548
4549 UTTKind getUTTKind() const { return UKind; }
4550
4551 static bool classof(const Type *T) {
4552 return T->getTypeClass() == UnaryTransform;
4553 }
4554};
4555
4556/// Internal representation of canonical, dependent
4557/// __underlying_type(type) types.
4558///
4559/// This class is used internally by the ASTContext to manage
4560/// canonical, dependent types only. Clients will only see instances
4561/// of this class via UnaryTransformType nodes.
4562class DependentUnaryTransformType : public UnaryTransformType,
4563 public llvm::FoldingSetNode {
4564public:
4565 DependentUnaryTransformType(const ASTContext &C, QualType BaseType,
4566 UTTKind UKind);
4567
4568 void Profile(llvm::FoldingSetNodeID &ID) {
4569 Profile(ID, getBaseType(), getUTTKind());
4570 }
4571
4572 static void Profile(llvm::FoldingSetNodeID &ID, QualType BaseType,
4573 UTTKind UKind) {
4574 ID.AddPointer(BaseType.getAsOpaquePtr());
4575 ID.AddInteger((unsigned)UKind);
4576 }
4577};
4578
4579class TagType : public Type {
4580 friend class ASTReader;
4581 template <class T> friend class serialization::AbstractTypeReader;
4582
4583 /// Stores the TagDecl associated with this type. The decl may point to any
4584 /// TagDecl that declares the entity.
4585 TagDecl *decl;
4586
4587protected:
4588 TagType(TypeClass TC, const TagDecl *D, QualType can);
4589
4590public:
4591 TagDecl *getDecl() const;
4592
4593 /// Determines whether this type is in the process of being defined.
4594 bool isBeingDefined() const;
4595
4596 static bool classof(const Type *T) {
4597 return T->getTypeClass() == Enum || T->getTypeClass() == Record;
4598 }
4599};
4600
4601/// A helper class that allows the use of isa/cast/dyncast
4602/// to detect TagType objects of structs/unions/classes.
4603class RecordType : public TagType {
4604protected:
4605 friend class ASTContext; // ASTContext creates these.
4606
4607 explicit RecordType(const RecordDecl *D)
4608 : TagType(Record, reinterpret_cast<const TagDecl*>(D), QualType()) {}
4609 explicit RecordType(TypeClass TC, RecordDecl *D)
4610 : TagType(TC, reinterpret_cast<const TagDecl*>(D), QualType()) {}
4611
4612public:
4613 RecordDecl *getDecl() const {
4614 return reinterpret_cast<RecordDecl*>(TagType::getDecl());
4615 }
4616
4617 /// Recursively check all fields in the record for const-ness. If any field
4618 /// is declared const, return true. Otherwise, return false.
4619 bool hasConstFields() const;
4620
4621 bool isSugared() const { return false; }
4622 QualType desugar() const { return QualType(this, 0); }
4623
4624 static bool classof(const Type *T) { return T->getTypeClass() == Record; }
4625};
4626
4627/// A helper class that allows the use of isa/cast/dyncast
4628/// to detect TagType objects of enums.
4629class EnumType : public TagType {
4630 friend class ASTContext; // ASTContext creates these.
4631
4632 explicit EnumType(const EnumDecl *D)
4633 : TagType(Enum, reinterpret_cast<const TagDecl*>(D), QualType()) {}
4634
4635public:
4636 EnumDecl *getDecl() const {
4637 return reinterpret_cast<EnumDecl*>(TagType::getDecl());
4638 }
4639
4640 bool isSugared() const { return false; }
4641 QualType desugar() const { return QualType(this, 0); }
4642
4643 static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
4644};
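As the comments say, RecordType and EnumType exist so isa/cast/dyn_cast can discriminate tag types. A minimal sketch of the pattern, not part of this file (helper name made up):

    // Sketch: detect a union through the RecordType helper.
    bool isUnionType(clang::QualType QT) {
      if (const auto *RT = QT->getAs<clang::RecordType>())
        return RT->getDecl()->isUnion();
      return false;
    }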
4645
4646/// An attributed type is a type to which a type attribute has been applied.
4647///
4648/// The "modified type" is the fully-sugared type to which the attributed
4649/// type was applied; generally it is not canonically equivalent to the
4650/// attributed type. The "equivalent type" is the minimally-desugared type
4651/// which the type is canonically equivalent to.
4652///
4653/// For example, in the following attributed type:
4654/// int32_t __attribute__((vector_size(16)))
4655/// - the modified type is the TypedefType for int32_t
4656/// - the equivalent type is VectorType(16, int32_t)
4657/// - the canonical type is VectorType(16, int)
4658class AttributedType : public Type, public llvm::FoldingSetNode {
4659public:
4660 using Kind = attr::Kind;
4661
4662private:
4663 friend class ASTContext; // ASTContext creates these
4664
4665 QualType ModifiedType;
4666 QualType EquivalentType;
4667
4668 AttributedType(QualType canon, attr::Kind attrKind, QualType modified,
4669 QualType equivalent)
4670 : Type(Attributed, canon, equivalent->getDependence()),
4671 ModifiedType(modified), EquivalentType(equivalent) {
4672 AttributedTypeBits.AttrKind = attrKind;
4673 }
4674
4675public:
4676 Kind getAttrKind() const {
4677 return static_cast<Kind>(AttributedTypeBits.AttrKind);
4678 }
4679
4680 QualType getModifiedType() const { return ModifiedType; }
4681 QualType getEquivalentType() const { return EquivalentType; }
4682
4683 bool isSugared() const { return true; }
4684 QualType desugar() const { return getEquivalentType(); }
4685
4686 /// Does this attribute behave like a type qualifier?
4687 ///
4688 /// A type qualifier adjusts a type to provide specialized rules for
4689 /// a specific object, like the standard const and volatile qualifiers.
4690 /// This includes attributes controlling things like nullability,
4691 /// address spaces, and ARC ownership. The value of the object is still
4692 /// largely described by the modified type.
4693 ///
4694 /// In contrast, many type attributes "rewrite" their modified type to
4695 /// produce a fundamentally different type, not necessarily related in any
4696 /// formalizable way to the original type. For example, calling convention
4697 /// and vector attributes are not simple type qualifiers.
4698 ///
4699 /// Type qualifiers are often, but not always, reflected in the canonical
4700 /// type.
4701 bool isQualifier() const;
4702
4703 bool isMSTypeSpec() const;
4704
4705 bool isCallingConv() const;
4706
4707 llvm::Optional<NullabilityKind> getImmediateNullability() const;
4708
4709 /// Retrieve the attribute kind corresponding to the given
4710 /// nullability kind.
4711 static Kind getNullabilityAttrKind(NullabilityKind kind) {
4712 switch (kind) {
4713 case NullabilityKind::NonNull:
4714 return attr::TypeNonNull;
4715
4716 case NullabilityKind::Nullable:
4717 return attr::TypeNullable;
4718
4719 case NullabilityKind::Unspecified:
4720 return attr::TypeNullUnspecified;
4721 }
4722 llvm_unreachable("Unknown nullability kind.");
4723 }
4724
4725 /// Strip off the top-level nullability annotation on the given
4726 /// type, if it's there.
4727 ///
4728 /// \param T The type to strip. If the type is exactly an
4729 /// AttributedType specifying nullability (without looking through
4730 /// type sugar), the nullability is returned and this type changed
4731 /// to the underlying modified type.
4732 ///
4733 /// \returns the top-level nullability, if present.
4734 static Optional<NullabilityKind> stripOuterNullability(QualType &T);
4735
4736 void Profile(llvm::FoldingSetNodeID &ID) {
4737 Profile(ID, getAttrKind(), ModifiedType, EquivalentType);
4738 }
4739
4740 static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
4741 QualType modified, QualType equivalent) {
4742 ID.AddInteger(attrKind);
4743 ID.AddPointer(modified.getAsOpaquePtr());
4744 ID.AddPointer(equivalent.getAsOpaquePtr());
4745 }
4746
4747 static bool classof(const Type *T) {
4748 return T->getTypeClass() == Attributed;
4749 }
4750};
4751
4752class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
4753 friend class ASTContext; // ASTContext creates these
4754
4755 // Helper data collector for canonical types.
4756 struct CanonicalTTPTInfo {
4757 unsigned Depth : 15;
4758 unsigned ParameterPack : 1;
4759 unsigned Index : 16;
4760 };
4761
4762 union {
4763 // Info for the canonical type.
4764 CanonicalTTPTInfo CanTTPTInfo;
4765
4766 // Info for the non-canonical type.
4767 TemplateTypeParmDecl *TTPDecl;
4768 };
4769
4770 /// Build a non-canonical type.
4771 TemplateTypeParmType(TemplateTypeParmDecl *TTPDecl, QualType Canon)
4772 : Type(TemplateTypeParm, Canon,
4773 TypeDependence::DependentInstantiation |
4774 (Canon->getDependence() & TypeDependence::UnexpandedPack)),
4775 TTPDecl(TTPDecl) {}
4776
4777 /// Build the canonical type.
4778 TemplateTypeParmType(unsigned D, unsigned I, bool PP)
4779 : Type(TemplateTypeParm, QualType(this, 0),
4780 TypeDependence::DependentInstantiation |
4781 (PP ? TypeDependence::UnexpandedPack : TypeDependence::None)) {
4782 CanTTPTInfo.Depth = D;
4783 CanTTPTInfo.Index = I;
4784 CanTTPTInfo.ParameterPack = PP;
4785 }
4786
4787 const CanonicalTTPTInfo& getCanTTPTInfo() const {
4788 QualType Can = getCanonicalTypeInternal();
4789 return Can->castAs<TemplateTypeParmType>()->CanTTPTInfo;
4790 }
4791
4792public:
4793 unsigned getDepth() const { return getCanTTPTInfo().Depth; }
4794 unsigned getIndex() const { return getCanTTPTInfo().Index; }
4795 bool isParameterPack() const { return getCanTTPTInfo().ParameterPack; }
4796
4797 TemplateTypeParmDecl *getDecl() const {
4798 return isCanonicalUnqualified() ? nullptr : TTPDecl;
4799 }
4800
4801 IdentifierInfo *getIdentifier() const;
4802
4803 bool isSugared() const { return false; }
4804 QualType desugar() const { return QualType(this, 0); }
4805
4806 void Profile(llvm::FoldingSetNodeID &ID) {
4807 Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl());
4808 }
4809
4810 static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth,
4811 unsigned Index, bool ParameterPack,
4812 TemplateTypeParmDecl *TTPDecl) {
4813 ID.AddInteger(Depth);
4814 ID.AddInteger(Index);
4815 ID.AddBoolean(ParameterPack);
4816 ID.AddPointer(TTPDecl);
4817 }
4818
4819 static bool classof(const Type *T) {
4820 return T->getTypeClass() == TemplateTypeParm;
4821 }
4822};
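Depth and index are always read through the canonical type, so they are meaningful whether or not a TemplateTypeParmDecl is attached. A minimal sketch, not part of this file (helper name made up):

    // Sketch: identify which template parameter a dependent type names.
    void describeParm(const clang::TemplateTypeParmType *TTP,
                      llvm::raw_ostream &OS) {
      OS << "depth " << TTP->getDepth() << ", index " << TTP->getIndex();
      if (TTP->isParameterPack())
        OS << " (pack)";
      OS << "\n";
    }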
4823
4824/// Represents the result of substituting a type for a template
4825/// type parameter.
4826///
4827/// Within an instantiated template, all template type parameters have
4828/// been replaced with these. They are used solely to record that a
4829/// type was originally written as a template type parameter;
4830/// therefore they are never canonical.
4831class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
4832 friend class ASTContext;
4833
4834 // The original type parameter.
4835 const TemplateTypeParmType *Replaced;
4836
4837 SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
4838 : Type(SubstTemplateTypeParm, Canon, Canon->getDependence()),
4839 Replaced(Param) {}
4840
4841public:
4842 /// Gets the template parameter that was substituted for.
4843 const TemplateTypeParmType *getReplacedParameter() const {
4844 return Replaced;
4845 }
4846
4847 /// Gets the type that was substituted for the template
4848 /// parameter.
4849 QualType getReplacementType() const {
4850 return getCanonicalTypeInternal();
4851 }
4852
4853 bool isSugared() const { return true; }
4854 QualType desugar() const { return getReplacementType(); }
4855
4856 void Profile(llvm::FoldingSetNodeID &ID) {
4857 Profile(ID, getReplacedParameter(), getReplacementType());
4858 }
4859
4860 static void Profile(llvm::FoldingSetNodeID &ID,
4861 const TemplateTypeParmType *Replaced,
4862 QualType Replacement) {
4863 ID.AddPointer(Replaced);
4864 ID.AddPointer(Replacement.getAsOpaquePtr());
4865 }
4866
4867 static bool classof(const Type *T) {
4868 return T->getTypeClass() == SubstTemplateTypeParm;
4869 }
4870};
4871
4872/// Represents the result of substituting a set of types for a template
4873/// type parameter pack.
4874///
4875/// When a pack expansion in the source code contains multiple parameter packs
4876/// and those parameter packs correspond to different levels of template
4877/// parameter lists, this type node is used to represent a template type
4878/// parameter pack from an outer level, which has already had its argument pack
4879/// substituted but that still lives within a pack expansion that itself
4880/// could not be instantiated. When actually performing a substitution into
4881/// that pack expansion (e.g., when all template parameters have corresponding
4882/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
4883/// at the current pack substitution index.
4884class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
4885 friend class ASTContext;
4886
4887 /// The original type parameter.
4888 const TemplateTypeParmType *Replaced;
4889
4890 /// A pointer to the set of template arguments that this
4891 /// parameter pack is instantiated with.
4892 const TemplateArgument *Arguments;
4893
4894 SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
4895 QualType Canon,
4896 const TemplateArgument &ArgPack);
4897
4898public:
4899 IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); }
4900
4901 /// Gets the template parameter that was substituted for.
4902 const TemplateTypeParmType *getReplacedParameter() const {
4903 return Replaced;
4904 }
4905
4906 unsigned getNumArgs() const {
4907 return SubstTemplateTypeParmPackTypeBits.NumArgs;
4908 }
4909
4910 bool isSugared() const { return false; }
4911 QualType desugar() const { return QualType(this, 0); }
4912
4913 TemplateArgument getArgumentPack() const;
4914
4915 void Profile(llvm::FoldingSetNodeID &ID);
4916 static void Profile(llvm::FoldingSetNodeID &ID,
4917 const TemplateTypeParmType *Replaced,
4918 const TemplateArgument &ArgPack);
4919
4920 static bool classof(const Type *T) {
4921 return T->getTypeClass() == SubstTemplateTypeParmPack;
4922 }
4923};
4924
4925/// Common base class for placeholders for types that get replaced by
4926/// placeholder type deduction: C++11 auto, C++14 decltype(auto), C++17 deduced
4927/// class template types, and constrained type names.
4928///
4929/// These types are usually a placeholder for a deduced type. However, before
4930/// the initializer is attached, or (usually) if the initializer is
4931/// type-dependent, there is no deduced type and the type is canonical. In
4932/// the latter case, it is also a dependent type.
4933class DeducedType : public Type {
4934protected:
4935 DeducedType(TypeClass TC, QualType DeducedAsType,
4936 TypeDependence ExtraDependence)
4937 : Type(TC,
4938 // FIXME: Retain the sugared deduced type?
4939 DeducedAsType.isNull() ? QualType(this, 0)
4940 : DeducedAsType.getCanonicalType(),
4941 ExtraDependence | (DeducedAsType.isNull()
4942 ? TypeDependence::None
4943 : DeducedAsType->getDependence() &
4944 ~TypeDependence::VariablyModified)) {}
4945
4946public:
4947 bool isSugared() const { return !isCanonicalUnqualified(); }
4948 QualType desugar() const { return getCanonicalTypeInternal(); }
4949
4950 /// Get the type deduced for this placeholder type, or null if it's
4951 /// either not been deduced or was deduced to a dependent type.
4952 QualType getDeducedType() const {
4953 return !isCanonicalUnqualified() ? getCanonicalTypeInternal() : QualType();
4954 }
4955 bool isDeduced() const {
4956 return !isCanonicalUnqualified() || isDependentType();
4957 }
4958
4959 static bool classof(const Type *T) {
4960 return T->getTypeClass() == Auto ||
4961 T->getTypeClass() == DeducedTemplateSpecialization;
4962 }
4963};
4964
4965/// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained
4966/// by a type-constraint.
4967class alignas(8) AutoType : public DeducedType, public llvm::FoldingSetNode {
4968 friend class ASTContext; // ASTContext creates these
4969
4970 ConceptDecl *TypeConstraintConcept;
4971
4972 AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
4973 TypeDependence ExtraDependence, ConceptDecl *CD,
4974 ArrayRef<TemplateArgument> TypeConstraintArgs);
4975
4976 const TemplateArgument *getArgBuffer() const {
4977 return reinterpret_cast<const TemplateArgument*>(this+1);
4978 }
4979
4980 TemplateArgument *getArgBuffer() {
4981 return reinterpret_cast<TemplateArgument*>(this+1);
4982 }
4983
4984public:
4985 /// Retrieve the template arguments.
4986 const TemplateArgument *getArgs() const {
4987 return getArgBuffer();
4988 }
4989
4990 /// Retrieve the number of template arguments.
4991 unsigned getNumArgs() const {
4992 return AutoTypeBits.NumArgs;
4993 }
4994
4995 const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
4996
4997 ArrayRef<TemplateArgument> getTypeConstraintArguments() const {
4998 return {getArgs(), getNumArgs()};
4999 }
5000
5001 ConceptDecl *getTypeConstraintConcept() const {
5002 return TypeConstraintConcept;
5003 }
5004
5005 bool isConstrained() const {
5006 return TypeConstraintConcept != nullptr;
5007 }
5008
5009 bool isDecltypeAuto() const {
5010 return getKeyword() == AutoTypeKeyword::DecltypeAuto;
5011 }
5012
5013 AutoTypeKeyword getKeyword() const {
5014 return (AutoTypeKeyword)AutoTypeBits.Keyword;
5015 }
5016
5017 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
5018 Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(),
5019 getTypeConstraintConcept(), getTypeConstraintArguments());
5020 }
5021
5022 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
5023 QualType Deduced, AutoTypeKeyword Keyword,
5024 bool IsDependent, ConceptDecl *CD,
5025 ArrayRef<TemplateArgument> Arguments);
5026
5027 static bool classof(const Type *T) {
5028 return T->getTypeClass() == Auto;
5029 }
5030};
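Because an undeduced placeholder is its own canonical type, isDeduced() and getDeducedType() fall directly out of the canonical-type check in DeducedType. A minimal sketch, not part of this file (helper name made up):

    // Sketch: distinguish undeduced 'auto' from a deduced placeholder.
    bool isUndeducedAuto(clang::QualType QT) {
      if (const auto *AT = QT->getAs<clang::AutoType>())
        return !AT->isDeduced();
      return false;
    }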
5031
5032/// Represents a C++17 deduced template specialization type.
5033class DeducedTemplateSpecializationType : public DeducedType,
5034 public llvm::FoldingSetNode {
5035 friend class ASTContext; // ASTContext creates these
5036
5037 /// The name of the template whose arguments will be deduced.
5038 TemplateName Template;
5039
5040 DeducedTemplateSpecializationType(TemplateName Template,
5041 QualType DeducedAsType,
5042 bool IsDeducedAsDependent)
5043 : DeducedType(DeducedTemplateSpecialization, DeducedAsType,
5044 toTypeDependence(Template.getDependence()) |
5045 (IsDeducedAsDependent
5046 ? TypeDependence::DependentInstantiation
5047 : TypeDependence::None)),
5048 Template(Template) {}
5049
5050public:
5051 /// Retrieve the name of the template that we are deducing.
5052 TemplateName getTemplateName() const { return Template; }
5053
5054 void Profile(llvm::FoldingSetNodeID &ID) {
5055 Profile(ID, getTemplateName(), getDeducedType(), isDependentType());
5056 }
5057
5058 static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template,
5059 QualType Deduced, bool IsDependent) {
5060 Template.Profile(ID);
5061 ID.AddPointer(Deduced.getAsOpaquePtr());
5062 ID.AddBoolean(IsDependent);
5063 }
5064
5065 static bool classof(const Type *T) {
5066 return T->getTypeClass() == DeducedTemplateSpecialization;
5067 }
5068};
5069
5070/// Represents a type template specialization; the template
5071/// must be a class template, a type alias template, or a template
5072/// template parameter. A template which cannot be resolved to one of
5073/// these, e.g. because it is written with a dependent scope
5074/// specifier, is instead represented as a
5075/// @c DependentTemplateSpecializationType.
5076///
5077/// A non-dependent template specialization type is always "sugar",
5078/// typically for a \c RecordType. For example, a class template
5079/// specialization type of \c vector<int> will refer to a tag type for
5080/// the instantiation \c std::vector<int, std::allocator<int>>
5081///
5082/// Template specializations are dependent if either the template or
5083/// any of the template arguments are dependent, in which case the
5084/// type may also be canonical.
5085///
5086/// Instances of this type are allocated with a trailing array of
5087/// TemplateArguments, followed by a QualType representing the
5088/// non-canonical aliased type when the template is a type alias
5089/// template.
5090class alignas(8) TemplateSpecializationType
5091 : public Type,
5092 public llvm::FoldingSetNode {
5093 friend class ASTContext; // ASTContext creates these
5094
5095 /// The name of the template being specialized. This is
5096 /// either a TemplateName::Template (in which case it is a
5097 /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a
5098 /// TypeAliasTemplateDecl*), a
5099 /// TemplateName::SubstTemplateTemplateParmPack, or a
5100 /// TemplateName::SubstTemplateTemplateParm (in which case the
5101 /// replacement must, recursively, be one of these).
5102 TemplateName Template;
5103
5104 TemplateSpecializationType(TemplateName T,
5105 ArrayRef<TemplateArgument> Args,
5106 QualType Canon,
5107 QualType Aliased);
5108
5109public:
5110 /// Determine whether any of the given template arguments are dependent.
5111 static bool anyDependentTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
5112 bool &InstantiationDependent);
5113
5114 static bool anyDependentTemplateArguments(const TemplateArgumentListInfo &,
5115 bool &InstantiationDependent);
5116
5117 /// True if this template specialization type matches a current
5118 /// instantiation in the context in which it is found.
5119 bool isCurrentInstantiation() const {
5120 return isa<InjectedClassNameType>(getCanonicalTypeInternal());
5121 }
5122
5123 /// Determine if this template specialization type is for a type alias
5124 /// template that has been substituted.
5125 ///
5126 /// Nearly every template specialization type whose template is an alias
5127 /// template will be substituted. However, this is not the case when
5128 /// the specialization contains a pack expansion but the template alias
5129 /// does not have a corresponding parameter pack, e.g.,
5130 ///
5131 /// \code
5132 /// template<typename T, typename U, typename V> struct S;
5133 /// template<typename T, typename U> using A = S<T, int, U>;
5134 /// template<typename... Ts> struct X {
5135 /// typedef A<Ts...> type; // not a type alias
5136 /// };
5137 /// \endcode
5138 bool isTypeAlias() const { return TemplateSpecializationTypeBits.TypeAlias; }
5139
5140 /// Get the aliased type, if this is a specialization of a type alias
5141 /// template.
5142 QualType getAliasedType() const {
5143 assert(isTypeAlias() && "not a type alias template specialization");
5144 return *reinterpret_cast<const QualType*>(end());
5145 }
5146
5147 using iterator = const TemplateArgument *;
5148
5149 iterator begin() const { return getArgs(); }
5150 iterator end() const; // defined inline in TemplateBase.h
5151
5152 /// Retrieve the name of the template that we are specializing.
5153 TemplateName getTemplateName() const { return Template; }
5154
5155 /// Retrieve the template arguments.
5156 const TemplateArgument *getArgs() const {
5157 return reinterpret_cast<const TemplateArgument *>(this + 1);
5158 }
5159
5160 /// Retrieve the number of template arguments.
5161 unsigned getNumArgs() const {
5162 return TemplateSpecializationTypeBits.NumArgs;
5163 }
5164
5165 /// Retrieve a specific template argument as a type.
5166 /// \pre \c isArgType(Arg)
5167 const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
5168
5169 ArrayRef<TemplateArgument> template_arguments() const {
5170 return {getArgs(), getNumArgs()};
5171 }
5172
5173 bool isSugared() const {
5174 return !isDependentType() || isCurrentInstantiation() || isTypeAlias();
5175 }
5176
5177 QualType desugar() const {
5178 return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal();
5179 }
5180
5181 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
5182 Profile(ID, Template, template_arguments(), Ctx);
5183 if (isTypeAlias())
5184 getAliasedType().Profile(ID);
5185 }
5186
5187 static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
5188 ArrayRef<TemplateArgument> Args,
5189 const ASTContext &Context);
5190
5191 static bool classof(const Type *T) {
5192 return T->getTypeClass() == TemplateSpecialization;
5193 }
5194};
5195
5196/// Print a template argument list, including the '<' and '>'
5197/// enclosing the template arguments.
5198void printTemplateArgumentList(raw_ostream &OS,
5199 ArrayRef<TemplateArgument> Args,
5200 const PrintingPolicy &Policy,
5201 const TemplateParameterList *TPL = nullptr);
5202
5203void printTemplateArgumentList(raw_ostream &OS,
5204 ArrayRef<TemplateArgumentLoc> Args,
5205 const PrintingPolicy &Policy,
5206 const TemplateParameterList *TPL = nullptr);
5207
5208void printTemplateArgumentList(raw_ostream &OS,
5209 const TemplateArgumentListInfo &Args,
5210 const PrintingPolicy &Policy,
5211 const TemplateParameterList *TPL = nullptr);
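These printers pair naturally with template_arguments() on the specialization types above. A minimal sketch, not part of this file (helper name made up):

    // Sketch: print a "<T, int>"-style argument list of a specialization.
    void printSpecArgs(const clang::TemplateSpecializationType *TST,
                       llvm::raw_ostream &OS,
                       const clang::PrintingPolicy &Policy) {
      clang::printTemplateArgumentList(OS, TST->template_arguments(), Policy);
    }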
5212
5213/// The injected class name of a C++ class template or class
5214/// template partial specialization. Used to record that a type was
5215/// spelled with a bare identifier rather than as a template-id; the
5216/// equivalent for non-templated classes is just RecordType.
5217///
5218/// Injected class name types are always dependent. Template
5219/// instantiation turns these into RecordTypes.
5220///
5221/// Injected class name types are always canonical. This works
5222/// because it is impossible to compare an injected class name type
5223/// with the corresponding non-injected template type, for the same
5224/// reason that it is impossible to directly compare template
5225/// parameters from different dependent contexts: injected class name
5226/// types can only occur within the scope of a particular templated
5227/// declaration, and within that scope every template specialization
5228/// will canonicalize to the injected class name (when appropriate
5229/// according to the rules of the language).
5230class InjectedClassNameType : public Type {
5231 friend class ASTContext; // ASTContext creates these.
5232 friend class ASTNodeImporter;
5233 friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not
5234 // currently suitable for AST reading; too many
5235 // interdependencies.
5236 template <class T> friend class serialization::AbstractTypeReader;
5237
5238 CXXRecordDecl *Decl;
5239
5240 /// The template specialization which this type represents.
5241 /// For example, in
5242 /// template <class T> class A { ... };
5243 /// this is A<T>, whereas in
5244 /// template <class X, class Y> class A<B<X,Y> > { ... };
5245 /// this is A<B<X,Y> >.
5246 ///
5247 /// It is always unqualified, always a template specialization type,
5248 /// and always dependent.
5249 QualType InjectedType;
5250
5251 InjectedClassNameType(CXXRecordDecl *D, QualType TST)
5252 : Type(InjectedClassName, QualType(),
5253 TypeDependence::DependentInstantiation),
5254 Decl(D), InjectedType(TST) {
5255 assert(isa<TemplateSpecializationType>(TST));
5256 assert(!TST.hasQualifiers());
5257 assert(TST->isDependentType());
5258 }
5259
5260public:
5261 QualType getInjectedSpecializationType() const { return InjectedType; }
5262
5263 const TemplateSpecializationType *getInjectedTST() const {
5264 return cast<TemplateSpecializationType>(InjectedType.getTypePtr());
5265 }
5266
5267 TemplateName getTemplateName() const {
5268 return getInjectedTST()->getTemplateName();
5269 }
5270
5271 CXXRecordDecl *getDecl() const;
5272
5273 bool isSugared() const { return false; }
5274 QualType desugar() const { return QualType(this, 0); }
5275
5276 static bool classof(const Type *T) {
5277 return T->getTypeClass() == InjectedClassName;
5278 }
5279};
5280
5281/// The kind of a tag type.
5282enum TagTypeKind {
5283 /// The "struct" keyword.
5284 TTK_Struct,
5285
5286 /// The "__interface" keyword.
5287 TTK_Interface,
5288
5289 /// The "union" keyword.
5290 TTK_Union,
5291
5292 /// The "class" keyword.
5293 TTK_Class,
5294
5295 /// The "enum" keyword.
5296 TTK_Enum
5297};
5298
5299/// The elaboration keyword that precedes a qualified type name or
5300/// introduces an elaborated-type-specifier.
5301enum ElaboratedTypeKeyword {
5302 /// The "struct" keyword introduces the elaborated-type-specifier.
5303 ETK_Struct,
5304
5305 /// The "__interface" keyword introduces the elaborated-type-specifier.
5306 ETK_Interface,
5307
5308 /// The "union" keyword introduces the elaborated-type-specifier.
5309 ETK_Union,
5310
5311 /// The "class" keyword introduces the elaborated-type-specifier.
5312 ETK_Class,
5313
5314 /// The "enum" keyword introduces the elaborated-type-specifier.
5315 ETK_Enum,
5316
5317 /// The "typename" keyword precedes the qualified type name, e.g.,
5318 /// \c typename T::type.
5319 ETK_Typename,
5320
5321 /// No keyword precedes the qualified type name.
5322 ETK_None
5323};
5324
5325/// A helper class for Type nodes having an ElaboratedTypeKeyword.
5326/// The keyword is stored in the free bits of the base class.
5327/// Also provides a few static helpers for converting and printing
5328/// elaborated type keyword and tag type kind enumerations.
5329class TypeWithKeyword : public Type {
5330protected:
5331 TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
5332 QualType Canonical, TypeDependence Dependence)
5333 : Type(tc, Canonical, Dependence) {
5334 TypeWithKeywordBits.Keyword = Keyword;
5335 }
5336
5337public:
5338 ElaboratedTypeKeyword getKeyword() const {
5339 return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
5340 }
5341
5342 /// Converts a type specifier (DeclSpec::TST) into an elaborated type keyword.
5343 static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
5344
5345 /// Converts a type specifier (DeclSpec::TST) into a tag type kind.
5346 /// It is an error to provide a type specifier which *isn't* a tag kind here.
5347 static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
5348
5349 /// Converts a TagTypeKind into an elaborated type keyword.
5350 static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
5351
5352 /// Converts an elaborated type keyword into a TagTypeKind.
5353 /// It is an error to provide an elaborated type keyword
5354 /// which *isn't* a tag kind here.
5355 static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
5356
5357 static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
5358
5359 static StringRef getKeywordName(ElaboratedTypeKeyword Keyword);
5360
5361 static StringRef getTagTypeKindName(TagTypeKind Kind) {
5362 return getKeywordName(getKeywordForTagTypeKind(Kind));
5363 }
5364
5365 class CannotCastToThisType {};
5366 static CannotCastToThisType classof(const Type *);
5367};
5368
5369/// Represents a type that was referred to using an elaborated type
5370/// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type,
5371/// or both.
5372///
5373/// This type is used to keep track of a type name as written in the
5374/// source code, including tag keywords and any nested-name-specifiers.
5375/// The type itself is always "sugar", used to express what was written
5376/// in the source code but containing no additional semantic information.
5377class ElaboratedType final
5378 : public TypeWithKeyword,
5379 public llvm::FoldingSetNode,
5380 private llvm::TrailingObjects<ElaboratedType, TagDecl *> {
5381 friend class ASTContext; // ASTContext creates these
5382 friend TrailingObjects;
5383
5384 /// The nested name specifier containing the qualifier.
5385 NestedNameSpecifier *NNS;
5386
5387 /// The type that this qualified name refers to.
5388 QualType NamedType;
5389
5390 /// The (re)declaration of this tag type owned by this occurrence is stored
5391 /// as a trailing object if there is one. Use getOwnedTagDecl to obtain
5392 /// it; a null pointer is returned if there is none.
5393
5394 ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
5395 QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
5396 : TypeWithKeyword(Keyword, Elaborated, CanonType,
5397 NamedType->getDependence()),
5398 NNS(NNS), NamedType(NamedType) {
5399 ElaboratedTypeBits.HasOwnedTagDecl = false;
5400 if (OwnedTagDecl) {
5401 ElaboratedTypeBits.HasOwnedTagDecl = true;
5402 *getTrailingObjects<TagDecl *>() = OwnedTagDecl;
5403 }
5404 assert(!(Keyword == ETK_None && NNS == nullptr) &&
5405        "ElaboratedType cannot have elaborated type keyword "
5406        "and name qualifier both null.");
5407 }
5408
5409public:
5410 /// Retrieve the qualification on this type.
5411 NestedNameSpecifier *getQualifier() const { return NNS; }
5412
5413 /// Retrieve the type named by the qualified-id.
5414 QualType getNamedType() const { return NamedType; }
5415
5416 /// Remove a single level of sugar.
5417 QualType desugar() const { return getNamedType(); }
5418
5419 /// Returns whether this type directly provides sugar.
5420 bool isSugared() const { return true; }
5421
5422 /// Return the (re)declaration of this type owned by this occurrence of this
5423 /// type, or nullptr if there is none.
5424 TagDecl *getOwnedTagDecl() const {
5425 return ElaboratedTypeBits.HasOwnedTagDecl ? *getTrailingObjects<TagDecl *>()
5426 : nullptr;
5427 }
5428
5429 void Profile(llvm::FoldingSetNodeID &ID) {
5430 Profile(ID, getKeyword(), NNS, NamedType, getOwnedTagDecl());
5431 }
5432
5433 static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
5434 NestedNameSpecifier *NNS, QualType NamedType,
5435 TagDecl *OwnedTagDecl) {
5436 ID.AddInteger(Keyword);
5437 ID.AddPointer(NNS);
5438 NamedType.Profile(ID);
5439 ID.AddPointer(OwnedTagDecl);
5440 }
5441
5442 static bool classof(const Type *T) { return T->getTypeClass() == Elaborated; }
5443};
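ElaboratedType is one-level sugar over the named type, so peeling it is a single getNamedType() (equivalently, desugar()) call. A minimal sketch, not part of this file (helper name made up):

    // Sketch: look through "struct S" / "N::M::type" spelling sugar.
    clang::QualType peelElaboration(clang::QualType QT) {
      if (const auto *ET = QT->getAs<clang::ElaboratedType>())
        return ET->getNamedType();
      return QT;
    }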
5444
5445/// Represents a qualified type name for which the type name is
5446/// dependent.
5447///
5448/// DependentNameType represents a class of dependent types that involve a
5449/// possibly dependent nested-name-specifier (e.g., "T::") followed by a
5450/// name of a type. The DependentNameType may start with a "typename" (for a
5451/// typename-specifier), "class", "struct", "union", or "enum" (for a
5452/// dependent elaborated-type-specifier), or nothing (in contexts where we
5453/// know that we must be referring to a type, e.g., in a base class specifier).
5454/// Typically the nested-name-specifier is dependent, but in MSVC compatibility
5455/// mode, this type is used with non-dependent names to delay name lookup until
5456/// instantiation.
5457class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
5458 friend class ASTContext; // ASTContext creates these
5459
5460 /// The nested name specifier containing the qualifier.
5461 NestedNameSpecifier *NNS;
5462
5463 /// The name of the type that this typename specifier refers to.
5464 const IdentifierInfo *Name;
5465
5466 DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
5467 const IdentifierInfo *Name, QualType CanonType)
5468 : TypeWithKeyword(Keyword, DependentName, CanonType,
5469 TypeDependence::DependentInstantiation |
5470 toTypeDependence(NNS->getDependence())),
5471 NNS(NNS), Name(Name) {}
5472
5473public:
5474 /// Retrieve the qualification on this type.
5475 NestedNameSpecifier *getQualifier() const { return NNS; }
5476
5477 /// Retrieve the type named by the typename specifier as an identifier.
5478 ///
5479 /// This routine will return a non-NULL identifier pointer when the
5480 /// form of the original typename was terminated by an identifier,
5481 /// e.g., "typename T::type".
5482 const IdentifierInfo *getIdentifier() const {
5483 return Name;
5484 }
5485
5486 bool isSugared() const { return false; }
5487 QualType desugar() const { return QualType(this, 0); }
5488
5489 void Profile(llvm::FoldingSetNodeID &ID) {
5490 Profile(ID, getKeyword(), NNS, Name);
5491 }
5492
5493 static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
5494 NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
5495 ID.AddInteger(Keyword);
5496 ID.AddPointer(NNS);
5497 ID.AddPointer(Name);
5498 }
5499
5500 static bool classof(const Type *T) {
5501 return T->getTypeClass() == DependentName;
5502 }
5503};
5504
5505/// Represents a template specialization type whose template cannot be
5506/// resolved, e.g.
5507/// A<T>::template B<T>
5508class alignas(8) DependentTemplateSpecializationType
5509 : public TypeWithKeyword,
5510 public llvm::FoldingSetNode {
5511 friend class ASTContext; // ASTContext creates these
5512
5513 /// The nested name specifier containing the qualifier.
5514 NestedNameSpecifier *NNS;
5515
5516 /// The identifier of the template.
5517 const IdentifierInfo *Name;
5518
5519 DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
5520 NestedNameSpecifier *NNS,
5521 const IdentifierInfo *Name,
5522 ArrayRef<TemplateArgument> Args,
5523 QualType Canon);
5524
5525 const TemplateArgument *getArgBuffer() const {
5526 return reinterpret_cast<const TemplateArgument*>(this+1);
5527 }
5528
5529 TemplateArgument *getArgBuffer() {
5530 return reinterpret_cast<TemplateArgument*>(this+1);
5531 }
5532
5533public:
5534 NestedNameSpecifier *getQualifier() const { return NNS; }
5535 const IdentifierInfo *getIdentifier() const { return Name; }
5536
5537 /// Retrieve the template arguments.
5538 const TemplateArgument *getArgs() const {
5539 return getArgBuffer();
5540 }
5541
5542 /// Retrieve the number of template arguments.
5543 unsigned getNumArgs() const {
5544 return DependentTemplateSpecializationTypeBits.NumArgs;
5545 }
5546
5547 const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
5548
5549 ArrayRef<TemplateArgument> template_arguments() const {
5550 return {getArgs(), getNumArgs()};
5551 }
5552
5553 using iterator = const TemplateArgument *;
5554
5555 iterator begin() const { return getArgs(); }
5556 iterator end() const; // inline in TemplateBase.h
5557
5558 bool isSugared() const { return false; }
5559 QualType desugar() const { return QualType(this, 0); }
5560
5561 void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
5562 Profile(ID, Context, getKeyword(), NNS, Name, {getArgs(), getNumArgs()});
5563 }
5564
5565 static void Profile(llvm::FoldingSetNodeID &ID,
5566 const ASTContext &Context,
5567 ElaboratedTypeKeyword Keyword,
5568 NestedNameSpecifier *Qualifier,
5569 const IdentifierInfo *Name,
5570 ArrayRef<TemplateArgument> Args);
5571
5572 static bool classof(const Type *T) {
5573 return T->getTypeClass() == DependentTemplateSpecialization;
5574 }
5575};
5576
5577/// Represents a pack expansion of types.
5578///
5579/// Pack expansions are part of C++11 variadic templates. A pack
5580/// expansion contains a pattern, which itself contains one or more
5581/// "unexpanded" parameter packs. When instantiated, a pack expansion
5582/// produces a series of types, each instantiated from the pattern of
5583/// the expansion, where the Ith instantiation of the pattern uses the
5584/// Ith arguments bound to each of the unexpanded parameter packs. The
5585/// pack expansion is considered to "expand" these unexpanded
5586/// parameter packs.
5587///
5588/// \code
5589/// template<typename ...Types> struct tuple;
5590///
5591/// template<typename ...Types>
5592/// struct tuple_of_references {
5593/// typedef tuple<Types&...> type;
5594/// };
5595/// \endcode
5596///
5597/// Here, the pack expansion \c Types&... is represented via a
5598/// PackExpansionType whose pattern is Types&.
5599class PackExpansionType : public Type, public llvm::FoldingSetNode {
5600 friend class ASTContext; // ASTContext creates these
5601
5602 /// The pattern of the pack expansion.
5603 QualType Pattern;
5604
5605 PackExpansionType(QualType Pattern, QualType Canon,
5606 Optional<unsigned> NumExpansions)
5607 : Type(PackExpansion, Canon,
5608 (Pattern->getDependence() | TypeDependence::Dependent |
5609 TypeDependence::Instantiation) &
5610 ~TypeDependence::UnexpandedPack),
5611 Pattern(Pattern) {
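    // Store NumExpansions + 1 so that 0 can represent "number of expansions
    // not known"; getNumExpansions() below removes the bias.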
5612 PackExpansionTypeBits.NumExpansions =
5613 NumExpansions ? *NumExpansions + 1 : 0;
5614 }
5615
5616public:
5617 /// Retrieve the pattern of this pack expansion, which is the
5618 /// type that will be repeatedly instantiated when instantiating the
5619 /// pack expansion itself.
5620 QualType getPattern() const { return Pattern; }
5621
5622 /// Retrieve the number of expansions that this pack expansion will
5623 /// generate, if known.
5624 Optional<unsigned> getNumExpansions() const {
5625 if (PackExpansionTypeBits.NumExpansions)
5626 return PackExpansionTypeBits.NumExpansions - 1;
5627 return None;
5628 }
5629
5630 bool isSugared() const { return false; }
5631 QualType desugar() const { return QualType(this, 0); }
5632
5633 void Profile(llvm::FoldingSetNodeID &ID) {
5634 Profile(ID, getPattern(), getNumExpansions());
5635 }
5636
5637 static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
5638 Optional<unsigned> NumExpansions) {
5639 ID.AddPointer(Pattern.getAsOpaquePtr());
5640 ID.AddBoolean(NumExpansions.hasValue());
5641 if (NumExpansions)
5642 ID.AddInteger(*NumExpansions);
5643 }
5644
5645 static bool classof(const Type *T) {
5646 return T->getTypeClass() == PackExpansion;
5647 }
5648};
5649
5650/// This class wraps the list of protocol qualifiers. Types that can
5651/// take ObjC protocol qualifiers can subclass this class.
5652template <class T>
5653class ObjCProtocolQualifiers {
5654protected:
5655 ObjCProtocolQualifiers() = default;
5656
5657 ObjCProtocolDecl * const *getProtocolStorage() const {
5658 return const_cast<ObjCProtocolQualifiers*>(this)->getProtocolStorage();
5659 }
5660
5661 ObjCProtocolDecl **getProtocolStorage() {
5662 return static_cast<T*>(this)->getProtocolStorageImpl();
5663 }
5664
5665 void setNumProtocols(unsigned N) {
5666 static_cast<T*>(this)->setNumProtocolsImpl(N);
5667 }
5668
5669 void initialize(ArrayRef<ObjCProtocolDecl *> protocols) {
5670 setNumProtocols(protocols.size());
5671 assert(getNumProtocols() == protocols.size() &&
5672        "bitfield overflow in protocol count");
5673 if (!protocols.empty())
5674 memcpy(getProtocolStorage(), protocols.data(),
5675 protocols.size() * sizeof(ObjCProtocolDecl*));
5676 }
5677
5678public:
5679 using qual_iterator = ObjCProtocolDecl * const *;
5680 using qual_range = llvm::iterator_range<qual_iterator>;
5681
5682 qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
5683 qual_iterator qual_begin() const { return getProtocolStorage(); }
5684 qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); }
5685
5686 bool qual_empty() const { return getNumProtocols() == 0; }
5687
5688 /// Return the number of qualifying protocols in this type, or 0 if
5689 /// there are none.
5690 unsigned getNumProtocols() const {
5691 return static_cast<const T*>(this)->getNumProtocolsImpl();
5692 }
5693
5694 /// Fetch a protocol by index.
5695 ObjCProtocolDecl *getProtocol(unsigned I) const {
5696 assert(I < getNumProtocols() && "Out-of-range protocol access");
5697 return qual_begin()[I];
5698 }
5699
5700 /// Retrieve all of the protocol qualifiers.
5701 ArrayRef<ObjCProtocolDecl *> getProtocols() const {
5702 return ArrayRef<ObjCProtocolDecl *>(qual_begin(), getNumProtocols());
5703 }
5704};
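// Example usage (a sketch; 'OT' is a hypothetical ObjCObjectType*): any type
// that mixes in ObjCProtocolQualifiers exposes its protocol list this way:
//
//   for (ObjCProtocolDecl *Proto : OT->quals())
//     llvm::errs() << Proto->getName() << "\n";  // print each qualifier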
5705
5706/// Represents a type parameter type in Objective C. It can take
5707/// a list of protocols.
5708class ObjCTypeParamType : public Type,
5709 public ObjCProtocolQualifiers<ObjCTypeParamType>,
5710 public llvm::FoldingSetNode {
5711 friend class ASTContext;
5712 friend class ObjCProtocolQualifiers<ObjCTypeParamType>;
5713
5714 /// The number of protocols stored on this type.
5715 unsigned NumProtocols : 6;
5716
5717 ObjCTypeParamDecl *OTPDecl;
5718
5719 /// The protocols are stored after the ObjCTypeParamType node. In the
5720 /// canonical type, the list of protocols are sorted alphabetically
5721 /// and uniqued.
5722 ObjCProtocolDecl **getProtocolStorageImpl();
5723
5724 /// Return the number of qualifying protocols in this interface type,
5725 /// or 0 if there are none.
5726 unsigned getNumProtocolsImpl() const {
5727 return NumProtocols;
5728 }
5729
5730 void setNumProtocolsImpl(unsigned N) {
5731 NumProtocols = N;
5732 }
5733
5734 ObjCTypeParamType(const ObjCTypeParamDecl *D,
5735 QualType can,
5736 ArrayRef<ObjCProtocolDecl *> protocols);
5737
5738public:
5739 bool isSugared() const { return true; }
5740 QualType desugar() const { return getCanonicalTypeInternal(); }
5741
5742 static bool classof(const Type *T) {
5743 return T->getTypeClass() == ObjCTypeParam;
5744 }
5745
5746 void Profile(llvm::FoldingSetNodeID &ID);
5747 static void Profile(llvm::FoldingSetNodeID &ID,
5748 const ObjCTypeParamDecl *OTPDecl,
5749 QualType CanonicalType,
5750 ArrayRef<ObjCProtocolDecl *> protocols);
5751
5752 ObjCTypeParamDecl *getDecl() const { return OTPDecl; }
5753};
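// Example (sketch, Objective-C): inside the @interface below, a use of 'T'
// is represented as an ObjCTypeParamType for the type parameter, and a
// protocol-qualified use such as 'T<NSCopying>' would also carry that
// protocol list:
//
//   @interface Box<T> : NSObject
//   - (T)value;
//   @end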
5754
5755/// Represents a class type in Objective C.
5756///
5757/// Every Objective C type is a combination of a base type, a set of
5758/// type arguments (optional, for parameterized classes) and a list of
5759/// protocols.
5760///
5761/// Given the following declarations:
5762/// \code
5763/// \@class C<T>;
5764/// \@protocol P;
5765/// \endcode
5766///
5767/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
5768/// with base C and no protocols.
5769///
5770/// 'C<P>' is an unspecialized ObjCObjectType with base C and protocol list [P].
5771/// 'C<C*>' is a specialized ObjCObjectType with type arguments 'C*' and no
5772/// protocol list.
5773/// 'C<C*><P>' is a specialized ObjCObjectType with base C, type arguments 'C*',
5774/// and protocol list [P].
5775///
5776/// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose
5777/// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType
5778/// and no protocols.
5779///
5780/// 'id<P>' is an ObjCObjectPointerType whose pointee is an ObjCObjectType
5781/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
5782/// this should get its own sugar class to better represent the source.
5783class ObjCObjectType : public Type,
5784 public ObjCProtocolQualifiers<ObjCObjectType> {
5785 friend class ObjCProtocolQualifiers<ObjCObjectType>;
5786
5787 // ObjCObjectType.NumTypeArgs - the number of type arguments stored
5788 // after the ObjCObjectPointerType node.
5789 // ObjCObjectType.NumProtocols - the number of protocols stored
5790 // after the type arguments of ObjCObjectPointerType node.
5791 //
5792 // These protocols are those written directly on the type. If
5793 // protocol qualifiers ever become additive, the iterators will need
5794 // to get kindof complicated.
5795 //
5796 // In the canonical object type, these are sorted alphabetically
5797 // and uniqued.
5798
5799 /// Either a BuiltinType or an InterfaceType or sugar for either.
5800 QualType BaseType;
5801
5802 /// Cached superclass type.
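  /// The integer bit of the pair records whether the superclass type has been
  /// computed yet; the pointer holds the cached result, which is null when
  /// there is no superclass.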
5803 mutable llvm::PointerIntPair<const ObjCObjectType *, 1, bool>
5804 CachedSuperClassType;
5805
5806 QualType *getTypeArgStorage();
5807 const QualType *getTypeArgStorage() const {
5808 return const_cast<ObjCObjectType *>(this)->getTypeArgStorage();
5809 }
5810
5811 ObjCProtocolDecl **getProtocolStorageImpl();
5812 /// Return the number of qualifying protocols in this interface type,
5813 /// or 0 if there are none.
5814 unsigned getNumProtocolsImpl() const {
5815 return ObjCObjectTypeBits.NumProtocols;
5816 }
5817 void setNumProtocolsImpl(unsigned N) {
5818 ObjCObjectTypeBits.NumProtocols = N;
5819 }
5820
5821protected:
5822 enum Nonce_ObjCInterface { Nonce_ObjCInterface };
5823
5824 ObjCObjectType(QualType Canonical, QualType Base,
5825 ArrayRef<QualType> typeArgs,
5826 ArrayRef<ObjCProtocolDecl *> protocols,
5827 bool isKindOf);
5828
5829 ObjCObjectType(enum Nonce_ObjCInterface)
5830 : Type(ObjCInterface, QualType(), TypeDependence::None),
5831 BaseType(QualType(this_(), 0)) {
5832 ObjCObjectTypeBits.NumProtocols = 0;
5833 ObjCObjectTypeBits.NumTypeArgs = 0;
5834 ObjCObjectTypeBits.IsKindOf = 0;
5835 }
5836
5837 void computeSuperClassTypeSlow() const;
5838
5839public:
5840 /// Gets the base type of this object type. This is always (possibly
5841 /// sugar for) one of:
5842 /// - the 'id' builtin type (as opposed to the 'id' type visible to the
5843 /// user, which is a typedef for an ObjCObjectPointerType)
5844 /// - the 'Class' builtin type (same caveat)
5845 /// - an ObjCObjectType (currently always an ObjCInterfaceType)
5846 QualType getBaseType() const { return BaseType; }
5847
5848 bool isObjCId() const {
5849 return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId);
5850 }
5851
5852 bool isObjCClass() const {
5853 return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass);
5854 }
5855
5856 bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); }
5857 bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); }
5858 bool isObjCUnqualifiedIdOrClass() const {
5859 if (!qual_empty()) return false;
5860 if (const BuiltinType *T = getBaseType()->getAs<BuiltinType>())
5861 return T->getKind() == BuiltinType::ObjCId ||
5862 T->getKind() == BuiltinType::ObjCClass;
5863 return false;
5864 }
5865 bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); }
5866 bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); }
5867
5868 /// Gets the interface declaration for this object type, if the base type
5869 /// really is an interface.
5870 ObjCInterfaceDecl *getInterface() const;
5871
5872 /// Determine whether this object type is "specialized", meaning
5873 /// that it has type arguments.
5874 bool isSpecialized() const;
5875
5876 /// Determine whether this object type was written with type arguments.
5877 bool isSpecializedAsWritten() const {
5878 return ObjCObjectTypeBits.NumTypeArgs > 0;
5879 }
5880
5881 /// Determine whether this object type is "unspecialized", meaning
5882 /// that it has no type arguments.
5883 bool isUnspecialized() const { return !isSpecialized(); }
5884
5885 /// Determine whether this object type is "unspecialized" as
5886 /// written, meaning that it has no type arguments.
5887 bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
5888
5889 /// Retrieve the type arguments of this object type (semantically).
5890 ArrayRef<QualType> getTypeArgs() const;
5891
5892 /// Retrieve the type arguments of this object type as they were
5893 /// written.
5894 ArrayRef<QualType> getTypeArgsAsWritten() const {
5895 return llvm::makeArrayRef(getTypeArgStorage(),
5896 ObjCObjectTypeBits.NumTypeArgs);
5897 }
5898
5899 /// Whether this is a "__kindof" type as written.
5900 bool isKindOfTypeAsWritten() const { return ObjCObjectTypeBits.IsKindOf; }
5901
5902 /// Whether this is a "__kindof" type (semantically).
5903 bool isKindOfType() const;
5904
5905 /// Retrieve the type of the superclass of this object type.
5906 ///
5907 /// This operation substitutes any type arguments into the
5908 /// superclass of the current class type, potentially producing a
5909 /// specialization of the superclass type. Produces a null type if
5910 /// there is no superclass.
5911 QualType getSuperClassType() const {
5912 if (!CachedSuperClassType.getInt())
5913 computeSuperClassTypeSlow();
5914
5915 assert(CachedSuperClassType.getInt() && "Superclass not set?");
5916 return QualType(CachedSuperClassType.getPointer(), 0);
5917 }
5918
5919 /// Strip off the Objective-C "kindof" type and (with it) any
5920 /// protocol qualifiers.
5921 QualType stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const;
5922
5923 bool isSugared() const { return false; }
5924 QualType desugar() const { return QualType(this, 0); }
5925
5926 static bool classof(const Type *T) {
5927 return T->getTypeClass() == ObjCObject ||
5928 T->getTypeClass() == ObjCInterface;
5929 }
5930};
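// Example (sketch, Objective-C): both pointee types below are ObjCObjectTypes
// with base NSArray, but only the first is specialized:
//
//   NSArray<NSString *> *a;  // isSpecialized(), type arguments ['NSString *']
//   NSArray *b;              // isUnspecialized()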
5931
5932/// A class providing a concrete implementation
5933/// of ObjCObjectType, so as to not increase the footprint of
5934/// ObjCInterfaceType. Code outside of ASTContext and the core type
5935/// system should not reference this type.
5936class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
5937 friend class ASTContext;
5938
5939 // If anyone adds fields here, ObjCObjectType::getProtocolStorage()
5940 // will need to be modified.
5941
5942 ObjCObjectTypeImpl(QualType Canonical, QualType Base,
5943 ArrayRef<QualType> typeArgs,
5944 ArrayRef<ObjCProtocolDecl *> protocols,
5945 bool isKindOf)
5946 : ObjCObjectType(Canonical, Base, typeArgs, protocols, isKindOf) {}
5947
5948public:
5949 void Profile(llvm::FoldingSetNodeID &ID);
5950 static void Profile(llvm::FoldingSetNodeID &ID,
5951 QualType Base,
5952 ArrayRef<QualType> typeArgs,
5953 ArrayRef<ObjCProtocolDecl *> protocols,
5954 bool isKindOf);
5955};
5956
5957inline QualType *ObjCObjectType::getTypeArgStorage() {
5958 return reinterpret_cast<QualType *>(static_cast<ObjCObjectTypeImpl*>(this)+1);
5959}
5960
5961inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorageImpl() {
5962 return reinterpret_cast<ObjCProtocolDecl**>(
5963 getTypeArgStorage() + ObjCObjectTypeBits.NumTypeArgs);
5964}
5965
5966inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() {
5967 return reinterpret_cast<ObjCProtocolDecl**>(
5968 static_cast<ObjCTypeParamType*>(this)+1);
5969}
5970
5971/// Interfaces are the core concept in Objective-C for object oriented design.
5972/// They basically correspond to C++ classes. There are two kinds of interface
5973/// types: normal interfaces like `NSString`, and qualified interfaces, which
5974/// are qualified with a protocol list like `NSString<NSCopyable, NSAmazing>`.
5975///
5976/// ObjCInterfaceType guarantees the following properties when considered
5977/// as a subtype of its superclass, ObjCObjectType:
5978/// - There are no protocol qualifiers. To reinforce this, code which
5979/// tries to invoke the protocol methods via an ObjCInterfaceType will
5980/// fail to compile.
5981/// - It is its own base type. That is, if T is an ObjCInterfaceType*,
5982/// T->getBaseType() == QualType(T, 0).
5983class ObjCInterfaceType : public ObjCObjectType {
5984 friend class ASTContext; // ASTContext creates these.
5985 friend class ASTReader;
5986 friend class ObjCInterfaceDecl;
5987 template <class T> friend class serialization::AbstractTypeReader;
5988
5989 mutable ObjCInterfaceDecl *Decl;
5990
5991 ObjCInterfaceType(const ObjCInterfaceDecl *D)
5992 : ObjCObjectType(Nonce_ObjCInterface),
5993 Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
5994
5995public:
5996 /// Get the declaration of this interface.
5997 ObjCInterfaceDecl *getDecl() const { return Decl; }
5998
5999 bool isSugared() const { return false; }
6000 QualType desugar() const { return QualType(this, 0); }
6001
6002 static bool classof(const Type *T) {
6003 return T->getTypeClass() == ObjCInterface;
6004 }
6005
6006 // Nonsense to "hide" certain members of ObjCObjectType within this
6007 // class. People asking for protocols on an ObjCInterfaceType are
6008 // not going to get what they want: ObjCInterfaceTypes are
6009 // guaranteed to have no protocols.
6010 enum {
6011 qual_iterator,
6012 qual_begin,
6013 qual_end,
6014 getNumProtocols,
6015 getProtocol
6016 };
6017};
6018
6019inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const {
6020 QualType baseType = getBaseType();
6021 while (const auto *ObjT = baseType->getAs<ObjCObjectType>()) {
6022 if (const auto *T = dyn_cast<ObjCInterfaceType>(ObjT))
6023 return T->getDecl();
6024
6025 baseType = ObjT->getBaseType();
6026 }
6027
6028 return nullptr;
6029}
6030
6031/// Represents a pointer to an Objective C object.
6032///
6033/// These are constructed from pointer declarators when the pointee type is
6034/// an ObjCObjectType (or sugar for one). In addition, the 'id' and 'Class'
6035/// types are typedefs for these, and the protocol-qualified types 'id<P>'
6036/// and 'Class<P>' are translated into these.
6037///
6038/// Pointers to pointers to Objective C objects are still PointerTypes;
6039/// only the first level of pointer gets its own type implementation.
6040class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
6041 friend class ASTContext; // ASTContext creates these.
6042
6043 QualType PointeeType;
6044
6045 ObjCObjectPointerType(QualType Canonical, QualType Pointee)
6046 : Type(ObjCObjectPointer, Canonical, Pointee->getDependence()),
6047 PointeeType(Pointee) {}
6048
6049public:
6050 /// Gets the type pointed to by this ObjC pointer.
6051 /// The result will always be an ObjCObjectType or sugar thereof.
6052 QualType getPointeeType() const { return PointeeType; }
6053
6054 /// Gets the type pointed to by this ObjC pointer. Always returns non-null.
6055 ///
6056 /// This method is equivalent to getPointeeType() except that
6057 /// it discards any typedefs (or other sugar) between this
6058 /// type and the "outermost" object type. So for:
6059 /// \code
6060 /// \@class A; \@protocol P; \@protocol Q;
6061 /// typedef A<P> AP;
6062 /// typedef A A1;
6063 /// typedef A1<P> A1P;
6064 /// typedef A1P<Q> A1PQ;
6065 /// \endcode
6066 /// For 'A*', getObjectType() will return 'A'.
6067 /// For 'A<P>*', getObjectType() will return 'A<P>'.
6068 /// For 'AP*', getObjectType() will return 'A<P>'.
6069 /// For 'A1*', getObjectType() will return 'A'.
6070 /// For 'A1<P>*', getObjectType() will return 'A1<P>'.
6071 /// For 'A1P*', getObjectType() will return 'A1<P>'.
6072 /// For 'A1PQ*', getObjectType() will return 'A1<Q>', because
6073 /// adding protocols to a protocol-qualified base discards the
6074 /// old qualifiers (for now). But if it didn't, getObjectType()
6075 /// would return 'A1P<Q>' (and we'd have to make iterating over
6076 /// qualifiers more complicated).
6077 const ObjCObjectType *getObjectType() const {
6078 return PointeeType->castAs<ObjCObjectType>();
6079 }
6080
6081 /// If this pointer points to an Objective C
6082 /// \@interface type, gets the type for that interface. Any protocol
6083 /// qualifiers on the interface are ignored.
6084 ///
6085 /// \return null if the base type for this pointer is 'id' or 'Class'
6086 const ObjCInterfaceType *getInterfaceType() const;
6087
6088 /// If this pointer points to an Objective \@interface
6089 /// type, gets the declaration for that interface.
6090 ///
6091 /// \return null if the base type for this pointer is 'id' or 'Class'
6092 ObjCInterfaceDecl *getInterfaceDecl() const {
6093 return getObjectType()->getInterface();
6094 }
6095
6096 /// True if this is equivalent to the 'id' type, i.e. if
6097 /// its object type is the primitive 'id' type with no protocols.
6098 bool isObjCIdType() const {
6099 return getObjectType()->isObjCUnqualifiedId();
6100 }
6101
6102 /// True if this is equivalent to the 'Class' type,
6103 /// i.e. if its object type is the primitive 'Class' type with no protocols.
6104 bool isObjCClassType() const {
6105 return getObjectType()->isObjCUnqualifiedClass();
6106 }
6107
6108 /// True if this is equivalent to the 'id' or 'Class' type.
6109 bool isObjCIdOrClassType() const {
6110 return getObjectType()->isObjCUnqualifiedIdOrClass();
6111 }
6112
6113 /// True if this is equivalent to 'id<P>' for some non-empty set of
6114 /// protocols.
6115 bool isObjCQualifiedIdType() const {
6116 return getObjectType()->isObjCQualifiedId();
6117 }
6118
6119 /// True if this is equivalent to 'Class<P>' for some non-empty set of
6120 /// protocols.
6121 bool isObjCQualifiedClassType() const {
6122 return getObjectType()->isObjCQualifiedClass();
6123 }
6124
6125 /// Whether this is a "__kindof" type.
6126 bool isKindOfType() const { return getObjectType()->isKindOfType(); }
6127
6128 /// Whether this type is specialized, meaning that it has type arguments.
6129 bool isSpecialized() const { return getObjectType()->isSpecialized(); }
6130
6131 /// Whether this type was written with type arguments.
6132 bool isSpecializedAsWritten() const {
6133 return getObjectType()->isSpecializedAsWritten();
6134 }
6135
6136 /// Whether this type is unspecialized, meaning that it has no type arguments.
6137 bool isUnspecialized() const { return getObjectType()->isUnspecialized(); }
6138
6139 /// Determine whether this object type is "unspecialized" as
6140 /// written, meaning that it has no type arguments.
6141 bool isUnspecializedAsWritten() const { return !isSpecializedAsWritten(); }
6142
6143 /// Retrieve the type arguments for this type.
6144 ArrayRef<QualType> getTypeArgs() const {
6145 return getObjectType()->getTypeArgs();
6146 }
6147
6148 /// Retrieve the type arguments for this type.
6149 ArrayRef<QualType> getTypeArgsAsWritten() const {
6150 return getObjectType()->getTypeArgsAsWritten();
6151 }
6152
6153 /// An iterator over the qualifiers on the object type. Provided
6154 /// for convenience. This will always iterate over the full set of
6155 /// protocols on a type, not just those provided directly.
6156 using qual_iterator = ObjCObjectType::qual_iterator;
6157 using qual_range = llvm::iterator_range<qual_iterator>;
6158
6159 qual_range quals() const { return qual_range(qual_begin(), qual_end()); }
6160
6161 qual_iterator qual_begin() const {
6162 return getObjectType()->qual_begin();
6163 }
6164
6165 qual_iterator qual_end() const {
6166 return getObjectType()->qual_end();
6167 }
6168
6169 bool qual_empty() const { return getObjectType()->qual_empty(); }
6170
6171 /// Return the number of qualifying protocols on the object type.
6172 unsigned getNumProtocols() const {
6173 return getObjectType()->getNumProtocols();
6174 }
6175
6176 /// Retrieve a qualifying protocol by index on the object type.
6177 ObjCProtocolDecl *getProtocol(unsigned I) const {
6178 return getObjectType()->getProtocol(I);
6179 }
6180
6181 bool isSugared() const { return false; }
6182 QualType desugar() const { return QualType(this, 0); }
6183
6184 /// Retrieve the type of the superclass of this object pointer type.
6185 ///
6186 /// This operation substitutes any type arguments into the
6187 /// superclass of the current class type, potentially producing a
6188 /// pointer to a specialization of the superclass type. Produces a
6189 /// null type if there is no superclass.
6190 QualType getSuperClassType() const;
6191
6192 /// Strip off the Objective-C "kindof" type and (with it) any
6193 /// protocol qualifiers.
6194 const ObjCObjectPointerType *stripObjCKindOfTypeAndQuals(
6195 const ASTContext &ctx) const;
6196
6197 void Profile(llvm::FoldingSetNodeID &ID) {
6198 Profile(ID, getPointeeType());
6199 }
6200
6201 static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
6202 ID.AddPointer(T.getAsOpaquePtr());
6203 }
6204
6205 static bool classof(const Type *T) {
6206 return T->getTypeClass() == ObjCObjectPointer;
6207 }
6208};
6209
6210class AtomicType : public Type, public llvm::FoldingSetNode {
6211 friend class ASTContext; // ASTContext creates these.
6212
6213 QualType ValueType;
6214
6215 AtomicType(QualType ValTy, QualType Canonical)
6216 : Type(Atomic, Canonical, ValTy->getDependence()), ValueType(ValTy) {}
6217
6218public:
6219 /// Gets the type contained by this atomic type, i.e.
6220 /// the type returned by performing an atomic load of this atomic type.
6221 QualType getValueType() const { return ValueType; }
6222
6223 bool isSugared() const { return false; }
6224 QualType desugar() const { return QualType(this, 0); }
6225
6226 void Profile(llvm::FoldingSetNodeID &ID) {
6227 Profile(ID, getValueType());
6228 }
6229
6230 static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
6231 ID.AddPointer(T.getAsOpaquePtr());
6232 }
6233
6234 static bool classof(const Type *T) {
6235 return T->getTypeClass() == Atomic;
6236 }
6237};
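// Example (sketch, C11): '_Atomic(int)' is represented as an AtomicType whose
// getValueType() is 'int':
//
//   _Atomic(int) counter;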
6238
6239/// PipeType - represents an OpenCL 2.0 pipe type.
6240class PipeType : public Type, public llvm::FoldingSetNode {
6241 friend class ASTContext; // ASTContext creates these.
6242
6243 QualType ElementType;
6244 bool isRead;
6245
6246 PipeType(QualType elemType, QualType CanonicalPtr, bool isRead)
6247 : Type(Pipe, CanonicalPtr, elemType->getDependence()),
6248 ElementType(elemType), isRead(isRead) {}
6249
6250public:
6251 QualType getElementType() const { return ElementType; }
6252
6253 bool isSugared() const { return false; }
6254
6255 QualType desugar() const { return QualType(this, 0); }
6256
6257 void Profile(llvm::FoldingSetNodeID &ID) {
6258 Profile(ID, getElementType(), isReadOnly());
6259 }
6260
6261 static void Profile(llvm::FoldingSetNodeID &ID, QualType T, bool isRead) {
6262 ID.AddPointer(T.getAsOpaquePtr());
6263 ID.AddBoolean(isRead);
6264 }
6265
6266 static bool classof(const Type *T) {
6267 return T->getTypeClass() == Pipe;
6268 }
6269
6270 bool isReadOnly() const { return isRead; }
6271};
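// Example (sketch, OpenCL 2.0): a kernel parameter declared as
//
//   read_only pipe int p
//
// is a PipeType with element type 'int' and isReadOnly() == true.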
6272
6273/// An extended integer type (_ExtInt) of a specified bitwidth.
6274class ExtIntType final : public Type, public llvm::FoldingSetNode {
6275 friend class ASTContext;
6276 unsigned IsUnsigned : 1;
6277 unsigned NumBits : 24;
6278
6279protected:
6280 ExtIntType(bool isUnsigned, unsigned NumBits);
6281
6282public:
6283 bool isUnsigned() const { return IsUnsigned; }
6284 bool isSigned() const { return !IsUnsigned; }
6285 unsigned getNumBits() const { return NumBits; }
6286
6287 bool isSugared() const { return false; }
6288 QualType desugar() const { return QualType(this, 0); }
6289
6290 void Profile(llvm::FoldingSetNodeID &ID) {
6291 Profile(ID, isUnsigned(), getNumBits());
6292 }
6293
6294 static void Profile(llvm::FoldingSetNodeID &ID, bool IsUnsigned,
6295 unsigned NumBits) {
6296 ID.AddBoolean(IsUnsigned);
6297 ID.AddInteger(NumBits);
6298 }
6299
6300 static bool classof(const Type *T) { return T->getTypeClass() == ExtInt; }
6301};
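// Example (sketch, Clang's _ExtInt extension): 'unsigned _ExtInt(37)' is an
// ExtIntType with isUnsigned() == true and getNumBits() == 37:
//
//   unsigned _ExtInt(37) x;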
6302
6303class DependentExtIntType final : public Type, public llvm::FoldingSetNode {
6304 friend class ASTContext;
6305 const ASTContext &Context;
6306 llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
6307
6308protected:
6309 DependentExtIntType(const ASTContext &Context, bool IsUnsigned,
6310 Expr *NumBits);
6311
6312public:
6313 bool isUnsigned() const;
6314 bool isSigned() const { return !isUnsigned(); }
6315 Expr *getNumBitsExpr() const;
6316
6317 bool isSugared() const { return false; }
6318 QualType desugar() const { return QualType(this, 0); }
6319
6320 void Profile(llvm::FoldingSetNodeID &ID) {
6321 Profile(ID, Context, isUnsigned(), getNumBitsExpr());
6322 }
6323 static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
6324 bool IsUnsigned, Expr *NumBitsExpr);
6325
6326 static bool classof(const Type *T) {
6327 return T->getTypeClass() == DependentExtInt;
6328 }
6329};
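// Example (sketch): when the bit-width is a dependent expression, the type
// stays a DependentExtIntType until instantiation resolves it:
//
//   template <unsigned N> struct S { unsigned _ExtInt(N) v; };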
6330
6331/// A QualifierCollector is used to build up a set of qualifiers, typically
6331/// stripped from one type so they can be applied to another.
6332class QualifierCollector : public Qualifiers {
6333public:
6334 QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
6335
6336 /// Collect any qualifiers on the given type and return an
6337 /// unqualified type. The qualifiers are assumed to be consistent
6338 /// with those already in the type.
6339 const Type *strip(QualType type) {
6340 addFastQualifiers(type.getLocalFastQualifiers());
6341 if (!type.hasLocalNonFastQualifiers())
6342 return type.getTypePtrUnsafe();
6343
6344 const ExtQuals *extQuals = type.getExtQualsUnsafe();
6345 addConsistentQualifiers(extQuals->getQualifiers());
6346 return extQuals->getBaseType();
6347 }
6348
6349 /// Apply the collected qualifiers to the given type.
6350 QualType apply(const ASTContext &Context, QualType QT) const;
6351
6352 /// Apply the collected qualifiers to the given type.
6353 QualType apply(const ASTContext &Context, const Type* T) const;
6354};
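// Example usage (a minimal sketch; 'rebuildType' is a hypothetical helper):
// strip the qualifiers off a type, transform the bare type, then reapply the
// collected qualifiers:
//
//   QualifierCollector Quals;
//   const Type *Bare = Quals.strip(QT);              // QT is some QualType
//   QualType Result = Quals.apply(Context, rebuildType(Bare));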
6355
6356/// A container of type source information.
6357///
6358/// A client can read the relevant info using TypeLoc wrappers, e.g.:
6359/// @code
6360/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
6361/// TL.getBeginLoc().print(OS, SrcMgr);
6362/// @endcode
6363class alignas(8) TypeSourceInfo {
6364 // Contains a memory block after the class, used for type source information,
6365 // allocated by ASTContext.
6366 friend class ASTContext;
6367
6368 QualType Ty;
6369
6370 TypeSourceInfo(QualType ty) : Ty(ty) {}
6371
6372public:
6373 /// Return the type wrapped by this type source info.
6374 QualType getType() const { return Ty; }
6375
6376 /// Return the TypeLoc wrapper for the type source info.
6377 TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
6378
6379 /// Override the type stored in this TypeSourceInfo. Use with caution!
6380 void overrideType(QualType T) { Ty = T; }
6381};
6382
6383// Inline function definitions.
6384
6385inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
6386 SplitQualType desugar =
6387 Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
6388 desugar.Quals.addConsistentQualifiers(Quals);
6389 return desugar;
6390}
6391
6392inline const Type *QualType::getTypePtr() const {
6393 return getCommonPtr()->BaseType;
6394}
6395
6396inline const Type *QualType::getTypePtrOrNull() const {
6397 return (isNull() ? nullptr : getCommonPtr()->BaseType);
6398}
6399
6400inline SplitQualType QualType::split() const {
6401 if (!hasLocalNonFastQualifiers())
6402 return SplitQualType(getTypePtrUnsafe(),
6403 Qualifiers::fromFastMask(getLocalFastQualifiers()));
6404
6405 const ExtQuals *eq = getExtQualsUnsafe();
6406 Qualifiers qs = eq->getQualifiers();
6407 qs.addFastQualifiers(getLocalFastQualifiers());
6408 return SplitQualType(eq->getBaseType(), qs);
6409}
6410
6411inline Qualifiers QualType::getLocalQualifiers() const {
6412 Qualifiers Quals;
6413 if (hasLocalNonFastQualifiers())
6414 Quals = getExtQualsUnsafe()->getQualifiers();
6415 Quals.addFastQualifiers(getLocalFastQualifiers());
6416 return Quals;
6417}
6418
6419inline Qualifiers QualType::getQualifiers() const {
6420 Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
6421 quals.addFastQualifiers(getLocalFastQualifiers());
6422 return quals;
6423}
6424
6425inline unsigned QualType::getCVRQualifiers() const {
6426 unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
6427 cvr |= getLocalCVRQualifiers();
6428 return cvr;
6429}
6430
6431inline QualType QualType::getCanonicalType() const {
6432 QualType canon = getCommonPtr()->CanonicalType;
6433 return canon.withFastQualifiers(getLocalFastQualifiers());
6434}
6435
6436inline bool QualType::isCanonical() const {
6437 return getTypePtr()->isCanonicalUnqualified();
6438}
6439
6440inline bool QualType::isCanonicalAsParam() const {
6441 if (!isCanonical()) return false;
6442 if (hasLocalQualifiers()) return false;
6443
6444 const Type *T = getTypePtr();
6445 if (T->isVariablyModifiedType() && T->hasSizedVLAType())
6446 return false;
6447
6448 return !isa<FunctionType>(T) && !isa<ArrayType>(T);
6449}
6450
6451inline bool QualType::isConstQualified() const {
6452 return isLocalConstQualified() ||
6453 getCommonPtr()->CanonicalType.isLocalConstQualified();
6454}
6455
6456inline bool QualType::isRestrictQualified() const {
6457 return isLocalRestrictQualified() ||
6458 getCommonPtr()->CanonicalType.isLocalRestrictQualified();
6459}
6460
6461
6462inline bool QualType::isVolatileQualified() const {
6463 return isLocalVolatileQualified() ||
6464 getCommonPtr()->CanonicalType.isLocalVolatileQualified();
6465}
6466
6467inline bool QualType::hasQualifiers() const {
6468 return hasLocalQualifiers() ||
6469 getCommonPtr()->CanonicalType.hasLocalQualifiers();
6470}
6471
6472inline QualType QualType::getUnqualifiedType() const {
6473 if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
6474 return QualType(getTypePtr(), 0);
6475
6476 return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
6477}
6478
6479inline SplitQualType QualType::getSplitUnqualifiedType() const {
6480 if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
6481 return split();
6482
6483 return getSplitUnqualifiedTypeImpl(*this);
6484}
6485
6486inline void QualType::removeLocalConst() {
6487 removeLocalFastQualifiers(Qualifiers::Const);
6488}
6489
6490inline void QualType::removeLocalRestrict() {
6491 removeLocalFastQualifiers(Qualifiers::Restrict);
6492}
6493
6494inline void QualType::removeLocalVolatile() {
6495 removeLocalFastQualifiers(Qualifiers::Volatile);
6496}
6497
6498inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
6499 assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits");
6500 static_assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask,
6501 "Fast bits differ from CVR bits!");
6502
6503 // Fast path: we don't need to touch the slow qualifiers.
6504 removeLocalFastQualifiers(Mask);
6505}
6506
6507/// Check if this type has any address space qualifier.
6508inline bool QualType::hasAddressSpace() const {
6509 return getQualifiers().hasAddressSpace();
6510}
6511
6512/// Return the address space of this type.
6513inline LangAS QualType::getAddressSpace() const {
6514 return getQualifiers().getAddressSpace();
6515}
6516
6517/// Return the gc attribute of this type.
6518inline Qualifiers::GC QualType::getObjCGCAttr() const {
6519 return getQualifiers().getObjCGCAttr();
6520}
6521
6522inline bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion() const {
6523 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6524 return hasNonTrivialToPrimitiveDefaultInitializeCUnion(RD);
6525 return false;
6526}
6527
6528inline bool QualType::hasNonTrivialToPrimitiveDestructCUnion() const {
6529 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6530 return hasNonTrivialToPrimitiveDestructCUnion(RD);
6531 return false;
6532}
6533
6534inline bool QualType::hasNonTrivialToPrimitiveCopyCUnion() const {
6535 if (auto *RD = getTypePtr()->getBaseElementTypeUnsafe()->getAsRecordDecl())
6536 return hasNonTrivialToPrimitiveCopyCUnion(RD);
6537 return false;
6538}
6539
6540inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
6541 if (const auto *PT = t.getAs<PointerType>()) {
6542 if (const auto *FT = PT->getPointeeType()->getAs<FunctionType>())
6543 return FT->getExtInfo();
6544 } else if (const auto *FT = t.getAs<FunctionType>())
6545 return FT->getExtInfo();
6546
6547 return FunctionType::ExtInfo();
6548}
6549
6550inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
6551 return getFunctionExtInfo(*t);
6552}
6553
6554/// Determine whether this type is more
6555/// qualified than the Other type. For example, "const volatile int"
6556/// is more qualified than "const int", "volatile int", and
6557/// "int". However, it is not more qualified than "const volatile
6558/// int".
6559inline bool QualType::isMoreQualifiedThan(QualType other) const {
6560 Qualifiers MyQuals = getQualifiers();
6561 Qualifiers OtherQuals = other.getQualifiers();
6562 return (MyQuals != OtherQuals && MyQuals.compatiblyIncludes(OtherQuals));
6563}
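// Worked example: comparing 'const volatile int' against 'const int', the
// qualifier sets differ and {const, volatile} compatibly includes {const},
// so the result is true; comparing 'const int' against 'volatile int' yields
// false, since neither qualifier set includes the other.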
6564
6565/// Determine whether this type is at least
6566/// as qualified as the Other type. For example, "const volatile
6567/// int" is at least as qualified as "const int", "volatile int",
6568/// "int", and "const volatile int".
6569inline bool QualType::isAtLeastAsQualifiedAs(QualType other) const {
6570 Qualifiers OtherQuals = other.getQualifiers();
6571
6572 // Ignore the __unaligned qualifier if this type is void.
6573 if (getUnqualifiedType()->isVoidType())
6574 OtherQuals.removeUnaligned();
6575
6576 return getQualifiers().compatiblyIncludes(OtherQuals);
6577}
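// Worked example: 'const volatile int' is at least as qualified as
// 'const volatile int' (equal qualifier sets compatibly include each other),
// even though isMoreQualifiedThan() returns false for that same pair.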
6578
6579/// If Type is a reference type (e.g., const
6580/// int&), returns the type that the reference refers to ("const
6581/// int"). Otherwise, returns the type itself. This routine is used
6582/// throughout Sema to implement C++ 5p6:
6583///
6584/// If an expression initially has the type "reference to T" (8.3.2,
6585/// 8.5.3), the type is adjusted to "T" prior to any further
6586/// analysis, the expression designates the object or function
6587/// denoted by the reference, and the expression is an lvalue.
6588inline QualType QualType::getNonReferenceType() const {
6589 if (const auto *RefType = (*this)->getAs<ReferenceType>())
6590 return RefType->getPointeeType();
6591 else
6592 return *this;
6593}
6594
6595inline bool QualType::isCForbiddenLValueType() const {
6596 return ((getTypePtr()->isVoidType() && !hasQualifiers()) ||
6597 getTypePtr()->isFunctionType());
6598}
6599
6600/// Tests whether the type is categorized as a fundamental type.
6601///
6602/// \returns True for types specified in C++0x [basic.fundamental].
6603inline bool Type::isFundamentalType() const {
6604 return isVoidType() ||
6605 isNullPtrType() ||
6606 // FIXME: It's really annoying that we don't have an
6607 // 'isArithmeticType()' which agrees with the standard definition.
6608 (isArithmeticType() && !isEnumeralType());
6609}
6610
6611/// Tests whether the type is categorized as a compound type.
6612///
6613/// \returns True for types specified in C++0x [basic.compound].
6614inline bool Type::isCompoundType() const {
6615 // C++0x [basic.compound]p1:
6616 // Compound types can be constructed in the following ways:
6617 // -- arrays of objects of a given type [...];
6618 return isArrayType() ||
6619 // -- functions, which have parameters of given types [...];
6620 isFunctionType() ||
6621 // -- pointers to void or objects or functions [...];
6622 isPointerType() ||
6623 // -- references to objects or functions of a given type. [...]
6624 isReferenceType() ||
6625 // -- classes containing a sequence of objects of various types, [...];
6626 isRecordType() ||
6627 // -- unions, which are classes capable of containing objects of different
6628 // types at different times;
6629 isUnionType() ||
6630 // -- enumerations, which comprise a set of named constant values. [...];
6631 isEnumeralType() ||
6632 // -- pointers to non-static class members, [...].
6633 isMemberPointerType();
6634}
6635
6636inline bool Type::isFunctionType() const {
6637 return isa<FunctionType>(CanonicalType);
6638}
6639
6640inline bool Type::isPointerType() const {
6641 return isa<PointerType>(CanonicalType);
6642}
6643
6644inline bool Type::isAnyPointerType() const {
6645 return isPointerType() || isObjCObjectPointerType();
6646}
6647
6648inline bool Type::isBlockPointerType() const {
6649 return isa<BlockPointerType>(CanonicalType);
6650}
6651
6652inline bool Type::isReferenceType() const {
6653 return isa<ReferenceType>(CanonicalType);
6654}
6655
6656inline bool Type::isLValueReferenceType() const {
6657 return isa<LValueReferenceType>(CanonicalType);
6658}
6659
6660inline bool Type::isRValueReferenceType() const {
6661 return isa<RValueReferenceType>(CanonicalType);
6662}
6663
6664inline bool Type::isObjectPointerType() const {
6665 // Note: an "object pointer type" is not the same thing as a pointer to an
6666 // object type; rather, it is a pointer to an object type or a pointer to cv
6667 // void.
6668 if (const auto *T = getAs<PointerType>())
6669 return !T->getPointeeType()->isFunctionType();
6670 else
6671 return false;
6672}
6673
6674inline bool Type::isFunctionPointerType() const {
6675 if (const auto *T = getAs<PointerType>())
6676 return T->getPointeeType()->isFunctionType();
6677 else
6678 return false;
6679}
6680
6681inline bool Type::isFunctionReferenceType() const {
6682 if (const auto *T = getAs<ReferenceType>())
6683 return T->getPointeeType()->isFunctionType();
6684 else
6685 return false;
6686}
6687
6688inline bool Type::isMemberPointerType() const {
6689 return isa<MemberPointerType>(CanonicalType);
6690}
6691
6692inline bool Type::isMemberFunctionPointerType() const {
6693 if (const auto *T = getAs<MemberPointerType>())
6694 return T->isMemberFunctionPointer();
6695 else
6696 return false;
6697}
6698
6699inline bool Type::isMemberDataPointerType() const {
6700 if (const auto *T = getAs<MemberPointerType>())
6701 return T->isMemberDataPointer();
6702 else
6703 return false;
6704}
6705
6706inline bool Type::isArrayType() const {
6707 return isa<ArrayType>(CanonicalType);
6708}
6709
6710inline bool Type::isConstantArrayType() const {
6711 return isa<ConstantArrayType>(CanonicalType);
6712}
6713
6714inline bool Type::isIncompleteArrayType() const {
6715 return isa<IncompleteArrayType>(CanonicalType);
6716}
6717
6718inline bool Type::isVariableArrayType() const {
6719 return isa<VariableArrayType>(CanonicalType);
6720}
6721
6722inline bool Type::isDependentSizedArrayType() const {
6723 return isa<DependentSizedArrayType>(CanonicalType);
6724}
6725
6726inline bool Type::isBuiltinType() const {
6727 return isa<BuiltinType>(CanonicalType);
6728}
6729
6730inline bool Type::isRecordType() const {
6731 return isa<RecordType>(CanonicalType);
6732}
6733
6734inline bool Type::isEnumeralType() const {
6735 return isa<EnumType>(CanonicalType);
6736}
6737
6738inline bool Type::isAnyComplexType() const {
6739 return isa<ComplexType>(CanonicalType);
6740}
6741
6742inline bool Type::isVectorType() const {
6743 return isa<VectorType>(CanonicalType);
6744}
6745
6746inline bool Type::isExtVectorType() const {
6747 return isa<ExtVectorType>(CanonicalType);
6748}
6749
6750inline bool Type::isMatrixType() const {
6751 return isa<MatrixType>(CanonicalType);
6752}
6753
6754inline bool Type::isConstantMatrixType() const {
6755 return isa<ConstantMatrixType>(CanonicalType);
6756}
6757
6758inline bool Type::isDependentAddressSpaceType() const {
6759 return isa<DependentAddressSpaceType>(CanonicalType);
6760}
6761
6762inline bool Type::isObjCObjectPointerType() const {
6763 return isa<ObjCObjectPointerType>(CanonicalType);
6764}
6765
6766inline bool Type::isObjCObjectType() const {
6767 return isa<ObjCObjectType>(CanonicalType);
6768}
6769
6770inline bool Type::isObjCObjectOrInterfaceType() const {
6771 return isa<ObjCInterfaceType>(CanonicalType) ||
6772 isa<ObjCObjectType>(CanonicalType);
6773}
6774
6775inline bool Type::isAtomicType() const {
6776 return isa<AtomicType>(CanonicalType);
6777}
6778
6779inline bool Type::isUndeducedAutoType() const {
6780 return isa<AutoType>(CanonicalType);
6781}
6782
6783inline bool Type::isObjCQualifiedIdType() const {
6784 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6785 return OPT->isObjCQualifiedIdType();
6786 return false;
6787}
6788
6789inline bool Type::isObjCQualifiedClassType() const {
6790 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6791 return OPT->isObjCQualifiedClassType();
6792 return false;
6793}
6794
6795inline bool Type::isObjCIdType() const {
6796 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6797 return OPT->isObjCIdType();
6798 return false;
6799}
6800
6801inline bool Type::isObjCClassType() const {
6802 if (const auto *OPT = getAs<ObjCObjectPointerType>())
6803 return OPT->isObjCClassType();
6804 return false;
6805}
6806
6807inline bool Type::isObjCSelType() const {
6808 if (const auto *OPT = getAs<PointerType>())
6809 return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel);
6810 return false;
6811}
6812
6813inline bool Type::isObjCBuiltinType() const {
6814 return isObjCIdType() || isObjCClassType() || isObjCSelType();
6815}
6816
6817inline bool Type::isDecltypeType() const {
6818 return isa<DecltypeType>(this);
6819}
6820
6821#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
6822 inline bool Type::is##Id##Type() const { \
6823 return isSpecificBuiltinType(BuiltinType::Id); \
6824 }
6825#include "clang/Basic/OpenCLImageTypes.def"
6826
6827inline bool Type::isSamplerT() const {
6828 return isSpecificBuiltinType(BuiltinType::OCLSampler);
6829}
6830
6831inline bool Type::isEventT() const {
6832 return isSpecificBuiltinType(BuiltinType::OCLEvent);
6833}
6834
6835inline bool Type::isClkEventT() const {
6836 return isSpecificBuiltinType(BuiltinType::OCLClkEvent);
6837}
6838
6839inline bool Type::isQueueT() const {
6840 return isSpecificBuiltinType(BuiltinType::OCLQueue);
6841}
6842
6843inline bool Type::isReserveIDT() const {
6844 return isSpecificBuiltinType(BuiltinType::OCLReserveID);
6845}
6846
6847inline bool Type::isImageType() const {
6848#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) is##Id##Type() ||
6849 return
6850#include "clang/Basic/OpenCLImageTypes.def"
6851 false; // end boolean or operation
6852}
6853
6854inline bool Type::isPipeType() const {
6855 return isa<PipeType>(CanonicalType);
6856}
6857
6858inline bool Type::isExtIntType() const {
6859 return isa<ExtIntType>(CanonicalType);
6860}
6861
6862#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
6863 inline bool Type::is##Id##Type() const { \
6864 return isSpecificBuiltinType(BuiltinType::Id); \
6865 }
6866#include "clang/Basic/OpenCLExtensionTypes.def"
6867
6868inline bool Type::isOCLIntelSubgroupAVCType() const {
6869#define INTEL_SUBGROUP_AVC_TYPE(ExtType, Id) \
6870 isOCLIntelSubgroupAVC##Id##Type() ||
6871 return
6872#include "clang/Basic/OpenCLExtensionTypes.def"
6873 false; // end of boolean or operation
6874}
6875
6876inline bool Type::isOCLExtOpaqueType() const {
6877#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) is##Id##Type() ||
6878 return
6879#include "clang/Basic/OpenCLExtensionTypes.def"
6880 false; // end of boolean or operation
6881}
6882
6883inline bool Type::isOpenCLSpecificType() const {
6884 return isSamplerT() || isEventT() || isImageType() || isClkEventT() ||
6885 isQueueT() || isReserveIDT() || isPipeType() || isOCLExtOpaqueType();
6886}
6887
6888inline bool Type::isTemplateTypeParmType() const {
6889 return isa<TemplateTypeParmType>(CanonicalType);
6890}
6891
6892inline bool Type::isSpecificBuiltinType(unsigned K) const {
6893 if (const BuiltinType *BT = getAs<BuiltinType>()) {
6894 return BT->getKind() == static_cast<BuiltinType::Kind>(K);
6895 }
6896 return false;
6897}
6898
6899inline bool Type::isPlaceholderType() const {
6900 if (const auto *BT = dyn_cast<BuiltinType>(this))
6901 return BT->isPlaceholderType();
6902 return false;
6903}
6904
6905inline const BuiltinType *Type::getAsPlaceholderType() const {
6906 if (const auto *BT = dyn_cast<BuiltinType>(this))
6907 if (BT->isPlaceholderType())
6908 return BT;
6909 return nullptr;
6910}
6911
6912inline bool Type::isSpecificPlaceholderType(unsigned K) const {
6913 assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
6914 return isSpecificBuiltinType(K);
6915}
6916
6917inline bool Type::isNonOverloadPlaceholderType() const {
6918 if (const auto *BT = dyn_cast<BuiltinType>(this))
6919 return BT->isNonOverloadPlaceholderType();
6920 return false;
6921}
6922
6923inline bool Type::isVoidType() const {
6924 return isSpecificBuiltinType(BuiltinType::Void);
6925}
6926
6927inline bool Type::isHalfType() const {
6928 // FIXME: Should we allow complex __fp16? Probably not.
6929 return isSpecificBuiltinType(BuiltinType::Half);
6930}
6931
6932inline bool Type::isFloat16Type() const {
6933 return isSpecificBuiltinType(BuiltinType::Float16);
6934}
6935
6936inline bool Type::isBFloat16Type() const {
6937 return isSpecificBuiltinType(BuiltinType::BFloat16);
6938}
6939
6940inline bool Type::isFloat128Type() const {
6941 return isSpecificBuiltinType(BuiltinType::Float128);
6942}
6943
6944inline bool Type::isNullPtrType() const {
6945 return isSpecificBuiltinType(BuiltinType::NullPtr);
6946}
6947
6948bool IsEnumDeclComplete(EnumDecl *);
6949bool IsEnumDeclScoped(EnumDecl *);
6950
6951inline bool Type::isIntegerType() const {
6952 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
6953 return BT->getKind() >= BuiltinType::Bool &&
6954 BT->getKind() <= BuiltinType::Int128;
6955 if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
6956 // Incomplete enum types are not treated as integer types.
6957 // FIXME: In C++, enum types are never integer types.
6958 return IsEnumDeclComplete(ET->getDecl()) &&
6959 !IsEnumDeclScoped(ET->getDecl());
6960 }
6961 return isExtIntType();
6962}
6963
6964inline bool Type::isFixedPointType() const {
6965 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
6966 return BT->getKind() >= BuiltinType::ShortAccum &&
6967 BT->getKind() <= BuiltinType::SatULongFract;
6968 }
6969 return false;
6970}
6971
6972inline bool Type::isFixedPointOrIntegerType() const {
6973 return isFixedPointType() || isIntegerType();
6974}
6975
6976inline bool Type::isSaturatedFixedPointType() const {
6977 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
6978 return BT->getKind() >= BuiltinType::SatShortAccum &&
6979 BT->getKind() <= BuiltinType::SatULongFract;
6980 }
6981 return false;
6982}
6983
6984inline bool Type::isUnsaturatedFixedPointType() const {
6985 return isFixedPointType() && !isSaturatedFixedPointType();
6986}
6987
6988inline bool Type::isSignedFixedPointType() const {
6989 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
6990 return ((BT->getKind() >= BuiltinType::ShortAccum &&
6991 BT->getKind() <= BuiltinType::LongAccum) ||
6992 (BT->getKind() >= BuiltinType::ShortFract &&
6993 BT->getKind() <= BuiltinType::LongFract) ||
6994 (BT->getKind() >= BuiltinType::SatShortAccum &&
6995 BT->getKind() <= BuiltinType::SatLongAccum) ||
6996 (BT->getKind() >= BuiltinType::SatShortFract &&
6997 BT->getKind() <= BuiltinType::SatLongFract));
6998 }
6999 return false;
7000}
7001
7002inline bool Type::isUnsignedFixedPointType() const {
7003 return isFixedPointType() && !isSignedFixedPointType();
7004}
7005
7006inline bool Type::isScalarType() const {
7007 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
       [5] Calling 'dyn_cast<clang::BuiltinType, clang::QualType>'
       [9] Returning from 'dyn_cast<clang::BuiltinType, clang::QualType>'
       [9.1] 'BT' is null
       [10] Taking false branch
7008 return BT->getKind() > BuiltinType::Void &&
7009 BT->getKind() <= BuiltinType::NullPtr;
7010 if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
       [11] Calling 'dyn_cast<clang::EnumType, clang::QualType>'
       [15] Returning from 'dyn_cast<clang::EnumType, clang::QualType>'
       [15.1] 'ET' is null
       [16] Taking false branch
7011 // Enums are scalar types, but only if they are defined. Incomplete enums
7012 // are not treated as scalar types.
7013 return IsEnumDeclComplete(ET->getDecl());
7014 return isa<PointerType>(CanonicalType) ||
       [17] Assuming field 'CanonicalType' is a 'PointerType'
       [18] Returning the value 1, which participates in a condition later
7015 isa<BlockPointerType>(CanonicalType) ||
7016 isa<MemberPointerType>(CanonicalType) ||
7017 isa<ComplexType>(CanonicalType) ||
7018 isa<ObjCObjectPointerType>(CanonicalType) ||
7019 isExtIntType();
7020}
7021
7022inline bool Type::isIntegralOrEnumerationType() const {
7023 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7024 return BT->getKind() >= BuiltinType::Bool &&
7025 BT->getKind() <= BuiltinType::Int128;
7026
7027 // Check for a complete enum type; incomplete enum types are not properly an
7028 // enumeration type in the sense required here.
7029 if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
7030 return IsEnumDeclComplete(ET->getDecl());
7031
7032 return isExtIntType();
7033}
7034
7035inline bool Type::isBooleanType() const {
7036 if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
7037 return BT->getKind() == BuiltinType::Bool;
7038 return false;
7039}
7040
7041inline bool Type::isUndeducedType() const {
7042 auto *DT = getContainedDeducedType();
7043 return DT && !DT->isDeduced();
7044}
7045
7046/// Determines whether this is a type for which one can define
7047/// an overloaded operator.
7048inline bool Type::isOverloadableType() const {
7049 return isDependentType() || isRecordType() || isEnumeralType();
7050}
7051
7052/// Determines whether this type can decay to a pointer type.
7053inline bool Type::canDecayToPointerType() const {
7054 return isFunctionType() || isArrayType();
7055}
7056
7057inline bool Type::hasPointerRepresentation() const {
7058 return (isPointerType() || isReferenceType() || isBlockPointerType() ||
7059 isObjCObjectPointerType() || isNullPtrType());
7060}
7061
7062inline bool Type::hasObjCPointerRepresentation() const {
7063 return isObjCObjectPointerType();
7064}
7065
7066inline const Type *Type::getBaseElementTypeUnsafe() const {
7067 const Type *type = this;
7068 while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
7069 type = arrayType->getElementType().getTypePtr();
7070 return type;
7071}
7072
7073inline const Type *Type::getPointeeOrArrayElementType() const {
7074 const Type *type = this;
7075 if (type->isAnyPointerType())
7076 return type->getPointeeType().getTypePtr();
7077 else if (type->isArrayType())
7078 return type->getBaseElementTypeUnsafe();
7079 return type;
7080}
7081/// Insertion operator for partial diagnostics. This allows sending address
7082/// spaces into a diagnostic with <<.
7083inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7084 LangAS AS) {
7085 PD.AddTaggedVal(static_cast<std::underlying_type_t<LangAS>>(AS),
7086 DiagnosticsEngine::ArgumentKind::ak_addrspace);
7087 return PD;
7088}
7089
7090/// Insertion operator for partial diagnostics. This allows sending Qualifiers
7091/// into a diagnostic with <<.
7092inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7093 Qualifiers Q) {
7094 PD.AddTaggedVal(Q.getAsOpaqueValue(),
7095 DiagnosticsEngine::ArgumentKind::ak_qual);
7096 return PD;
7097}
7098
7099/// Insertion operator for partial diagnostics. This allows sending QualType's
7100/// into a diagnostic with <<.
7101inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
7102 QualType T) {
7103 PD.AddTaggedVal(reinterpret_cast<intptr_t>(T.getAsOpaquePtr()),
7104 DiagnosticsEngine::ak_qualtype);
7105 return PD;
7106}
7107
7108// Helper class template that is used by Type::getAs to ensure that one does
7109// not try to look through a qualified type to get to an array type.
7110template <typename T>
7111using TypeIsArrayType =
7112 std::integral_constant<bool, std::is_same<T, ArrayType>::value ||
7113 std::is_base_of<ArrayType, T>::value>;
7114
7115// Member template getAs<specific type>.
7116template <typename T> const T *Type::getAs() const {
7117 static_assert(!TypeIsArrayType<T>::value,
7118 "ArrayType cannot be used with getAs!");
7119
7120 // If this is directly a T type, return it.
7121 if (const auto *Ty = dyn_cast<T>(this))
7122 return Ty;
7123
7124 // If the canonical form of this type isn't the right kind, reject it.
7125 if (!isa<T>(CanonicalType))
7126 return nullptr;
7127
7128 // If this is a typedef for the type, strip the typedef off without
7129 // losing all typedef information.
7130 return cast<T>(getUnqualifiedDesugaredType());
7131}
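A typical use of getAs is to look through sugar (typedefs, elaborated names) to a specific node class, checking the result for null. A minimal sketch:

#include "clang/AST/Type.h"

// Sketch: getAs desugars down to the requested kind but returns null when
// the canonical type is not that kind, so the result must be tested.
clang::QualType pointeeOrSelf(clang::QualType T) {
  if (const auto *PT = T->getAs<clang::PointerType>())
    return PT->getPointeeType();
  return T;
}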
7132
7133template <typename T> const T *Type::getAsAdjusted() const {
7134 static_assert(!TypeIsArrayType<T>::value, "ArrayType cannot be used with getAsAdjusted!");
7135
7136 // If this is directly a T type, return it.
7137 if (const auto *Ty = dyn_cast<T>(this))
7138 return Ty;
7139
7140 // If the canonical form of this type isn't the right kind, reject it.
7141 if (!isa<T>(CanonicalType))
7142 return nullptr;
7143
7144 // Strip off type adjustments that do not modify the underlying nature of the
7145 // type.
7146 const Type *Ty = this;
7147 while (Ty) {
7148 if (const auto *A = dyn_cast<AttributedType>(Ty))
7149 Ty = A->getModifiedType().getTypePtr();
7150 else if (const auto *E = dyn_cast<ElaboratedType>(Ty))
7151 Ty = E->desugar().getTypePtr();
7152 else if (const auto *P = dyn_cast<ParenType>(Ty))
7153 Ty = P->desugar().getTypePtr();
7154 else if (const auto *A = dyn_cast<AdjustedType>(Ty))
7155 Ty = A->desugar().getTypePtr();
7156 else if (const auto *M = dyn_cast<MacroQualifiedType>(Ty))
7157 Ty = M->desugar().getTypePtr();
7158 else
7159 break;
7160 }
7161
7162 // Just because the canonical type is correct does not mean we can use cast<>,
7163 // since we may not have stripped off all the sugar down to the base type.
7164 return dyn_cast<T>(Ty);
7165}
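For contrast with getAs: as the stripping loop above shows, getAsAdjusted removes only adjustment sugar (attributes, elaboration, parens, adjustments, macro qualifiers) and never desugars a typedef, so it can return null even when the canonical type matches. A hedged sketch of that behavioral difference; the helper name is invented:

#include "clang/AST/Type.h"

// Sketch: for `typedef void Fn;`-style sugar, getAs<FunctionProtoType>()
// would desugar the typedef, but getAsAdjusted stops at the TypedefType
// and the final dyn_cast yields null.
const clang::FunctionProtoType *protoUnlessTypedef(clang::QualType T) {
  return T->getAsAdjusted<clang::FunctionProtoType>();  // may be null
}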
7166
7167inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
7168 // If this is directly an array type, return it.
7169 if (const auto *arr = dyn_cast<ArrayType>(this))
7170 return arr;
7171
7172 // If the canonical form of this type isn't the right kind, reject it.
7173 if (!isa<ArrayType>(CanonicalType))
7174 return nullptr;
7175
7176 // If this is a typedef for the type, strip the typedef off without
7177 // losing all typedef information.
7178 return cast<ArrayType>(getUnqualifiedDesugaredType());
7179}
7180
7181template <typename T> const T *Type::castAs() const {
7182 static_assert(!TypeIsArrayType<T>::value,
7183 "ArrayType cannot be used with castAs!");
7184
7185 if (const auto *ty = dyn_cast<T>(this)) return ty;
7186  assert(isa<T>(CanonicalType));
7187 return cast<T>(getUnqualifiedDesugaredType());
7188}
7189
7190inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
7191  assert(isa<ArrayType>(CanonicalType));
7192 if (const auto *arr = dyn_cast<ArrayType>(this)) return arr;
7193 return cast<ArrayType>(getUnqualifiedDesugaredType());
7194}
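The division of labor: getAs is a query that may return null, while castAs is an assertion-backed cast for when the kind is already established. A short sketch:

#include "clang/AST/Type.h"

// Sketch: prefer castAs only after the kind has been checked.
void useRecord(clang::QualType T) {
  if (T->isRecordType()) {
    const auto *RT = T->castAs<clang::RecordType>();  // safe: checked above
    (void)RT;
  }
  const auto *Maybe = T->getAs<clang::RecordType>();  // null if not a record
  (void)Maybe;
}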
7195
7196DecayedType::DecayedType(QualType OriginalType, QualType DecayedPtr,
7197 QualType CanonicalPtr)
7198 : AdjustedType(Decayed, OriginalType, DecayedPtr, CanonicalPtr) {
7199#ifndef NDEBUG
7200 QualType Adjusted = getAdjustedType();
7201 (void)AttributedType::stripOuterNullability(Adjusted);
7202  assert(isa<PointerType>(Adjusted));
7203#endif
7204}
7205
7206QualType DecayedType::getPointeeType() const {
7207 QualType Decayed = getDecayedType();
7208 (void)AttributedType::stripOuterNullability(Decayed);
7209 return cast<PointerType>(Decayed)->getPointeeType();
7210}
7211
7212// Get the decimal string representation of a fixed point type, represented
7213// as a scaled integer.
7214// TODO: At some point, we should change the arguments to accept an
7215// APFixedPoint instead of an APSInt and scale.
7216void FixedPointValueToString(SmallVectorImpl<char> &Str, llvm::APSInt Val,
7217 unsigned Scale);
7218
7219} // namespace clang
7220
7221#endif // LLVM_CLANG_AST_TYPE_H

/build/llvm-toolchain-snapshot-12~++20201124111112+7b5254223ac/llvm/include/llvm/Support/Casting.h

1//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
10// and dyn_cast_or_null<X>() templates.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_CASTING_H
15#define LLVM_SUPPORT_CASTING_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/type_traits.h"
19#include <cassert>
20#include <memory>
21#include <type_traits>
22
23namespace llvm {
24
25//===----------------------------------------------------------------------===//
26// isa<x> Support Templates
27//===----------------------------------------------------------------------===//
28
29// Define a template that can be specialized by smart pointers to reflect the
30// fact that they are automatically dereferenced, and are not involved with the
31// template selection process... the default implementation is a noop.
32//
33template<typename From> struct simplify_type {
34 using SimpleType = From; // The real type this represents...
35
36 // An accessor to get the real value...
37 static SimpleType &getSimplifiedValue(From &Val) { return Val; }
38};
39
40template<typename From> struct simplify_type<const From> {
41 using NonConstSimpleType = typename simplify_type<From>::SimpleType;
42 using SimpleType =
43 typename add_const_past_pointer<NonConstSimpleType>::type;
44 using RetType =
45 typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
46
47 static RetType getSimplifiedValue(const From& Val) {
48 return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
49 }
50};
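As the comment above notes, smart-pointer-like wrappers opt into the casting machinery by specializing simplify_type. A hedged sketch for a hypothetical IntrusivePtr wrapper (not part of LLVM):

#include "llvm/Support/Casting.h"

// Hypothetical wrapper type, invented for illustration.
template <typename T> struct IntrusivePtr {
  T *Ptr = nullptr;
  T *get() const { return Ptr; }
};

namespace llvm {
// Teach isa<>/cast<>/dyn_cast<> to look through IntrusivePtr by reducing
// it to a plain pointer; the const case is derived by the partial
// specialization above.
template <typename T> struct simplify_type<IntrusivePtr<T>> {
  using SimpleType = T *;
  static SimpleType getSimplifiedValue(IntrusivePtr<T> &Val) {
    return Val.get();
  }
};
} // namespace llvm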
51
52// The core of the implementation of isa<X> is here; To and From should be
53// the names of classes. This template can be specialized to customize the
54// implementation of isa<> without rewriting it from scratch.
55template <typename To, typename From, typename Enabler = void>
56struct isa_impl {
57 static inline bool doit(const From &Val) {
58 return To::classof(&Val);
59 }
60};
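The default implementation delegates to a static classof on the target class, which is the hook a hierarchy implements for LLVM-style RTTI. A minimal sketch of that convention; the Shape/Circle hierarchy is invented for illustration:

// Invented example hierarchy demonstrating the classof convention.
struct Shape {
  enum ShapeKind { SK_Circle, SK_Square };
  const ShapeKind Kind;
  explicit Shape(ShapeKind K) : Kind(K) {}
  ShapeKind getKind() const { return Kind; }
};

struct Circle : Shape {
  Circle() : Shape(SK_Circle) {}
  // isa_impl<Circle, Shape>::doit ends up calling this.
  static bool classof(const Shape *S) { return S->getKind() == SK_Circle; }
};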
61
62/// Always allow upcasts, and perform no dynamic check for them.
63template <typename To, typename From>
64struct isa_impl<To, From, std::enable_if_t<std::is_base_of<To, From>::value>> {
65 static inline bool doit(const From &) { return true; }
66};
67
68template <typename To, typename From> struct isa_impl_cl {
69 static inline bool doit(const From &Val) {
70 return isa_impl<To, From>::doit(Val);
71 }
72};
73
74template <typename To, typename From> struct isa_impl_cl<To, const From> {
75 static inline bool doit(const From &Val) {
76 return isa_impl<To, From>::doit(Val);
77 }
78};
79
80template <typename To, typename From>
81struct isa_impl_cl<To, const std::unique_ptr<From>> {
82 static inline bool doit(const std::unique_ptr<From> &Val) {
83    assert(Val && "isa<> used on a null pointer");
84 return isa_impl_cl<To, From>::doit(*Val);
85 }
86};
87
88template <typename To, typename From> struct isa_impl_cl<To, From*> {
89 static inline bool doit(const From *Val) {
90    assert(Val && "isa<> used on a null pointer");
91 return isa_impl<To, From>::doit(*Val);
92 }
93};
94
95template <typename To, typename From> struct isa_impl_cl<To, From*const> {
96 static inline bool doit(const From *Val) {
97    assert(Val && "isa<> used on a null pointer");
98 return isa_impl<To, From>::doit(*Val);
99 }
100};
101
102template <typename To, typename From> struct isa_impl_cl<To, const From*> {
103 static inline bool doit(const From *Val) {
104   assert(Val && "isa<> used on a null pointer");
105 return isa_impl<To, From>::doit(*Val);
106 }
107};
108
109template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
110 static inline bool doit(const From *Val) {
111   assert(Val && "isa<> used on a null pointer");
112 return isa_impl<To, From>::doit(*Val);
113 }
114};
115
116template<typename To, typename From, typename SimpleFrom>
117struct isa_impl_wrap {
118 // When From != SimplifiedType, we can simplify the type some more by using
119 // the simplify_type template.
120 static bool doit(const From &Val) {
121 return isa_impl_wrap<To, SimpleFrom,
122 typename simplify_type<SimpleFrom>::SimpleType>::doit(
123 simplify_type<const From>::getSimplifiedValue(Val));
124 }
125};
126
127template<typename To, typename FromTy>
128struct isa_impl_wrap<To, FromTy, FromTy> {
129 // When From == SimpleType, we are as simple as we are going to get.
130 static bool doit(const FromTy &Val) {
131 return isa_impl_cl<To,FromTy>::doit(Val);
132 }
133};
134
135// isa<X> - Return true if the parameter to the template is an instance of one
136// of the template type arguments. Used like this:
137//
138// if (isa<Type>(myVal)) { ... }
139// if (isa<Type0, Type1, Type2>(myVal)) { ... }
140//
141template <class X, class Y> LLVM_NODISCARD inline bool isa(const Y &Val) {
142 return isa_impl_wrap<X, const Y,
143 typename simplify_type<const Y>::SimpleType>::doit(Val);
144}
145
146template <typename First, typename Second, typename... Rest, typename Y>
147LLVM_NODISCARD inline bool isa(const Y &Val) {
148 return isa<First>(Val) || isa<Second, Rest...>(Val);
149}
150
151// isa_and_nonnull<X> - Functionally identical to isa, except that a null value
152// is accepted.
153//
154template <typename... X, class Y>
155LLVM_NODISCARD inline bool isa_and_nonnull(const Y &Val) {
156 if (!Val)
157 return false;
158 return isa<X...>(Val);
159}
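A short sketch, reusing the invented Shape/Circle hierarchy from the classof example above; note that plain isa<> asserts on a null pointer, while isa_and_nonnull<> treats null as "not an instance":

// Sketch: null-tolerant type test.
void describe(const Shape *S) {
  if (llvm::isa_and_nonnull<Circle>(S)) {
    // S is non-null and dynamically a Circle.
  }
}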
160
161//===----------------------------------------------------------------------===//
162// cast<x> Support Templates
163//===----------------------------------------------------------------------===//
164
165template<class To, class From> struct cast_retty;
166
167// Calculate what type the 'cast' function should return, based on a requested
168// type of To and a source type of From.
169template<class To, class From> struct cast_retty_impl {
170 using ret_type = To &; // Normal case, return Ty&
171};
172template<class To, class From> struct cast_retty_impl<To, const From> {
173 using ret_type = const To &; // Normal case, return Ty&
174};
175
176template<class To, class From> struct cast_retty_impl<To, From*> {
177 using ret_type = To *; // Pointer arg case, return Ty*
178};
179
180template<class To, class From> struct cast_retty_impl<To, const From*> {
181 using ret_type = const To *; // Constant pointer arg case, return const Ty*
182};
183
184template<class To, class From> struct cast_retty_impl<To, const From*const> {
185 using ret_type = const To *; // Constant pointer arg case, return const Ty*
186};
187
188template <class To, class From>
189struct cast_retty_impl<To, std::unique_ptr<From>> {
190private:
191 using PointerType = typename cast_retty_impl<To, From *>::ret_type;
192 using ResultType = std::remove_pointer_t<PointerType>;
193
194public:
195 using ret_type = std::unique_ptr<ResultType>;
196};
197
198template<class To, class From, class SimpleFrom>
199struct cast_retty_wrap {
200 // When the simplified type and the from type are not the same, use the type
201 // simplifier to reduce the type, then reuse cast_retty_impl to get the
202 // resultant type.
203 using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
204};
205
206template<class To, class FromTy>
207struct cast_retty_wrap<To, FromTy, FromTy> {
208 // When the simplified type is equal to the from type, use it directly.
209 using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
210};
211
212template<class To, class From>
213struct cast_retty {
214 using ret_type = typename cast_retty_wrap<
215 To, From, typename simplify_type<From>::SimpleType>::ret_type;
216};
217
218// Ensure the non-simple values are converted using the simplify_type template
219// that may be specialized by smart pointers...
220//
221template<class To, class From, class SimpleFrom> struct cast_convert_val {
222 // This is not a simple type, use the template to simplify it...
223 static typename cast_retty<To, From>::ret_type doit(From &Val) {
224 return cast_convert_val<To, SimpleFrom,
225 typename simplify_type<SimpleFrom>::SimpleType>::doit(
226 simplify_type<From>::getSimplifiedValue(Val));
227 }
228};
229
230template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
231 // This _is_ a simple type, just cast it.
232 static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
233 typename cast_retty<To, FromTy>::ret_type Res2
234 = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
235 return Res2;
236 }
237};
238
239template <class X> struct is_simple_type {
240 static const bool value =
241 std::is_same<X, typename simplify_type<X>::SimpleType>::value;
242};
243
244// cast<X> - Return the argument parameter cast to the specified type. This
245// casting operator asserts that the type is correct, so it does not return null
246// on failure. It does not allow a null argument (use cast_or_null for that).
247// It is typically used like this:
248//
249// cast<Instruction>(myVal)->getParent()
250//
251template <class X, class Y>
252inline std::enable_if_t<!is_simple_type<Y>::value,
253 typename cast_retty<X, const Y>::ret_type>
254cast(const Y &Val) {
255  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
256 return cast_convert_val<
257 X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
258}
259
260template <class X, class Y>
261inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
262  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
263 return cast_convert_val<X, Y,
264 typename simplify_type<Y>::SimpleType>::doit(Val);
265}
266
267template <class X, class Y>
268inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
269  assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
270 return cast_convert_val<X, Y*,
271 typename simplify_type<Y*>::SimpleType>::doit(Val);
272}
273
274template <class X, class Y>
275inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
276cast(std::unique_ptr<Y> &&Val) {
277  assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!");
278 using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
279 return ret_type(
280 cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit(
281 Val.release()));
282}
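The unique_ptr overload releases the source and re-wraps the raw pointer in the target type. A hedged sketch, again using the invented Shape/Circle hierarchy:

#include <memory>

// Sketch: ownership transfers into the result of the cast.
void takeOwnership() {
  std::unique_ptr<Shape> S = std::make_unique<Circle>();
  std::unique_ptr<Circle> C = llvm::cast<Circle>(std::move(S));
  // S is now empty; C owns the object. The cast would assert (not return
  // null) if S did not actually hold a Circle.
}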
283
284// cast_or_null<X> - Functionally identical to cast, except that a null value is
285// accepted.
286//
287template <class X, class Y>
288LLVM_NODISCARD inline std::enable_if_t<
289 !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>
290cast_or_null(const Y &Val) {
291 if (!Val)
292 return nullptr;
293  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
294 return cast<X>(Val);
295}
296
297template <class X, class Y>
298LLVM_NODISCARD inline std::enable_if_t<!is_simple_type<Y>::value,
299 typename cast_retty<X, Y>::ret_type>
300cast_or_null(Y &Val) {
301 if (!Val)
302 return nullptr;
303  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
304 return cast<X>(Val);
305}
306
307template <class X, class Y>
308LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
309cast_or_null(Y *Val) {
310 if (!Val) return nullptr;
311  assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
312 return cast<X>(Val);
313}
314
315template <class X, class Y>
316inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
317cast_or_null(std::unique_ptr<Y> &&Val) {
318 if (!Val)
319 return nullptr;
320 return cast<X>(std::move(Val));
321}
322
323// dyn_cast<X> - Return the argument parameter cast to the specified type. This
324// casting operator returns null if the argument is of the wrong type, so it can
325// be used to test for a type as well as cast if successful. This should be
326// used in the context of an if statement like this:
327//
328// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
329//
330
331template <class X, class Y>
332LLVM_NODISCARD inline std::enable_if_t<
333 !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>
334dyn_cast(const Y &Val) {
335 return isa<X>(Val) ? cast<X>(Val) : nullptr;
    [6]  Assuming 'Val' is not a 'BuiltinType'
    [7]  '?' condition is false
    [8]  Returning null pointer, which participates in a condition later
    [12] Assuming 'Val' is not a 'EnumType'
    [13] '?' condition is false
    [14] Returning null pointer, which participates in a condition later
336}
337
338template <class X, class Y>
339LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
340 return isa<X>(Val) ? cast<X>(Val) : nullptr;
341}
342
343template <class X, class Y>
344LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
345 return isa<X>(Val) ? cast<X>(Val) : nullptr;
346}
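The path notes interleaved above are the analyzer's point: dyn_cast can legitimately return null, so the result must feed a branch before it is dereferenced. A sketch of the idiomatic guard, with the invented Shape/Circle hierarchy:

// Sketch: always test the result of dyn_cast before use. Unguarded use of
// the returned pointer is exactly the null-pointer pattern this report flags.
// S is assumed non-null here; for possibly-null inputs use dyn_cast_or_null.
void visit(const Shape *S) {
  if (const auto *C = llvm::dyn_cast<Circle>(S)) {
    // Non-null here: safe to use C.
  }
  // Outside the branch the cast result may be null and must not be touched.
}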
347
348// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
349// value is accepted.
350//
351template <class X, class Y>
352LLVM_NODISCARD inline std::enable_if_t<
353 !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>
354dyn_cast_or_null(const Y &Val) {
355 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
356}
357
358template <class X, class Y>
359LLVM_NODISCARD inline std::enable_if_t<!is_simple_type<Y>::value,
360 typename cast_retty<X, Y>::ret_type>
361dyn_cast_or_null(Y &Val) {
362 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
363}
364
365template <class X, class Y>
366LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
367dyn_cast_or_null(Y *Val) {
368 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
369}
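A brief contrast with plain dyn_cast, again with the invented hierarchy:

// Sketch: dyn_cast_or_null additionally tolerates a null input, whereas
// plain dyn_cast would assert (via isa<>) if handed a null pointer.
void nullTolerant() {
  const Shape *S = nullptr;
  const Circle *C = llvm::dyn_cast_or_null<Circle>(S);  // null, no assert
  (void)C;
}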
370
371// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
372// taking ownership of the input pointer iff isa<X>(Val) is true. If the
373// cast is successful, From refers to nullptr on exit and the casted value
374// is returned. If the cast is unsuccessful, the function returns nullptr
375// and From is unchanged.
376template <class X, class Y>
377LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
378 -> decltype(cast<X>(Val)) {
379 if (!isa<X>(Val))
380 return nullptr;
381 return cast<X>(std::move(Val));
382}
383
384template <class X, class Y>
385LLVM_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val) {
386 return unique_dyn_cast<X, Y>(Val);
387}
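The ownership behavior described in the comment above, as a hedged sketch with the invented hierarchy:

#include <memory>

// Sketch: ownership transfers only on success; on failure the source
// unique_ptr is left untouched and nullptr is returned.
void tryTakeCircle() {
  std::unique_ptr<Shape> S = std::make_unique<Circle>();
  if (auto C = llvm::unique_dyn_cast<Circle>(S)) {
    // Cast succeeded: C owns the object and S is now empty.
  } else {
    // Cast failed: S still owns the object.
  }
}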
388
389// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast,
390// except that a null value is accepted.
391template <class X, class Y>
392LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
393 -> decltype(cast<X>(Val)) {
394 if (!Val)
395 return nullptr;
396 return unique_dyn_cast<X, Y>(Val);
397}
398
399template <class X, class Y>
400LLVM_NODISCARD inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val) {
401 return unique_dyn_cast_or_null<X, Y>(Val);
402}
403
404} // end namespace llvm
405
406#endif // LLVM_SUPPORT_CASTING_H