File: | clang/lib/AST/ASTContext.cpp |
Warning: | line 3270, column 3: value stored to 'AT' is never read (dead store reported by the Clang Static Analyzer) |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the ASTContext interface. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "clang/AST/ASTContext.h" |
14 | #include "CXXABI.h" |
15 | #include "Interp/Context.h" |
16 | #include "clang/AST/APValue.h" |
17 | #include "clang/AST/ASTConcept.h" |
18 | #include "clang/AST/ASTMutationListener.h" |
19 | #include "clang/AST/ASTTypeTraits.h" |
20 | #include "clang/AST/Attr.h" |
21 | #include "clang/AST/AttrIterator.h" |
22 | #include "clang/AST/CharUnits.h" |
23 | #include "clang/AST/Comment.h" |
24 | #include "clang/AST/Decl.h" |
25 | #include "clang/AST/DeclBase.h" |
26 | #include "clang/AST/DeclCXX.h" |
27 | #include "clang/AST/DeclContextInternals.h" |
28 | #include "clang/AST/DeclObjC.h" |
29 | #include "clang/AST/DeclOpenMP.h" |
30 | #include "clang/AST/DeclTemplate.h" |
31 | #include "clang/AST/DeclarationName.h" |
32 | #include "clang/AST/DependenceFlags.h" |
33 | #include "clang/AST/Expr.h" |
34 | #include "clang/AST/ExprCXX.h" |
35 | #include "clang/AST/ExprConcepts.h" |
36 | #include "clang/AST/ExternalASTSource.h" |
37 | #include "clang/AST/Mangle.h" |
38 | #include "clang/AST/MangleNumberingContext.h" |
39 | #include "clang/AST/NestedNameSpecifier.h" |
40 | #include "clang/AST/ParentMapContext.h" |
41 | #include "clang/AST/RawCommentList.h" |
42 | #include "clang/AST/RecordLayout.h" |
43 | #include "clang/AST/Stmt.h" |
44 | #include "clang/AST/TemplateBase.h" |
45 | #include "clang/AST/TemplateName.h" |
46 | #include "clang/AST/Type.h" |
47 | #include "clang/AST/TypeLoc.h" |
48 | #include "clang/AST/UnresolvedSet.h" |
49 | #include "clang/AST/VTableBuilder.h" |
50 | #include "clang/Basic/AddressSpaces.h" |
51 | #include "clang/Basic/Builtins.h" |
52 | #include "clang/Basic/CommentOptions.h" |
53 | #include "clang/Basic/ExceptionSpecificationType.h" |
54 | #include "clang/Basic/IdentifierTable.h" |
55 | #include "clang/Basic/LLVM.h" |
56 | #include "clang/Basic/LangOptions.h" |
57 | #include "clang/Basic/Linkage.h" |
58 | #include "clang/Basic/Module.h" |
59 | #include "clang/Basic/NoSanitizeList.h" |
60 | #include "clang/Basic/ObjCRuntime.h" |
61 | #include "clang/Basic/SourceLocation.h" |
62 | #include "clang/Basic/SourceManager.h" |
63 | #include "clang/Basic/Specifiers.h" |
64 | #include "clang/Basic/TargetCXXABI.h" |
65 | #include "clang/Basic/TargetInfo.h" |
66 | #include "clang/Basic/XRayLists.h" |
67 | #include "llvm/ADT/APFixedPoint.h" |
68 | #include "llvm/ADT/APInt.h" |
69 | #include "llvm/ADT/APSInt.h" |
70 | #include "llvm/ADT/ArrayRef.h" |
71 | #include "llvm/ADT/DenseMap.h" |
72 | #include "llvm/ADT/DenseSet.h" |
73 | #include "llvm/ADT/FoldingSet.h" |
74 | #include "llvm/ADT/None.h" |
75 | #include "llvm/ADT/Optional.h" |
76 | #include "llvm/ADT/PointerUnion.h" |
77 | #include "llvm/ADT/STLExtras.h" |
78 | #include "llvm/ADT/SmallPtrSet.h" |
79 | #include "llvm/ADT/SmallVector.h" |
80 | #include "llvm/ADT/StringExtras.h" |
81 | #include "llvm/ADT/StringRef.h" |
82 | #include "llvm/ADT/Triple.h" |
83 | #include "llvm/Support/Capacity.h" |
84 | #include "llvm/Support/Casting.h" |
85 | #include "llvm/Support/Compiler.h" |
86 | #include "llvm/Support/ErrorHandling.h" |
87 | #include "llvm/Support/MD5.h" |
88 | #include "llvm/Support/MathExtras.h" |
89 | #include "llvm/Support/raw_ostream.h" |
90 | #include <algorithm> |
91 | #include <cassert> |
92 | #include <cstddef> |
93 | #include <cstdint> |
94 | #include <cstdlib> |
95 | #include <map> |
96 | #include <memory> |
97 | #include <string> |
98 | #include <tuple> |
99 | #include <utility> |
100 | |
101 | using namespace clang; |
102 | |
// Ordered ranks for the builtin floating-point types, smallest first.
// NOTE(review): consumers (e.g. a getFloatingRank helper) are outside this
// view — confirm the relative ordering of BFloat16/Float16/Half is intended
// before relying on comparisons between them.
enum FloatingRank {
  BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
};
106 | |
/// \returns location that is relevant when searching for Doc comments related
/// to \p D, or an invalid location when \p D cannot carry a doc comment at
/// all (implicit decls, implicit instantiations, parameters, ...).
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    // NOTE(review): this TypedefDecl branch looks unreachable — TypedefDecls
    // already returned getBeginLoc() in the check above.  Confirm before
    // removing.
    if (isa<TypedefDecl>(D)) {
      // If location of the typedef name is in a macro, it is because being
      // declared via a macro. Try using declaration's starting location as
      // the "declaration location".
      return D->getBeginLoc();
    }

    if (const auto *TD = dyn_cast<TagDecl>(D)) {
      // If location of the tag decl is inside a macro, but the spelling of
      // the tag name comes from a macro argument, it looks like a special
      // macro like NS_ENUM is being used to define the tag decl. In that
      // case, adjust the source location to the expansion loc so that we can
      // attach the comment to the tag decl.
      if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
        return SourceMgr.getExpansionLoc(DeclLoc);
    }
  }

  return DeclLoc;
}
198 | |
/// Finds the raw comment (if any) that documents \p D, searching only inside
/// \p CommentsInTheFile — a map from file offset to comment in the file that
/// contains \p RepresentativeLocForDecl.  First tries a trailing comment on
/// the same line after the declaration, then the closest preceding comment.
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path: first comment at or after the declaration's file offset.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        // Trailing comments are only attached to these declaration kinds.
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}
276 | |
277 | RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { |
278 | const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); |
279 | |
280 | // If the declaration doesn't map directly to a location in a file, we |
281 | // can't find the comment. |
282 | if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) |
283 | return nullptr; |
284 | |
285 | if (ExternalSource && !CommentsLoaded) { |
286 | ExternalSource->ReadComments(); |
287 | CommentsLoaded = true; |
288 | } |
289 | |
290 | if (Comments.empty()) |
291 | return nullptr; |
292 | |
293 | const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; |
294 | const auto CommentsInThisFile = Comments.getCommentsInFile(File); |
295 | if (!CommentsInThisFile || CommentsInThisFile->empty()) |
296 | return nullptr; |
297 | |
298 | return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile); |
299 | } |
300 | |
/// Records a raw comment for later attachment to declarations.
void ASTContext::addComment(const RawComment &RC) {
  // Comments from system headers are only expected here when the frontend
  // was asked to retain them.
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}
306 | |
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
/// Always returns a valid declaration (falls back to \p D itself).
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      // The instantiation came either from the primary template or from a
      // partial specialization; return whichever one it was.
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}
376 | |
377 | const RawComment *ASTContext::getRawCommentForAnyRedecl( |
378 | const Decl *D, |
379 | const Decl **OriginalDecl) const { |
380 | if (!D) { |
381 | if (OriginalDecl) |
382 | OriginalDecl = nullptr; |
383 | return nullptr; |
384 | } |
385 | |
386 | D = &adjustDeclToTemplate(*D); |
387 | |
388 | // Any comment directly attached to D? |
389 | { |
390 | auto DeclComment = DeclRawComments.find(D); |
391 | if (DeclComment != DeclRawComments.end()) { |
392 | if (OriginalDecl) |
393 | *OriginalDecl = D; |
394 | return DeclComment->second; |
395 | } |
396 | } |
397 | |
398 | // Any comment attached to any redeclaration of D? |
399 | const Decl *CanonicalD = D->getCanonicalDecl(); |
400 | if (!CanonicalD) |
401 | return nullptr; |
402 | |
403 | { |
404 | auto RedeclComment = RedeclChainComments.find(CanonicalD); |
405 | if (RedeclComment != RedeclChainComments.end()) { |
406 | if (OriginalDecl) |
407 | *OriginalDecl = RedeclComment->second; |
408 | auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); |
409 | assert(CommentAtRedecl != DeclRawComments.end() &&(static_cast<void> (0)) |
410 | "This decl is supposed to have comment attached.")(static_cast<void> (0)); |
411 | return CommentAtRedecl->second; |
412 | } |
413 | } |
414 | |
415 | // Any redeclarations of D that we haven't checked for comments yet? |
416 | // We can't use DenseMap::iterator directly since it'd get invalid. |
417 | auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { |
418 | auto LookupRes = CommentlessRedeclChains.find(CanonicalD); |
419 | if (LookupRes != CommentlessRedeclChains.end()) |
420 | return LookupRes->second; |
421 | return nullptr; |
422 | }(); |
423 | |
424 | for (const auto Redecl : D->redecls()) { |
425 | assert(Redecl)(static_cast<void> (0)); |
426 | // Skip all redeclarations that have been checked previously. |
427 | if (LastCheckedRedecl) { |
428 | if (LastCheckedRedecl == Redecl) { |
429 | LastCheckedRedecl = nullptr; |
430 | } |
431 | continue; |
432 | } |
433 | const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); |
434 | if (RedeclComment) { |
435 | cacheRawCommentForDecl(*Redecl, *RedeclComment); |
436 | if (OriginalDecl) |
437 | *OriginalDecl = Redecl; |
438 | return RedeclComment; |
439 | } |
440 | CommentlessRedeclChains[CanonicalD] = Redecl; |
441 | } |
442 | |
443 | if (OriginalDecl) |
444 | *OriginalDecl = nullptr; |
445 | return nullptr; |
446 | } |
447 | |
448 | void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, |
449 | const RawComment &Comment) const { |
450 | assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments)(static_cast<void> (0)); |
451 | DeclRawComments.try_emplace(&OriginalD, &Comment); |
452 | const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); |
453 | RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); |
454 | CommentlessRedeclChains.erase(CanonicalDecl); |
455 | } |
456 | |
457 | static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, |
458 | SmallVectorImpl<const NamedDecl *> &Redeclared) { |
459 | const DeclContext *DC = ObjCMethod->getDeclContext(); |
460 | if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) { |
461 | const ObjCInterfaceDecl *ID = IMD->getClassInterface(); |
462 | if (!ID) |
463 | return; |
464 | // Add redeclared method here. |
465 | for (const auto *Ext : ID->known_extensions()) { |
466 | if (ObjCMethodDecl *RedeclaredMethod = |
467 | Ext->getMethod(ObjCMethod->getSelector(), |
468 | ObjCMethod->isInstanceMethod())) |
469 | Redeclared.push_back(RedeclaredMethod); |
470 | } |
471 | } |
472 | } |
473 | |
/// Eagerly attaches any not-yet-attached comments in the file containing
/// \p Decls to the matching declarations, caching both the raw comment and
/// its parsed form.
void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  // All Decls are assumed to come from the same file; use the first decl
  // with a valid location to identify it.
  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    // Comments are attached to templates rather than their templated decls.
    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    // Skip decls that already have a cached comment.
    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}
529 | |
530 | comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC, |
531 | const Decl *D) const { |
532 | auto *ThisDeclInfo = new (*this) comments::DeclInfo; |
533 | ThisDeclInfo->CommentDecl = D; |
534 | ThisDeclInfo->IsFilled = false; |
535 | ThisDeclInfo->fill(); |
536 | ThisDeclInfo->CommentDecl = FC->getDecl(); |
537 | if (!ThisDeclInfo->TemplateParameters) |
538 | ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters; |
539 | comments::FullComment *CFC = |
540 | new (*this) comments::FullComment(FC->getBlocks(), |
541 | ThisDeclInfo); |
542 | return CFC; |
543 | } |
544 | |
545 | comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const { |
546 | const RawComment *RC = getRawCommentForDeclNoCache(D); |
547 | return RC ? RC->parse(*this, nullptr, D) : nullptr; |
548 | } |
549 | |
/// Returns the parsed documentation comment for \p D, consulting the parsed
/// cache first, then any redeclaration, and finally "inheriting" the comment
/// from related declarations (overridden methods, underlying tag types,
/// Objective-C super classes / category interfaces, public base classes).
comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    // The cache is keyed on the canonical decl; clone so the returned
    // comment's DeclInfo describes D itself.
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    // No comment anywhere on the redecl chain: try to inherit one.
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      // Property accessors inherit the property's documentation.
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      // Walk up the super-class chain looking for a documented ancestor.
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      // Categories inherit from the interface they extend.
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        // Only public, non-virtual bases are considered here.
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase= VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}
656 | |
/// Profiles \p Parm into \p ID for FoldingSet-based canonicalization of
/// template template parameters.  Each inner parameter is encoded with a
/// kind tag (0 = type, 1 = non-type, 2 = template template) followed by the
/// data that distinguishes it canonically.
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Tag 0: template type parameter.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      const TypeConstraint *TC = TTP->getTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      if (TC)
        TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
                                                        /*Canonical=*/true);
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    // Tag 1: non-type template parameter, distinguished by canonical type.
    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    // Tag 2: nested template template parameter, profiled recursively.
    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
  // A requires-clause participates in the canonical identity as well.
  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
  ID.AddBoolean(RequiresClause != nullptr);
  if (RequiresClause)
    RequiresClause->Profile(ID, C, /*Canonical=*/true);
}
711 | |
/// Rewrites the immediately-declared constraint \p IDC so that its first
/// converted template argument refers to \p ConstrainedType, producing the
/// constraint expression for a canonicalized parameter.
static Expr *
canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
                                          QualType ConstrainedType) {
  // This is a bit ugly - we need to form a new immediately-declared
  // constraint that references the new parameter; this would ideally
  // require semantic analysis (e.g. template<C T> struct S {}; - the
  // converted arguments of C<T> could be an argument pack if C is
  // declared as template<typename... T> concept C = ...).
  // We don't have semantic analysis here so we dig deep into the
  // ready-made constraint expr and change the thing manually.
  ConceptSpecializationExpr *CSE;
  // A pack-expanded constraint wraps the CSE in a fold expression.
  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
    CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
  else
    CSE = cast<ConceptSpecializationExpr>(IDC);
  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
  SmallVector<TemplateArgument, 3> NewConverted;
  NewConverted.reserve(OldConverted.size());
  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
    // The case:
    // template<typename... T> concept C = true;
    // template<C<int> T> struct S; -> constraint is C<{T, int}>
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
      NewConverted.push_back(Arg);
    TemplateArgument NewPack(NewConverted);

    NewConverted.clear();
    NewConverted.push_back(NewPack);
    assert(OldConverted.size() == 1 &&
           "Template parameter pack should be the last parameter");
  } else {
    assert(OldConverted.front().getKind() == TemplateArgument::Type &&
           "Unexpected first argument kind for immediately-declared "
           "constraint");
    // Replace the first (type) argument, keep the rest verbatim.
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.drop_front(1))
      NewConverted.push_back(Arg);
  }
  Expr *NewIDC = ConceptSpecializationExpr::Create(
      C, CSE->getNamedConcept(), NewConverted, nullptr,
      CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());

  // Re-wrap in a fold expression if the original constraint was one.
  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
    NewIDC = new (C) CXXFoldExpr(
        OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
        BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
        SourceLocation(), /*NumExpansions=*/None);
  return NewIDC;
}
762 | |
/// Compute the canonical form of a template template parameter: all names,
/// source locations and defaults are stripped, and every parameter in its
/// template parameter list is replaced by its canonical equivalent.  Results
/// are uniqued in CanonTemplateTemplateParms so repeated queries are cheap.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      // Type parameter: keep only depth, index, pack-ness, and (below) the
      // canonical form of any type-constraint.
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this,
          getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), TTP->hasTypeConstraint(),
          TTP->isExpandedParameterPack() ?
          llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
      if (const auto *TC = TTP->getTypeConstraint()) {
        QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
        // Re-target the constraint at the new (canonical) parameter.
        Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
                *this, TC->getImmediatelyDeclaredConstraint(),
                ParamAsArgument);
        TemplateArgumentListInfo CanonArgsAsWritten;
        if (auto *Args = TC->getTemplateArgsAsWritten())
          for (const auto &ArgLoc : Args->arguments())
            CanonArgsAsWritten.addArgument(
                TemplateArgumentLoc(ArgLoc.getArgument(),
                                    TemplateArgumentLocInfo()));
        NewTTP->setTypeConstraint(
            NestedNameSpecifierLoc(),
            DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
                                SourceLocation()), /*FoundDecl=*/nullptr,
            // Actually canonicalizing a TemplateArgumentLoc is difficult so we
            // simply omit the ArgsAsWritten
            TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
      }
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      // Non-type parameter: canonicalize its type (and each expansion type
      // for an expanded parameter pack).
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
                                getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      // A constrained placeholder type ('C auto') carries its own
      // immediately-declared constraint; canonicalize that too.
      if (AutoType *AT = T->getContainedAutoType()) {
        if (AT->isConstrained()) {
          Param->setPlaceholderTypeConstraint(
              canonicalizeImmediatelyDeclaredConstraint(
                  *this, NTTP->getPlaceholderTypeConstraint(), T));
        }
      }
      CanonParams.push_back(Param);

    } else
      // Template template parameter: recurse.
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                           cast<TemplateTemplateParmDecl>(*P)));
  }

  Expr *CanonRequiresClause = nullptr;
  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
    CanonRequiresClause = RequiresClause;

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                         TemplateParameterList::Create(*this, SourceLocation(),
                                                       SourceLocation(),
                                                       CanonParams,
                                                       SourceLocation(),
                                                       CanonRequiresClause));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}
881 | |
882 | TargetCXXABI::Kind ASTContext::getCXXABIKind() const { |
883 | auto Kind = getTargetInfo().getCXXABI().getKind(); |
884 | return getLangOpts().CXXABI.getValueOr(Kind); |
885 | } |
886 | |
887 | CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { |
888 | if (!LangOpts.CPlusPlus) return nullptr; |
889 | |
890 | switch (getCXXABIKind()) { |
891 | case TargetCXXABI::AppleARM64: |
892 | case TargetCXXABI::Fuchsia: |
893 | case TargetCXXABI::GenericARM: // Same as Itanium at this level |
894 | case TargetCXXABI::iOS: |
895 | case TargetCXXABI::WatchOS: |
896 | case TargetCXXABI::GenericAArch64: |
897 | case TargetCXXABI::GenericMIPS: |
898 | case TargetCXXABI::GenericItanium: |
899 | case TargetCXXABI::WebAssembly: |
900 | case TargetCXXABI::XL: |
901 | return CreateItaniumCXXABI(*this); |
902 | case TargetCXXABI::Microsoft: |
903 | return CreateMicrosoftCXXABI(*this); |
904 | } |
905 | llvm_unreachable("Invalid CXXABI type!")__builtin_unreachable(); |
906 | } |
907 | |
908 | interp::Context &ASTContext::getInterpContext() { |
909 | if (!InterpContext) { |
910 | InterpContext.reset(new interp::Context(*this)); |
911 | } |
912 | return *InterpContext.get(); |
913 | } |
914 | |
915 | ParentMapContext &ASTContext::getParentMapContext() { |
916 | if (!ParentMapCtx) |
917 | ParentMapCtx.reset(new ParentMapContext(*this)); |
918 | return *ParentMapCtx.get(); |
919 | } |
920 | |
921 | static const LangASMap *getAddressSpaceMap(const TargetInfo &T, |
922 | const LangOptions &LOpts) { |
923 | if (LOpts.FakeAddressSpaceMap) { |
924 | // The fake address space map must have a distinct entry for each |
925 | // language-specific address space. |
926 | static const unsigned FakeAddrSpaceMap[] = { |
927 | 0, // Default |
928 | 1, // opencl_global |
929 | 3, // opencl_local |
930 | 2, // opencl_constant |
931 | 0, // opencl_private |
932 | 4, // opencl_generic |
933 | 5, // opencl_global_device |
934 | 6, // opencl_global_host |
935 | 7, // cuda_device |
936 | 8, // cuda_constant |
937 | 9, // cuda_shared |
938 | 1, // sycl_global |
939 | 5, // sycl_global_device |
940 | 6, // sycl_global_host |
941 | 3, // sycl_local |
942 | 0, // sycl_private |
943 | 10, // ptr32_sptr |
944 | 11, // ptr32_uptr |
945 | 12 // ptr64 |
946 | }; |
947 | return &FakeAddrSpaceMap; |
948 | } else { |
949 | return &T.getAddressSpaceMap(); |
950 | } |
951 | } |
952 | |
953 | static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, |
954 | const LangOptions &LangOpts) { |
955 | switch (LangOpts.getAddressSpaceMapMangling()) { |
956 | case LangOptions::ASMM_Target: |
957 | return TI.useAddressSpaceMapMangling(); |
958 | case LangOptions::ASMM_On: |
959 | return true; |
960 | case LangOptions::ASMM_Off: |
961 | return false; |
962 | } |
963 | llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.")__builtin_unreachable(); |
964 | } |
965 | |
/// Construct an ASTContext.  Target-dependent state (builtin types, the
/// C++ ABI object, address-space maps) is deferred to InitBuiltinTypes();
/// here we only wire up the folding sets that need a back-reference to the
/// context and the sanitizer/instrumentation lists derived from LangOpts.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  // Every context starts with an implicit TranslationUnitDecl as the root
  // of the declaration tree.
  addTranslationUnitDecl();
}
985 | |
ASTContext::~ASTContext() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }

  // AttrVecs are placement-new'd into context-allocated memory (see
  // getDeclAttrs), so only their destructors are run here; the storage is
  // reclaimed with the allocator.
  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();

  // Same placement-new pattern for per-module initializer lists.
  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
}
1019 | |
/// Restrict AST traversal (e.g. by ASTMatchers) to the given set of
/// top-level declarations.
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  // Cached parent maps were built for the previous scope; invalidate them.
  getParentMapContext().clear();
}
1024 | |
1025 | void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const { |
1026 | Deallocations.push_back({Callback, Data}); |
1027 | } |
1028 | |
/// Attach the external AST source (e.g. an ASTReader backing a PCH or
/// module file) that lazily supplies additional declarations.
void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}
1033 | |
/// Dump per-translation-unit statistics (type node counts/sizes and
/// implicitly-declared special member function counts) to stderr.
void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << " " << Types.size() << " types total.\n";

  // One counter slot per concrete Type subclass, generated from
  // TypeNodes.inc; abstract classes get no slot.
  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  // Walk the generated class list again, in the same order, to report and
  // total the per-class byte counts.
  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                    \
  if (counts[Idx])                                            \
    llvm::errs() << " " << counts[Idx] << " " << #Name        \
                 << " types, " << sizeof(Name##Type) << " each " \
                 << "(" << counts[Idx] * sizeof(Name##Type)   \
                 << " bytes)\n";                              \
  TotalBytes += counts[Idx] * sizeof(Name##Type);             \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  // Let the external source (PCH/module reader) report its own stats.
  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}
1094 | |
/// Record that the definition of \p ND was merged into module \p M,
/// optionally notifying AST mutation listeners (e.g. the ASTWriter).
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  // Merges are always tracked against the canonical declaration.
  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}
1103 | |
1104 | void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { |
1105 | auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl())); |
1106 | if (It == MergedDefModules.end()) |
1107 | return; |
1108 | |
1109 | auto &Merged = It->second; |
1110 | llvm::DenseSet<Module*> Found; |
1111 | for (Module *&M : Merged) |
1112 | if (!Found.insert(M).second) |
1113 | M = nullptr; |
1114 | Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end()); |
1115 | } |
1116 | |
1117 | ArrayRef<Module *> |
1118 | ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { |
1119 | auto MergedIt = |
1120 | MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); |
1121 | if (MergedIt == MergedDefModules.end()) |
1122 | return None; |
1123 | return MergedIt->second; |
1124 | } |
1125 | |
/// Materialize any lazily-deserialized initializer decls from the external
/// AST source, appending them to Initializers.
void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  // Take the IDs out of the member first: GetExternalDecl may re-enter and
  // try to add more lazy initializers, which the assert below would catch.
  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}
1142 | |
/// Record \p D as an initializer that must run when module \p M is
/// imported.  Transitive chains of ImportDecls are collapsed where possible.
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  // Lazily create the per-module list; it lives in the context's allocator
  // and is destroyed in ~ASTContext.
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}
1168 | |
1169 | void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { |
1170 | auto *&Inits = ModuleInitializers[M]; |
1171 | if (!Inits) |
1172 | Inits = new (*this) PerModuleInitializers; |
1173 | Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), |
1174 | IDs.begin(), IDs.end()); |
1175 | } |
1176 | |
1177 | ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) { |
1178 | auto It = ModuleInitializers.find(M); |
1179 | if (It == ModuleInitializers.end()) |
1180 | return None; |
1181 | |
1182 | auto *Inits = It->second; |
1183 | Inits->resolve(*this); |
1184 | return Inits->Initializers; |
1185 | } |
1186 | |
/// Lazily create and return the implicit extern "C" declaration context.
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}
1193 | |
1194 | BuiltinTemplateDecl * |
1195 | ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, |
1196 | const IdentifierInfo *II) const { |
1197 | auto *BuiltinTemplate = |
1198 | BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK); |
1199 | BuiltinTemplate->setImplicit(); |
1200 | getTranslationUnitDecl()->addDecl(BuiltinTemplate); |
1201 | |
1202 | return BuiltinTemplate; |
1203 | } |
1204 | |
/// Lazily create and return the builtin __make_integer_seq template.
BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}
1212 | |
/// Lazily create and return the builtin __type_pack_element template.
BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}
1220 | |
1221 | RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, |
1222 | RecordDecl::TagKind TK) const { |
1223 | SourceLocation Loc; |
1224 | RecordDecl *NewDecl; |
1225 | if (getLangOpts().CPlusPlus) |
1226 | NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, |
1227 | Loc, &Idents.get(Name)); |
1228 | else |
1229 | NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, |
1230 | &Idents.get(Name)); |
1231 | NewDecl->setImplicit(); |
1232 | NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( |
1233 | const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); |
1234 | return NewDecl; |
1235 | } |
1236 | |
1237 | TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, |
1238 | StringRef Name) const { |
1239 | TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); |
1240 | TypedefDecl *NewDecl = TypedefDecl::Create( |
1241 | const_cast<ASTContext &>(*this), getTranslationUnitDecl(), |
1242 | SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); |
1243 | NewDecl->setImplicit(); |
1244 | return NewDecl; |
1245 | } |
1246 | |
/// Lazily create and return the implicit "typedef __int128 __int128_t;".
TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}
1252 | |
/// Lazily create and return the implicit
/// "typedef unsigned __int128 __uint128_t;".
TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}
1258 | |
1259 | void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) { |
1260 | auto *Ty = new (*this, TypeAlignment) BuiltinType(K); |
1261 | R = CanQualType::CreateUnsafe(QualType(Ty, 0)); |
1262 | Types.push_back(Ty); |
1263 | } |
1264 | |
/// Create all of the builtin types for the given target (and optional
/// auxiliary target, for e.g. CUDA/OpenMP offloading).  Must run exactly
/// once, before any other type is created.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  // The C++ ABI object and the address-space mapping both depend on the
  // target, so they are set up here rather than in the constructor.
  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // C99 6.2.5p11.
  FloatComplexTy      = getComplexType(FloatTy);
  DoubleComplexTy     = getComplexType(DoubleTy);
  LongDoubleComplexTy = getComplexType(LongDoubleTy);
  Float128ComplexTy   = getComplexType(Float128Ty);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64() &&
      Target.hasFeature("paired-vector-memops")) {
    if (Target.hasFeature("mma")) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
    }
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}
1490 | |
/// The diagnostics engine is owned by the SourceManager; forward to it.
DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}
1494 | |
1495 | AttrVec& ASTContext::getDeclAttrs(const Decl *D) { |
1496 | AttrVec *&Result = DeclAttrs[D]; |
1497 | if (!Result) { |
1498 | void *Mem = Allocate(sizeof(AttrVec)); |
1499 | Result = new (Mem) AttrVec; |
1500 | } |
1501 | |
1502 | return *Result; |
1503 | } |
1504 | |
1505 | /// Erase the attributes corresponding to the given declaration. |
1506 | void ASTContext::eraseDeclAttrs(const Decl *D) { |
1507 | llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D); |
1508 | if (Pos != DeclAttrs.end()) { |
1509 | Pos->second->~AttrVec(); |
1510 | DeclAttrs.erase(Pos); |
1511 | } |
1512 | } |
1513 | |
// FIXME: Remove ?
/// If \p Var is an instantiated static data member, return the record of
/// which member it was instantiated from; otherwise null.
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}
1521 | |
1522 | ASTContext::TemplateOrSpecializationInfo |
1523 | ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { |
1524 | llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos = |
1525 | TemplateOrInstantiation.find(Var); |
1526 | if (Pos == TemplateOrInstantiation.end()) |
1527 | return {}; |
1528 | |
1529 | return Pos->second; |
1530 | } |
1531 | |
1532 | void |
1533 | ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, |
1534 | TemplateSpecializationKind TSK, |
1535 | SourceLocation PointOfInstantiation) { |
1536 | assert(Inst->isStaticDataMember() && "Not a static data member")(static_cast<void> (0)); |
1537 | assert(Tmpl->isStaticDataMember() && "Not a static data member")(static_cast<void> (0)); |
1538 | setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( |
1539 | Tmpl, TSK, PointOfInstantiation)); |
1540 | } |
1541 | |
/// Record the template/specialization info for \p Inst.  Each variable may
/// be recorded at most once.
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}
1549 | |
// Returns the pattern using-declaration that UUD was instantiated from, or
// null if no mapping was recorded via setInstantiatedFromUsingDecl().
1550 | NamedDecl * |
1551 | ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { |
1552 |   auto Pos = InstantiatedFromUsingDecl.find(UUD); |
1553 |   if (Pos == InstantiatedFromUsingDecl.end()) |
1554 |     return nullptr; |
1555 | |
1556 |   return Pos->second; |
1557 | } |
1558 | |
// Records that using-declaration Inst was instantiated from Pattern. Both
// sides must be one of the using-decl flavors (UsingDecl,
// UnresolvedUsingValueDecl, UnresolvedUsingTypenameDecl), and a pattern may
// be recorded only once per instantiation.
1559 | void |
1560 | ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { |
1561 |   assert((isa<UsingDecl>(Pattern) ||(static_cast<void> (0)) |
1562 |           isa<UnresolvedUsingValueDecl>(Pattern) ||(static_cast<void> (0)) |
1563 |           isa<UnresolvedUsingTypenameDecl>(Pattern)) &&(static_cast<void> (0)) |
1564 |          "pattern decl is not a using decl")(static_cast<void> (0)); |
1565 |   assert((isa<UsingDecl>(Inst) ||(static_cast<void> (0)) |
1566 |           isa<UnresolvedUsingValueDecl>(Inst) ||(static_cast<void> (0)) |
1567 |           isa<UnresolvedUsingTypenameDecl>(Inst)) &&(static_cast<void> (0)) |
1568 |          "instantiation did not produce a using decl")(static_cast<void> (0)); |
1569 |   assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists")(static_cast<void> (0)); |
1570 |   InstantiatedFromUsingDecl[Inst] = Pattern; |
1571 | } |
1572 | |
// Returns the pattern using-enum declaration UUD was instantiated from, or
// null if no mapping was recorded.
1573 | UsingEnumDecl * |
1574 | ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { |
1575 |   auto Pos = InstantiatedFromUsingEnumDecl.find(UUD); |
1576 |   if (Pos == InstantiatedFromUsingEnumDecl.end()) |
1577 |     return nullptr; |
1578 | |
1579 |   return Pos->second; |
1580 | } |
1581 | |
// Records the instantiation pattern for a using-enum declaration; at most
// one pattern may be recorded per instantiated decl.
1582 | void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, |
1583 |                                                   UsingEnumDecl *Pattern) { |
1584 |   assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists")(static_cast<void> (0)); |
1585 |   InstantiatedFromUsingEnumDecl[Inst] = Pattern; |
1586 | } |
1587 | |
// Returns the pattern shadow declaration that Inst was instantiated from,
// or null if no mapping was recorded.
1588 | UsingShadowDecl * |
1589 | ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { |
1590 |   llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos |
1591 |     = InstantiatedFromUsingShadowDecl.find(Inst); |
1592 |   if (Pos == InstantiatedFromUsingShadowDecl.end()) |
1593 |     return nullptr; |
1594 | |
1595 |   return Pos->second; |
1596 | } |
1597 | |
// Records the instantiation pattern for a using-shadow declaration; at most
// one pattern may be recorded per instantiated decl.
1598 | void |
1599 | ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, |
1600 |                                                UsingShadowDecl *Pattern) { |
1601 |   assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists")(static_cast<void> (0)); |
1602 |   InstantiatedFromUsingShadowDecl[Inst] = Pattern; |
1603 | } |
1604 | |
// Returns the template-pattern field an unnamed field was instantiated
// from, or null if no mapping was recorded.
1605 | FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { |
1606 |   llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos |
1607 |     = InstantiatedFromUnnamedFieldDecl.find(Field); |
1608 |   if (Pos == InstantiatedFromUnnamedFieldDecl.end()) |
1609 |     return nullptr; |
1610 | |
1611 |   return Pos->second; |
1612 | } |
1613 | |
// Records that unnamed field Inst was instantiated from unnamed field Tmpl.
// Both fields must be nameless (unnamed fields have no DeclarationName to
// key the usual redecl machinery on), and a mapping may be recorded once.
1614 | void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, |
1615 |                                                      FieldDecl *Tmpl) { |
1616 |   assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed")(static_cast<void> (0)); |
1617 |   assert(!Tmpl->getDeclName() && "Template field decl is not unnamed")(static_cast<void> (0)); |
1618 |   assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&(static_cast<void> (0)) |
1619 |          "Already noted what unnamed field was instantiated from")(static_cast<void> (0)); |
1620 | |
1621 |   InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; |
1622 | } |
1623 | |
// Convenience wrapper: begin iterator of the overridden-methods range for
// Method (see overridden_methods()).
1624 | ASTContext::overridden_cxx_method_iterator |
1625 | ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { |
1626 |   return overridden_methods(Method).begin(); |
1627 | } |
1628 | |
// Convenience wrapper: end iterator of the overridden-methods range for
// Method (see overridden_methods()).
1629 | ASTContext::overridden_cxx_method_iterator |
1630 | ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { |
1631 |   return overridden_methods(Method).end(); |
1632 | } |
1633 | |
// Number of methods Method directly overrides, computed as the length of
// the overridden_methods() range.
1634 | unsigned |
1635 | ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { |
1636 |   auto Range = overridden_methods(Method); |
1637 |   return Range.end() - Range.begin(); |
1638 | } |
1639 | |
// Returns the range of methods that Method overrides. The side table is
// keyed on canonical declarations, so the lookup canonicalizes first; an
// absent entry yields an empty (nullptr, nullptr) range.
1640 | ASTContext::overridden_method_range |
1641 | ASTContext::overridden_methods(const CXXMethodDecl *Method) const { |
1642 |   llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = |
1643 |       OverriddenMethods.find(Method->getCanonicalDecl()); |
1644 |   if (Pos == OverriddenMethods.end()) |
1645 |     return overridden_method_range(nullptr, nullptr); |
1646 |   return overridden_method_range(Pos->second.begin(), Pos->second.end()); |
1647 | } |
1648 | |
// Appends Overridden to the list of methods overridden by Method. Both are
// asserted to be canonical declarations, matching the canonical-decl keying
// used by overridden_methods().
1649 | void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, |
1650 |                                      const CXXMethodDecl *Overridden) { |
1651 |   assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl())(static_cast<void> (0)); |
1652 |   OverriddenMethods[Method].push_back(Overridden); |
1653 | } |
1654 | |
// Collects into Overridden the methods D overrides, handling both C++
// methods (via this context's side table) and Objective-C methods (via the
// method's own getOverriddenMethods). Non-method decls produce nothing.
1655 | void ASTContext::getOverriddenMethods( |
1656 |     const NamedDecl *D, |
1657 |     SmallVectorImpl<const NamedDecl *> &Overridden) const { |
1658 |   assert(D)(static_cast<void> (0)); |
1659 | |
1660 |   if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { |
1661 |     Overridden.append(overridden_methods_begin(CXXMethod), |
1662 |                       overridden_methods_end(CXXMethod)); |
1663 |     return; |
1664 |   } |
1665 | |
1666 |   const auto *Method = dyn_cast<ObjCMethodDecl>(D); |
1667 |   if (!Method) |
1668 |     return; |
1669 | |
1670 |   SmallVector<const ObjCMethodDecl *, 8> OverDecls; |
1671 |   Method->getOverriddenMethods(OverDecls); |
1672 |   Overridden.append(OverDecls.begin(), OverDecls.end()); |
1673 | } |
1674 | |
// Appends a locally-parsed (non-AST-file) import declaration to the
// singly-linked chain tracked by FirstLocalImport/LastLocalImport. The
// first import initializes both ends; later ones are linked at the tail.
1675 | void ASTContext::addedLocalImportDecl(ImportDecl *Import) { |
1676 |   assert(!Import->getNextLocalImport() &&(static_cast<void> (0)) |
1677 |          "Import declaration already in the chain")(static_cast<void> (0)); |
1678 |   assert(!Import->isFromASTFile() && "Non-local import declaration")(static_cast<void> (0)); |
1679 |   if (!FirstLocalImport) { |
1680 |     FirstLocalImport = Import; |
1681 |     LastLocalImport = Import; |
1682 |     return; |
1683 |   } |
1684 | |
1685 |   LastLocalImport->setNextLocalImport(Import); |
1686 |   LastLocalImport = Import; |
1687 | } |
1688 | |
1689 | //===----------------------------------------------------------------------===// |
1690 | // Type Sizing and Analysis |
1691 | //===----------------------------------------------------------------------===// |
1692 | |
1693 | /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified |
1694 | /// scalar floating point type. |
// For long double and __float128 under OpenMP device compilation, the host
// (aux) target's format is used so host and device agree on the layout.
1695 | const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { |
1696 |   switch (T->castAs<BuiltinType>()->getKind()) { |
1697 |   default: |
1698 |     llvm_unreachable("Not a floating point type!")__builtin_unreachable(); |
1699 |   case BuiltinType::BFloat16: |
1700 |     return Target->getBFloat16Format(); |
1701 |   case BuiltinType::Float16: |
1702 |   case BuiltinType::Half: |
1703 |     return Target->getHalfFormat(); |
1704 |   case BuiltinType::Float:      return Target->getFloatFormat(); |
1705 |   case BuiltinType::Double:     return Target->getDoubleFormat(); |
1706 |   case BuiltinType::LongDouble: |
1707 |     if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) |
1708 |       return AuxTarget->getLongDoubleFormat(); |
1709 |     return Target->getLongDoubleFormat(); |
1710 |   case BuiltinType::Float128: |
1711 |     if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) |
1712 |       return AuxTarget->getFloat128Format(); |
1713 |     return Target->getFloat128Format(); |
1714 |   } |
1715 | } |
1716 | |
// Computes the alignment (in CharUnits) of declaration D, combining:
// the aligned attribute, the type's (preferred) alignment, large-array
// target adjustments, global minimum alignment, field/record layout
// constraints, and target caps on the aligned attribute for statics.
// ForAlignof selects C++ alignof semantics (e.g. references report the
// alignment of the referenced type, not of a pointer).
1717 | CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { |
1718 |   unsigned Align = Target->getCharWidth(); |
1719 | |
1720 |   bool UseAlignAttrOnly = false; |
1721 |   if (unsigned AlignFromAttr = D->getMaxAlignment()) { |
1722 |     Align = AlignFromAttr; |
1723 | |
1724 |     // __attribute__((aligned)) can increase or decrease alignment |
1725 |     // *except* on a struct or struct member, where it only increases |
1726 |     // alignment unless 'packed' is also specified. |
1727 |     // |
1728 |     // It is an error for alignas to decrease alignment, so we can |
1729 |     // ignore that possibility;   Sema should diagnose it. |
1730 |     if (isa<FieldDecl>(D)) { |
1731 |       UseAlignAttrOnly = D->hasAttr<PackedAttr>() || |
1732 |         cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); |
1733 |     } else { |
1734 |       UseAlignAttrOnly = true; |
1735 |     } |
1736 |   } |
1737 |   else if (isa<FieldDecl>(D)) |
1738 |     UseAlignAttrOnly = |
1739 |       D->hasAttr<PackedAttr>() || |
1740 |       cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); |
1741 | |
1742 |   // If we're using the align attribute only, just ignore everything |
1743 |   // else about the declaration and its type. |
1744 |   if (UseAlignAttrOnly) { |
1745 |     // do nothing |
1746 |   } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { |
1747 |     QualType T = VD->getType(); |
// References: alignof looks through to the referenced type; otherwise
// treat the declaration as having pointer alignment.
1748 |     if (const auto *RT = T->getAs<ReferenceType>()) { |
1749 |       if (ForAlignof) |
1750 |         T = RT->getPointeeType(); |
1751 |       else |
1752 |         T = getPointerType(RT->getPointeeType()); |
1753 |     } |
1754 |     QualType BaseT = getBaseElementType(T); |
1755 |     if (T->isFunctionType()) |
1756 |       Align = getTypeInfoImpl(T.getTypePtr()).Align; |
1757 |     else if (!BaseT->isIncompleteType()) { |
1758 |       // Adjust alignments of declarations with array type by the |
1759 |       // large-array alignment on the target. |
1760 |       if (const ArrayType *arrayType = getAsArrayType(T)) { |
1761 |         unsigned MinWidth = Target->getLargeArrayMinWidth(); |
1762 |         if (!ForAlignof && MinWidth) { |
1763 |           if (isa<VariableArrayType>(arrayType)) |
1764 |             Align = std::max(Align, Target->getLargeArrayAlign()); |
1765 |           else if (isa<ConstantArrayType>(arrayType) && |
1766 |                    MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType))) |
1767 |             Align = std::max(Align, Target->getLargeArrayAlign()); |
1768 |         } |
1769 |       } |
1770 |       Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); |
// __unaligned-qualified types collapse to char alignment.
1771 |       if (BaseT.getQualifiers().hasUnaligned()) |
1772 |         Align = Target->getCharWidth(); |
1773 |       if (const auto *VD = dyn_cast<VarDecl>(D)) { |
1774 |         if (VD->hasGlobalStorage() && !ForAlignof) { |
1775 |           uint64_t TypeSize = getTypeSize(T.getTypePtr()); |
1776 |           Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); |
1777 |         } |
1778 |       } |
1779 |     } |
1780 | |
1781 |     // Fields can be subject to extra alignment constraints, like if |
1782 |     // the field is packed, the struct is packed, or the struct has a |
1783 |     // a max-field-alignment constraint (#pragma pack).  So calculate |
1784 |     // the actual alignment of the field within the struct, and then |
1785 |     // (as we're expected to) constrain that by the alignment of the type. |
1786 |     if (const auto *Field = dyn_cast<FieldDecl>(VD)) { |
1787 |       const RecordDecl *Parent = Field->getParent(); |
1788 |       // We can only produce a sensible answer if the record is valid. |
1789 |       if (!Parent->isInvalidDecl()) { |
1790 |         const ASTRecordLayout &Layout = getASTRecordLayout(Parent); |
1791 | |
1792 |         // Start with the record's overall alignment. |
1793 |         unsigned FieldAlign = toBits(Layout.getAlignment()); |
1794 | |
1795 |         // Use the GCD of that and the offset within the record. |
1796 |         uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); |
1797 |         if (Offset > 0) { |
1798 |           // Alignment is always a power of 2, so the GCD will be a power of 2, |
1799 |           // which means we get to do this crazy thing instead of Euclid's. |
1800 |           uint64_t LowBitOfOffset = Offset & (~Offset + 1); |
1801 |           if (LowBitOfOffset < FieldAlign) |
1802 |             FieldAlign = static_cast<unsigned>(LowBitOfOffset); |
1803 |         } |
1804 | |
1805 |         Align = std::min(Align, FieldAlign); |
1806 |       } |
1807 |     } |
1808 |   } |
1809 | |
1810 |   // Some targets have hard limitation on the maximum requestable alignment in |
1811 |   // aligned attribute for static variables. |
1812 |   const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); |
1813 |   const auto *VD = dyn_cast<VarDecl>(D); |
1814 |   if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) |
1815 |     Align = std::min(Align, MaxAlignedAttr); |
1816 | |
1817 |   return toCharUnitsFromBits(Align); |
1818 | } |
1819 | |
// Alignment (in CharUnits) of exception objects, as defined by the target.
1820 | CharUnits ASTContext::getExnObjectAlignment() const { |
1821 |   return toCharUnitsFromBits(Target->getExnObjectAlignment()); |
1822 | } |
1823 | |
1824 | // getTypeInfoDataSizeInChars - Return the size of a type, in |
1825 | // chars. If the type is a record, its data size is returned.  This is |
1826 | // the size of the memcpy that's performed when assigning this type |
1827 | // using a trivial copy/move assignment operator. |
1828 | TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { |
1829 |   TypeInfoChars Info = getTypeInfoInChars(T); |
1830 | |
1831 |   // In C++, objects can sometimes be allocated into the tail padding |
1832 |   // of a base-class subobject.  We decide whether that's possible |
1833 |   // during class layout, so here we can just trust the layout results. |
1834 |   if (getLangOpts().CPlusPlus) { |
1835 |     if (const auto *RT = T->getAs<RecordType>()) { |
// Override the full width with the record's data size (size without
// reusable tail padding) from the cached record layout.
1836 |       const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); |
1837 |       Info.Width = layout.getDataSize(); |
1838 |     } |
1839 |   } |
1840 | |
1841 |   return Info; |
1842 | } |
1843 | |
1844 | /// getConstantArrayInfoInChars - Performing the computation in CharUnits |
1845 | /// instead of in bits prevents overflowing the uint64_t for some large arrays. |
1846 | TypeInfoChars |
1847 | static getConstantArrayInfoInChars(const ASTContext &Context, |
1848 |                                    const ConstantArrayType *CAT) { |
1849 |   TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); |
1850 |   uint64_t Size = CAT->getSize().getZExtValue(); |
1851 |   assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=(static_cast<void> (0)) |
1852 |           (uint64_t)(-1)/Size) &&(static_cast<void> (0)) |
1853 |          "Overflow in array type char size evaluation")(static_cast<void> (0)); |
1854 |   uint64_t Width = EltInfo.Width.getQuantity() * Size; |
1855 |   unsigned Align = EltInfo.Align.getQuantity(); |
// The MS 32-bit ABI does not round the array width up to alignment;
// everywhere else (and MS 64-bit) the total width is align-padded.
1856 |   if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || |
1857 |       Context.getTargetInfo().getPointerWidth(0) == 64) |
1858 |     Width = llvm::alignTo(Width, Align); |
1859 |   return TypeInfoChars(CharUnits::fromQuantity(Width), |
1860 |                        CharUnits::fromQuantity(Align), |
1861 |                        EltInfo.AlignRequirement); |
1862 | } |
1863 | |
// Size/alignment info in CharUnits. Constant arrays take the dedicated
// char-based path (avoids bit-count overflow); everything else converts
// the bit-based getTypeInfo() result.
1864 | TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { |
1865 |   if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) |
1866 |     return getConstantArrayInfoInChars(*this, CAT); |
1867 |   TypeInfo Info = getTypeInfo(T); |
1868 |   return TypeInfoChars(toCharUnitsFromBits(Info.Width), |
1869 |                        toCharUnitsFromBits(Info.Align), Info.AlignRequirement); |
1870 | } |
1871 | |
// QualType convenience overload; qualifiers do not affect size/alignment
// here, so it forwards to the Type* overload.
1872 | TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { |
1873 |   return getTypeInfoInChars(T.getTypePtr()); |
1874 | } |
1875 | |
// True when the type's alignment was explicitly required (attribute on a
// typedef/enum/record) rather than being the natural default.
1876 | bool ASTContext::isAlignmentRequired(const Type *T) const { |
1877 |   return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; |
1878 | } |
1879 | |
// QualType convenience overload; forwards to the Type* overload.
1880 | bool ASTContext::isAlignmentRequired(QualType T) const { |
1881 |   return isAlignmentRequired(T.getTypePtr()); |
1882 | } |
1883 | |
// Best-effort alignment of T without requiring completeness: returns an
// alignment in bits, or 0 when nothing can be determined. Precedence:
// aligned attribute on a typedef, then the complete (array element) type's
// computed alignment, then a typedef/tag attribute on the element type.
1884 | unsigned ASTContext::getTypeAlignIfKnown(QualType T, |
1885 |                                          bool NeedsPreferredAlignment) const { |
1886 |   // An alignment on a typedef overrides anything else. |
1887 |   if (const auto *TT = T->getAs<TypedefType>()) |
1888 |     if (unsigned Align = TT->getDecl()->getMaxAlignment()) |
1889 |       return Align; |
1890 | |
1891 |   // If we have an (array of) complete type, we're done. |
1892 |   T = getBaseElementType(T); |
1893 |   if (!T->isIncompleteType()) |
1894 |     return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); |
1895 | |
1896 |   // If we had an array type, its element type might be a typedef |
1897 |   // type with an alignment attribute. |
1898 |   if (const auto *TT = T->getAs<TypedefType>()) |
1899 |     if (unsigned Align = TT->getDecl()->getMaxAlignment()) |
1900 |       return Align; |
1901 | |
1902 |   // Otherwise, see if the declaration of the type had an attribute. |
1903 |   if (const auto *TT = T->getAs<TagType>()) |
1904 |     return TT->getDecl()->getMaxAlignment(); |
1905 | |
1906 |   return 0; |
1907 | } |
1908 | |
// Memoizing front-end for getTypeInfoImpl(). Results are cached in
// MemoizedTypeInfo per Type pointer.
1909 | TypeInfo ASTContext::getTypeInfo(const Type *T) const { |
1910 |   TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); |
1911 |   if (I != MemoizedTypeInfo.end()) |
1912 |     return I->second; |
1913 | |
1914 |   // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. |
1915 |   TypeInfo TI = getTypeInfoImpl(T); |
1916 |   MemoizedTypeInfo[T] = TI; |
1917 |   return TI; |
1918 | } |
1919 | |
1920 | /// getTypeInfoImpl - Return the size of the specified type, in bits. This |
1921 | /// method does not work on incomplete types. |
1922 | /// |
1923 | /// FIXME: Pointers into different addr spaces could have different sizes and |
1924 | /// alignment requirements: getPointerInfo should take an AddrSpace, this |
1925 | /// should take a QualType, &c. |
1926 | TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { |
1927 | uint64_t Width = 0; |
1928 | unsigned Align = 8; |
1929 | AlignRequirementKind AlignRequirement = AlignRequirementKind::None; |
1930 | unsigned AS = 0; |
1931 | switch (T->getTypeClass()) { |
1932 | #define TYPE(Class, Base) |
1933 | #define ABSTRACT_TYPE(Class, Base) |
1934 | #define NON_CANONICAL_TYPE(Class, Base) |
1935 | #define DEPENDENT_TYPE(Class, Base) case Type::Class: |
1936 | #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ |
1937 | case Type::Class: \ |
1938 | assert(!T->isDependentType() && "should not see dependent types here")(static_cast<void> (0)); \ |
1939 | return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); |
1940 | #include "clang/AST/TypeNodes.inc" |
1941 | llvm_unreachable("Should not see dependent types")__builtin_unreachable(); |
1942 | |
1943 | case Type::FunctionNoProto: |
1944 | case Type::FunctionProto: |
1945 | // GCC extension: alignof(function) = 32 bits |
1946 | Width = 0; |
1947 | Align = 32; |
1948 | break; |
1949 | |
1950 | case Type::IncompleteArray: |
1951 | case Type::VariableArray: |
1952 | case Type::ConstantArray: { |
1953 | // Model non-constant sized arrays as size zero, but track the alignment. |
1954 | uint64_t Size = 0; |
1955 | if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) |
1956 | Size = CAT->getSize().getZExtValue(); |
1957 | |
1958 | TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); |
1959 | assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&(static_cast<void> (0)) |
1960 | "Overflow in array type bit size evaluation")(static_cast<void> (0)); |
1961 | Width = EltInfo.Width * Size; |
1962 | Align = EltInfo.Align; |
1963 | AlignRequirement = EltInfo.AlignRequirement; |
1964 | if (!getTargetInfo().getCXXABI().isMicrosoft() || |
1965 | getTargetInfo().getPointerWidth(0) == 64) |
1966 | Width = llvm::alignTo(Width, Align); |
1967 | break; |
1968 | } |
1969 | |
1970 | case Type::ExtVector: |
1971 | case Type::Vector: { |
1972 | const auto *VT = cast<VectorType>(T); |
1973 | TypeInfo EltInfo = getTypeInfo(VT->getElementType()); |
1974 | Width = EltInfo.Width * VT->getNumElements(); |
1975 | Align = Width; |
1976 | // If the alignment is not a power of 2, round up to the next power of 2. |
1977 | // This happens for non-power-of-2 length vectors. |
1978 | if (Align & (Align-1)) { |
1979 | Align = llvm::NextPowerOf2(Align); |
1980 | Width = llvm::alignTo(Width, Align); |
1981 | } |
1982 | // Adjust the alignment based on the target max. |
1983 | uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); |
1984 | if (TargetVectorAlign && TargetVectorAlign < Align) |
1985 | Align = TargetVectorAlign; |
1986 | if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) |
1987 | // Adjust the alignment for fixed-length SVE vectors. This is important |
1988 | // for non-power-of-2 vector lengths. |
1989 | Align = 128; |
1990 | else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) |
1991 | // Adjust the alignment for fixed-length SVE predicates. |
1992 | Align = 16; |
1993 | break; |
1994 | } |
1995 | |
1996 | case Type::ConstantMatrix: { |
1997 | const auto *MT = cast<ConstantMatrixType>(T); |
1998 | TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); |
1999 | // The internal layout of a matrix value is implementation defined. |
2000 | // Initially be ABI compatible with arrays with respect to alignment and |
2001 | // size. |
2002 | Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); |
2003 | Align = ElementInfo.Align; |
2004 | break; |
2005 | } |
2006 | |
2007 | case Type::Builtin: |
2008 | switch (cast<BuiltinType>(T)->getKind()) { |
2009 | default: llvm_unreachable("Unknown builtin type!")__builtin_unreachable(); |
2010 | case BuiltinType::Void: |
2011 | // GCC extension: alignof(void) = 8 bits. |
2012 | Width = 0; |
2013 | Align = 8; |
2014 | break; |
2015 | case BuiltinType::Bool: |
2016 | Width = Target->getBoolWidth(); |
2017 | Align = Target->getBoolAlign(); |
2018 | break; |
2019 | case BuiltinType::Char_S: |
2020 | case BuiltinType::Char_U: |
2021 | case BuiltinType::UChar: |
2022 | case BuiltinType::SChar: |
2023 | case BuiltinType::Char8: |
2024 | Width = Target->getCharWidth(); |
2025 | Align = Target->getCharAlign(); |
2026 | break; |
2027 | case BuiltinType::WChar_S: |
2028 | case BuiltinType::WChar_U: |
2029 | Width = Target->getWCharWidth(); |
2030 | Align = Target->getWCharAlign(); |
2031 | break; |
2032 | case BuiltinType::Char16: |
2033 | Width = Target->getChar16Width(); |
2034 | Align = Target->getChar16Align(); |
2035 | break; |
2036 | case BuiltinType::Char32: |
2037 | Width = Target->getChar32Width(); |
2038 | Align = Target->getChar32Align(); |
2039 | break; |
2040 | case BuiltinType::UShort: |
2041 | case BuiltinType::Short: |
2042 | Width = Target->getShortWidth(); |
2043 | Align = Target->getShortAlign(); |
2044 | break; |
2045 | case BuiltinType::UInt: |
2046 | case BuiltinType::Int: |
2047 | Width = Target->getIntWidth(); |
2048 | Align = Target->getIntAlign(); |
2049 | break; |
2050 | case BuiltinType::ULong: |
2051 | case BuiltinType::Long: |
2052 | Width = Target->getLongWidth(); |
2053 | Align = Target->getLongAlign(); |
2054 | break; |
2055 | case BuiltinType::ULongLong: |
2056 | case BuiltinType::LongLong: |
2057 | Width = Target->getLongLongWidth(); |
2058 | Align = Target->getLongLongAlign(); |
2059 | break; |
2060 | case BuiltinType::Int128: |
2061 | case BuiltinType::UInt128: |
2062 | Width = 128; |
2063 | Align = 128; // int128_t is 128-bit aligned on all targets. |
2064 | break; |
2065 | case BuiltinType::ShortAccum: |
2066 | case BuiltinType::UShortAccum: |
2067 | case BuiltinType::SatShortAccum: |
2068 | case BuiltinType::SatUShortAccum: |
2069 | Width = Target->getShortAccumWidth(); |
2070 | Align = Target->getShortAccumAlign(); |
2071 | break; |
2072 | case BuiltinType::Accum: |
2073 | case BuiltinType::UAccum: |
2074 | case BuiltinType::SatAccum: |
2075 | case BuiltinType::SatUAccum: |
2076 | Width = Target->getAccumWidth(); |
2077 | Align = Target->getAccumAlign(); |
2078 | break; |
2079 | case BuiltinType::LongAccum: |
2080 | case BuiltinType::ULongAccum: |
2081 | case BuiltinType::SatLongAccum: |
2082 | case BuiltinType::SatULongAccum: |
2083 | Width = Target->getLongAccumWidth(); |
2084 | Align = Target->getLongAccumAlign(); |
2085 | break; |
2086 | case BuiltinType::ShortFract: |
2087 | case BuiltinType::UShortFract: |
2088 | case BuiltinType::SatShortFract: |
2089 | case BuiltinType::SatUShortFract: |
2090 | Width = Target->getShortFractWidth(); |
2091 | Align = Target->getShortFractAlign(); |
2092 | break; |
2093 | case BuiltinType::Fract: |
2094 | case BuiltinType::UFract: |
2095 | case BuiltinType::SatFract: |
2096 | case BuiltinType::SatUFract: |
2097 | Width = Target->getFractWidth(); |
2098 | Align = Target->getFractAlign(); |
2099 | break; |
2100 | case BuiltinType::LongFract: |
2101 | case BuiltinType::ULongFract: |
2102 | case BuiltinType::SatLongFract: |
2103 | case BuiltinType::SatULongFract: |
2104 | Width = Target->getLongFractWidth(); |
2105 | Align = Target->getLongFractAlign(); |
2106 | break; |
2107 | case BuiltinType::BFloat16: |
2108 | Width = Target->getBFloat16Width(); |
2109 | Align = Target->getBFloat16Align(); |
2110 | break; |
2111 | case BuiltinType::Float16: |
2112 | case BuiltinType::Half: |
2113 | if (Target->hasFloat16Type() || !getLangOpts().OpenMP || |
2114 | !getLangOpts().OpenMPIsDevice) { |
2115 | Width = Target->getHalfWidth(); |
2116 | Align = Target->getHalfAlign(); |
2117 | } else { |
2118 | assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&(static_cast<void> (0)) |
2119 | "Expected OpenMP device compilation.")(static_cast<void> (0)); |
2120 | Width = AuxTarget->getHalfWidth(); |
2121 | Align = AuxTarget->getHalfAlign(); |
2122 | } |
2123 | break; |
2124 | case BuiltinType::Float: |
2125 | Width = Target->getFloatWidth(); |
2126 | Align = Target->getFloatAlign(); |
2127 | break; |
2128 | case BuiltinType::Double: |
2129 | Width = Target->getDoubleWidth(); |
2130 | Align = Target->getDoubleAlign(); |
2131 | break; |
2132 | case BuiltinType::LongDouble: |
2133 | if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && |
2134 | (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || |
2135 | Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { |
2136 | Width = AuxTarget->getLongDoubleWidth(); |
2137 | Align = AuxTarget->getLongDoubleAlign(); |
2138 | } else { |
2139 | Width = Target->getLongDoubleWidth(); |
2140 | Align = Target->getLongDoubleAlign(); |
2141 | } |
2142 | break; |
2143 | case BuiltinType::Float128: |
2144 | if (Target->hasFloat128Type() || !getLangOpts().OpenMP || |
2145 | !getLangOpts().OpenMPIsDevice) { |
2146 | Width = Target->getFloat128Width(); |
2147 | Align = Target->getFloat128Align(); |
2148 | } else { |
2149 | assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&(static_cast<void> (0)) |
2150 | "Expected OpenMP device compilation.")(static_cast<void> (0)); |
2151 | Width = AuxTarget->getFloat128Width(); |
2152 | Align = AuxTarget->getFloat128Align(); |
2153 | } |
2154 | break; |
2155 | case BuiltinType::NullPtr: |
2156 | Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) |
2157 | Align = Target->getPointerAlign(0); // == sizeof(void*) |
2158 | break; |
2159 | case BuiltinType::ObjCId: |
2160 | case BuiltinType::ObjCClass: |
2161 | case BuiltinType::ObjCSel: |
2162 | Width = Target->getPointerWidth(0); |
2163 | Align = Target->getPointerAlign(0); |
2164 | break; |
2165 | case BuiltinType::OCLSampler: |
2166 | case BuiltinType::OCLEvent: |
2167 | case BuiltinType::OCLClkEvent: |
2168 | case BuiltinType::OCLQueue: |
2169 | case BuiltinType::OCLReserveID: |
2170 | #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ |
2171 | case BuiltinType::Id: |
2172 | #include "clang/Basic/OpenCLImageTypes.def" |
2173 | #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ |
2174 | case BuiltinType::Id: |
2175 | #include "clang/Basic/OpenCLExtensionTypes.def" |
2176 | AS = getTargetAddressSpace( |
2177 | Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); |
2178 | Width = Target->getPointerWidth(AS); |
2179 | Align = Target->getPointerAlign(AS); |
2180 | break; |
2181 | // The SVE types are effectively target-specific. The length of an |
2182 | // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple |
2183 | // of 128 bits. There is one predicate bit for each vector byte, so the |
2184 | // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. |
2185 | // |
2186 | // Because the length is only known at runtime, we use a dummy value |
2187 | // of 0 for the static length. The alignment values are those defined |
2188 | // by the Procedure Call Standard for the Arm Architecture. |
2189 | #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ |
2190 | IsSigned, IsFP, IsBF) \ |
2191 | case BuiltinType::Id: \ |
2192 | Width = 0; \ |
2193 | Align = 128; \ |
2194 | break; |
2195 | #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ |
2196 | case BuiltinType::Id: \ |
2197 | Width = 0; \ |
2198 | Align = 16; \ |
2199 | break; |
2200 | #include "clang/Basic/AArch64SVEACLETypes.def" |
2201 | #define PPC_VECTOR_TYPE(Name, Id, Size) \ |
2202 | case BuiltinType::Id: \ |
2203 | Width = Size; \ |
2204 | Align = Size; \ |
2205 | break; |
2206 | #include "clang/Basic/PPCTypes.def" |
2207 | #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ |
2208 | IsFP) \ |
2209 | case BuiltinType::Id: \ |
2210 | Width = 0; \ |
2211 | Align = ElBits; \ |
2212 | break; |
2213 | #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ |
2214 | case BuiltinType::Id: \ |
2215 | Width = 0; \ |
2216 | Align = 8; \ |
2217 | break; |
2218 | #include "clang/Basic/RISCVVTypes.def" |
2219 | } |
2220 | break; |
2221 | case Type::ObjCObjectPointer: |
2222 | Width = Target->getPointerWidth(0); |
2223 | Align = Target->getPointerAlign(0); |
2224 | break; |
2225 | case Type::BlockPointer: |
2226 | AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); |
2227 | Width = Target->getPointerWidth(AS); |
2228 | Align = Target->getPointerAlign(AS); |
2229 | break; |
2230 | case Type::LValueReference: |
2231 | case Type::RValueReference: |
2232 | // alignof and sizeof should never enter this code path here, so we go |
2233 | // the pointer route. |
2234 | AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); |
2235 | Width = Target->getPointerWidth(AS); |
2236 | Align = Target->getPointerAlign(AS); |
2237 | break; |
2238 | case Type::Pointer: |
2239 | AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); |
2240 | Width = Target->getPointerWidth(AS); |
2241 | Align = Target->getPointerAlign(AS); |
2242 | break; |
2243 | case Type::MemberPointer: { |
2244 | const auto *MPT = cast<MemberPointerType>(T); |
2245 | CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); |
2246 | Width = MPI.Width; |
2247 | Align = MPI.Align; |
2248 | break; |
2249 | } |
2250 | case Type::Complex: { |
2251 | // Complex types have the same alignment as their elements, but twice the |
2252 | // size. |
2253 | TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); |
2254 | Width = EltInfo.Width * 2; |
2255 | Align = EltInfo.Align; |
2256 | break; |
2257 | } |
2258 | case Type::ObjCObject: |
2259 | return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); |
2260 | case Type::Adjusted: |
2261 | case Type::Decayed: |
2262 | return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); |
2263 | case Type::ObjCInterface: { |
2264 | const auto *ObjCI = cast<ObjCInterfaceType>(T); |
2265 | if (ObjCI->getDecl()->isInvalidDecl()) { |
2266 | Width = 8; |
2267 | Align = 8; |
2268 | break; |
2269 | } |
2270 | const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); |
2271 | Width = toBits(Layout.getSize()); |
2272 | Align = toBits(Layout.getAlignment()); |
2273 | break; |
2274 | } |
2275 | case Type::ExtInt: { |
2276 | const auto *EIT = cast<ExtIntType>(T); |
2277 | Align = |
2278 | std::min(static_cast<unsigned>(std::max( |
2279 | getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), |
2280 | Target->getLongLongAlign()); |
2281 | Width = llvm::alignTo(EIT->getNumBits(), Align); |
2282 | break; |
2283 | } |
2284 | case Type::Record: |
2285 | case Type::Enum: { |
2286 | const auto *TT = cast<TagType>(T); |
2287 | |
2288 | if (TT->getDecl()->isInvalidDecl()) { |
2289 | Width = 8; |
2290 | Align = 8; |
2291 | break; |
2292 | } |
2293 | |
2294 | if (const auto *ET = dyn_cast<EnumType>(TT)) { |
2295 | const EnumDecl *ED = ET->getDecl(); |
2296 | TypeInfo Info = |
2297 | getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); |
2298 | if (unsigned AttrAlign = ED->getMaxAlignment()) { |
2299 | Info.Align = AttrAlign; |
2300 | Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; |
2301 | } |
2302 | return Info; |
2303 | } |
2304 | |
2305 | const auto *RT = cast<RecordType>(TT); |
2306 | const RecordDecl *RD = RT->getDecl(); |
2307 | const ASTRecordLayout &Layout = getASTRecordLayout(RD); |
2308 | Width = toBits(Layout.getSize()); |
2309 | Align = toBits(Layout.getAlignment()); |
2310 | AlignRequirement = RD->hasAttr<AlignedAttr>() |
2311 | ? AlignRequirementKind::RequiredByRecord |
2312 | : AlignRequirementKind::None; |
2313 | break; |
2314 | } |
2315 | |
2316 | case Type::SubstTemplateTypeParm: |
2317 | return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> |
2318 | getReplacementType().getTypePtr()); |
2319 | |
2320 | case Type::Auto: |
2321 | case Type::DeducedTemplateSpecialization: { |
2322 | const auto *A = cast<DeducedType>(T); |
2323 | assert(!A->getDeducedType().isNull() &&(static_cast<void> (0)) |
2324 | "cannot request the size of an undeduced or dependent auto type")(static_cast<void> (0)); |
2325 | return getTypeInfo(A->getDeducedType().getTypePtr()); |
2326 | } |
2327 | |
2328 | case Type::Paren: |
2329 | return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); |
2330 | |
2331 | case Type::MacroQualified: |
2332 | return getTypeInfo( |
2333 | cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); |
2334 | |
2335 | case Type::ObjCTypeParam: |
2336 | return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); |
2337 | |
2338 | case Type::Typedef: { |
2339 | const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); |
2340 | TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); |
2341 | // If the typedef has an aligned attribute on it, it overrides any computed |
2342 | // alignment we have. This violates the GCC documentation (which says that |
2343 | // attribute(aligned) can only round up) but matches its implementation. |
2344 | if (unsigned AttrAlign = Typedef->getMaxAlignment()) { |
2345 | Align = AttrAlign; |
2346 | AlignRequirement = AlignRequirementKind::RequiredByTypedef; |
2347 | } else { |
2348 | Align = Info.Align; |
2349 | AlignRequirement = Info.AlignRequirement; |
2350 | } |
2351 | Width = Info.Width; |
2352 | break; |
2353 | } |
2354 | |
2355 | case Type::Elaborated: |
2356 | return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); |
2357 | |
2358 | case Type::Attributed: |
2359 | return getTypeInfo( |
2360 | cast<AttributedType>(T)->getEquivalentType().getTypePtr()); |
2361 | |
2362 | case Type::Atomic: { |
2363 | // Start with the base type information. |
2364 | TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); |
2365 | Width = Info.Width; |
2366 | Align = Info.Align; |
2367 | |
2368 | if (!Width) { |
2369 | // An otherwise zero-sized type should still generate an |
2370 | // atomic operation. |
2371 | Width = Target->getCharWidth(); |
2372 | assert(Align)(static_cast<void> (0)); |
2373 | } else if (Width <= Target->getMaxAtomicPromoteWidth()) { |
2374 | // If the size of the type doesn't exceed the platform's max |
2375 | // atomic promotion width, make the size and alignment more |
2376 | // favorable to atomic operations: |
2377 | |
2378 | // Round the size up to a power of 2. |
2379 | if (!llvm::isPowerOf2_64(Width)) |
2380 | Width = llvm::NextPowerOf2(Width); |
2381 | |
2382 | // Set the alignment equal to the size. |
2383 | Align = static_cast<unsigned>(Width); |
2384 | } |
2385 | } |
2386 | break; |
2387 | |
2388 | case Type::Pipe: |
2389 | Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global)); |
2390 | Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global)); |
2391 | break; |
2392 | } |
2393 | |
2394 | assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2")(static_cast<void> (0)); |
2395 | return TypeInfo(Width, Align, AlignRequirement); |
2396 | } |
2397 | |
/// Return the alignment of a type, in bits, before any target "adjusted"
/// alignment has been applied; results are memoized per Type pointer.
unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  // Fast path: reuse a previously computed answer.
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    // Records carry an explicit unadjusted alignment in their layout.
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    // Likewise for Objective-C interface layouts.
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    // Everything else: the ordinary alignment of the desugared type.
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  // Memoize before returning.
  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}
2418 | |
2419 | unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const { |
2420 | unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign(); |
2421 | return SimdAlign; |
2422 | } |
2423 | |
/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
/// Note: integer division — a bit count that is not a multiple of the char
/// width is truncated toward zero.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}
2428 | |
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2433 | |
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  // Width of TypeInfoChars is already in CharUnits.
  return getTypeInfoInChars(T).Width;
}
/// Overload of getTypeSizeInChars taking a raw Type pointer.
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}
2442 | |
/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  // getTypeAlign returns bits; convert to CharUnits.
  return toCharUnitsFromBits(getTypeAlign(T));
}
/// Overload of getTypeAlignInChars taking a raw Type pointer.
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
2451 | |
/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
/// Overload of getTypeUnadjustedAlignInChars taking a raw Type pointer.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
2461 | |
2462 | /// getPreferredTypeAlign - Return the "preferred" alignment of the specified |
2463 | /// type for the current target in bits. This can be different than the ABI |
2464 | /// alignment in cases where it is beneficial for performance or backwards |
2465 | /// compatibility preserving to overalign a data type. (Note: despite the name, |
2466 | /// the preferred alignment is ABI-impacting, and not an optimization.) |
2467 | unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { |
2468 | TypeInfo TI = getTypeInfo(T); |
2469 | unsigned ABIAlign = TI.Align; |
2470 | |
2471 | T = T->getBaseElementTypeUnsafe(); |
2472 | |
2473 | // The preferred alignment of member pointers is that of a pointer. |
2474 | if (T->isMemberPointerType()) |
2475 | return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); |
2476 | |
2477 | if (!Target->allowsLargerPreferedTypeAlignment()) |
2478 | return ABIAlign; |
2479 | |
2480 | if (const auto *RT = T->getAs<RecordType>()) { |
2481 | const RecordDecl *RD = RT->getDecl(); |
2482 | |
2483 | // When used as part of a typedef, or together with a 'packed' attribute, |
2484 | // the 'aligned' attribute can be used to decrease alignment. |
2485 | if ((TI.isAlignRequired() && T->getAs<TypedefType>() != nullptr) || |
2486 | RD->isInvalidDecl()) |
2487 | return ABIAlign; |
2488 | |
2489 | unsigned PreferredAlign = static_cast<unsigned>( |
2490 | toBits(getASTRecordLayout(RD).PreferredAlignment)); |
2491 | assert(PreferredAlign >= ABIAlign &&(static_cast<void> (0)) |
2492 | "PreferredAlign should be at least as large as ABIAlign.")(static_cast<void> (0)); |
2493 | return PreferredAlign; |
2494 | } |
2495 | |
2496 | // Double (and, for targets supporting AIX `power` alignment, long double) and |
2497 | // long long should be naturally aligned (despite requiring less alignment) if |
2498 | // possible. |
2499 | if (const auto *CT = T->getAs<ComplexType>()) |
2500 | T = CT->getElementType().getTypePtr(); |
2501 | if (const auto *ET = T->getAs<EnumType>()) |
2502 | T = ET->getDecl()->getIntegerType().getTypePtr(); |
2503 | if (T->isSpecificBuiltinType(BuiltinType::Double) || |
2504 | T->isSpecificBuiltinType(BuiltinType::LongLong) || |
2505 | T->isSpecificBuiltinType(BuiltinType::ULongLong) || |
2506 | (T->isSpecificBuiltinType(BuiltinType::LongDouble) && |
2507 | Target->defaultsToAIXPowerAlignment())) |
2508 | // Don't increase the alignment if an alignment attribute was specified on a |
2509 | // typedef declaration. |
2510 | if (!TI.isAlignRequired()) |
2511 | return std::max(ABIAlign, (unsigned)getTypeSize(T)); |
2512 | |
2513 | return ABIAlign; |
2514 | } |
2515 | |
/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
  // Pure delegation to TargetInfo; no AST state is consulted.
  return getTargetInfo().getDefaultAlignForAttributeAligned();
}
2522 | |
2523 | /// getAlignOfGlobalVar - Return the alignment in bits that should be given |
2524 | /// to a global variable of the specified type. |
2525 | unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { |
2526 | uint64_t TypeSize = getTypeSize(T.getTypePtr()); |
2527 | return std::max(getPreferredTypeAlign(T), |
2528 | getTargetInfo().getMinGlobalAlign(TypeSize)); |
2529 | } |
2530 | |
/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
  // Bit-based result from getAlignOfGlobalVar, converted to CharUnits.
  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
}
2536 | |
/// Return the total offset within \p RD of the chain of bases that share a
/// vb-pointer, by following getBaseSharingVBPtr() until it yields null.
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
  // Walk down the chain, accumulating each base's offset within its
  // immediate container.
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(Base);
  }
  return Offset;
}
2546 | |
/// Compute the this-pointer adjustment implied by the base-class path stored
/// in a member-pointer APValue: the sum of base-class offsets along the
/// path, negated when the value points to a derived member.
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  // Start from the class that declares the pointed-to member.
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    // For a pointer to a derived member the path is traversed in the
    // opposite direction, so the base/derived roles swap.
    if (DerivedMember)
      std::swap(Base, Derived);
    ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  // The net adjustment is applied in the opposite direction in that case.
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
2565 | |
2566 | /// DeepCollectObjCIvars - |
2567 | /// This routine first collects all declared, but not synthesized, ivars in |
2568 | /// super class and then collects all ivars, including those synthesized for |
2569 | /// current class. This routine is used for implementation of current class |
2570 | /// when all ivars, declared and synthesized are known. |
2571 | void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, |
2572 | bool leafClass, |
2573 | SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { |
2574 | if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) |
2575 | DeepCollectObjCIvars(SuperClass, false, Ivars); |
2576 | if (!leafClass) { |
2577 | for (const auto *I : OI->ivars()) |
2578 | Ivars.push_back(I); |
2579 | } else { |
2580 | auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); |
2581 | for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; |
2582 | Iv= Iv->getNextIvar()) |
2583 | Ivars.push_back(Iv); |
2584 | } |
2585 | } |
2586 | |
2587 | /// CollectInheritedProtocols - Collect all protocols in current class and |
2588 | /// those inherited by it. |
2589 | void ASTContext::CollectInheritedProtocols(const Decl *CDecl, |
2590 | llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { |
2591 | if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { |
2592 | // We can use protocol_iterator here instead of |
2593 | // all_referenced_protocol_iterator since we are walking all categories. |
2594 | for (auto *Proto : OI->all_referenced_protocols()) { |
2595 | CollectInheritedProtocols(Proto, Protocols); |
2596 | } |
2597 | |
2598 | // Categories of this Interface. |
2599 | for (const auto *Cat : OI->visible_categories()) |
2600 | CollectInheritedProtocols(Cat, Protocols); |
2601 | |
2602 | if (ObjCInterfaceDecl *SD = OI->getSuperClass()) |
2603 | while (SD) { |
2604 | CollectInheritedProtocols(SD, Protocols); |
2605 | SD = SD->getSuperClass(); |
2606 | } |
2607 | } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { |
2608 | for (auto *Proto : OC->protocols()) { |
2609 | CollectInheritedProtocols(Proto, Protocols); |
2610 | } |
2611 | } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { |
2612 | // Insert the protocol. |
2613 | if (!Protocols.insert( |
2614 | const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) |
2615 | return; |
2616 | |
2617 | for (auto *Proto : OP->protocols()) |
2618 | CollectInheritedProtocols(Proto, Protocols); |
2619 | } |
2620 | } |
2621 | |
2622 | static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, |
2623 | const RecordDecl *RD) { |
2624 | assert(RD->isUnion() && "Must be union type")(static_cast<void> (0)); |
2625 | CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); |
2626 | |
2627 | for (const auto *Field : RD->fields()) { |
2628 | if (!Context.hasUniqueObjectRepresentations(Field->getType())) |
2629 | return false; |
2630 | CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); |
2631 | if (FieldSize != UnionSize) |
2632 | return false; |
2633 | } |
2634 | return !RD->field_empty(); |
2635 | } |
2636 | |
/// Return the bit offset of \p Field within its record. The layout parameter
/// is unused for fields; it exists so this overload set can be used
/// generically for both fields and base classes.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(Field);
}
2642 | |
/// Return the bit offset of base class \p RD within the record described by
/// \p Layout.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(Layout.getBaseClassOffset(RD));
}
2648 | |
2649 | static llvm::Optional<int64_t> |
2650 | structHasUniqueObjectRepresentations(const ASTContext &Context, |
2651 | const RecordDecl *RD); |
2652 | |
2653 | static llvm::Optional<int64_t> |
2654 | getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { |
2655 | if (Field->getType()->isRecordType()) { |
2656 | const RecordDecl *RD = Field->getType()->getAsRecordDecl(); |
2657 | if (!RD->isUnion()) |
2658 | return structHasUniqueObjectRepresentations(Context, RD); |
2659 | } |
2660 | if (!Field->getType()->isReferenceType() && |
2661 | !Context.hasUniqueObjectRepresentations(Field->getType())) |
2662 | return llvm::None; |
2663 | |
2664 | int64_t FieldSizeInBits = |
2665 | Context.toBits(Context.getTypeSizeInChars(Field->getType())); |
2666 | if (Field->isBitField()) { |
2667 | int64_t BitfieldSize = Field->getBitWidthValue(Context); |
2668 | if (BitfieldSize > FieldSizeInBits) |
2669 | return llvm::None; |
2670 | FieldSizeInBits = BitfieldSize; |
2671 | } |
2672 | return FieldSizeInBits; |
2673 | } |
2674 | |
/// Return the number of value bits contributed by base class \p RD, or
/// llvm::None if the base makes the representation non-unique.
static llvm::Optional<int64_t>
getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) {
  return structHasUniqueObjectRepresentations(Context, RD);
}
2679 | |
2680 | template <typename RangeT> |
2681 | static llvm::Optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( |
2682 | const RangeT &Subobjects, int64_t CurOffsetInBits, |
2683 | const ASTContext &Context, const clang::ASTRecordLayout &Layout) { |
2684 | for (const auto *Subobject : Subobjects) { |
2685 | llvm::Optional<int64_t> SizeInBits = |
2686 | getSubobjectSizeInBits(Subobject, Context); |
2687 | if (!SizeInBits) |
2688 | return llvm::None; |
2689 | if (*SizeInBits != 0) { |
2690 | int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); |
2691 | if (Offset != CurOffsetInBits) |
2692 | return llvm::None; |
2693 | CurOffsetInBits += *SizeInBits; |
2694 | } |
2695 | } |
2696 | return CurOffsetInBits; |
2697 | } |
2698 | |
2699 | static llvm::Optional<int64_t> |
2700 | structHasUniqueObjectRepresentations(const ASTContext &Context, |
2701 | const RecordDecl *RD) { |
2702 | assert(!RD->isUnion() && "Must be struct/class type")(static_cast<void> (0)); |
2703 | const auto &Layout = Context.getASTRecordLayout(RD); |
2704 | |
2705 | int64_t CurOffsetInBits = 0; |
2706 | if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { |
2707 | if (ClassDecl->isDynamicClass()) |
2708 | return llvm::None; |
2709 | |
2710 | SmallVector<CXXRecordDecl *, 4> Bases; |
2711 | for (const auto &Base : ClassDecl->bases()) { |
2712 | // Empty types can be inherited from, and non-empty types can potentially |
2713 | // have tail padding, so just make sure there isn't an error. |
2714 | Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); |
2715 | } |
2716 | |
2717 | llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { |
2718 | return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); |
2719 | }); |
2720 | |
2721 | llvm::Optional<int64_t> OffsetAfterBases = |
2722 | structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, |
2723 | Context, Layout); |
2724 | if (!OffsetAfterBases) |
2725 | return llvm::None; |
2726 | CurOffsetInBits = *OffsetAfterBases; |
2727 | } |
2728 | |
2729 | llvm::Optional<int64_t> OffsetAfterFields = |
2730 | structSubobjectsHaveUniqueObjectRepresentations( |
2731 | RD->fields(), CurOffsetInBits, Context, Layout); |
2732 | if (!OffsetAfterFields) |
2733 | return llvm::None; |
2734 | CurOffsetInBits = *OffsetAfterFields; |
2735 | |
2736 | return CurOffsetInBits; |
2737 | } |
2738 | |
2739 | bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { |
2740 | // C++17 [meta.unary.prop]: |
2741 | // The predicate condition for a template specialization |
2742 | // has_unique_object_representations<T> shall be |
2743 | // satisfied if and only if: |
2744 | // (9.1) - T is trivially copyable, and |
2745 | // (9.2) - any two objects of type T with the same value have the same |
2746 | // object representation, where two objects |
2747 | // of array or non-union class type are considered to have the same value |
2748 | // if their respective sequences of |
2749 | // direct subobjects have the same values, and two objects of union type |
2750 | // are considered to have the same |
2751 | // value if they have the same active member and the corresponding members |
2752 | // have the same value. |
2753 | // The set of scalar types for which this condition holds is |
2754 | // implementation-defined. [ Note: If a type has padding |
2755 | // bits, the condition does not hold; otherwise, the condition holds true |
2756 | // for unsigned integral types. -- end note ] |
2757 | assert(!Ty.isNull() && "Null QualType sent to unique object rep check")(static_cast<void> (0)); |
2758 | |
2759 | // Arrays are unique only if their element type is unique. |
2760 | if (Ty->isArrayType()) |
2761 | return hasUniqueObjectRepresentations(getBaseElementType(Ty)); |
2762 | |
2763 | // (9.1) - T is trivially copyable... |
2764 | if (!Ty.isTriviallyCopyableType(*this)) |
2765 | return false; |
2766 | |
2767 | // All integrals and enums are unique. |
2768 | if (Ty->isIntegralOrEnumerationType()) |
2769 | return true; |
2770 | |
2771 | // All other pointers are unique. |
2772 | if (Ty->isPointerType()) |
2773 | return true; |
2774 | |
2775 | if (Ty->isMemberPointerType()) { |
2776 | const auto *MPT = Ty->getAs<MemberPointerType>(); |
2777 | return !ABI->getMemberPointerInfo(MPT).HasPadding; |
2778 | } |
2779 | |
2780 | if (Ty->isRecordType()) { |
2781 | const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); |
2782 | |
2783 | if (Record->isInvalidDecl()) |
2784 | return false; |
2785 | |
2786 | if (Record->isUnion()) |
2787 | return unionHasUniqueObjectRepresentations(*this, Record); |
2788 | |
2789 | Optional<int64_t> StructSize = |
2790 | structHasUniqueObjectRepresentations(*this, Record); |
2791 | |
2792 | return StructSize && |
2793 | StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); |
2794 | } |
2795 | |
2796 | // FIXME: More cases to handle here (list by rsmith): |
2797 | // vectors (careful about, eg, vector of 3 foo) |
2798 | // _Complex int and friends |
2799 | // _Atomic T |
2800 | // Obj-C block pointers |
2801 | // Obj-C object pointers |
2802 | // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, |
2803 | // clk_event_t, queue_t, reserve_id_t) |
2804 | // There're also Obj-C class types and the Obj-C selector type, but I think it |
2805 | // makes sense for those to return false here. |
2806 | |
2807 | return false; |
2808 | } |
2809 | |
2810 | unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { |
2811 | unsigned count = 0; |
2812 | // Count ivars declared in class extension. |
2813 | for (const auto *Ext : OI->known_extensions()) |
2814 | count += Ext->ivar_size(); |
2815 | |
2816 | // Count ivar defined in this class's implementation. This |
2817 | // includes synthesized ivars. |
2818 | if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) |
2819 | count += ImplDecl->ivar_size(); |
2820 | |
2821 | return count; |
2822 | } |
2823 | |
2824 | bool ASTContext::isSentinelNullExpr(const Expr *E) { |
2825 | if (!E) |
2826 | return false; |
2827 | |
2828 | // nullptr_t is always treated as null. |
2829 | if (E->getType()->isNullPtrType()) return true; |
2830 | |
2831 | if (E->getType()->isAnyPointerType() && |
2832 | E->IgnoreParenCasts()->isNullPointerConstant(*this, |
2833 | Expr::NPC_ValueDependentIsNull)) |
2834 | return true; |
2835 | |
2836 | // Unfortunately, __null has type 'int'. |
2837 | if (isa<GNUNullExpr>(E)) return true; |
2838 | |
2839 | return false; |
2840 | } |
2841 | |
2842 | /// Get the implementation of ObjCInterfaceDecl, or nullptr if none |
2843 | /// exists. |
2844 | ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { |
2845 | llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator |
2846 | I = ObjCImpls.find(D); |
2847 | if (I != ObjCImpls.end()) |
2848 | return cast<ObjCImplementationDecl>(I->second); |
2849 | return nullptr; |
2850 | } |
2851 | |
2852 | /// Get the implementation of ObjCCategoryDecl, or nullptr if none |
2853 | /// exists. |
2854 | ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { |
2855 | llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator |
2856 | I = ObjCImpls.find(D); |
2857 | if (I != ObjCImpls.end()) |
2858 | return cast<ObjCCategoryImplDecl>(I->second); |
2859 | return nullptr; |
2860 | } |
2861 | |
/// Set the implementation of ObjCInterfaceDecl.
/// Overwrites any previously recorded implementation for \p IFaceD.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params")(static_cast<void> (0));
  ObjCImpls[IFaceD] = ImplD;
}
2868 | |
/// Set the implementation of ObjCCategoryDecl.
/// Overwrites any previously recorded implementation for \p CatD.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params")(static_cast<void> (0));
  ObjCImpls[CatD] = ImplD;
}
2875 | |
/// Return the recorded redeclaration of \p MD, or nullptr if none was set.
const ObjCMethodDecl *
ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
  return ObjCMethodRedecls.lookup(MD);
}
2880 | |
/// Record \p Redecl as the redeclaration of \p MD; at most one
/// redeclaration may be recorded per method.
void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
                                            const ObjCMethodDecl *Redecl) {
  assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration")(static_cast<void> (0));
  ObjCMethodRedecls[MD] = Redecl;
}
2886 | |
2887 | const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( |
2888 | const NamedDecl *ND) const { |
2889 | if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) |
2890 | return ID; |
2891 | if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) |
2892 | return CD->getClassInterface(); |
2893 | if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) |
2894 | return IMD->getClassInterface(); |
2895 | |
2896 | return nullptr; |
2897 | } |
2898 | |
/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params")(static_cast<void> (0));
  assert(VD->hasAttr<BlocksAttr>() &&(static_cast<void> (0))
         "getBlockVarCopyInits - not __block var")(static_cast<void> (0));
  auto I = BlockVarCopyInits.find(VD);
  if (I != BlockVarCopyInits.end())
    return I->second;
  // No copy-initializer was recorded for this __block variable.
  return {nullptr, false};
}
2910 | |
/// Set the copy initialization expression of a block var decl.
/// \p CanThrow records whether running the copy expression can throw.
void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params")(static_cast<void> (0));
  assert(VD->hasAttr<BlocksAttr>() &&(static_cast<void> (0))
         "setBlockVarCopyInits - not __block var")(static_cast<void> (0));
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
2919 | |
/// Allocate a TypeSourceInfo for \p T out of the context's bump allocator,
/// with room for the TypeLoc data stored inline after the object.
/// A \p DataSize of 0 means "compute the full TypeLoc data size for T".
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&(static_cast<void> (0))
           "incorrect data size provided to CreateTypeSourceInfo!")(static_cast<void> (0));

  // Placement-new into bump storage; the trailing DataSize bytes hold the
  // (uninitialized) TypeLoc data.
  auto *TInfo =
   (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T);
  return TInfo;
}
2933 | |
/// Create a TypeSourceInfo for \p T whose every location is initialized to
/// the single location \p L.
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}
2940 | |
/// Get the layout of an Objective-C interface, ignoring any implementation.
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D, nullptr);
}
2945 | |
/// Get the layout of an Objective-C implementation, i.e. the layout of its
/// class interface together with the implementation's ivars.
const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
                                        const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}
2951 | |
2952 | //===----------------------------------------------------------------------===// |
2953 | // Type creation/memoization methods |
2954 | //===----------------------------------------------------------------------===// |
2955 | |
/// Return the (memoized) ExtQuals-wrapped type combining \p baseType with
/// the non-fast qualifiers in \p quals; fast qualifiers are carried in the
/// returned QualType's low bits instead.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  // Peel off the fast qualifiers; they never live in an ExtQuals node.
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals)(static_cast<void> (0));
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);

    // Re-find the insert position: the recursive call above may have
    // inserted into the folding set and invalidated insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}
2985 | |
/// Return \p T qualified with address space \p AddressSpace. \p T must not
/// already carry a (different) address space.
QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  // Already in the requested address space (possibly via sugar): no-op.
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&(static_cast<void> (0))
         "Type cannot be in multiple addr spaces!")(static_cast<void> (0));
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}
3005 | |
/// Return \p T with its address-space qualifier removed, stripping sugar as
/// needed to reach the level where the qualifier actually lives.
QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode;

  // The address space may be attached at some inner sugar level; keep
  // stripping qualifiers and single-step desugaring until the collected
  // qualifiers include it.
  while (T.hasAddressSpace()) {
    TypeNode = Quals.strip(T);

    // If the type no longer has an address space after stripping qualifiers,
    // jump out.
    if (!QualType(TypeNode, 0).hasAddressSpace())
      break;

    // There might be sugar in the way. Strip it and try again.
    T = T.getSingleStepDesugaredType(*this);
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(TypeNode, Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}
3039 | |
/// Return \p T qualified with the Objective-C GC attribute \p GCAttr.
/// For pointer-to-pointer types the attribute is pushed onto the pointee.
QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  // Already carries the requested GC attribute: no-op.
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const auto *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      // Recurse: qualify the pointee and rebuild the outer pointer.
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&(static_cast<void> (0))
         "Type cannot have multiple ObjCGCs!")(static_cast<void> (0));
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}
3067 | |
3068 | QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { |
3069 | if (const PointerType *Ptr = T->getAs<PointerType>()) { |
3070 | QualType Pointee = Ptr->getPointeeType(); |
3071 | if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { |
3072 | return getPointerType(removeAddrSpaceQualType(Pointee)); |
3073 | } |
3074 | } |
3075 | return T; |
3076 | } |
3077 | |
3078 | const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, |
3079 | FunctionType::ExtInfo Info) { |
3080 | if (T->getExtInfo() == Info) |
3081 | return T; |
3082 | |
3083 | QualType Result; |
3084 | if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { |
3085 | Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); |
3086 | } else { |
3087 | const auto *FPT = cast<FunctionProtoType>(T); |
3088 | FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); |
3089 | EPI.ExtInfo = Info; |
3090 | Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); |
3091 | } |
3092 | |
3093 | return cast<FunctionType>(Result.getTypePtr()); |
3094 | } |
3095 | |
/// Rewrite the return type of \p FD (and every declaration in its
/// redeclaration chain) to \p ResultType, then notify the AST mutation
/// listener, if any, with the first declaration in the chain.
void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  // Walk from the most recent declaration back through the chain so every
  // redeclaration gets the updated type.
  FD = FD->getMostRecentDecl();
  while (true) {
    const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  // After the loop FD is the first declaration; report the deduction on it.
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}
3111 | |
/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
QualType ASTContext::getFunctionTypeWithExceptionSpec(
    QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) {
  // Might have some parens.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute. Rebuild both the modified
  // and the equivalent type so the attribute sugar stays consistent.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}
3143 | |
3144 | bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, |
3145 | QualType U) { |
3146 | return hasSameType(T, U) || |
3147 | (getLangOpts().CPlusPlus17 && |
3148 | hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), |
3149 | getFunctionTypeWithExceptionSpec(U, EST_None))); |
3150 | } |
3151 | |
3152 | QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { |
3153 | if (const auto *Proto = T->getAs<FunctionProtoType>()) { |
3154 | QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); |
3155 | SmallVector<QualType, 16> Args(Proto->param_types()); |
3156 | for (unsigned i = 0, n = Args.size(); i != n; ++i) |
3157 | Args[i] = removePtrSizeAddrSpace(Args[i]); |
3158 | return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); |
3159 | } |
3160 | |
3161 | if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { |
3162 | QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); |
3163 | return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); |
3164 | } |
3165 | |
3166 | return T; |
3167 | } |
3168 | |
3169 | bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { |
3170 | return hasSameType(T, U) || |
3171 | hasSameType(getFunctionTypeWithoutPtrSizes(T), |
3172 | getFunctionTypeWithoutPtrSizes(U)); |
3173 | } |
3174 | |
/// Change the exception specification on the declared type of \p FD to
/// \p ESI. If \p AsWritten is set, the as-written type in the declaration's
/// TypeSourceInfo is updated as well.
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}
3202 | |
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about; the recursive
    // call above may have inserted into the FoldingSet, invalidating the
    // previously computed InsertPos.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3230 | |
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3258 | |
3259 | QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { |
3260 | llvm::FoldingSetNodeID ID; |
3261 | AdjustedType::Profile(ID, Orig, New); |
3262 | void *InsertPos = nullptr; |
3263 | AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); |
3264 | if (AT) |
3265 | return QualType(AT, 0); |
3266 | |
3267 | QualType Canonical = getCanonicalType(New); |
3268 | |
3269 | // Get the new insert position for the node we care about. |
3270 | AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); |
Value stored to 'AT' is never read | |
3271 | assert(!AT && "Shouldn't be in the map!")(static_cast<void> (0)); |
3272 | |
3273 | AT = new (*this, TypeAlignment) |
3274 | AdjustedType(Type::Adjusted, Orig, New, Canonical); |
3275 | Types.push_back(AT); |
3276 | AdjustedTypes.InsertNode(AT, InsertPos); |
3277 | return QualType(AT, 0); |
3278 | } |
3279 | |
3280 | QualType ASTContext::getDecayedType(QualType T) const { |
3281 | assert((T->isArrayType() || T->isFunctionType()) && "T does not decay")(static_cast<void> (0)); |
3282 | |
3283 | QualType Decayed; |
3284 | |
3285 | // C99 6.7.5.3p7: |
3286 | // A declaration of a parameter as "array of type" shall be |
3287 | // adjusted to "qualified pointer to type", where the type |
3288 | // qualifiers (if any) are those specified within the [ and ] of |
3289 | // the array type derivation. |
3290 | if (T->isArrayType()) |
3291 | Decayed = getArrayDecayedType(T); |
3292 | |
3293 | // C99 6.7.5.3p8: |
3294 | // A declaration of a parameter as "function returning type" |
3295 | // shall be adjusted to "pointer to function returning type", as |
3296 | // in 6.3.2.1. |
3297 | if (T->isFunctionType()) |
3298 | Decayed = getPointerType(T); |
3299 | |
3300 | llvm::FoldingSetNodeID ID; |
3301 | AdjustedType::Profile(ID, T, Decayed); |
3302 | void *InsertPos = nullptr; |
3303 | AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); |
3304 | if (AT) |
3305 | return QualType(AT, 0); |
3306 | |
3307 | QualType Canonical = getCanonicalType(Decayed); |
3308 | |
3309 | // Get the new insert position for the node we care about. |
3310 | AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); |
3311 | assert(!AT && "Shouldn't be in the map!")(static_cast<void> (0)); |
3312 | |
3313 | AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); |
3314 | Types.push_back(AT); |
3315 | AdjustedTypes.InsertNode(AT, InsertPos); |
3316 | return QualType(AT, 0); |
3317 | } |
3318 | |
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3349 | |
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert(getCanonicalType(T) != OverloadTy &&
         "Unresolved overloaded function type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    // A reference to a reference canonicalizes to a reference to the
    // innermost pointee.
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
                                                             SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}
3389 | |
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    // A reference to a reference canonicalizes to a reference to the
    // innermost pointee.
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3423 | |
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
3453 | |
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size for
  // the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
                             IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    // Qualifiers on the element type are pulled out to the array level.
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // The SizeExpr (when kept) is stored in a trailing object.
  void *Mem = Allocate(
      ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
      TypeAlignment);
  auto *New = new (Mem)
    ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
3508 | |
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  // Desugar fully first; the switch below only needs to handle canonical
  // type classes. Top-level qualifiers are reapplied at the end.
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::ExtInt:
  case Type::DependentExtInt:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified.  All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(getVariableArrayDecayedType(
                              cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                                    lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                                  cat->getSize(),
                                  cat->getSizeExpr(),
                                  cat->getSizeModifier(),
                                  cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                                        dat->getSizeExpr(),
                                        dat->getSizeModifier(),
                                        dat->getIndexTypeCVRQualifiers(),
                                        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(iat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Normal,
                                  iat->getIndexTypeCVRQualifiers(),
                                  SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Star,
                                  vat->getIndexTypeCVRQualifiers(),
                                  vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}
3649 | |
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy,
                                          Expr *NumElts,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  // Always allocate a fresh node (see uniquing note above).
  auto *New = new (*this, TypeAlignment)
    VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}
3676 | |
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArrayType::ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.  We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    auto *newType
      = new (*this, TypeAlignment)
          DependentSizedArrayType(*this, elementType, QualType(),
                                  numElements, ASM, elementTypeQuals,
                                  brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(ID, *this,
                                   QualType(canonElementType.Ty, 0),
                                   ASM, elementTypeQuals, numElements);

  // Look for an existing canonical type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
                              QualType(), numElements, ASM, elementTypeQuals,
                              brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
3747 | |
/// Return the uniqued reference to the type for an incomplete array of the
/// specified element type.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArrayType::ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.  We also have to pull
  // qualifiers off the element type.
  QualType canon;

  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about; the recursive
    // call above may have mutated the FoldingSet.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}
3783 | |
3784 | ASTContext::BuiltinVectorTypeInfo |
3785 | ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { |
3786 | #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS){getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable (ELTS), NUMVECTORS}; \ |
3787 | {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ |
3788 | NUMVECTORS}; |
3789 | |
3790 | #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS){ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; \ |
3791 | {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; |
3792 | |
3793 | switch (Ty->getKind()) { |
3794 | default: |
3795 | llvm_unreachable("Unsupported builtin vector type")__builtin_unreachable(); |
3796 | case BuiltinType::SveInt8: |
3797 | return SVE_INT_ELTTY(8, 16, true, 1){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable (16), 1};; |
3798 | case BuiltinType::SveUint8: |
3799 | return SVE_INT_ELTTY(8, 16, false, 1){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable (16), 1};; |
3800 | case BuiltinType::SveInt8x2: |
3801 | return SVE_INT_ELTTY(8, 16, true, 2){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable (16), 2};; |
3802 | case BuiltinType::SveUint8x2: |
3803 | return SVE_INT_ELTTY(8, 16, false, 2){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable (16), 2};; |
3804 | case BuiltinType::SveInt8x3: |
3805 | return SVE_INT_ELTTY(8, 16, true, 3){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable (16), 3};; |
3806 | case BuiltinType::SveUint8x3: |
3807 | return SVE_INT_ELTTY(8, 16, false, 3){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable (16), 3};; |
3808 | case BuiltinType::SveInt8x4: |
3809 | return SVE_INT_ELTTY(8, 16, true, 4){getIntTypeForBitwidth(8, true), llvm::ElementCount::getScalable (16), 4};; |
3810 | case BuiltinType::SveUint8x4: |
3811 | return SVE_INT_ELTTY(8, 16, false, 4){getIntTypeForBitwidth(8, false), llvm::ElementCount::getScalable (16), 4};; |
3812 | case BuiltinType::SveInt16: |
3813 | return SVE_INT_ELTTY(16, 8, true, 1){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable (8), 1};; |
3814 | case BuiltinType::SveUint16: |
3815 | return SVE_INT_ELTTY(16, 8, false, 1){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable (8), 1};; |
3816 | case BuiltinType::SveInt16x2: |
3817 | return SVE_INT_ELTTY(16, 8, true, 2){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable (8), 2};; |
3818 | case BuiltinType::SveUint16x2: |
3819 | return SVE_INT_ELTTY(16, 8, false, 2){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable (8), 2};; |
3820 | case BuiltinType::SveInt16x3: |
3821 | return SVE_INT_ELTTY(16, 8, true, 3){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable (8), 3};; |
3822 | case BuiltinType::SveUint16x3: |
3823 | return SVE_INT_ELTTY(16, 8, false, 3){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable (8), 3};; |
3824 | case BuiltinType::SveInt16x4: |
3825 | return SVE_INT_ELTTY(16, 8, true, 4){getIntTypeForBitwidth(16, true), llvm::ElementCount::getScalable (8), 4};; |
3826 | case BuiltinType::SveUint16x4: |
3827 | return SVE_INT_ELTTY(16, 8, false, 4){getIntTypeForBitwidth(16, false), llvm::ElementCount::getScalable (8), 4};; |
3828 | case BuiltinType::SveInt32: |
3829 | return SVE_INT_ELTTY(32, 4, true, 1){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable (4), 1};; |
3830 | case BuiltinType::SveUint32: |
3831 | return SVE_INT_ELTTY(32, 4, false, 1){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable (4), 1};; |
3832 | case BuiltinType::SveInt32x2: |
3833 | return SVE_INT_ELTTY(32, 4, true, 2){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable (4), 2};; |
3834 | case BuiltinType::SveUint32x2: |
3835 | return SVE_INT_ELTTY(32, 4, false, 2){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable (4), 2};; |
3836 | case BuiltinType::SveInt32x3: |
3837 | return SVE_INT_ELTTY(32, 4, true, 3){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable (4), 3};; |
3838 | case BuiltinType::SveUint32x3: |
3839 | return SVE_INT_ELTTY(32, 4, false, 3){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable (4), 3};; |
3840 | case BuiltinType::SveInt32x4: |
3841 | return SVE_INT_ELTTY(32, 4, true, 4){getIntTypeForBitwidth(32, true), llvm::ElementCount::getScalable (4), 4};; |
3842 | case BuiltinType::SveUint32x4: |
3843 | return SVE_INT_ELTTY(32, 4, false, 4){getIntTypeForBitwidth(32, false), llvm::ElementCount::getScalable (4), 4};; |
3844 | case BuiltinType::SveInt64: |
3845 | return SVE_INT_ELTTY(64, 2, true, 1){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable (2), 1};; |
3846 | case BuiltinType::SveUint64: |
3847 | return SVE_INT_ELTTY(64, 2, false, 1){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable (2), 1};; |
3848 | case BuiltinType::SveInt64x2: |
3849 | return SVE_INT_ELTTY(64, 2, true, 2){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable (2), 2};; |
3850 | case BuiltinType::SveUint64x2: |
3851 | return SVE_INT_ELTTY(64, 2, false, 2){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable (2), 2};; |
3852 | case BuiltinType::SveInt64x3: |
3853 | return SVE_INT_ELTTY(64, 2, true, 3){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable (2), 3};; |
3854 | case BuiltinType::SveUint64x3: |
3855 | return SVE_INT_ELTTY(64, 2, false, 3){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable (2), 3};; |
3856 | case BuiltinType::SveInt64x4: |
3857 | return SVE_INT_ELTTY(64, 2, true, 4){getIntTypeForBitwidth(64, true), llvm::ElementCount::getScalable (2), 4};; |
3858 | case BuiltinType::SveUint64x4: |
3859 | return SVE_INT_ELTTY(64, 2, false, 4){getIntTypeForBitwidth(64, false), llvm::ElementCount::getScalable (2), 4};; |
3860 | case BuiltinType::SveBool: |
3861 | return SVE_ELTTY(BoolTy, 16, 1){BoolTy, llvm::ElementCount::getScalable(16), 1};; |
3862 | case BuiltinType::SveFloat16: |
3863 | return SVE_ELTTY(HalfTy, 8, 1){HalfTy, llvm::ElementCount::getScalable(8), 1};; |
3864 | case BuiltinType::SveFloat16x2: |
3865 | return SVE_ELTTY(HalfTy, 8, 2){HalfTy, llvm::ElementCount::getScalable(8), 2};; |
3866 | case BuiltinType::SveFloat16x3: |
3867 | return SVE_ELTTY(HalfTy, 8, 3){HalfTy, llvm::ElementCount::getScalable(8), 3};; |
3868 | case BuiltinType::SveFloat16x4: |
3869 | return SVE_ELTTY(HalfTy, 8, 4){HalfTy, llvm::ElementCount::getScalable(8), 4};; |
3870 | case BuiltinType::SveFloat32: |
3871 | return SVE_ELTTY(FloatTy, 4, 1){FloatTy, llvm::ElementCount::getScalable(4), 1};; |
3872 | case BuiltinType::SveFloat32x2: |
3873 | return SVE_ELTTY(FloatTy, 4, 2){FloatTy, llvm::ElementCount::getScalable(4), 2};; |
3874 | case BuiltinType::SveFloat32x3: |
3875 | return SVE_ELTTY(FloatTy, 4, 3){FloatTy, llvm::ElementCount::getScalable(4), 3};; |
3876 | case BuiltinType::SveFloat32x4: |
3877 | return SVE_ELTTY(FloatTy, 4, 4){FloatTy, llvm::ElementCount::getScalable(4), 4};; |
3878 | case BuiltinType::SveFloat64: |
3879 | return SVE_ELTTY(DoubleTy, 2, 1){DoubleTy, llvm::ElementCount::getScalable(2), 1};; |
3880 | case BuiltinType::SveFloat64x2: |
3881 | return SVE_ELTTY(DoubleTy, 2, 2){DoubleTy, llvm::ElementCount::getScalable(2), 2};; |
3882 | case BuiltinType::SveFloat64x3: |
3883 | return SVE_ELTTY(DoubleTy, 2, 3){DoubleTy, llvm::ElementCount::getScalable(2), 3};; |
3884 | case BuiltinType::SveFloat64x4: |
3885 | return SVE_ELTTY(DoubleTy, 2, 4){DoubleTy, llvm::ElementCount::getScalable(2), 4};; |
3886 | case BuiltinType::SveBFloat16: |
3887 | return SVE_ELTTY(BFloat16Ty, 8, 1){BFloat16Ty, llvm::ElementCount::getScalable(8), 1};; |
3888 | case BuiltinType::SveBFloat16x2: |
3889 | return SVE_ELTTY(BFloat16Ty, 8, 2){BFloat16Ty, llvm::ElementCount::getScalable(8), 2};; |
3890 | case BuiltinType::SveBFloat16x3: |
3891 | return SVE_ELTTY(BFloat16Ty, 8, 3){BFloat16Ty, llvm::ElementCount::getScalable(8), 3};; |
3892 | case BuiltinType::SveBFloat16x4: |
3893 | return SVE_ELTTY(BFloat16Ty, 8, 4){BFloat16Ty, llvm::ElementCount::getScalable(8), 4};; |
3894 | #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ |
3895 | IsSigned) \ |
3896 | case BuiltinType::Id: \ |
3897 | return {getIntTypeForBitwidth(ElBits, IsSigned), \ |
3898 | llvm::ElementCount::getScalable(NumEls), NF}; |
3899 | #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ |
3900 | case BuiltinType::Id: \ |
3901 | return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ |
3902 | llvm::ElementCount::getScalable(NumEls), NF}; |
3903 | #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ |
3904 | case BuiltinType::Id: \ |
3905 | return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; |
3906 | #include "clang/Basic/RISCVVTypes.def" |
3907 | } |
3908 | } |
3909 | |
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
///
/// Tries each scalable builtin type declared in the target's .def file and
/// returns the matching singleton; returns a null QualType when the target
/// has no scalable vector types or nothing matches.
QualType ASTContext::getScalableVectorType(QualType EltTy,
                                           unsigned NumElts) const {
  // AArch64 SVE: match on element category (int/FP/bfloat), signedness,
  // element bit-width and lane count against each declared SVE type.
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
                        IsSigned, IsFP, IsBF) \
  if (!EltTy->isBooleanType() && \
      ((EltTy->hasIntegerRepresentation() && \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
        IsFP && !IsBF) || \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
        IsBF && !IsFP)) && \
      EltTySize == ElBits && NumElts == NumEls) { \
    return SingletonId; \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
  if (EltTy->isBooleanType() && NumElts == NumEls) \
    return SingletonId;
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    // RISC-V 'V': same matching scheme, without the bfloat16 distinction.
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
                        IsFP) \
  if (!EltTy->isBooleanType() && \
      ((EltTy->hasIntegerRepresentation() && \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
       (EltTy->hasFloatingRepresentation() && IsFP)) && \
      EltTySize == ElBits && NumElts == NumEls) \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
  if (EltTy->isBooleanType() && NumElts == NumEls) \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  // No scalable builtin matches the requested element type / count.
  return QualType();
}
3950 | |
3951 | /// getVectorType - Return the unique reference to a vector type of |
3952 | /// the specified element type and size. VectorType must be a built-in type. |
3953 | QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, |
3954 | VectorType::VectorKind VecKind) const { |
3955 | assert(vecType->isBuiltinType())(static_cast<void> (0)); |
3956 | |
3957 | // Check if we've already instantiated a vector of this type. |
3958 | llvm::FoldingSetNodeID ID; |
3959 | VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); |
3960 | |
3961 | void *InsertPos = nullptr; |
3962 | if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) |
3963 | return QualType(VTP, 0); |
3964 | |
3965 | // If the element type isn't canonical, this won't be a canonical type either, |
3966 | // so fill in the canonical type field. |
3967 | QualType Canonical; |
3968 | if (!vecType.isCanonical()) { |
3969 | Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); |
3970 | |
3971 | // Get the new insert position for the node we care about. |
3972 | VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); |
3973 | assert(!NewIP && "Shouldn't be in the map!")(static_cast<void> (0)); (void)NewIP; |
3974 | } |
3975 | auto *New = new (*this, TypeAlignment) |
3976 | VectorType(vecType, NumElts, Canonical, VecKind); |
3977 | VectorTypes.InsertNode(New, InsertPos); |
3978 | Types.push_back(New); |
3979 | return QualType(New, 0); |
3980 | } |
3981 | |
3982 | QualType |
3983 | ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, |
3984 | SourceLocation AttrLoc, |
3985 | VectorType::VectorKind VecKind) const { |
3986 | llvm::FoldingSetNodeID ID; |
3987 | DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, |
3988 | VecKind); |
3989 | void *InsertPos = nullptr; |
3990 | DependentVectorType *Canon = |
3991 | DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); |
3992 | DependentVectorType *New; |
3993 | |
3994 | if (Canon) { |
3995 | New = new (*this, TypeAlignment) DependentVectorType( |
3996 | *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); |
3997 | } else { |
3998 | QualType CanonVecTy = getCanonicalType(VecType); |
3999 | if (CanonVecTy == VecType) { |
4000 | New = new (*this, TypeAlignment) DependentVectorType( |
4001 | *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); |
4002 | |
4003 | DependentVectorType *CanonCheck = |
4004 | DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); |
4005 | assert(!CanonCheck &&(static_cast<void> (0)) |
4006 | "Dependent-sized vector_size canonical type broken")(static_cast<void> (0)); |
4007 | (void)CanonCheck; |
4008 | DependentVectorTypes.InsertNode(New, InsertPos); |
4009 | } else { |
4010 | QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, |
4011 | SourceLocation(), VecKind); |
4012 | New = new (*this, TypeAlignment) DependentVectorType( |
4013 | *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); |
4014 | } |
4015 | } |
4016 | |
4017 | Types.push_back(New); |
4018 | return QualType(New, 0); |
4019 | } |
4020 | |
4021 | /// getExtVectorType - Return the unique reference to an extended vector type of |
4022 | /// the specified element type and size. VectorType must be a built-in type. |
4023 | QualType |
4024 | ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { |
4025 | assert(vecType->isBuiltinType() || vecType->isDependentType())(static_cast<void> (0)); |
4026 | |
4027 | // Check if we've already instantiated a vector of this type. |
4028 | llvm::FoldingSetNodeID ID; |
4029 | VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, |
4030 | VectorType::GenericVector); |
4031 | void *InsertPos = nullptr; |
4032 | if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) |
4033 | return QualType(VTP, 0); |
4034 | |
4035 | // If the element type isn't canonical, this won't be a canonical type either, |
4036 | // so fill in the canonical type field. |
4037 | QualType Canonical; |
4038 | if (!vecType.isCanonical()) { |
4039 | Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); |
4040 | |
4041 | // Get the new insert position for the node we care about. |
4042 | VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); |
4043 | assert(!NewIP && "Shouldn't be in the map!")(static_cast<void> (0)); (void)NewIP; |
4044 | } |
4045 | auto *New = new (*this, TypeAlignment) |
4046 | ExtVectorType(vecType, NumElts, Canonical); |
4047 | VectorTypes.InsertNode(New, InsertPos); |
4048 | Types.push_back(New); |
4049 | return QualType(New, 0); |
4050 | } |
4051 | |
4052 | QualType |
4053 | ASTContext::getDependentSizedExtVectorType(QualType vecType, |
4054 | Expr *SizeExpr, |
4055 | SourceLocation AttrLoc) const { |
4056 | llvm::FoldingSetNodeID ID; |
4057 | DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), |
4058 | SizeExpr); |
4059 | |
4060 | void *InsertPos = nullptr; |
4061 | DependentSizedExtVectorType *Canon |
4062 | = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); |
4063 | DependentSizedExtVectorType *New; |
4064 | if (Canon) { |
4065 | // We already have a canonical version of this array type; use it as |
4066 | // the canonical type for a newly-built type. |
4067 | New = new (*this, TypeAlignment) |
4068 | DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), |
4069 | SizeExpr, AttrLoc); |
4070 | } else { |
4071 | QualType CanonVecTy = getCanonicalType(vecType); |
4072 | if (CanonVecTy == vecType) { |
4073 | New = new (*this, TypeAlignment) |
4074 | DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, |
4075 | AttrLoc); |
4076 | |
4077 | DependentSizedExtVectorType *CanonCheck |
4078 | = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); |
4079 | assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken")(static_cast<void> (0)); |
4080 | (void)CanonCheck; |
4081 | DependentSizedExtVectorTypes.InsertNode(New, InsertPos); |
4082 | } else { |
4083 | QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, |
4084 | SourceLocation()); |
4085 | New = new (*this, TypeAlignment) DependentSizedExtVectorType( |
4086 | *this, vecType, CanonExtTy, SizeExpr, AttrLoc); |
4087 | } |
4088 | } |
4089 | |
4090 | Types.push_back(New); |
4091 | return QualType(New, 0); |
4092 | } |
4093 | |
4094 | QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, |
4095 | unsigned NumColumns) const { |
4096 | llvm::FoldingSetNodeID ID; |
4097 | ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, |
4098 | Type::ConstantMatrix); |
4099 | |
4100 | assert(MatrixType::isValidElementType(ElementTy) &&(static_cast<void> (0)) |
4101 | "need a valid element type")(static_cast<void> (0)); |
4102 | assert(ConstantMatrixType::isDimensionValid(NumRows) &&(static_cast<void> (0)) |
4103 | ConstantMatrixType::isDimensionValid(NumColumns) &&(static_cast<void> (0)) |
4104 | "need valid matrix dimensions")(static_cast<void> (0)); |
4105 | void *InsertPos = nullptr; |
4106 | if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) |
4107 | return QualType(MTP, 0); |
4108 | |
4109 | QualType Canonical; |
4110 | if (!ElementTy.isCanonical()) { |
4111 | Canonical = |
4112 | getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); |
4113 | |
4114 | ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); |
4115 | assert(!NewIP && "Matrix type shouldn't already exist in the map")(static_cast<void> (0)); |
4116 | (void)NewIP; |
4117 | } |
4118 | |
4119 | auto *New = new (*this, TypeAlignment) |
4120 | ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); |
4121 | MatrixTypes.InsertNode(New, InsertPos); |
4122 | Types.push_back(New); |
4123 | return QualType(New, 0); |
4124 | } |
4125 | |
4126 | QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, |
4127 | Expr *RowExpr, |
4128 | Expr *ColumnExpr, |
4129 | SourceLocation AttrLoc) const { |
4130 | QualType CanonElementTy = getCanonicalType(ElementTy); |
4131 | llvm::FoldingSetNodeID ID; |
4132 | DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, |
4133 | ColumnExpr); |
4134 | |
4135 | void *InsertPos = nullptr; |
4136 | DependentSizedMatrixType *Canon = |
4137 | DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); |
4138 | |
4139 | if (!Canon) { |
4140 | Canon = new (*this, TypeAlignment) DependentSizedMatrixType( |
4141 | *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); |
4142 | #ifndef NDEBUG1 |
4143 | DependentSizedMatrixType *CanonCheck = |
4144 | DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); |
4145 | assert(!CanonCheck && "Dependent-sized matrix canonical type broken")(static_cast<void> (0)); |
4146 | #endif |
4147 | DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); |
4148 | Types.push_back(Canon); |
4149 | } |
4150 | |
4151 | // Already have a canonical version of the matrix type |
4152 | // |
4153 | // If it exactly matches the requested type, use it directly. |
4154 | if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && |
4155 | Canon->getRowExpr() == ColumnExpr) |
4156 | return QualType(Canon, 0); |
4157 | |
4158 | // Use Canon as the canonical type for newly-built type. |
4159 | DependentSizedMatrixType *New = new (*this, TypeAlignment) |
4160 | DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, |
4161 | ColumnExpr, AttrLoc); |
4162 | Types.push_back(New); |
4163 | return QualType(New, 0); |
4164 | } |
4165 | |
4166 | QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, |
4167 | Expr *AddrSpaceExpr, |
4168 | SourceLocation AttrLoc) const { |
4169 | assert(AddrSpaceExpr->isInstantiationDependent())(static_cast<void> (0)); |
4170 | |
4171 | QualType canonPointeeType = getCanonicalType(PointeeType); |
4172 | |
4173 | void *insertPos = nullptr; |
4174 | llvm::FoldingSetNodeID ID; |
4175 | DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, |
4176 | AddrSpaceExpr); |
4177 | |
4178 | DependentAddressSpaceType *canonTy = |
4179 | DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); |
4180 | |
4181 | if (!canonTy) { |
4182 | canonTy = new (*this, TypeAlignment) |
4183 | DependentAddressSpaceType(*this, canonPointeeType, |
4184 | QualType(), AddrSpaceExpr, AttrLoc); |
4185 | DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); |
4186 | Types.push_back(canonTy); |
4187 | } |
4188 | |
4189 | if (canonPointeeType == PointeeType && |
4190 | canonTy->getAddrSpaceExpr() == AddrSpaceExpr) |
4191 | return QualType(canonTy, 0); |
4192 | |
4193 | auto *sugaredType |
4194 | = new (*this, TypeAlignment) |
4195 | DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), |
4196 | AddrSpaceExpr, AttrLoc); |
4197 | Types.push_back(sugaredType); |
4198 | return QualType(sugaredType, 0); |
4199 | } |
4200 | |
4201 | /// Determine whether \p T is canonical as the result type of a function. |
4202 | static bool isCanonicalResultType(QualType T) { |
4203 | return T.isCanonical() && |
4204 | (T.getObjCLifetime() == Qualifiers::OCL_None || |
4205 | T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); |
4206 | } |
4207 | |
4208 | /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. |
4209 | QualType |
4210 | ASTContext::getFunctionNoProtoType(QualType ResultTy, |
4211 | const FunctionType::ExtInfo &Info) const { |
4212 | // Unique functions, to guarantee there is only one function of a particular |
4213 | // structure. |
4214 | llvm::FoldingSetNodeID ID; |
4215 | FunctionNoProtoType::Profile(ID, ResultTy, Info); |
4216 | |
4217 | void *InsertPos = nullptr; |
4218 | if (FunctionNoProtoType *FT = |
4219 | FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) |
4220 | return QualType(FT, 0); |
4221 | |
4222 | QualType Canonical; |
4223 | if (!isCanonicalResultType(ResultTy)) { |
4224 | Canonical = |
4225 | getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); |
4226 | |
4227 | // Get the new insert position for the node we care about. |
4228 | FunctionNoProtoType *NewIP = |
4229 | FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); |
4230 | assert(!NewIP && "Shouldn't be in the map!")(static_cast<void> (0)); (void)NewIP; |
4231 | } |
4232 | |
4233 | auto *New = new (*this, TypeAlignment) |
4234 | FunctionNoProtoType(ResultTy, Canonical, Info); |
4235 | Types.push_back(New); |
4236 | FunctionNoProtoTypes.InsertNode(New, InsertPos); |
4237 | return QualType(New, 0); |
4238 | } |
4239 | |
4240 | CanQualType |
4241 | ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { |
4242 | CanQualType CanResultType = getCanonicalType(ResultType); |
4243 | |
4244 | // Canonical result types do not have ARC lifetime qualifiers. |
4245 | if (CanResultType.getQualifiers().hasObjCLifetime()) { |
4246 | Qualifiers Qs = CanResultType.getQualifiers(); |
4247 | Qs.removeObjCLifetime(); |
4248 | return CanQualType::CreateUnsafe( |
4249 | getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); |
4250 | } |
4251 | |
4252 | return CanResultType; |
4253 | } |
4254 | |
4255 | static bool isCanonicalExceptionSpecification( |
4256 | const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { |
4257 | if (ESI.Type == EST_None) |
4258 | return true; |
4259 | if (!NoexceptInType) |
4260 | return false; |
4261 | |
4262 | // C++17 onwards: exception specification is part of the type, as a simple |
4263 | // boolean "can this function type throw". |
4264 | if (ESI.Type == EST_BasicNoexcept) |
4265 | return true; |
4266 | |
4267 | // A noexcept(expr) specification is (possibly) canonical if expr is |
4268 | // value-dependent. |
4269 | if (ESI.Type == EST_DependentNoexcept) |
4270 | return true; |
4271 | |
4272 | // A dynamic exception specification is canonical if it only contains pack |
4273 | // expansions (so we can't tell whether it's non-throwing) and all its |
4274 | // contained types are canonical. |
4275 | if (ESI.Type == EST_Dynamic) { |
4276 | bool AnyPackExpansions = false; |
4277 | for (QualType ET : ESI.Exceptions) { |
4278 | if (!ET.isCanonical()) |
4279 | return false; |
4280 | if (ET->getAs<PackExpansionType>()) |
4281 | AnyPackExpansions = true; |
4282 | } |
4283 | return AnyPackExpansions; |
4284 | } |
4285 | |
4286 | return false; |
4287 | } |
4288 | |
4289 | QualType ASTContext::getFunctionTypeInternal( |
4290 | QualType ResultTy, ArrayRef<QualType> ArgArray, |
4291 | const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { |
4292 | size_t NumArgs = ArgArray.size(); |
4293 | |
4294 | // Unique functions, to guarantee there is only one function of a particular |
4295 | // structure. |
4296 | llvm::FoldingSetNodeID ID; |
4297 | FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, |
4298 | *this, true); |
4299 | |
4300 | QualType Canonical; |
4301 | bool Unique = false; |
4302 | |
4303 | void *InsertPos = nullptr; |
4304 | if (FunctionProtoType *FPT = |
4305 | FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { |
4306 | QualType Existing = QualType(FPT, 0); |
4307 | |
4308 | // If we find a pre-existing equivalent FunctionProtoType, we can just reuse |
4309 | // it so long as our exception specification doesn't contain a dependent |
4310 | // noexcept expression, or we're just looking for a canonical type. |
4311 | // Otherwise, we're going to need to create a type |
4312 | // sugar node to hold the concrete expression. |
4313 | if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || |
4314 | EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) |
4315 | return Existing; |
4316 | |
4317 | // We need a new type sugar node for this one, to hold the new noexcept |
4318 | // expression. We do no canonicalization here, but that's OK since we don't |
4319 | // expect to see the same noexcept expression much more than once. |
4320 | Canonical = getCanonicalType(Existing); |
4321 | Unique = true; |
4322 | } |
4323 | |
4324 | bool NoexceptInType = getLangOpts().CPlusPlus17; |
4325 | bool IsCanonicalExceptionSpec = |
4326 | isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); |
4327 | |
4328 | // Determine whether the type being created is already canonical or not. |
4329 | bool isCanonical = !Unique && IsCanonicalExceptionSpec && |
4330 | isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; |
4331 | for (unsigned i = 0; i != NumArgs && isCanonical; ++i) |
4332 | if (!ArgArray[i].isCanonicalAsParam()) |
4333 | isCanonical = false; |
4334 | |
4335 | if (OnlyWantCanonical) |
4336 | assert(isCanonical &&(static_cast<void> (0)) |
4337 | "given non-canonical parameters constructing canonical type")(static_cast<void> (0)); |
4338 | |
4339 | // If this type isn't canonical, get the canonical version of it if we don't |
4340 | // already have it. The exception spec is only partially part of the |
4341 | // canonical type, and only in C++17 onwards. |
4342 | if (!isCanonical && Canonical.isNull()) { |
4343 | SmallVector<QualType, 16> CanonicalArgs; |
4344 | CanonicalArgs.reserve(NumArgs); |
4345 | for (unsigned i = 0; i != NumArgs; ++i) |
4346 | CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); |
4347 | |
4348 | llvm::SmallVector<QualType, 8> ExceptionTypeStorage; |
4349 | FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; |
4350 | CanonicalEPI.HasTrailingReturn = false; |
4351 | |
4352 | if (IsCanonicalExceptionSpec) { |
4353 | // Exception spec is already OK. |
4354 | } else if (NoexceptInType) { |
4355 | switch (EPI.ExceptionSpec.Type) { |
4356 | case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: |
4357 | // We don't know yet. It shouldn't matter what we pick here; no-one |
4358 | // should ever look at this. |
4359 | LLVM_FALLTHROUGH[[gnu::fallthrough]]; |
4360 | case EST_None: case EST_MSAny: case EST_NoexceptFalse: |
4361 | CanonicalEPI.ExceptionSpec.Type = EST_None; |
4362 | break; |
4363 | |
4364 | // A dynamic exception specification is almost always "not noexcept", |
4365 | // with the exception that a pack expansion might expand to no types. |
4366 | case EST_Dynamic: { |
4367 | bool AnyPacks = false; |
4368 | for (QualType ET : EPI.ExceptionSpec.Exceptions) { |
4369 | if (ET->getAs<PackExpansionType>()) |
4370 | AnyPacks = true; |
4371 | ExceptionTypeStorage.push_back(getCanonicalType(ET)); |
4372 | } |
4373 | if (!AnyPacks) |
4374 | CanonicalEPI.ExceptionSpec.Type = EST_None; |
4375 | else { |
4376 | CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; |
4377 | CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; |
4378 | } |
4379 | break; |
4380 | } |
4381 | |
4382 | case EST_DynamicNone: |
4383 | case EST_BasicNoexcept: |
4384 | case EST_NoexceptTrue: |
4385 | case EST_NoThrow: |
4386 | CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; |
4387 | break; |
4388 | |
4389 | case EST_DependentNoexcept: |
4390 | llvm_unreachable("dependent noexcept is already canonical")__builtin_unreachable(); |
4391 | } |
4392 | } else { |
4393 | CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); |
4394 | } |
4395 | |
4396 | // Adjust the canonical function result type. |
4397 | CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); |
4398 | Canonical = |
4399 | getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); |
4400 | |
4401 | // Get the new insert position for the node we care about. |
4402 | FunctionProtoType *NewIP = |
4403 | FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); |
4404 | assert(!NewIP && "Shouldn't be in the map!")(static_cast<void> (0)); (void)NewIP; |
4405 | } |
4406 | |
4407 | // Compute the needed size to hold this FunctionProtoType and the |
4408 | // various trailing objects. |
4409 | auto ESH = FunctionProtoType::getExceptionSpecSize( |
4410 | EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); |
4411 | size_t Size = FunctionProtoType::totalSizeToAlloc< |
4412 | QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, |
4413 | FunctionType::ExceptionType, Expr *, FunctionDecl *, |
4414 | FunctionProtoType::ExtParameterInfo, Qualifiers>( |
4415 | NumArgs, EPI.Variadic, |
4416 | FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type), |
4417 | ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, |
4418 | EPI.ExtParameterInfos ? NumArgs : 0, |
4419 | EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); |
4420 | |
4421 | auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); |
4422 | FunctionProtoType::ExtProtoInfo newEPI = EPI; |
4423 | new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); |
4424 | Types.push_back(FTP); |
4425 | if (!Unique) |
4426 | FunctionProtoTypes.InsertNode(FTP, InsertPos); |
4427 | return QualType(FTP, 0); |
4428 | } |
4429 | |
/// Return the unique pipe type with the given element type and
/// read-only/write-only flavor, creating it on first use.
QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
  llvm::FoldingSetNodeID ID;
  PipeType::Profile(ID, T, ReadOnly);

  // Reuse an existing node if one was already uniqued for this profile.
  void *InsertPos = nullptr;
  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pipe element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPipeType(getCanonicalType(T), ReadOnly);

    // Get the new insert position for the node we care about.
    // (The recursive call above may have grown the folding set and
    // invalidated the previous InsertPos.)
    PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!")(static_cast<void> (0));
    (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
  Types.push_back(New);
  PipeTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
4454 | |
4455 | QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { |
4456 | // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. |
4457 | return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant) |
4458 | : Ty; |
4459 | } |
4460 | |
4461 | QualType ASTContext::getReadPipeType(QualType T) const { |
4462 | return getPipeType(T, true); |
4463 | } |
4464 | |
4465 | QualType ASTContext::getWritePipeType(QualType T) const { |
4466 | return getPipeType(T, false); |
4467 | } |
4468 | |
4469 | QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const { |
4470 | llvm::FoldingSetNodeID ID; |
4471 | ExtIntType::Profile(ID, IsUnsigned, NumBits); |
4472 | |
4473 | void *InsertPos = nullptr; |
4474 | if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) |
4475 | return QualType(EIT, 0); |
4476 | |
4477 | auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits); |
4478 | ExtIntTypes.InsertNode(New, InsertPos); |
4479 | Types.push_back(New); |
4480 | return QualType(New, 0); |
4481 | } |
4482 | |
/// Return the unique dependent _ExtInt type whose bit width is the given
/// instantiation-dependent expression.
QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent")(static_cast<void> (0));
  llvm::FoldingSetNodeID ID;
  DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  // Reuse an existing node when the same (signedness, expr) was seen before.
  void *InsertPos = nullptr;
  if (DependentExtIntType *Existing =
          DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, TypeAlignment)
      DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
  DependentExtIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}
4501 | |
4502 | #ifndef NDEBUG1 |
4503 | static bool NeedsInjectedClassNameType(const RecordDecl *D) { |
4504 | if (!isa<CXXRecordDecl>(D)) return false; |
4505 | const auto *RD = cast<CXXRecordDecl>(D); |
4506 | if (isa<ClassTemplatePartialSpecializationDecl>(RD)) |
4507 | return true; |
4508 | if (RD->getDescribedClassTemplate() && |
4509 | !isa<ClassTemplateSpecializationDecl>(RD)) |
4510 | return true; |
4511 | return false; |
4512 | } |
4513 | #endif |
4514 | |
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
/// The node is cached on the declaration (TypeForDecl) and shared across
/// redeclarations.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl))(static_cast<void> (0));
  if (Decl->TypeForDecl) {
    // Already computed for this declaration; nothing to do.
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl))(static_cast<void> (0));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    // Share the type node that was created for an earlier declaration.
    assert(PrevDecl->TypeForDecl && "previous declaration has no type")(static_cast<void> (0));
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl))(static_cast<void> (0));
  } else {
    // First declaration: build and record a fresh node.
    Type *newType =
        new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
4534 | |
/// getTypeDeclType - Return the unique reference to the type for the
/// specified type declaration. Slow path: dispatches on the concrete kind
/// of TypeDecl when no cached TypeForDecl exists yet.
QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
  assert(Decl && "Passed null for Decl param")(static_cast<void> (0));
  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case")(static_cast<void> (0));

  // Typedefs and alias-declarations get a TypedefType.
  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
    return getTypedefType(Typedef);

  assert(!isa<TemplateTypeParmDecl>(Decl) &&(static_cast<void> (0))
         "Template type parameter types are always available.")(static_cast<void> (0));

  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
    assert(Record->isFirstDecl() && "struct/union has previous declaration")(static_cast<void> (0));
    assert(!NeedsInjectedClassNameType(Record))(static_cast<void> (0));
    return getRecordType(Record);
  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
    assert(Enum->isFirstDecl() && "enum has previous declaration")(static_cast<void> (0));
    return getEnumType(Enum);
  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
    // Unresolved using declarations get a bespoke node cached directly on
    // the declaration.
    Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  } else
    llvm_unreachable("TypeDecl without a type?")__builtin_unreachable();

  return QualType(Decl->TypeForDecl, 0);
}
4563 | |
4564 | /// getTypedefType - Return the unique reference to the type for the |
4565 | /// specified typedef name decl. |
4566 | QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, |
4567 | QualType Underlying) const { |
4568 | if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); |
4569 | |
4570 | if (Underlying.isNull()) |
4571 | Underlying = Decl->getUnderlyingType(); |
4572 | QualType Canonical = getCanonicalType(Underlying); |
4573 | auto *newType = new (*this, TypeAlignment) |
4574 | TypedefType(Type::Typedef, Decl, Underlying, Canonical); |
4575 | Decl->TypeForDecl = newType; |
4576 | Types.push_back(newType); |
4577 | return QualType(newType, 0); |
4578 | } |
4579 | |
4580 | QualType ASTContext::getRecordType(const RecordDecl *Decl) const { |
4581 | if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); |
4582 | |
4583 | if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) |
4584 | if (PrevDecl->TypeForDecl) |
4585 | return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); |
4586 | |
4587 | auto *newType = new (*this, TypeAlignment) RecordType(Decl); |
4588 | Decl->TypeForDecl = newType; |
4589 | Types.push_back(newType); |
4590 | return QualType(newType, 0); |
4591 | } |
4592 | |
4593 | QualType ASTContext::getEnumType(const EnumDecl *Decl) const { |
4594 | if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); |
4595 | |
4596 | if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) |
4597 | if (PrevDecl->TypeForDecl) |
4598 | return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); |
4599 | |
4600 | auto *newType = new (*this, TypeAlignment) EnumType(Decl); |
4601 | Decl->TypeForDecl = newType; |
4602 | Types.push_back(newType); |
4603 | return QualType(newType, 0); |
4604 | } |
4605 | |
4606 | QualType ASTContext::getAttributedType(attr::Kind attrKind, |
4607 | QualType modifiedType, |
4608 | QualType equivalentType) { |
4609 | llvm::FoldingSetNodeID id; |
4610 | AttributedType::Profile(id, attrKind, modifiedType, equivalentType); |
4611 | |
4612 | void *insertPos = nullptr; |
4613 | AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); |
4614 | if (type) return QualType(type, 0); |
4615 | |
4616 | QualType canon = getCanonicalType(equivalentType); |
4617 | type = new (*this, TypeAlignment) |
4618 | AttributedType(canon, attrKind, modifiedType, equivalentType); |
4619 | |
4620 | Types.push_back(type); |
4621 | AttributedTypes.InsertNode(type, insertPos); |
4622 | |
4623 | return QualType(type, 0); |
4624 | } |
4625 | |
/// Retrieve a substitution-result type: the type produced by substituting
/// \p Replacement for the template type parameter \p Parm.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()(static_cast<void> (0))
         && "replacement types must always be canonical")(static_cast<void> (0));

  // Substitution nodes are uniqued on (parameter, replacement).
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm
    = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    // First time this substitution is seen: create and register the node.
    SubstParm = new (*this, TypeAlignment)
      SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}
4648 | |
/// Retrieve a substitution-result type for substituting an entire argument
/// pack \p ArgPack for the template type parameter pack \p Parm.
QualType ASTContext::getSubstTemplateTypeParmPackType(
                                          const TemplateTypeParmType *Parm,
                                          const TemplateArgument &ArgPack) {
#ifndef NDEBUG1
  // Debug-only sanity check: every pack element must be a canonical type.
  for (const auto &P : ArgPack.pack_elements()) {
    assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type")(static_cast<void> (0));
    assert(P.getAsType().isCanonical() && "Pack contains non-canonical type")(static_cast<void> (0));
  }
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  // If the parameter itself isn't canonical, build (or find) the canonical
  // node first so the new node can point at it.
  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    // Recompute InsertPos: the recursive call may have resized the set.
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  auto *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
4682 | |
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    // A named parameter is non-canonical; its canonical type is the
    // unnamed (depth, index, pack) form.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    // The recursive call above must not have created this exact node.
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken")(static_cast<void> (0));
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
      TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}
4715 | |
/// Build a TypeSourceInfo for a template specialization type, filling in
/// source-location information for the template name and each argument.
TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&(static_cast<void> (0))
         "No dependent template names here!")(static_cast<void> (0));
  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  // No 'template' keyword was written for this form.
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  // Copy per-argument location info from the written argument list.
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}
4736 | |
/// Convenience overload: strip the location information off each written
/// template argument and delegate to the ArrayRef form.
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&(static_cast<void> (0))
         "No dependent template names here!")(static_cast<void> (0));

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args.arguments())
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}
4751 | |
4752 | #ifndef NDEBUG1 |
4753 | static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) { |
4754 | for (const TemplateArgument &Arg : Args) |
4755 | if (Arg.isPackExpansion()) |
4756 | return true; |
4757 | |
4758 | return true; |
4759 | } |
4760 | #endif |
4761 | |
/// Build a (non-canonical) template specialization type from a template
/// name and an already-converted argument list. \p Underlying, when
/// non-null, supplies the aliased/canonical type for alias templates.
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&(static_cast<void> (0))
         "No dependent template names here!")(static_cast<void> (0));
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());

  bool IsTypeAlias =
      Template.getAsTemplateDecl() &&
      isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
    assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&(static_cast<void> (0))
           "Caller must compute aliased type")(static_cast<void> (0));
    IsTypeAlias = false;
    CanonType = getCanonicalTemplateSpecializationType(Template, Args);
  }

  // Allocate the (non-canonical) template specialization type, but don't
  // try to unique it: these types typically have location information that
  // we don't unique and don't want to lose.
  // Alias specializations carry one extra QualType trailing object for the
  // aliased type.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                       sizeof(TemplateArgument) * Args.size() +
                       (IsTypeAlias? sizeof(QualType) : 0),
                       TypeAlignment);
  auto *Spec
    = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
                                           IsTypeAlias ? Underlying : QualType());

  Types.push_back(Spec);
  return QualType(Spec, 0);
}
4801 | |
/// Build (or find) the canonical, uniqued template specialization type for
/// the given template and arguments: canonical template name plus the
/// canonical form of every argument.
QualType ASTContext::getCanonicalTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> Args) const {
  assert(!Template.getAsDependentTemplateName() &&(static_cast<void> (0))
         "No dependent template names here!")(static_cast<void> (0));

  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());

  // Build the canonical template specialization type.
  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
  SmallVector<TemplateArgument, 4> CanonArgs;
  unsigned NumArgs = Args.size();
  CanonArgs.reserve(NumArgs);
  for (const TemplateArgument &Arg : Args)
    CanonArgs.push_back(getCanonicalTemplateArgument(Arg));

  // Determine whether this canonical template specialization type already
  // exists.
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, CanonTemplate,
                                      CanonArgs, *this);

  void *InsertPos = nullptr;
  TemplateSpecializationType *Spec
    = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Spec) {
    // Allocate a new canonical template specialization type.
    // The arguments are stored as trailing objects.
    void *Mem = Allocate((sizeof(TemplateSpecializationType) +
                          sizeof(TemplateArgument) * NumArgs),
                         TypeAlignment);
    Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
                                                CanonArgs,
                                                QualType(), QualType());
    Types.push_back(Spec);
    TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  }

  assert(Spec->isDependentType() &&(static_cast<void> (0))
         "Non-dependent template-id type must have a canonical type")(static_cast<void> (0));
  return QualType(Spec, 0);
}
4845 | |
/// Return the unique ElaboratedType wrapping \p NamedType with the given
/// keyword (struct/class/enum/typename/none) and nested-name-specifier.
/// \p OwnedTagDecl, when non-null, is a tag declared inside this type
/// specifier and stored as a trailing object.
QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                                       NestedNameSpecifier *NNS,
                                       QualType NamedType,
                                       TagDecl *OwnedTagDecl) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);

  void *InsertPos = nullptr;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // The canonical type is the canonical form of the named type; the
  // elaboration is sugar.
  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(NamedType);
    // Canonicalization must not have created this exact node.
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken")(static_cast<void> (0));
    (void)CheckT;
  }

  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
                       TypeAlignment);
  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);

  Types.push_back(T);
  ElaboratedTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
4874 | |
/// Return the unique ParenType sugaring \p InnerType (a parenthesized
/// type as written in the source).
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // Parentheses are pure sugar: the canonical type is the canonical form
  // of the inner type.
  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(InnerType);
    // Canonicalization must not have created this exact node.
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken")(static_cast<void> (0));
    (void)CheckT;
  }

  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
  Types.push_back(T);
  ParenTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
4898 | |
4899 | QualType |
4900 | ASTContext::getMacroQualifiedType(QualType UnderlyingTy, |
4901 | const IdentifierInfo *MacroII) const { |
4902 | QualType Canon = UnderlyingTy; |
4903 | if (!Canon.isCanonical()) |
4904 | Canon = getCanonicalType(UnderlyingTy); |
4905 | |
4906 | auto *newType = new (*this, TypeAlignment) |
4907 | MacroQualifiedType(UnderlyingTy, Canon, MacroII); |
4908 | Types.push_back(newType); |
4909 | return QualType(newType, 0); |
4910 | } |
4911 | |
4912 | QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, |
4913 | NestedNameSpecifier *NNS, |
4914 | const IdentifierInfo *Name, |
4915 | QualType Canon) const { |
4916 | if (Canon.isNull()) { |
4917 | NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); |
4918 | if (CanonNNS != NNS) |
4919 | Canon = getDependentNameType(Keyword, CanonNNS, Name); |
4920 | } |
4921 | |
4922 | llvm::FoldingSetNodeID ID; |
4923 | DependentNameType::Profile(ID, Keyword, NNS, Name); |
4924 | |
4925 | void *InsertPos = nullptr; |
4926 | DependentNameType *T |
4927 | = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); |
4928 | if (T) |
4929 | return QualType(T, 0); |
4930 | |
4931 | T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); |
4932 | Types.push_back(T); |
4933 | DependentNameTypes.InsertNode(T, InsertPos); |
4934 | return QualType(T, 0); |
4935 | } |
4936 | |
4937 | QualType |
4938 | ASTContext::getDependentTemplateSpecializationType( |
4939 | ElaboratedTypeKeyword Keyword, |
4940 | NestedNameSpecifier *NNS, |
4941 | const IdentifierInfo *Name, |
4942 | const TemplateArgumentListInfo &Args) const { |
4943 | // TODO: avoid this copy |
4944 | SmallVector<TemplateArgument, 16> ArgCopy; |
4945 | for (unsigned I = 0, E = Args.size(); I != E; ++I) |
4946 | ArgCopy.push_back(Args[I].getArgument()); |
4947 | return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); |
4948 | } |
4949 | |
/// Build (or find) the uniqued dependent template specialization type
/// 'keyword NNS::Name<Args...>'. The canonical form uses the canonical
/// NNS, canonical arguments, and ETK_Typename for ETK_None.
QualType
ASTContext::getDependentTemplateSpecializationType(
                 ElaboratedTypeKeyword Keyword,
                 NestedNameSpecifier *NNS,
                 const IdentifierInfo *Name,
                 ArrayRef<TemplateArgument> Args) const {
  assert((!NNS || NNS->isDependent()) &&(static_cast<void> (0))
         "nested-name-specifier must be dependent")(static_cast<void> (0));

  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
                                               Name, Args);

  void *InsertPos = nullptr;
  DependentTemplateSpecializationType *T
    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);

  // ETK_None canonicalizes to ETK_Typename for this dependent form.
  ElaboratedTypeKeyword CanonKeyword = Keyword;
  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;

  // Canonicalize each argument, tracking whether anything changed.
  bool AnyNonCanonArgs = false;
  unsigned NumArgs = Args.size();
  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
  for (unsigned I = 0; I != NumArgs; ++I) {
    CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
    if (!CanonArgs[I].structurallyEquals(Args[I]))
      AnyNonCanonArgs = true;
  }

  QualType Canon;
  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
    Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
                                                   Name,
                                                   CanonArgs);

    // Find the insert position again.
    // (Building the canonical node may have resized the folding set.)
    DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // The arguments are stored as trailing objects.
  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * NumArgs),
                       TypeAlignment);
  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
                                                    Name, Args, Canon);
  Types.push_back(T);
  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5002 | |
/// Build the template argument that a template parameter "injects" into
/// its own template's scope: a type argument for type parameters, an
/// expression for non-type parameters, and a template name for template
/// template parameters. Packs are wrapped in a one-element pack.
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
    // Type parameter: the argument is the parameter's own type.
    QualType ArgType = getTypeDeclType(TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(ArgType, None);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
    // Non-type parameter: the argument is a reference to the parameter.
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    if (T->isRecordType())
      T.addConst();
    Expr *E = new (*this) DeclRefExpr(
        *this, NTTP, /*enclosing*/ false, T,
        Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
                                        None);
    Arg = TemplateArgument(E);
  } else {
    // Template template parameter: the argument is the template name.
    auto *TTP = cast<TemplateTemplateParmDecl>(Param);
    if (TTP->isParameterPack())
      Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
    else
      Arg = TemplateArgument(TemplateName(TTP));
  }

  // A pack parameter's injected argument is itself wrapped in a pack.
  if (Param->isTemplateParameterPack())
    Arg = TemplateArgument::CreatePackCopy(*this, Arg);

  return Arg;
}
5041 | |
5042 | void |
5043 | ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, |
5044 | SmallVectorImpl<TemplateArgument> &Args) { |
5045 | Args.reserve(Args.size() + Params->size()); |
5046 | |
5047 | for (NamedDecl *Param : *Params) |
5048 | Args.push_back(getInjectedTemplateArg(Param)); |
5049 | } |
5050 | |
/// Return the unique pack expansion type 'Pattern...' with the given
/// (optional) fixed number of expansions. When \p ExpectPackInType is set,
/// the pattern must contain an unexpanded parameter pack.
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          Optional<unsigned> NumExpansions,
                                          bool ExpectPackInType) {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&(static_cast<void> (0))
         "Pack expansions must expand one or more parameter packs")(static_cast<void> (0));

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  // Non-canonical pattern: the canonical node expands the canonical
  // pattern. The canonical pattern need not contain a pack (e.g. it may
  // have been substituted away), hence ExpectPackInType=false.
  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, TypeAlignment)
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5081 | |
5082 | /// CmpProtocolNames - Comparison predicate for sorting protocols |
5083 | /// alphabetically. |
5084 | static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, |
5085 | ObjCProtocolDecl *const *RHS) { |
5086 | return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); |
5087 | } |
5088 | |
5089 | static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { |
5090 | if (Protocols.empty()) return true; |
5091 | |
5092 | if (Protocols[0]->getCanonicalDecl() != Protocols[0]) |
5093 | return false; |
5094 | |
5095 | for (unsigned i = 1; i != Protocols.size(); ++i) |
5096 | if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || |
5097 | Protocols[i]->getCanonicalDecl() != Protocols[i]) |
5098 | return false; |
5099 | return true; |
5100 | } |
5101 | |
5102 | static void |
5103 | SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { |
5104 | // Sort protocols, keyed by name. |
5105 | llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); |
5106 | |
5107 | // Canonicalize. |
5108 | for (ObjCProtocolDecl *&P : Protocols) |
5109 | P = P->getCanonicalDecl(); |
5110 | |
5111 | // Remove duplicates. |
5112 | auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); |
5113 | Protocols.erase(ProtocolsEnd, Protocols.end()); |
5114 | } |
5115 | |
5116 | QualType ASTContext::getObjCObjectType(QualType BaseType, |
5117 | ObjCProtocolDecl * const *Protocols, |
5118 | unsigned NumProtocols) const { |
5119 | return getObjCObjectType(BaseType, {}, |
5120 | llvm::makeArrayRef(Protocols, NumProtocols), |
5121 | /*isKindOf=*/false); |
5122 | } |
5123 | |
/// Return the uniqued ObjCObjectType for the given base type, type
/// arguments, protocol list, and __kindof flag. The canonical form has a
/// canonical base, canonical type arguments, and a sorted-and-uniqued
/// protocol list.
QualType ASTContext::getObjCObjectType(
           QualType baseType,
           ArrayRef<QualType> typeArgs,
           ArrayRef<ObjCProtocolDecl *> protocols,
           bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
                                          effectiveTypeArgs.end(),
                                          [&](QualType type) {
                                            return type.isCanonical();
                                          });
  bool protocolsSorted = areSortedAndUniqued(protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(getCanonicalType(typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    // Determine the canonical (sorted, canonical-decl, unique) protocols.
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(protocols.begin(), protocols.end());
      SortAndUniqueProtocols(canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
                                  canonProtocols, isKindOf);

    // Regenerate InsertPos.
    // (The recursive call may have resized the folding set.)
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // Type args and protocols are stored as trailing objects.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  auto *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);

  Types.push_back(T);
  ObjCObjectTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
5203 | |
5204 | /// Apply Objective-C protocol qualifiers to the given type. |
5205 | /// If this is for the canonical type of a type parameter, we can apply |
5206 | /// protocol qualifiers on the ObjCObjectPointerType. |
5207 | QualType |
5208 | ASTContext::applyObjCProtocolQualifiers(QualType type, |
5209 | ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, |
5210 | bool allowOnPointerType) const { |
5211 | hasError = false; |
5212 | |
5213 | if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { |
5214 | return getObjCTypeParamType(objT->getDecl(), protocols); |
5215 | } |
5216 | |
5217 | // Apply protocol qualifiers to ObjCObjectPointerType. |
5218 | if (allowOnPointerType) { |
5219 | if (const auto *objPtr = |
5220 | dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { |
5221 | const ObjCObjectType *objT = objPtr->getObjectType(); |
5222 | // Merge protocol lists and construct ObjCObjectType. |
5223 | SmallVector<ObjCProtocolDecl*, 8> protocolsVec; |
5224 | protocolsVec.append(objT->qual_begin(), |
5225 | objT->qual_end()); |
5226 | protocolsVec.append(protocols.begin(), protocols.end()); |
5227 | ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; |
5228 | type = getObjCObjectType( |
5229 | objT->getBaseType(), |
5230 | objT->getTypeArgsAsWritten(), |
5231 | protocols, |
5232 | objT->isKindOfTypeAsWritten()); |
5233 | return getObjCObjectPointerType(type); |
5234 | } |
5235 | } |
5236 | |
5237 | // Apply protocol qualifiers to ObjCObjectType. |
5238 | if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ |
5239 | // FIXME: Check for protocols to which the class type is already |
5240 | // known to conform. |
5241 | |
5242 | return getObjCObjectType(objT->getBaseType(), |
5243 | objT->getTypeArgsAsWritten(), |
5244 | protocols, |
5245 | objT->isKindOfTypeAsWritten()); |
5246 | } |
5247 | |
5248 | // If the canonical type is ObjCObjectType, ... |
5249 | if (type->isObjCObjectType()) { |
5250 | // Silently overwrite any existing protocol qualifiers. |
5251 | // TODO: determine whether that's the right thing to do. |
5252 | |
5253 | // FIXME: Check for protocols to which the class type is already |
5254 | // known to conform. |
5255 | return getObjCObjectType(type, {}, protocols, false); |
5256 | } |
5257 | |
5258 | // id<protocol-list> |
5259 | if (type->isObjCIdType()) { |
5260 | const auto *objPtr = type->castAs<ObjCObjectPointerType>(); |
5261 | type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, |
5262 | objPtr->isKindOfType()); |
5263 | return getObjCObjectPointerType(type); |
5264 | } |
5265 | |
5266 | // Class<protocol-list> |
5267 | if (type->isObjCClassType()) { |
5268 | const auto *objPtr = type->castAs<ObjCObjectPointerType>(); |
5269 | type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, |
5270 | objPtr->isKindOfType()); |
5271 | return getObjCObjectPointerType(type); |
5272 | } |
5273 | |
5274 | hasError = true; |
5275 | return type; |
5276 | } |
5277 | |
/// Retrieve (or create) the uniqued ObjCTypeParamType for the given type
/// parameter declaration carrying the given protocol qualifiers.  The
/// result canonicalizes to the (protocol-qualified) underlying type.
QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
      ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers to the canonical underlying type as
    // well, so two parameters with the same bound and protocols share a
    // canonical type.
    bool hasError;
    Canonical = getCanonicalType(applyObjCProtocolQualifiers(
        Canonical, protocols, hasError, true /*allowOnPointerType*/));
    assert(!hasError && "Error when apply protocol qualifier to bound type");
  }

  // Allocate with trailing storage for the protocol list.
  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(newType);
  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
  return QualType(newType, 0);
}
5308 | |
5309 | void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, |
5310 | ObjCTypeParamDecl *New) const { |
5311 | New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); |
5312 | // Update TypeForDecl after updating TypeSourceInfo. |
5313 | auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); |
5314 | SmallVector<ObjCProtocolDecl *, 8> protocols; |
5315 | protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); |
5316 | QualType UpdatedTy = getObjCTypeParamType(New, protocols); |
5317 | New->setTypeForDecl(UpdatedTy.getTypePtr()); |
5318 | } |
5319 | |
5320 | /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's |
5321 | /// protocol list adopt all protocols in QT's qualified-id protocol |
5322 | /// list. |
5323 | bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, |
5324 | ObjCInterfaceDecl *IC) { |
5325 | if (!QT->isObjCQualifiedIdType()) |
5326 | return false; |
5327 | |
5328 | if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { |
5329 | // If both the right and left sides have qualifiers. |
5330 | for (auto *Proto : OPT->quals()) { |
5331 | if (!IC->ClassImplementsProtocol(Proto, false)) |
5332 | return false; |
5333 | } |
5334 | return true; |
5335 | } |
5336 | return false; |
5337 | } |
5338 | |
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  // Gather every protocol IDecl conforms to, directly or transitively.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(IDecl, InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(Proto, PI)) {
        Conforms = true;
        break;
      }
    }
    // One protocol in the id<plist> with no compatible counterpart ends
    // this first check.
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;

  // Fallback direction: accept if every protocol IDecl inherits is adopted
  // by (compatible with) some protocol in the qualified-id's list.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
5385 | |
5386 | /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for |
5387 | /// the given object type. |
5388 | QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { |
5389 | llvm::FoldingSetNodeID ID; |
5390 | ObjCObjectPointerType::Profile(ID, ObjectT); |
5391 | |
5392 | void *InsertPos = nullptr; |
5393 | if (ObjCObjectPointerType *QT = |
5394 | ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) |
5395 | return QualType(QT, 0); |
5396 | |
5397 | // Find the canonical object type. |
5398 | QualType Canonical; |
5399 | if (!ObjectT.isCanonical()) { |
5400 | Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); |
5401 | |
5402 | // Regenerate InsertPos. |
5403 | ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); |
5404 | } |
5405 | |
5406 | // No match. |
5407 | void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); |
5408 | auto *QType = |
5409 | new (Mem) ObjCObjectPointerType(Canonical, ObjectT); |
5410 | |
5411 | Types.push_back(QType); |
5412 | ObjCObjectPointerTypes.InsertNode(QType, InsertPos); |
5413 | return QualType(QType, 0); |
5414 | } |
5415 | |
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // The type node is cached directly on the declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    // All redeclarations share one ObjCInterfaceType node.
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}
5439 | |
5440 | /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique |
5441 | /// TypeOfExprType AST's (since expression's are never shared). For example, |
5442 | /// multiple declarations that refer to "typeof(x)" all contain different |
5443 | /// DeclRefExpr's. This doesn't effect the type checker, since it operates |
5444 | /// on canonical type's (which are always unique). |
5445 | QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { |
5446 | TypeOfExprType *toe; |
5447 | if (tofExpr->isTypeDependent()) { |
5448 | llvm::FoldingSetNodeID ID; |
5449 | DependentTypeOfExprType::Profile(ID, *this, tofExpr); |
5450 | |
5451 | void *InsertPos = nullptr; |
5452 | DependentTypeOfExprType *Canon |
5453 | = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); |
5454 | if (Canon) { |
5455 | // We already have a "canonical" version of an identical, dependent |
5456 | // typeof(expr) type. Use that as our canonical type. |
5457 | toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, |
5458 | QualType((TypeOfExprType*)Canon, 0)); |
5459 | } else { |
5460 | // Build a new, canonical typeof(expr) type. |
5461 | Canon |
5462 | = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); |
5463 | DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); |
5464 | toe = Canon; |
5465 | } |
5466 | } else { |
5467 | QualType Canonical = getCanonicalType(tofExpr->getType()); |
5468 | toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); |
5469 | } |
5470 | Types.push_back(toe); |
5471 | return QualType(toe, 0); |
5472 | } |
5473 | |
5474 | /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique |
5475 | /// TypeOfType nodes. The only motivation to unique these nodes would be |
5476 | /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be |
5477 | /// an issue. This doesn't affect the type checker, since it operates |
5478 | /// on canonical types (which are always unique). |
5479 | QualType ASTContext::getTypeOfType(QualType tofType) const { |
5480 | QualType Canonical = getCanonicalType(tofType); |
5481 | auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); |
5482 | Types.push_back(tot); |
5483 | return QualType(tot, 0); |
5484 | } |
5485 | |
5486 | /// getReferenceQualifiedType - Given an expr, will return the type for |
5487 | /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions |
5488 | /// and class member access into account. |
5489 | QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { |
5490 | // C++11 [dcl.type.simple]p4: |
5491 | // [...] |
5492 | QualType T = E->getType(); |
5493 | switch (E->getValueKind()) { |
5494 | // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the |
5495 | // type of e; |
5496 | case VK_XValue: |
5497 | return getRValueReferenceType(T); |
5498 | // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the |
5499 | // type of e; |
5500 | case VK_LValue: |
5501 | return getLValueReferenceType(T); |
5502 | // - otherwise, decltype(e) is the type of e. |
5503 | case VK_PRValue: |
5504 | return T; |
5505 | } |
5506 | llvm_unreachable("Unknown value kind")__builtin_unreachable(); |
5507 | } |
5508 | |
5509 | /// Unlike many "get<Type>" functions, we don't unique DecltypeType |
5510 | /// nodes. This would never be helpful, since each such type has its own |
5511 | /// expression, and would not give a significant memory saving, since there |
5512 | /// is an Expr tree under each such type. |
5513 | QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { |
5514 | DecltypeType *dt; |
5515 | |
5516 | // C++11 [temp.type]p2: |
5517 | // If an expression e involves a template parameter, decltype(e) denotes a |
5518 | // unique dependent type. Two such decltype-specifiers refer to the same |
5519 | // type only if their expressions are equivalent (14.5.6.1). |
5520 | if (e->isInstantiationDependent()) { |
5521 | llvm::FoldingSetNodeID ID; |
5522 | DependentDecltypeType::Profile(ID, *this, e); |
5523 | |
5524 | void *InsertPos = nullptr; |
5525 | DependentDecltypeType *Canon |
5526 | = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); |
5527 | if (!Canon) { |
5528 | // Build a new, canonical decltype(expr) type. |
5529 | Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); |
5530 | DependentDecltypeTypes.InsertNode(Canon, InsertPos); |
5531 | } |
5532 | dt = new (*this, TypeAlignment) |
5533 | DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); |
5534 | } else { |
5535 | dt = new (*this, TypeAlignment) |
5536 | DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); |
5537 | } |
5538 | Types.push_back(dt); |
5539 | return QualType(dt, 0); |
5540 | } |
5541 | |
/// getUnaryTransformationType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  UnaryTransformType *ut = nullptr;

  if (BaseType->isDependentType()) {
    // The transform cannot be evaluated yet; unique a canonical dependent
    // node keyed on the canonical base type and the transform kind.
    llvm::FoldingSetNodeID ID;
    DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);

    void *InsertPos = nullptr;
    DependentUnaryTransformType *Canon
      = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);

    if (!Canon) {
      // Build a new, canonical __underlying_type(type) type.
      Canon = new (*this, TypeAlignment)
             DependentUnaryTransformType(*this, getCanonicalType(BaseType),
                                         Kind);
      DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
    }
    // In the dependent case the underlying type is unknown, so an empty
    // QualType() is stored for it (not the caller's UnderlyingType).
    ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
                                                        QualType(), Kind,
                                                        QualType(Canon, 0));
  } else {
    // Non-dependent: the caller supplied the already-computed underlying
    // type; canonicalize through it.
    QualType CanonType = getCanonicalType(UnderlyingType);
    ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
                                                        UnderlyingType, Kind,
                                                        CanonType);
  }
  Types.push_back(ut);
  return QualType(ut, 0);
}
5578 | |
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
                        bool IsDependent, bool IsPack,
                        ConceptDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
  // The plain undeduced, unconstrained 'auto' has a dedicated singleton.
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
                    TypeConstraintConcept, TypeConstraintArgs);
  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // Allocate with trailing storage for the constraint arguments.
  void *Mem = Allocate(sizeof(AutoType) +
                       sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       TypeAlignment);
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      TypeConstraintConcept, TypeConstraintArgs);
  Types.push_back(AT);
  // InsertPos is only valid when the lookup above actually ran and missed.
  if (InsertPos)
    AutoTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
5614 | |
5615 | /// Return the uniqued reference to the deduced template specialization type |
5616 | /// which has been deduced to the given type, or to the canonical undeduced |
5617 | /// such type, or the canonical deduced-but-dependent such type. |
5618 | QualType ASTContext::getDeducedTemplateSpecializationType( |
5619 | TemplateName Template, QualType DeducedType, bool IsDependent) const { |
5620 | // Look in the folding set for an existing type. |
5621 | void *InsertPos = nullptr; |
5622 | llvm::FoldingSetNodeID ID; |
5623 | DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, |
5624 | IsDependent); |
5625 | if (DeducedTemplateSpecializationType *DTST = |
5626 | DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) |
5627 | return QualType(DTST, 0); |
5628 | |
5629 | auto *DTST = new (*this, TypeAlignment) |
5630 | DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); |
5631 | Types.push_back(DTST); |
5632 | if (InsertPos) |
5633 | DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); |
5634 | return QualType(DTST, 0); |
5635 | } |
5636 | |
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
  Types.push_back(New);
  AtomicTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
5664 | |
5665 | /// getAutoDeductType - Get type pattern for deducing against 'auto'. |
5666 | QualType ASTContext::getAutoDeductType() const { |
5667 | if (AutoDeductTy.isNull()) |
5668 | AutoDeductTy = QualType(new (*this, TypeAlignment) |
5669 | AutoType(QualType(), AutoTypeKeyword::Auto, |
5670 | TypeDependence::None, |
5671 | /*concept*/ nullptr, /*args*/ {}), |
5672 | 0); |
5673 | return AutoDeductTy; |
5674 | } |
5675 | |
/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
QualType ASTContext::getAutoRRefDeductType() const {
  // Lazily built as an rvalue reference to the 'auto' deduction pattern.
  if (AutoRRefDeductTy.isNull())
    AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}
5683 | |
/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
  assert(Decl);
  // FIXME: What is the design on getTagDeclType when it requires casting
  // away const? mutable?
  return getTypeDeclType(const_cast<TagDecl*>(Decl));
}
5692 | |
5693 | /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result |
5694 | /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and |
5695 | /// needs to agree with the definition in <stddef.h>. |
5696 | CanQualType ASTContext::getSizeType() const { |
5697 | return getFromTargetType(Target->getSizeType()); |
5698 | } |
5699 | |
5700 | /// Return the unique signed counterpart of the integer type |
5701 | /// corresponding to size_t. |
5702 | CanQualType ASTContext::getSignedSizeType() const { |
5703 | return getFromTargetType(Target->getSignedSizeType()); |
5704 | } |
5705 | |
5706 | /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). |
5707 | CanQualType ASTContext::getIntMaxType() const { |
5708 | return getFromTargetType(Target->getIntMaxType()); |
5709 | } |
5710 | |
5711 | /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). |
5712 | CanQualType ASTContext::getUIntMaxType() const { |
5713 | return getFromTargetType(Target->getUIntMaxType()); |
5714 | } |
5715 | |
/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // FIXME: derive from "Target" ?
  // NOTE(review): this returns plain wchar_t, presumably because wchar_t is
  // signed on the targets that reach here -- confirm before relying on
  // signedness.
  return WCharTy;
}
5722 | |
/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // FIXME: derive from "Target" ?
  // NOTE(review): returns 'unsigned int' rather than a wchar_t-width type;
  // presumably acceptable on targets where wchar_t is int-sized -- verify
  // for targets with a different wchar_t width.
  return UnsignedIntTy;
}
5729 | |
5730 | QualType ASTContext::getIntPtrType() const { |
5731 | return getFromTargetType(Target->getIntPtrType()); |
5732 | } |
5733 | |
5734 | QualType ASTContext::getUIntPtrType() const { |
5735 | return getCorrespondingUnsignedType(getIntPtrType()); |
5736 | } |
5737 | |
5738 | /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) |
5739 | /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). |
5740 | QualType ASTContext::getPointerDiffType() const { |
5741 | return getFromTargetType(Target->getPtrDiffType(0)); |
5742 | } |
5743 | |
5744 | /// Return the unique unsigned counterpart of "ptrdiff_t" |
5745 | /// integer type. The standard (C11 7.21.6.1p7) refers to this type |
5746 | /// in the definition of %tu format specifier. |
5747 | QualType ASTContext::getUnsignedPointerDiffType() const { |
5748 | return getFromTargetType(Target->getUnsignedPtrDiffType(0)); |
5749 | } |
5750 | |
5751 | /// Return the unique type for "pid_t" defined in |
5752 | /// <sys/types.h>. We need this to compute the correct type for vfork(). |
5753 | QualType ASTContext::getProcessIDType() const { |
5754 | return getFromTargetType(Target->getProcessIDType()); |
5755 | } |
5756 | |
5757 | //===----------------------------------------------------------------------===// |
5758 | // Type Operators |
5759 | //===----------------------------------------------------------------------===// |
5760 | |
5761 | CanQualType ASTContext::getCanonicalParamType(QualType T) const { |
5762 | // Push qualifiers into arrays, and then discard any remaining |
5763 | // qualifiers. |
5764 | T = getCanonicalType(T); |
5765 | T = getVariableArrayDecayedType(T); |
5766 | const Type *Ty = T.getTypePtr(); |
5767 | QualType Result; |
5768 | if (isa<ArrayType>(Ty)) { |
5769 | Result = getArrayDecayedType(QualType(Ty,0)); |
5770 | } else if (isa<FunctionType>(Ty)) { |
5771 | Result = getPointerType(QualType(Ty, 0)); |
5772 | } else { |
5773 | Result = QualType(Ty, 0); |
5774 | } |
5775 | |
5776 | return CanQualType::CreateUnsafe(Result); |
5777 | } |
5778 | |
/// Strip qualifiers from \p type, looking through array types (whose
/// element qualifiers count as qualifiers of the array itself).  The
/// stripped qualifiers are returned through \p quals.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(splitType.Quals);

  // Rebuild the same flavor of array around the unqualified element type,
  // with index-type qualifiers dropped (passed as 0) where applicable.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    return getConstantArrayType(unqualElementType, CAT->getSize(),
                                CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
    return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
    return getVariableArrayType(unqualElementType,
                                VAT->getSizeExpr(),
                                VAT->getSizeModifier(),
                                VAT->getIndexTypeCVRQualifiers(),
                                VAT->getBracketsRange());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(AT);
  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
                                    DSAT->getSizeModifier(), 0,
                                    SourceRange());
}
5834 | |
5835 | /// Attempt to unwrap two types that may both be array types with the same bound |
5836 | /// (or both be array types of unknown bound) for the purpose of comparing the |
5837 | /// cv-decomposition of two types per C++ [conv.qual]. |
5838 | void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { |
5839 | while (true) { |
5840 | auto *AT1 = getAsArrayType(T1); |
5841 | if (!AT1) |
5842 | return; |
5843 | |
5844 | auto *AT2 = getAsArrayType(T2); |
5845 | if (!AT2) |
5846 | return; |
5847 | |
5848 | // If we don't have two array types with the same constant bound nor two |
5849 | // incomplete array types, we've unwrapped everything we can. |
5850 | if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { |
5851 | auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); |
5852 | if (!CAT2 || CAT1->getSize() != CAT2->getSize()) |
5853 | return; |
5854 | } else if (!isa<IncompleteArrayType>(AT1) || |
5855 | !isa<IncompleteArrayType>(AT2)) { |
5856 | return; |
5857 | } |
5858 | |
5859 | T1 = AT1->getElementType(); |
5860 | T2 = AT2->getElementType(); |
5861 | } |
5862 | } |
5863 | |
5864 | /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). |
5865 | /// |
5866 | /// If T1 and T2 are both pointer types of the same kind, or both array types |
5867 | /// with the same bound, unwraps layers from T1 and T2 until a pointer type is |
5868 | /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. |
5869 | /// |
5870 | /// This function will typically be called in a loop that successively |
5871 | /// "unwraps" pointer and pointer-to-member types to compare them at each |
5872 | /// level. |
5873 | /// |
5874 | /// \return \c true if a pointer type was unwrapped, \c false if we reached a |
5875 | /// pair of types that can't be unwrapped further. |
5876 | bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) { |
5877 | UnwrapSimilarArrayTypes(T1, T2); |
5878 | |
5879 | const auto *T1PtrType = T1->getAs<PointerType>(); |
5880 | const auto *T2PtrType = T2->getAs<PointerType>(); |
5881 | if (T1PtrType && T2PtrType) { |
5882 | T1 = T1PtrType->getPointeeType(); |
5883 | T2 = T2PtrType->getPointeeType(); |
5884 | return true; |
5885 | } |
5886 | |
5887 | const auto *T1MPType = T1->getAs<MemberPointerType>(); |
5888 | const auto *T2MPType = T2->getAs<MemberPointerType>(); |
5889 | if (T1MPType && T2MPType && |
5890 | hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), |
5891 | QualType(T2MPType->getClass(), 0))) { |
5892 | T1 = T1MPType->getPointeeType(); |
5893 | T2 = T2MPType->getPointeeType(); |
5894 | return true; |
5895 | } |
5896 | |
5897 | if (getLangOpts().ObjC) { |
5898 | const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); |
5899 | const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); |
5900 | if (T1OPType && T2OPType) { |
5901 | T1 = T1OPType->getPointeeType(); |
5902 | T2 = T2OPType->getPointeeType(); |
5903 | return true; |
5904 | } |
5905 | } |
5906 | |
5907 | // FIXME: Block pointers, too? |
5908 | |
5909 | return false; |
5910 | } |
5911 | |
5912 | bool ASTContext::hasSimilarType(QualType T1, QualType T2) { |
5913 | while (true) { |
5914 | Qualifiers Quals; |
5915 | T1 = getUnqualifiedArrayType(T1, Quals); |
5916 | T2 = getUnqualifiedArrayType(T2, Quals); |
5917 | if (hasSameType(T1, T2)) |
5918 | return true; |
5919 | if (!UnwrapSimilarTypes(T1, T2)) |
5920 | return false; |
5921 | } |
5922 | } |
5923 | |
5924 | bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { |
5925 | while (true) { |
5926 | Qualifiers Quals1, Quals2; |
5927 | T1 = getUnqualifiedArrayType(T1, Quals1); |
5928 | T2 = getUnqualifiedArrayType(T2, Quals2); |
5929 | |
5930 | Quals1.removeCVRQualifiers(); |
5931 | Quals2.removeCVRQualifiers(); |
5932 | if (Quals1 != Quals2) |
5933 | return false; |
5934 | |
5935 | if (hasSameType(T1, T2)) |
5936 | return true; |
5937 | |
5938 | if (!UnwrapSimilarTypes(T1, T2)) |
5939 | return false; |
5940 | } |
5941 | } |
5942 | |
/// Build a DeclarationNameInfo (name plus location) for each flavor of
/// template name.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // All overloads share one declaration name; use the first entry.
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    DeclarationName DName;
    if (DTN->isIdentifier()) {
      DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      // Dependent operator name, e.g. T::operator+.
      DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    // Name after the template template parameter being substituted.
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    // Name after the parameter pack being substituted.
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}
5996 | |
/// Retrieve the canonical form of the given template name, so that two
/// names referring to the same template compare equal.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    // Template template parameters are canonicalized structurally (by
    // position/depth), not by declaration identity.
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    // Dependent template names cache their canonical form.
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    return DTN->CanonicalTemplateName;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // A substitution is canonically just whatever was substituted in.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return getCanonicalTemplateName(subst->getReplacement());
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // Canonicalize both the parameter pack and its argument pack.
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    TemplateTemplateParmDecl *canonParameter
      = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
    TemplateArgument canonArgPack
      = getCanonicalTemplateArgument(subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
  }
  }

  llvm_unreachable("bad template name!");
}
6038 | |
6039 | bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { |
6040 | X = getCanonicalTemplateName(X); |
6041 | Y = getCanonicalTemplateName(Y); |
6042 | return X.getAsVoidPointer() == Y.getAsVoidPointer(); |
6043 | } |
6044 | |
/// Retrieve the canonical form of a template argument, canonicalizing the
/// type, declaration, template name, or pack elements it carries.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    // Expressions are not canonicalized here; profiling handles them.
    return Arg;

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, Arg.getParamTypeForDecl());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                            /*isNullPtr*/true);

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));

  case TemplateArgument::TemplateExpansion:
    // Canonicalize the pattern but preserve the expansion count.
    return TemplateArgument(getCanonicalTemplateName(
                                         Arg.getAsTemplateOrTemplatePattern()),
                            Arg.getNumTemplateExpansions());

  case TemplateArgument::Integral:
    // Keep the value, canonicalize only its type.
    return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(Arg.getAsType()));

  case TemplateArgument::Pack: {
    if (Arg.pack_size() == 0)
      return Arg;

    // Allocate the canonical pack in the ASTContext's arena; it lives as
    // long as the context itself.
    auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
    unsigned Idx = 0;
    for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
                                      AEnd = Arg.pack_end();
         A != AEnd; (void)++A, ++Idx)
      CanonArgs[Idx] = getCanonicalTemplateArgument(*A);

    return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
6095 | |
/// Retrieve the canonical form of a nested-name-specifier, so that
/// equivalent qualifiers (through typedefs, aliases, etc.) compare equal.
NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(*this,
                     getCanonicalNestedNameSpecifier(NNS->getPrefix()),
                                       NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                 NNS->getAsNamespace()->getOriginalNamespace());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                    NNS->getAsNamespaceAlias()->getNamespace()
                                                      ->getOriginalNamespace());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconsititute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(
          *this, DNT->getQualifier(),
          const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
                                         const_cast<Type *>(T));

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T));
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifer are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
6155 | |
/// Return T as an ArrayType, looking through type sugar and pushing any
/// outer qualifiers down onto the element type (C99 6.7.3p8); returns
/// null if T is not canonically an array.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type.  This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way.  If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it.  Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);

  // Rebuild the same flavor of array over the newly-qualified element type.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeExpr(),
                                                CAT->getSizeModifier(),
                                           CAT->getIndexTypeCVRQualifiers()));
  if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                           IAT->getIndexTypeCVRQualifiers()));

  if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(
                     getDependentSizedArrayType(NewEltTy,
                                                DSAT->getSizeExpr(),
                                                DSAT->getSizeModifier(),
                                           DSAT->getIndexTypeCVRQualifiers(),
                                                DSAT->getBracketsRange()));

  const auto *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(getVariableArrayType(NewEltTy,
                                              VAT->getSizeExpr(),
                                              VAT->getSizeModifier(),
                                              VAT->getIndexTypeCVRQualifiers(),
                                              VAT->getBracketsRange()));
}
6213 | |
6214 | QualType ASTContext::getAdjustedParameterType(QualType T) const { |
6215 | if (T->isArrayType() || T->isFunctionType()) |
6216 | return getDecayedType(T); |
6217 | return T; |
6218 | } |
6219 | |
6220 | QualType ASTContext::getSignatureParameterType(QualType T) const { |
6221 | T = getVariableArrayDecayedType(T); |
6222 | T = getAdjustedParameterType(T); |
6223 | return T.getUnqualifiedType(); |
6224 | } |
6225 | |
6226 | QualType ASTContext::getExceptionObjectType(QualType T) const { |
6227 | // C++ [except.throw]p3: |
6228 | // A throw-expression initializes a temporary object, called the exception |
6229 | // object, the type of which is determined by removing any top-level |
6230 | // cv-qualifiers from the static type of the operand of throw and adjusting |
6231 | // the type from "array of T" or "function returning T" to "pointer to T" |
6232 | // or "pointer to function returning T", [...] |
6233 | T = getVariableArrayDecayedType(T); |
6234 | if (T->isArrayType() || T->isFunctionType()) |
6235 | T = getDecayedType(T); |
6236 | return T.getUnqualifiedType(); |
6237 | } |
6238 | |
/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer.  This operation is non-trivial when
/// handling typedefs etc.  The canonical type of "T" must be an array type,
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType ASTContext::getArrayDecayedType(QualType Ty) const {
  // Get the element type with 'getAsArrayType' so that we don't lose any
  // typedefs in the element type of the array.  This also handles propagation
  // of type qualifiers from the array type into the element type if present
  // (C99 6.7.3p8).
  const ArrayType *PrettyArrayType = getAsArrayType(Ty);
  assert(PrettyArrayType && "Not an array type!");

  QualType PtrTy = getPointerType(PrettyArrayType->getElementType());

  // int x[restrict 4] ->  int *restrict
  // (index-type qualifiers move onto the resulting pointer)
  QualType Result = getQualifiedType(PtrTy,
                                     PrettyArrayType->getIndexTypeQualifiers());

  // int x[_Nullable] -> int * _Nullable
  // Preserve a nullability attribute from the array type on the pointer.
  if (auto Nullability = Ty->getNullability(*this)) {
    Result = const_cast<ASTContext *>(this)->getAttributedType(
        AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
  }
  return Result;
}
6266 | |
6267 | QualType ASTContext::getBaseElementType(const ArrayType *array) const { |
6268 | return getBaseElementType(array->getElementType()); |
6269 | } |
6270 | |
6271 | QualType ASTContext::getBaseElementType(QualType type) const { |
6272 | Qualifiers qs; |
6273 | while (true) { |
6274 | SplitQualType split = type.getSplitDesugaredType(); |
6275 | const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); |
6276 | if (!array) break; |
6277 | |
6278 | type = array->getElementType(); |
6279 | qs.addConsistentQualifiers(split.Quals); |
6280 | } |
6281 | |
6282 | return getQualifiedType(type, qs); |
6283 | } |
6284 | |
6285 | /// getConstantArrayElementCount - Returns number of constant array elements. |
6286 | uint64_t |
6287 | ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { |
6288 | uint64_t ElementCount = 1; |
6289 | do { |
6290 | ElementCount *= CA->getSize().getZExtValue(); |
6291 | CA = dyn_cast_or_null<ConstantArrayType>( |
6292 | CA->getElementType()->getAsArrayTypeUnsafe()); |
6293 | } while (CA); |
6294 | return ElementCount; |
6295 | } |
6296 | |
6297 | /// getFloatingRank - Return a relative rank for floating point types. |
6298 | /// This routine will assert if passed a built-in type that isn't a float. |
6299 | static FloatingRank getFloatingRank(QualType T) { |
6300 | if (const auto *CT = T->getAs<ComplexType>()) |
6301 | return getFloatingRank(CT->getElementType()); |
6302 | |
6303 | switch (T->castAs<BuiltinType>()->getKind()) { |
6304 | default: llvm_unreachable("getFloatingRank(): not a floating type")__builtin_unreachable(); |
6305 | case BuiltinType::Float16: return Float16Rank; |
6306 | case BuiltinType::Half: return HalfRank; |
6307 | case BuiltinType::Float: return FloatRank; |
6308 | case BuiltinType::Double: return DoubleRank; |
6309 | case BuiltinType::LongDouble: return LongDoubleRank; |
6310 | case BuiltinType::Float128: return Float128Rank; |
6311 | case BuiltinType::BFloat16: return BFloat16Rank; |
6312 | } |
6313 | } |
6314 | |
/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
/// point or a complex type (based on typeDomain/typeSize).
/// 'typeDomain' is a real floating point or complex type.
/// 'typeSize' is a real floating point or complex type.
QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
                                                       QualType Domain) const {
  FloatingRank EltRank = getFloatingRank(Size);
  if (Domain->isComplexType()) {
    // Complex domain: pick the _Complex type with Size's rank.
    switch (EltRank) {
    case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
    case Float16Rank:
    case HalfRank: llvm_unreachable("Complex half is not supported");
    case FloatRank:      return FloatComplexTy;
    case DoubleRank:     return DoubleComplexTy;
    case LongDoubleRank: return LongDoubleComplexTy;
    case Float128Rank:   return Float128ComplexTy;
    }
  }

  // Real domain: pick the real floating type with Size's rank.
  assert(Domain->isRealFloatingType() && "Unknown domain!");
  switch (EltRank) {
  case Float16Rank: return HalfTy;
  case BFloat16Rank: return BFloat16Ty;
  case HalfRank: return HalfTy;
  case FloatRank:      return FloatTy;
  case DoubleRank:     return DoubleTy;
  case LongDoubleRank: return LongDoubleTy;
  case Float128Rank:   return Float128Ty;
  }
  llvm_unreachable("getFloatingRank(): illegal value for rank");
}
6346 | |
6347 | /// getFloatingTypeOrder - Compare the rank of the two specified floating |
6348 | /// point types, ignoring the domain of the type (i.e. 'double' == |
6349 | /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If |
6350 | /// LHS < RHS, return -1. |
6351 | int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { |
6352 | FloatingRank LHSR = getFloatingRank(LHS); |
6353 | FloatingRank RHSR = getFloatingRank(RHS); |
6354 | |
6355 | if (LHSR == RHSR) |
6356 | return 0; |
6357 | if (LHSR > RHSR) |
6358 | return 1; |
6359 | return -1; |
6360 | } |
6361 | |
6362 | int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { |
6363 | if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) |
6364 | return 0; |
6365 | return getFloatingTypeOrder(LHS, RHS); |
6366 | } |
6367 | |
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
///
/// The rank is encoded as (bit-width << 3) + tie-break, so a wider type
/// always outranks a narrower one, and the low 3 bits order the standard
/// builtin types of equal width per C99 6.3.1.1p1.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");

  // Results in this 'losing' to any type of the same size, but winning if
  // larger.
  if (const auto *EIT = dyn_cast<ExtIntType>(T))
    return 0 + (EIT->getNumBits() << 3);

  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);
  }
}
6405 | |
/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \param E the expression that may reference a bit-field.
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // Dependent expressions cannot be analyzed yet.
  if (E->isTypeDependent() || E->isValueDependent())
    return {};

  // C++ [conv.prom]p5:
  //   If the bit-field has an enumerated type, it is treated as any other
  //   value of that type for promotion purposes.
  if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
    return {};

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.

  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return {};

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue(*this);
  uint64_t IntSize = getTypeSize(IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  // FIXME: C does not permit promotion of an enum bit-field whose rank is
  //        greater than that of 'int'. We perform that promotion to match GCC.
  if (BitWidth < IntSize)
    return IntTy;

  // Exactly int-sized: signed bit-fields promote to int, unsigned to
  // unsigned int.
  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Bit-fields wider than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return {};
}
6462 | |
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(Promotable->isPromotableIntegerType());
  // Enums promote to their declared promotion type.
  if (const auto *ET = Promotable->getAs<EnumType>())
    return ET->getDecl()->getPromotionType();

  if (const auto *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char8 ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(BT);
      // Candidates in the order the standard prescribes.
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
        uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
        // A strictly larger type always fits; an equal-sized type fits
        // only when the signedness matches.
        if (FromSize < ToSize ||
            (FromSize == ToSize &&
             FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
          return PromoteTypes[Idx];
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(Promotable);
  uint64_t IntSize = getIntWidth(IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  // An unsigned type narrower than int promotes to int; an int-sized one
  // promotes to unsigned int.
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
6507 | |
6508 | /// Recurses in pointer/array types until it finds an objc retainable |
6509 | /// type and returns its ownership. |
6510 | Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { |
6511 | while (!T.isNull()) { |
6512 | if (T.getObjCLifetime() != Qualifiers::OCL_None) |
6513 | return T.getObjCLifetime(); |
6514 | if (T->isArrayType()) |
6515 | T = getBaseElementType(T); |
6516 | else if (const auto *PT = T->getAs<PointerType>()) |
6517 | T = PT->getPointeeType(); |
6518 | else if (const auto *RT = T->getAs<ReferenceType>()) |
6519 | T = RT->getPointeeType(); |
6520 | else |
6521 | break; |
6522 | } |
6523 | |
6524 | return Qualifiers::OCL_None; |
6525 | } |
6526 | |
6527 | static const Type *getIntegerTypeForEnum(const EnumType *ET) { |
6528 | // Incomplete enum types are not treated as integer types. |
6529 | // FIXME: In C++, enum types are never integer types. |
6530 | if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) |
6531 | return ET->getDecl()->getIntegerType().getTypePtr(); |
6532 | return nullptr; |
6533 | } |
6534 | |
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1.  If LHS > RHS, return 1.  If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins.  Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins.  Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}
6582 | |
6583 | TypedefDecl *ASTContext::getCFConstantStringDecl() const { |
6584 | if (CFConstantStringTypeDecl) |
6585 | return CFConstantStringTypeDecl; |
6586 | |
6587 | assert(!CFConstantStringTagDecl &&(static_cast<void> (0)) |
6588 | "tag and typedef should be initialized together")(static_cast<void> (0)); |
6589 | CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); |
6590 | CFConstantStringTagDecl->startDefinition(); |
6591 | |
6592 | struct { |
6593 | QualType Type; |
6594 | const char *Name; |
6595 | } Fields[5]; |
6596 | unsigned Count = 0; |
6597 | |
6598 | /// Objective-C ABI |
6599 | /// |
6600 | /// typedef struct __NSConstantString_tag { |
6601 | /// const int *isa; |
6602 | /// int flags; |
6603 | /// const char *str; |
6604 | /// long length; |
6605 | /// } __NSConstantString; |
6606 | /// |
6607 | /// Swift ABI (4.1, 4.2) |
6608 | /// |
6609 | /// typedef struct __NSConstantString_tag { |
6610 | /// uintptr_t _cfisa; |
6611 | /// uintptr_t _swift_rc; |
6612 | /// _Atomic(uint64_t) _cfinfoa; |
6613 | /// const char *_ptr; |
6614 | /// uint32_t _length; |
6615 | /// } __NSConstantString; |
6616 | /// |
6617 | /// Swift ABI (5.0) |
6618 | /// |
6619 | /// typedef struct __NSConstantString_tag { |
6620 | /// uintptr_t _cfisa; |
6621 | /// uintptr_t _swift_rc; |
6622 | /// _Atomic(uint64_t) _cfinfoa; |
6623 | /// const char *_ptr; |
6624 | /// uintptr_t _length; |
6625 | /// } __NSConstantString; |
6626 | |
6627 | const auto CFRuntime = getLangOpts().CFRuntime; |
6628 | if (static_cast<unsigned>(CFRuntime) < |
6629 | static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { |
6630 | Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; |
6631 | Fields[Count++] = { IntTy, "flags" }; |
6632 | Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; |
6633 | Fields[Count++] = { LongTy, "length" }; |
6634 | } else { |
6635 | Fields[Count++] = { getUIntPtrType(), "_cfisa" }; |
6636 | Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; |
6637 | Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; |
6638 | Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; |
6639 | if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || |
6640 | CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) |
6641 | Fields[Count++] = { IntTy, "_ptr" }; |
6642 | else |
6643 | Fields[Count++] = { getUIntPtrType(), "_ptr" }; |
6644 | } |
6645 | |
6646 | // Create fields |
6647 | for (unsigned i = 0; i < Count; ++i) { |
6648 | FieldDecl *Field = |
6649 | FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), |
6650 | SourceLocation(), &Idents.get(Fields[i].Name), |
6651 | Fields[i].Type, /*TInfo=*/nullptr, |
6652 | /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); |
6653 | Field->setAccess(AS_public); |
6654 | CFConstantStringTagDecl->addDecl(Field); |
6655 | } |
6656 | |
6657 | CFConstantStringTagDecl->completeDefinition(); |
6658 | // This type is designed to be compatible with NSConstantString, but cannot |
6659 | // use the same name, since NSConstantString is an interface. |
6660 | auto tagType = getTagDeclType(CFConstantStringTagDecl); |
6661 | CFConstantStringTypeDecl = |
6662 | buildImplicitTypedef(tagType, "__NSConstantString"); |
6663 | |
6664 | return CFConstantStringTypeDecl; |
6665 | } |
6666 | |
6667 | RecordDecl *ASTContext::getCFConstantStringTagDecl() const { |
6668 | if (!CFConstantStringTagDecl) |
6669 | getCFConstantStringDecl(); // Build the tag and the typedef. |
6670 | return CFConstantStringTagDecl; |
6671 | } |
6672 | |
6673 | // getCFConstantStringType - Return the type used for constant CFStrings. |
6674 | QualType ASTContext::getCFConstantStringType() const { |
6675 | return getTypedefType(getCFConstantStringDecl()); |
6676 | } |
6677 | |
6678 | QualType ASTContext::getObjCSuperType() const { |
6679 | if (ObjCSuperType.isNull()) { |
6680 | RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); |
6681 | getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); |
6682 | ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); |
6683 | } |
6684 | return ObjCSuperType; |
6685 | } |
6686 | |
6687 | void ASTContext::setCFConstantStringType(QualType T) { |
6688 | const auto *TD = T->castAs<TypedefType>(); |
6689 | CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); |
6690 | const auto *TagType = |
6691 | CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); |
6692 | CFConstantStringTagDecl = TagType->getDecl(); |
6693 | } |
6694 | |
6695 | QualType ASTContext::getBlockDescriptorType() const { |
6696 | if (BlockDescriptorType) |
6697 | return getTagDeclType(BlockDescriptorType); |
6698 | |
6699 | RecordDecl *RD; |
6700 | // FIXME: Needs the FlagAppleBlock bit. |
6701 | RD = buildImplicitRecord("__block_descriptor"); |
6702 | RD->startDefinition(); |
6703 | |
6704 | QualType FieldTypes[] = { |
6705 | UnsignedLongTy, |
6706 | UnsignedLongTy, |
6707 | }; |
6708 | |
6709 | static const char *const FieldNames[] = { |
6710 | "reserved", |
6711 | "Size" |
6712 | }; |
6713 | |
6714 | for (size_t i = 0; i < 2; ++i) { |
6715 | FieldDecl *Field = FieldDecl::Create( |
6716 | *this, RD, SourceLocation(), SourceLocation(), |
6717 | &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, |
6718 | /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); |
6719 | Field->setAccess(AS_public); |
6720 | RD->addDecl(Field); |
6721 | } |
6722 | |
6723 | RD->completeDefinition(); |
6724 | |
6725 | BlockDescriptorType = RD; |
6726 | |
6727 | return getTagDeclType(BlockDescriptorType); |
6728 | } |
6729 | |
6730 | QualType ASTContext::getBlockDescriptorExtendedType() const { |
6731 | if (BlockDescriptorExtendedType) |
6732 | return getTagDeclType(BlockDescriptorExtendedType); |
6733 | |
6734 | RecordDecl *RD; |
6735 | // FIXME: Needs the FlagAppleBlock bit. |
6736 | RD = buildImplicitRecord("__block_descriptor_withcopydispose"); |
6737 | RD->startDefinition(); |
6738 | |
6739 | QualType FieldTypes[] = { |
6740 | UnsignedLongTy, |
6741 | UnsignedLongTy, |
6742 | getPointerType(VoidPtrTy), |
6743 | getPointerType(VoidPtrTy) |
6744 | }; |
6745 | |
6746 | static const char *const FieldNames[] = { |
6747 | "reserved", |
6748 | "Size", |
6749 | "CopyFuncPtr", |
6750 | "DestroyFuncPtr" |
6751 | }; |
6752 | |
6753 | for (size_t i = 0; i < 4; ++i) { |
6754 | FieldDecl *Field = FieldDecl::Create( |
6755 | *this, RD, SourceLocation(), SourceLocation(), |
6756 | &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, |
6757 | /*BitWidth=*/nullptr, |
6758 | /*Mutable=*/false, ICIS_NoInit); |
6759 | Field->setAccess(AS_public); |
6760 | RD->addDecl(Field); |
6761 | } |
6762 | |
6763 | RD->completeDefinition(); |
6764 | |
6765 | BlockDescriptorExtendedType = RD; |
6766 | return getTagDeclType(BlockDescriptorExtendedType); |
6767 | } |
6768 | |
/// Classify \p T into the OpenCL type-kind category (image, pipe, sampler,
/// event, queue, ...) used for address-space and ABI decisions.
OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
  const auto *BT = dyn_cast<BuiltinType>(T);

  if (!BT) {
    // Pipes are the only non-builtin type with a dedicated OpenCL kind.
    if (isa<PipeType>(T))
      return OCLTK_Pipe;

    return OCLTK_Default;
  }

  switch (BT->getKind()) {
  // Every image builtin from OpenCLImageTypes.def collapses into OCLTK_Image.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id: \
    return OCLTK_Image;
#include "clang/Basic/OpenCLImageTypes.def"

  case BuiltinType::OCLClkEvent:
    return OCLTK_ClkEvent;

  case BuiltinType::OCLEvent:
    return OCLTK_Event;

  case BuiltinType::OCLQueue:
    return OCLTK_Queue;

  case BuiltinType::OCLReserveID:
    return OCLTK_ReserveID;

  case BuiltinType::OCLSampler:
    return OCLTK_Sampler;

  // Any other builtin (ints, floats, ...) is not OpenCL-specific.
  default:
    return OCLTK_Default;
  }
}
6804 | |
6805 | LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { |
6806 | return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); |
6807 | } |
6808 | |
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    // C++ records need helpers unless there is no copy expression and the
    // destructor is trivial.
    const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr();
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  // The block needs copy/destroy helpers if Ty is non-trivial to destructively
  // move or destroy.
  if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
    return true;

  // Only ObjC-retainable types can still need helpers beyond this point.
  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible")__builtin_unreachable();

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // These cases should have been taken care of when checking the type's
      // non-triviality.
      case Qualifiers::OCL_Weak:
      case Qualifiers::OCL_Strong:
        llvm_unreachable("impossible")__builtin_unreachable();
    }
    llvm_unreachable("fell out of lifetime switch!")__builtin_unreachable();
  }
  // No explicit lifetime: retainable pointer-like types need helpers under
  // manual retain/release.
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
6851 | |
6852 | bool ASTContext::getByrefLifetime(QualType Ty, |
6853 | Qualifiers::ObjCLifetime &LifeTime, |
6854 | bool &HasByrefExtendedLayout) const { |
6855 | if (!getLangOpts().ObjC || |
6856 | getLangOpts().getGC() != LangOptions::NonGC) |
6857 | return false; |
6858 | |
6859 | HasByrefExtendedLayout = false; |
6860 | if (Ty->isRecordType()) { |
6861 | HasByrefExtendedLayout = true; |
6862 | LifeTime = Qualifiers::OCL_None; |
6863 | } else if ((LifeTime = Ty.getObjCLifetime())) { |
6864 | // Honor the ARC qualifiers. |
6865 | } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { |
6866 | // The MRR rule. |
6867 | LifeTime = Qualifiers::OCL_ExplicitNone; |
6868 | } else { |
6869 | LifeTime = Qualifiers::OCL_None; |
6870 | } |
6871 | return true; |
6872 | } |
6873 | |
/// Return the canonical unsigned integer type that NSUInteger maps to on the
/// current target (64-bit Windows is LLP64, so 'unsigned long' is only 32
/// bits there).
CanQualType ASTContext::getNSUIntegerType() const {
  assert(Target && "Expected target to be initialized")(static_cast<void> (0));
  const llvm::Triple &T = Target->getTriple();
  // Windows is LLP64 rather than LP64
  if (T.isOSWindows() && T.isArch64Bit())
    return UnsignedLongLongTy;
  return UnsignedLongTy;
}
6882 | |
/// Return the canonical signed integer type that NSInteger maps to on the
/// current target (64-bit Windows is LLP64, so 'long' is only 32 bits there).
CanQualType ASTContext::getNSIntegerType() const {
  assert(Target && "Expected target to be initialized")(static_cast<void> (0));
  const llvm::Triple &T = Target->getTriple();
  // Windows is LLP64 rather than LP64
  if (T.isOSWindows() && T.isArch64Bit())
    return LongLongTy;
  return LongTy;
}
6891 | |
6892 | TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { |
6893 | if (!ObjCInstanceTypeDecl) |
6894 | ObjCInstanceTypeDecl = |
6895 | buildImplicitTypedef(getObjCIdType(), "instancetype"); |
6896 | return ObjCInstanceTypeDecl; |
6897 | } |
6898 | |
6899 | // This returns true if a type has been typedefed to BOOL: |
6900 | // typedef <type> BOOL; |
6901 | static bool isTypeTypedefedAsBOOL(QualType T) { |
6902 | if (const auto *TT = dyn_cast<TypedefType>(T)) |
6903 | if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) |
6904 | return II->isStr("BOOL"); |
6905 | |
6906 | return false; |
6907 | } |
6908 | |
6909 | /// getObjCEncodingTypeSize returns size of type for objective-c encoding |
6910 | /// purpose. |
6911 | CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { |
6912 | if (!type->isIncompleteArrayType() && type->isIncompleteType()) |
6913 | return CharUnits::Zero(); |
6914 | |
6915 | CharUnits sz = getTypeSizeInChars(type); |
6916 | |
6917 | // Make all integer and enum types at least as large as an int |
6918 | if (sz.isPositive() && type->isIntegralOrEnumerationType()) |
6919 | sz = std::max(sz, getTypeSizeInChars(IntTy)); |
6920 | // Treat arrays as pointers, since that's how they're passed in. |
6921 | else if (type->isArrayType()) |
6922 | sz = getTypeSizeInChars(VoidPtrTy); |
6923 | return sz; |
6924 | } |
6925 | |
6926 | bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { |
6927 | return getTargetInfo().getCXXABI().isMicrosoft() && |
6928 | VD->isStaticDataMember() && |
6929 | VD->getType()->isIntegralOrEnumerationType() && |
6930 | !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); |
6931 | } |
6932 | |
6933 | ASTContext::InlineVariableDefinitionKind |
6934 | ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { |
6935 | if (!VD->isInline()) |
6936 | return InlineVariableDefinitionKind::None; |
6937 | |
6938 | // In almost all cases, it's a weak definition. |
6939 | auto *First = VD->getFirstDecl(); |
6940 | if (First->isInlineSpecified() || !First->isStaticDataMember()) |
6941 | return InlineVariableDefinitionKind::Weak; |
6942 | |
6943 | // If there's a file-context declaration in this translation unit, it's a |
6944 | // non-discardable definition. |
6945 | for (auto *D : VD->redecls()) |
6946 | if (D->getLexicalDeclContext()->isFileContext() && |
6947 | !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) |
6948 | return InlineVariableDefinitionKind::Strong; |
6949 | |
6950 | // If we've not seen one yet, we don't know. |
6951 | return InlineVariableDefinitionKind::WeakUnknown; |
6952 | } |
6953 | |
6954 | static std::string charUnitsToString(const CharUnits &CU) { |
6955 | return llvm::itostr(CU.getQuantity()); |
6956 | } |
6957 | |
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration. The layout is: <return-type><frame-size>@?0<param><offset>...
/// where "@?0" is the block pointer itself at offset 0.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;

  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
  QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S,
                                      true /*Extended*/);
  else
    getObjCEncodingForType(BlockReturnTy, S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The block pointer itself occupies the first PtrSize bytes of the frame.
  CharUnits ParmOffset = PtrSize;
  for (auto PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    // Zero-sized (incomplete) parameters contribute nothing to the frame.
    if (sz.isZero())
      continue;
    assert(sz.isPositive() && "BlockExpr - Incomplete param type")(static_cast<void> (0));
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types: each parameter is emitted as <encoding><byte-offset>.
  ParmOffset = PtrSize;
  for (auto PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
                                      S, true /*Extended*/);
    else
      getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}
7014 | |
/// Return the ObjC type encoding for a plain function declaration:
/// <return-type><frame-size> followed by <param><byte-offset> pairs.
std::string
ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
  std::string S;
  // Encode result type.
  getObjCEncodingForType(Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto PI : Decl->parameters()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    // Zero-sized (incomplete) parameters contribute nothing to the frame.
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&(static_cast<void> (0))
           "getObjCEncodingForFunctionDecl - Incomplete param type")(static_cast<void> (0));
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types: each parameter is emitted as <encoding><byte-offset>.
  for (auto PVDecl : Decl->parameters()) {
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}
7053 | |
7054 | /// getObjCEncodingForMethodParameter - Return the encoded type for a single |
7055 | /// method parameter or return type. If Extended, include class names and |
7056 | /// block object types. |
7057 | void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, |
7058 | QualType T, std::string& S, |
7059 | bool Extended) const { |
7060 | // Encode type qualifer, 'in', 'inout', etc. for the parameter. |
7061 | getObjCEncodingForTypeQualifier(QT, S); |
7062 | // Encode parameter type. |
7063 | ObjCEncOptions Options = ObjCEncOptions() |
7064 | .setExpandPointedToStructures() |
7065 | .setExpandStructures() |
7066 | .setIsOutermostType(); |
7067 | if (Extended) |
7068 | Options.setEncodeBlockParameters().setEncodeClassNames(); |
7069 | getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); |
7070 | } |
7071 | |
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration. The layout is <return><frame-size>@0:<ptr-size> followed by
/// <param><byte-offset> pairs; the leading "@0:" covers the implicit self
/// and _cmd arguments.
std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                                     bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  std::string S;
  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
                                    Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    // Zero-sized (incomplete) parameters contribute nothing to the frame.
    if (sz.isZero())
      continue;

    assert(sz.isPositive() &&(static_cast<void> (0))
           "getObjCEncodingForMethodDecl - Incomplete param type")(static_cast<void> (0));
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  S += "@0:";
  S += charUnitsToString(PtrSize);

  // Argument types: each parameter is emitted as <encoding><byte-offset>.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const auto *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
                                      PType, S, Extended);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}
7125 | |
7126 | ObjCPropertyImplDecl * |
7127 | ASTContext::getObjCPropertyImplDeclForPropertyDecl( |
7128 | const ObjCPropertyDecl *PD, |
7129 | const Decl *Container) const { |
7130 | if (!Container) |
7131 | return nullptr; |
7132 | if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { |
7133 | for (auto *PID : CID->property_impls()) |
7134 | if (PID->getPropertyDecl() == PD) |
7135 | return PID; |
7136 | } else { |
7137 | const auto *OID = cast<ObjCImplementationDecl>(Container); |
7138 | for (auto *PID : OID->property_impls()) |
7139 | if (PID->getPropertyDecl() == PD) |
7140 | return PID; |
7141 | } |
7142 | return nullptr; |
7143 | } |
7144 | |
/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
/// kPropertyReadOnly = 'R',   // property is read-only.
/// kPropertyBycopy = 'C',     // property is a copy of the value last assigned
/// kPropertyByref = '&',  // property is a reference to the value last assigned
/// kPropertyDynamic = 'D',    // property is dynamic
/// kPropertyGetter = 'G',     // followed by getter selector name
/// kPropertySetter = 'S',     // followed by setter selector name
/// kPropertyInstanceVariable = 'V'  // followed by instance variable  name
/// kPropertyType = 'T'              // followed by old-style type encoding.
/// kPropertyWeak = 'W'              // 'weak' property
/// kPropertyStrong = 'P'            // property GC'able
/// kPropertyNonAtomic = 'N'         // property non-atomic
/// };
/// @endcode
std::string
ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                           const Decl *Container) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  // If there is an implementation, distinguish @dynamic from @synthesize so
  // the ",D" and ",V<ivar>" attributes can be emitted below.
  if (ObjCPropertyImplDecl *PropertyImpDecl =
      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  std::string S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(PD->getType(), S);

  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
      S += ",W";
  } else {
    // For writable properties the setter semantics determine the ownership
    // attribute.
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy:   S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak:   S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
    S += ",N";

  // Custom getter/setter selectors are recorded by name.
  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // Synthesized properties record their backing instance variable.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
  return S;
}
7237 | |
7238 | /// getLegacyIntegralTypeEncoding - |
7239 | /// Another legacy compatibility encoding: 32-bit longs are encoded as |
7240 | /// 'l' or 'L' , but not always. For typedefs, we need to use |
7241 | /// 'i' or 'I' instead if encoding a struct field, or a pointer! |
7242 | void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { |
7243 | if (isa<TypedefType>(PointeeTy.getTypePtr())) { |
7244 | if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { |
7245 | if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) |
7246 | PointeeTy = UnsignedIntTy; |
7247 | else |
7248 | if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) |
7249 | PointeeTy = IntTy; |
7250 | } |
7251 | } |
7252 | } |
7253 | |
7254 | void ASTContext::getObjCEncodingForType(QualType T, std::string& S, |
7255 | const FieldDecl *Field, |
7256 | QualType *NotEncodedT) const { |
7257 | // We follow the behavior of gcc, expanding structures which are |
7258 | // directly pointed to, and expanding embedded structures. Note that |
7259 | // these rules are sufficient to prevent recursive encoding of the |
7260 | // same type. |
7261 | getObjCEncodingForTypeImpl(T, S, |
7262 | ObjCEncOptions() |
7263 | .setExpandPointedToStructures() |
7264 | .setExpandStructures() |
7265 | .setIsOutermostType(), |
7266 | Field, NotEncodedT); |
7267 | } |
7268 | |
7269 | void ASTContext::getObjCEncodingForPropertyType(QualType T, |
7270 | std::string& S) const { |
7271 | // Encode result type. |
7272 | // GCC has some special rules regarding encoding of properties which |
7273 | // closely resembles encoding of ivars. |
7274 | getObjCEncodingForTypeImpl(T, S, |
7275 | ObjCEncOptions() |
7276 | .setExpandPointedToStructures() |
7277 | .setExpandStructures() |
7278 | .setIsOutermostType() |
7279 | .setEncodingProperty(), |
7280 | /*Field=*/nullptr); |
7281 | } |
7282 | |
/// Map a builtin type to its single-character ObjC @encode letter. Returns
/// ' ' for types that have no encoding yet; reports an error for scalable
/// vector types; unreachable for types that can never appear in @encode.
static char getObjCEncodingForPrimitiveType(const ASTContext *C,
                                            const BuiltinType *BT) {
    BuiltinType::Kind kind = BT->getKind();
    switch (kind) {
    case BuiltinType::Void:       return 'v';
    case BuiltinType::Bool:       return 'B';
    case BuiltinType::Char8:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:      return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort:     return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt:       return 'I';
    // 'L'/'l' are reserved for 32-bit long; a 64-bit long encodes as
    // (unsigned) long long instead.
    case BuiltinType::ULong:
        return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128:    return 'T';
    case BuiltinType::ULongLong:  return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar:      return 'c';
    case BuiltinType::Short:      return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int:        return 'i';
    case BuiltinType::Long:
        return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong:   return 'q';
    case BuiltinType::Int128:     return 't';
    case BuiltinType::Float:      return 'f';
    case BuiltinType::Double:     return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr:    return '*'; // like char*

    case BuiltinType::BFloat16:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Half:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      // FIXME: potentially need @encodes for these!
      return ' ';

    // Scalable vector types (SVE, RVV) cannot be encoded; emit a custom
    // diagnostic rather than silently producing a bogus string.
#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        DiagnosticsEngine &Diags = C->getDiagnostics();
        unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                                "cannot yet @encode type %0");
        Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy());
        return ' ';
      }

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type")__builtin_unreachable();

    // OpenCL and placeholder types don't need @encodings.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
    case BuiltinType::OCLSampler:
    case BuiltinType::Dependent:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
    case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("invalid builtin type for @encode")__builtin_unreachable();
    }
    llvm_unreachable("invalid BuiltinType::Kind value")__builtin_unreachable();
}
7388 | |
7389 | static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { |
7390 | EnumDecl *Enum = ET->getDecl(); |
7391 | |
7392 | // The encoding of an non-fixed enum type is always 'i', regardless of size. |
7393 | if (!Enum->isFixed()) |
7394 | return 'i'; |
7395 | |
7396 | // The encoding of a fixed enum type matches its fixed underlying type. |
7397 | const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); |
7398 | return getObjCEncodingForPrimitiveType(C, BT); |
7399 | } |
7400 | |
/// Append the @encode string for a bit-field member of \p T to \p S. The
/// exact format is runtime-dependent (see below).
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl")(static_cast<void> (0));
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits.  For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime.  The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    uint64_t Offset;

    // ObjC ivars and plain record fields store their bit offsets in
    // different places.
    if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
      Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr,
                                         IVD);
    } else {
      const RecordDecl *RD = FD->getParent();
      const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
      Offset = RL.getFieldOffset(FD->getFieldIndex());
    }

    S += llvm::utostr(Offset);

    // Emit the underlying type: enums use the enum rule, anything else must
    // be a builtin.
    if (const auto *ET = T->getAs<EnumType>())
      S += ObjCEncodingForEnumType(Ctx, ET);
    else {
      const auto *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveType(Ctx, BT);
    }
  }
  // Both runtimes end with the width of the bit-field in bits.
  S += llvm::utostr(FD->getBitWidthValue(*Ctx));
}
7443 | |
7444 | // Helper function for determining whether the encoded type string would include |
7445 | // a template specialization type. |
7446 | static bool hasTemplateSpecializationInEncodedString(const Type *T, |
7447 | bool VisitBasesAndFields) { |
7448 | T = T->getBaseElementTypeUnsafe(); |
7449 | |
7450 | if (auto *PT = T->getAs<PointerType>()) |
7451 | return hasTemplateSpecializationInEncodedString( |
7452 | PT->getPointeeType().getTypePtr(), false); |
7453 | |
7454 | auto *CXXRD = T->getAsCXXRecordDecl(); |
7455 | |
7456 | if (!CXXRD) |
7457 | return false; |
7458 | |
7459 | if (isa<ClassTemplateSpecializationDecl>(CXXRD)) |
7460 | return true; |
7461 | |
7462 | if (!CXXRD->hasDefinition() || !VisitBasesAndFields) |
7463 | return false; |
7464 | |
7465 | for (auto B : CXXRD->bases()) |
7466 | if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), |
7467 | true)) |
7468 | return true; |
7469 | |
7470 | for (auto *FD : CXXRD->fields()) |
7471 | if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), |
7472 | true)) |
7473 | return true; |
7474 | |
7475 | return false; |
7476 | } |
7477 | |
// FIXME: Use SmallString for accumulating string.
/// Core worker for \@encode: append the legacy GCC-compatible encoding of
/// \p T to \p S.
///
/// \param Options context flags (outermost type, struct/field expansion,
///        property and block-parameter encoding) that alter what is emitted.
/// \param FD the field being encoded when encoding an ivar or struct member;
///        a non-null \p FD also enables quoted member/class names.
/// \param NotEncodedT if non-null, receives the first subtype encountered
///        that has no encoding (vectors, matrices, member pointers).
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
                                            const ObjCEncOptions Options,
                                            const FieldDecl *FD,
                                            QualType *NotEncodedT) const {
  CanQualType CT = getCanonicalType(T);
  switch (CT->getTypeClass()) {
  case Type::Builtin:
  case Type::Enum:
    if (FD && FD->isBitField())
      return EncodeBitField(this, S, T, FD);
    if (const auto *BT = dyn_cast<BuiltinType>(CT))
      S += getObjCEncodingForPrimitiveType(this, BT);
    else
      S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
    return;

  case Type::Complex:
    // '_Complex T' encodes as 'j' followed by the element type.
    S += 'j';
    getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S,
                               ObjCEncOptions(),
                               /*Field=*/nullptr);
    return;

  case Type::Atomic:
    // '_Atomic T' encodes as 'A' followed by the value type.
    S += 'A';
    getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S,
                               ObjCEncOptions(),
                               /*Field=*/nullptr);
    return;

  // encoding for pointer or reference types.
  case Type::Pointer:
  case Type::LValueReference:
  case Type::RValueReference: {
    QualType PointeeTy;
    if (isa<PointerType>(CT)) {
      const auto *PT = T->castAs<PointerType>();
      if (PT->isObjCSelType()) {
        // 'SEL *' (really the SEL type) encodes as ':'.
        S += ':';
        return;
      }
      PointeeTy = PT->getPointeeType();
    } else {
      // References encode like pointers to their referent.
      PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
    }

    bool isReadOnly = false;
    // For historical/compatibility reasons, the read-only qualifier of the
    // pointee gets emitted _before_ the '^'. The read-only qualifier of
    // the pointer itself gets ignored, _unless_ we are looking at a typedef!
    // Also, do not emit the 'r' for anything but the outermost type!
    if (isa<TypedefType>(T.getTypePtr())) {
      if (Options.IsOutermostType() && T.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    } else if (Options.IsOutermostType()) {
      // Look through nested pointers to the ultimate pointee and honor its
      // const qualifier.
      QualType P = PointeeTy;
      while (auto PT = P->getAs<PointerType>())
        P = PT->getPointeeType();
      if (P.isConstQualified()) {
        isReadOnly = true;
        S += 'r';
      }
    }
    if (isReadOnly) {
      // Another legacy compatibility encoding. Some ObjC qualifier and type
      // combinations need to be rearranged.
      // Rewrite "in const" from "nr" to "rn"
      if (StringRef(S).endswith("nr"))
        S.replace(S.end()-2, S.end(), "rn");
    }

    if (PointeeTy->isCharType()) {
      // char pointer types should be encoded as '*' unless it is a
      // type that has been typedef'd to 'BOOL'.
      if (!isTypeTypedefedAsBOOL(PointeeTy)) {
        S += '*';
        return;
      }
    } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
      // GCC binary compat: Need to convert "struct objc_class *" to "#".
      if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
        S += '#';
        return;
      }
      // GCC binary compat: Need to convert "struct objc_object *" to "@".
      if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
        S += '@';
        return;
      }
      // If the encoded string for the class includes template names, just emit
      // "^v" for pointers to the class.
      if (getLangOpts().CPlusPlus &&
          (!getLangOpts().EncodeCXXClassTemplateSpec &&
           hasTemplateSpecializationInEncodedString(
               RTy, Options.ExpandPointedToStructures()))) {
        S += "^v";
        return;
      }
      // fall through...
    }
    S += '^';
    // Note: may rewrite PointeeTy to its legacy integral spelling.
    getLegacyIntegralTypeEncoding(PointeeTy);

    // Only the struct-expansion request survives through a pointer.
    ObjCEncOptions NewOptions;
    if (Options.ExpandPointedToStructures())
      NewOptions.setExpandStructures();
    getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions,
                               /*Field=*/nullptr, NotEncodedT);
    return;
  }

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray: {
    const auto *AT = cast<ArrayType>(CT);

    if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) {
      // Incomplete arrays are encoded as a pointer to the array element.
      S += '^';

      getObjCEncodingForTypeImpl(
          AT->getElementType(), S,
          Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD);
    } else {
      S += '[';

      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        S += llvm::utostr(CAT->getSize().getZExtValue());
      else {
        //Variable length arrays are encoded as a regular array with 0 elements.
        assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
               "Unknown array type!");
        S += '0';
      }

      getObjCEncodingForTypeImpl(
          AT->getElementType(), S,
          Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD,
          NotEncodedT);
      S += ']';
    }
    return;
  }

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Function types have no encoding; '?' marks an unknown type.
    S += '?';
    return;

  case Type::Record: {
    RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
    S += RDecl->isUnion() ? '(' : '{';
    // Anonymous structures print as '?'
    if (const IdentifierInfo *II = RDecl->getIdentifier()) {
      S += II->getName();
      if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
        // Append "<Arg1, Arg2, ...>" for template specializations.
        const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
        llvm::raw_string_ostream OS(S);
        printTemplateArgumentList(OS, TemplateArgs.asArray(),
                                  getPrintingPolicy());
      }
    } else {
      S += '?';
    }
    if (Options.ExpandStructures()) {
      S += '=';
      if (!RDecl->isUnion()) {
        getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
      } else {
        // Unions are expanded member by member; all members share offset 0.
        for (const auto *Field : RDecl->fields()) {
          if (FD) {
            S += '"';
            S += Field->getNameAsString();
            S += '"';
          }

          // Special case bit-fields.
          if (Field->isBitField()) {
            getObjCEncodingForTypeImpl(Field->getType(), S,
                                       ObjCEncOptions().setExpandStructures(),
                                       Field);
          } else {
            QualType qt = Field->getType();
            getLegacyIntegralTypeEncoding(qt);
            getObjCEncodingForTypeImpl(
                qt, S,
                ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
                NotEncodedT);
          }
        }
      }
    }
    S += RDecl->isUnion() ? ')' : '}';
    return;
  }

  case Type::BlockPointer: {
    const auto *BT = T->castAs<BlockPointerType>();
    S += "@?"; // Unlike a pointer-to-function, which is "^?".
    if (Options.EncodeBlockParameters()) {
      const auto *FT = BT->getPointeeType()->castAs<FunctionType>();

      S += '<';
      // Block return type
      getObjCEncodingForTypeImpl(FT->getReturnType(), S,
                                 Options.forComponentType(), FD, NotEncodedT);
      // Block self
      S += "@?";
      // Block parameters
      if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
        for (const auto &I : FPT->param_types())
          getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD,
                                     NotEncodedT);
      }
      S += '>';
    }
    return;
  }

  case Type::ObjCObject: {
    // hack to match legacy encoding of *id and *Class
    QualType Ty = getObjCObjectPointerType(CT);
    if (Ty->isObjCIdType()) {
      S += "{objc_object=}";
      return;
    }
    else if (Ty->isObjCClassType()) {
      S += "{objc_class=}";
      return;
    }
    // TODO: Double check to make sure this intentionally falls through.
    LLVM_FALLTHROUGH;
  }

  case Type::ObjCInterface: {
    // Ignore protocol qualifiers when mangling at this level.
    // @encode(class_name)
    ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
    S += '{';
    S += OI->getObjCRuntimeNameAsString();
    if (Options.ExpandStructures()) {
      S += '=';
      // Collect the full (flattened) ivar list, including inherited ivars.
      SmallVector<const ObjCIvarDecl*, 32> Ivars;
      DeepCollectObjCIvars(OI, true, Ivars);
      for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
        const FieldDecl *Field = Ivars[i];
        if (Field->isBitField())
          getObjCEncodingForTypeImpl(Field->getType(), S,
                                     ObjCEncOptions().setExpandStructures(),
                                     Field);
        else
          getObjCEncodingForTypeImpl(Field->getType(), S,
                                     ObjCEncOptions().setExpandStructures(), FD,
                                     NotEncodedT);
      }
    }
    S += '}';
    return;
  }

  case Type::ObjCObjectPointer: {
    const auto *OPT = T->castAs<ObjCObjectPointerType>();
    if (OPT->isObjCIdType()) {
      S += '@';
      return;
    }

    if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
      // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
      // Since this is a binary compatibility issue, need to consult with
      // runtime folks. Fortunately, this is a *very* obscure construct.
      S += '#';
      return;
    }

    if (OPT->isObjCQualifiedIdType()) {
      // 'id<P...>' encodes like 'id', optionally followed by the quoted
      // protocol list.
      getObjCEncodingForTypeImpl(
          getObjCIdType(), S,
          Options.keepingOnly(ObjCEncOptions()
                                  .setExpandPointedToStructures()
                                  .setExpandStructures()),
          FD);
      if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
        // Note that we do extended encoding of protocol qualifer list
        // Only when doing ivar or property encoding.
        S += '"';
        for (const auto *I : OPT->quals()) {
          S += '<';
          S += I->getObjCRuntimeNameAsString();
          S += '>';
        }
        S += '"';
      }
      return;
    }

    S += '@';
    if (OPT->getInterfaceDecl() &&
        (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
      // Append the quoted class name (and any protocol qualifiers).
      S += '"';
      S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
      for (const auto *I : OPT->quals()) {
        S += '<';
        S += I->getObjCRuntimeNameAsString();
        S += '>';
      }
      S += '"';
    }
    return;
  }

  // gcc just blithely ignores member pointers.
  // FIXME: we should do better than that. 'M' is available.
  case Type::MemberPointer:
  // This matches gcc's encoding, even though technically it is insufficient.
  //FIXME. We should do a better job than gcc.
  case Type::Vector:
  case Type::ExtVector:
    // Until we have a coherent encoding of these three types, issue warning.
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  case Type::ConstantMatrix:
    if (NotEncodedT)
      *NotEncodedT = T;
    return;

  // We could see an undeduced auto type here during error recovery.
  // Just ignore it.
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    return;

  case Type::Pipe:
  case Type::ExtInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
  case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
  case Type::KIND:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("@encode for dependent type!");
  }
  llvm_unreachable("bad type kind!");
}
7830 | |
/// Encode the members of a structure for \@encode, emitting bases and fields
/// sorted by their bit offset.  The caller emits the surrounding '{name='
/// and '}' delimiters.
///
/// \param FD when non-null, member names are emitted as quoted strings.
/// \param includeVBases whether to append virtual bases after the
///        non-virtual portion (false when encoding a base-class subobject).
/// \param NotEncodedT if non-null, receives the first member type that has
///        no encoding.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Nothing to encode for forward declarations or invalid definitions.
  if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
    return;

  const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
  // Members keyed by bit offset; a multimap because zero-size members can
  // share an offset.  Insertion with upper_bound keeps declaration order
  // stable among equal offsets.
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(RDecl);

  if (CXXRec) {
    // Non-virtual, non-empty bases participate at their base-class offset.
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(layout.getBaseClassOffset(base));
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                                  std::make_pair(offs, base));
      }
    }
  }

  unsigned i = 0;
  for (FieldDecl *Field : RDecl->fields()) {
    // Zero-length bit-fields are kept (they affect the encoding); other
    // zero-size fields are skipped.
    if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this))
      continue;
    uint64_t offs = layout.getFieldOffset(i);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, Field));
    ++i;
  }

  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(layout.getVBaseClassOffset(base));
      // Only add the vbase if it lands past the non-virtual portion and no
      // member was already recorded at that offset.
      if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
                                  std::make_pair(offs, base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  // In asserts builds, track the running bit offset to validate the layout.
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    // The record starts with a vtable pointer; encode it as "^^?".
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(size);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      const auto *field = cast<FieldDecl>(dcl);
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }

      if (field->isBitField()) {
        EncodeBitField(this, S, field->getType(), field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue(*this);
#endif
      } else {
        QualType qt = field->getType();
        // May rewrite qt to its legacy integral spelling first.
        getLegacyIntegralTypeEncoding(qt);
        getObjCEncodingForTypeImpl(
            qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(),
            FD, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
7973 | |
7974 | void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, |
7975 | std::string& S) const { |
7976 | if (QT & Decl::OBJC_TQ_In) |
7977 | S += 'n'; |
7978 | if (QT & Decl::OBJC_TQ_Inout) |
7979 | S += 'N'; |
7980 | if (QT & Decl::OBJC_TQ_Out) |
7981 | S += 'o'; |
7982 | if (QT & Decl::OBJC_TQ_Bycopy) |
7983 | S += 'O'; |
7984 | if (QT & Decl::OBJC_TQ_Byref) |
7985 | S += 'R'; |
7986 | if (QT & Decl::OBJC_TQ_Oneway) |
7987 | S += 'V'; |
7988 | } |
7989 | |
7990 | TypedefDecl *ASTContext::getObjCIdDecl() const { |
7991 | if (!ObjCIdDecl) { |
7992 | QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); |
7993 | T = getObjCObjectPointerType(T); |
7994 | ObjCIdDecl = buildImplicitTypedef(T, "id"); |
7995 | } |
7996 | return ObjCIdDecl; |
7997 | } |
7998 | |
7999 | TypedefDecl *ASTContext::getObjCSelDecl() const { |
8000 | if (!ObjCSelDecl) { |
8001 | QualType T = getPointerType(ObjCBuiltinSelTy); |
8002 | ObjCSelDecl = buildImplicitTypedef(T, "SEL"); |
8003 | } |
8004 | return ObjCSelDecl; |
8005 | } |
8006 | |
8007 | TypedefDecl *ASTContext::getObjCClassDecl() const { |
8008 | if (!ObjCClassDecl) { |
8009 | QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); |
8010 | T = getObjCObjectPointerType(T); |
8011 | ObjCClassDecl = buildImplicitTypedef(T, "Class"); |
8012 | } |
8013 | return ObjCClassDecl; |
8014 | } |
8015 | |
8016 | ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { |
8017 | if (!ObjCProtocolClassDecl) { |
8018 | ObjCProtocolClassDecl |
8019 | = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), |
8020 | SourceLocation(), |
8021 | &Idents.get("Protocol"), |
8022 | /*typeParamList=*/nullptr, |
8023 | /*PrevDecl=*/nullptr, |
8024 | SourceLocation(), true); |
8025 | } |
8026 | |
8027 | return ObjCProtocolClassDecl; |
8028 | } |
8029 | |
8030 | //===----------------------------------------------------------------------===// |
8031 | // __builtin_va_list Construction Functions |
8032 | //===----------------------------------------------------------------------===// |
8033 | |
8034 | static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, |
8035 | StringRef Name) { |
8036 | // typedef char* __builtin[_ms]_va_list; |
8037 | QualType T = Context->getPointerType(Context->CharTy); |
8038 | return Context->buildImplicitTypedef(T, Name); |
8039 | } |
8040 | |
8041 | static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { |
8042 | return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); |
8043 | } |
8044 | |
8045 | static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { |
8046 | return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); |
8047 | } |
8048 | |
8049 | static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { |
8050 | // typedef void* __builtin_va_list; |
8051 | QualType T = Context->getPointerType(Context->VoidTy); |
8052 | return Context->buildImplicitTypedef(T, "__builtin_va_list"); |
8053 | } |
8054 | |
8055 | static TypedefDecl * |
8056 | CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { |
8057 | RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); |
8058 | // namespace std { struct __va_list { |
8059 | // Note that we create the namespace even in C. This is intentional so that |
8060 | // the type is consistent between C and C++, which is important in cases where |
8061 | // the types need to match between translation units (e.g. with |
8062 | // -fsanitize=cfi-icall). Ideally we wouldn't have created this namespace at |
8063 | // all, but it's now part of the ABI (e.g. in mangled names), so we can't |
8064 | // change it. |
8065 | auto *NS = NamespaceDecl::Create( |
8066 | const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), |
8067 | /*Inline*/ false, SourceLocation(), SourceLocation(), |
8068 | &Context->Idents.get("std"), |
8069 | /*PrevDecl*/ nullptr); |
8070 | NS->setImplicit(); |
8071 | VaListTagDecl->setDeclContext(NS); |
8072 | |
8073 | VaListTagDecl->startDefinition(); |
8074 | |
8075 | const size_t NumFields = 5; |
8076 | QualType FieldTypes[NumFields]; |
8077 | const char *FieldNames[NumFields]; |
8078 | |
8079 | // void *__stack; |
8080 | FieldTypes[0] = Context->getPointerType(Context->VoidTy); |
8081 | FieldNames[0] = "__stack"; |
8082 | |
8083 | // void *__gr_top; |
8084 | FieldTypes[1] = Context->getPointerType(Context->VoidTy); |
8085 | FieldNames[1] = "__gr_top"; |
8086 | |
8087 | // void *__vr_top; |
8088 | FieldTypes[2] = Context->getPointerType(Context->VoidTy); |
8089 | FieldNames[2] = "__vr_top"; |
8090 | |
8091 | // int __gr_offs; |
8092 | FieldTypes[3] = Context->IntTy; |
8093 | FieldNames[3] = "__gr_offs"; |
8094 | |
8095 | // int __vr_offs; |
8096 | FieldTypes[4] = Context->IntTy; |
8097 | FieldNames[4] = "__vr_offs"; |
8098 | |
8099 | // Create fields |
8100 | for (unsigned i = 0; i < NumFields; ++i) { |
8101 | FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), |
8102 | VaListTagDecl, |
8103 | SourceLocation(), |
8104 | SourceLocation(), |
8105 | &Context->Idents.get(FieldNames[i]), |
8106 | FieldTypes[i], /*TInfo=*/nullptr, |
8107 | /*BitWidth=*/nullptr, |
8108 | /*Mutable=*/false, |
8109 | ICIS_NoInit); |
8110 | Field->setAccess(AS_public); |
8111 | VaListTagDecl->addDecl(Field); |
8112 | } |
8113 | VaListTagDecl->completeDefinition(); |
8114 | Context->VaListTagDecl = VaListTagDecl; |
8115 | QualType VaListTagType = Context->getRecordType(VaListTagDecl); |
8116 | |
8117 | // } __builtin_va_list; |
8118 | return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); |
8119 | } |
8120 | |
8121 | static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { |
8122 | // typedef struct __va_list_tag { |
8123 | RecordDecl *VaListTagDecl; |
8124 | |
8125 | VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); |
8126 | VaListTagDecl->startDefinition(); |
8127 | |
8128 | const size_t NumFields = 5; |
8129 | QualType FieldTypes[NumFields]; |
8130 | const char *FieldNames[NumFields]; |
8131 | |
8132 | // unsigned char gpr; |
8133 | FieldTypes[0] = Context->UnsignedCharTy; |
8134 | FieldNames[0] = "gpr"; |
8135 | |
8136 | // unsigned char fpr; |
8137 | FieldTypes[1] = Context->UnsignedCharTy; |
8138 | FieldNames[1] = "fpr"; |
8139 | |
8140 | // unsigned short reserved; |
8141 | FieldTypes[2] = Context->UnsignedShortTy; |
8142 | FieldNames[2] = "reserved"; |
8143 | |
8144 | // void* overflow_arg_area; |
8145 | FieldTypes[3] = Context->getPointerType(Context->VoidTy); |
8146 | FieldNames[3] = "overflow_arg_area"; |
8147 | |
8148 | // void* reg_save_area; |
8149 | FieldTypes[4] = Context->getPointerType(Context->VoidTy); |
8150 | FieldNames[4] = "reg_save_area"; |
8151 | |
8152 | // Create fields |
8153 | for (unsigned i = 0; i < NumFields; ++i) { |
8154 | FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, |
8155 | SourceLocation(), |
8156 | SourceLocation(), |
8157 | &Context->Idents.get(FieldNames[i]), |
8158 | FieldTypes[i], /*TInfo=*/nullptr, |
8159 | /*BitWidth=*/nullptr, |
8160 | /*Mutable=*/false, |
8161 | ICIS_NoInit); |
8162 | Field->setAccess(AS_public); |
8163 | VaListTagDecl->addDecl(Field); |
8164 | } |
8165 | VaListTagDecl->completeDefinition(); |
8166 | Context->VaListTagDecl = VaListTagDecl; |
8167 | QualType VaListTagType = Context->getRecordType(VaListTagDecl); |
8168 | |
8169 | // } __va_list_tag; |
8170 | TypedefDecl *VaListTagTypedefDecl = |
8171 | Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); |
8172 | |
8173 | QualType VaListTagTypedefType = |
8174 | Context->getTypedefType(VaListTagTypedefDecl); |
8175 | |
8176 | // typedef __va_list_tag __builtin_va_list[1]; |
8177 | llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); |
8178 | QualType VaListTagArrayType |
8179 | = Context->getConstantArrayType(VaListTagTypedefType, |
8180 | Size, nullptr, ArrayType::Normal, 0); |
8181 | return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); |
8182 | } |
8183 | |
8184 | static TypedefDecl * |
8185 | CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { |
8186 | // struct __va_list_tag { |
8187 | RecordDecl *VaListTagDecl; |
8188 | VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); |
8189 | VaListTagDecl->startDefinition(); |
8190 | |
8191 | const size_t NumFields = 4; |
8192 | QualType FieldTypes[NumFields]; |
8193 | const char *FieldNames[NumFields]; |
8194 | |
8195 | // unsigned gp_offset; |
8196 | FieldTypes[0] = Context->UnsignedIntTy; |
8197 | FieldNames[0] = "gp_offset"; |
8198 | |
8199 | // unsigned fp_offset; |
8200 | FieldTypes[1] = Context->UnsignedIntTy; |
8201 | FieldNames[1] = "fp_offset"; |
8202 | |
8203 | // void* overflow_arg_area; |
8204 | FieldTypes[2] = Context->getPointerType(Context->VoidTy); |
8205 | FieldNames[2] = "overflow_arg_area"; |
8206 | |
8207 | // void* reg_save_area; |
8208 | FieldTypes[3] = Context->getPointerType(Context->VoidTy); |
8209 | FieldNames[3] = "reg_save_area"; |
8210 | |
8211 | // Create fields |
8212 | for (unsigned i = 0; i < NumFields; ++i) { |
8213 | FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), |
8214 | VaListTagDecl, |
8215 | SourceLocation(), |
8216 | SourceLocation(), |
8217 | &Context->Idents.get(FieldNames[i]), |
8218 | FieldTypes[i], /*TInfo=*/nullptr, |
8219 | /*BitWidth=*/nullptr, |
8220 | /*Mutable=*/false, |
8221 | ICIS_NoInit); |
8222 | Field->setAccess(AS_public); |
8223 | VaListTagDecl->addDecl(Field); |
8224 | } |
8225 | VaListTagDecl->completeDefinition(); |
8226 | Context->VaListTagDecl = VaListTagDecl; |
8227 | QualType VaListTagType = Context->getRecordType(VaListTagDecl); |
8228 | |
8229 | // }; |
8230 | |
8231 | // typedef struct __va_list_tag __builtin_va_list[1]; |
8232 | llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); |
8233 | QualType VaListTagArrayType = Context->getConstantArrayType( |
8234 | VaListTagType, Size, nullptr, ArrayType::Normal, 0); |
8235 | return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); |
8236 | } |
8237 | |
8238 | static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { |
8239 | // typedef int __builtin_va_list[4]; |
8240 | llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); |
8241 | QualType IntArrayType = Context->getConstantArrayType( |
8242 | Context->IntTy, Size, nullptr, ArrayType::Normal, 0); |
8243 | return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); |
8244 | } |
8245 | |
8246 | static TypedefDecl * |
8247 | CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { |
8248 | // struct __va_list |
8249 | RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); |
8250 | if (Context->getLangOpts().CPlusPlus) { |
8251 | // namespace std { struct __va_list { |
8252 | NamespaceDecl *NS; |
8253 | NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), |
8254 | Context->getTranslationUnitDecl(), |
8255 | /*Inline*/false, SourceLocation(), |
8256 | SourceLocation(), &Context->Idents.get("std"), |
8257 | /*PrevDecl*/ nullptr); |
8258 | NS->setImplicit(); |
8259 | VaListDecl->setDeclContext(NS); |
8260 | } |
8261 | |
8262 | VaListDecl->startDefinition(); |
8263 | |
8264 | // void * __ap; |
8265 | FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), |
8266 | VaListDecl, |
8267 | SourceLocation(), |
8268 | SourceLocation(), |
8269 | &Context->Idents.get("__ap"), |
8270 | Context->getPointerType(Context->VoidTy), |
8271 | /*TInfo=*/nullptr, |
8272 | /*BitWidth=*/nullptr, |
8273 | /*Mutable=*/false, |
8274 | ICIS_NoInit); |
8275 | Field->setAccess(AS_public); |
8276 | VaListDecl->addDecl(Field); |
8277 | |
8278 | // }; |
8279 | VaListDecl->completeDefinition(); |
8280 | Context->VaListTagDecl = VaListDecl; |
8281 | |
8282 | // typedef struct __va_list __builtin_va_list; |
8283 | QualType T = Context->getRecordType(VaListDecl); |
8284 | return Context->buildImplicitTypedef(T, "__builtin_va_list"); |
8285 | } |
8286 | |
8287 | static TypedefDecl * |
8288 | CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { |
8289 | // struct __va_list_tag { |
8290 | RecordDecl *VaListTagDecl; |
8291 | VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); |
8292 | VaListTagDecl->startDefinition(); |
8293 | |
8294 | const size_t NumFields = 4; |
8295 | QualType FieldTypes[NumFields]; |
8296 | const char *FieldNames[NumFields]; |
8297 | |
8298 | // long __gpr; |
8299 | FieldTypes[0] = Context->LongTy; |
8300 | FieldNames[0] = "__gpr"; |
8301 | |
8302 | // long __fpr; |
8303 | FieldTypes[1] = Context->LongTy; |
8304 | FieldNames[1] = "__fpr"; |
8305 | |
8306 | // void *__overflow_arg_area; |
8307 | FieldTypes[2] = Context->getPointerType(Context->VoidTy); |
8308 | FieldNames[2] = "__overflow_arg_area"; |
8309 | |
8310 | // void *__reg_save_area; |
8311 | FieldTypes[3] = Context->getPointerType(Context->VoidTy); |
8312 | FieldNames[3] = "__reg_save_area"; |
8313 | |
8314 | // Create fields |
8315 | for (unsigned i = 0; i < NumFields; ++i) { |
8316 | FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), |
8317 | VaListTagDecl, |
8318 | SourceLocation(), |
8319 | SourceLocation(), |
8320 | &Context->Idents.get(FieldNames[i]), |
8321 | FieldTypes[i], /*TInfo=*/nullptr, |
8322 | /*BitWidth=*/nullptr, |
8323 | /*Mutable=*/false, |
8324 | ICIS_NoInit); |
8325 | Field->setAccess(AS_public); |
8326 | VaListTagDecl->addDecl(Field); |
8327 | } |
8328 | VaListTagDecl->completeDefinition(); |
8329 | Context->VaListTagDecl = VaListTagDecl; |
8330 | QualType VaListTagType = Context->getRecordType(VaListTagDecl); |
8331 | |
8332 | // }; |
8333 | |
8334 | // typedef __va_list_tag __builtin_va_list[1]; |
8335 | llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); |
8336 | QualType VaListTagArrayType = Context->getConstantArrayType( |
8337 | VaListTagType, Size, nullptr, ArrayType::Normal, 0); |
8338 | |
8339 | return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); |
8340 | } |
8341 | |
8342 | static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { |
8343 | // typedef struct __va_list_tag { |
8344 | RecordDecl *VaListTagDecl; |
8345 | VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); |
8346 | VaListTagDecl->startDefinition(); |
8347 | |
8348 | const size_t NumFields = 3; |
8349 | QualType FieldTypes[NumFields]; |
8350 | const char *FieldNames[NumFields]; |
8351 | |
8352 | // void *CurrentSavedRegisterArea; |
8353 | FieldTypes[0] = Context->getPointerType(Context->VoidTy); |
8354 | FieldNames[0] = "__current_saved_reg_area_pointer"; |
8355 | |
8356 | // void *SavedRegAreaEnd; |
8357 | FieldTypes[1] = Context->getPointerType(Context->VoidTy); |
8358 | FieldNames[1] = "__saved_reg_area_end_pointer"; |
8359 | |
8360 | // void *OverflowArea; |
8361 | FieldTypes[2] = Context->getPointerType(Context->VoidTy); |
8362 | FieldNames[2] = "__overflow_area_pointer"; |
8363 | |
8364 | // Create fields |
8365 | for (unsigned i = 0; i < NumFields; ++i) { |
8366 | FieldDecl *Field = FieldDecl::Create( |
8367 | const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), |
8368 | SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], |
8369 | /*TInfo=*/0, |
8370 | /*BitWidth=*/0, |
8371 | /*Mutable=*/false, ICIS_NoInit); |
8372 | Field->setAccess(AS_public); |
8373 | VaListTagDecl->addDecl(Field); |
8374 | } |
8375 | VaListTagDecl->completeDefinition(); |
8376 | Context->VaListTagDecl = VaListTagDecl; |
8377 | QualType VaListTagType = Context->getRecordType(VaListTagDecl); |
8378 | |
8379 | // } __va_list_tag; |
8380 | TypedefDecl *VaListTagTypedefDecl = |
8381 | Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); |
8382 | |
8383 | QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); |
8384 | |
8385 | // typedef __va_list_tag __builtin_va_list[1]; |
8386 | llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); |
8387 | QualType VaListTagArrayType = Context->getConstantArrayType( |
8388 | VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); |
8389 | |
8390 | return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); |
8391 | } |
8392 | |
8393 | static TypedefDecl *CreateVaListDecl(const ASTContext *Context, |
8394 | TargetInfo::BuiltinVaListKind Kind) { |
8395 | switch (Kind) { |
8396 | case TargetInfo::CharPtrBuiltinVaList: |
8397 | return CreateCharPtrBuiltinVaListDecl(Context); |
8398 | case TargetInfo::VoidPtrBuiltinVaList: |
8399 | return CreateVoidPtrBuiltinVaListDecl(Context); |
8400 | case TargetInfo::AArch64ABIBuiltinVaList: |
8401 | return CreateAArch64ABIBuiltinVaListDecl(Context); |
8402 | case TargetInfo::PowerABIBuiltinVaList: |
8403 | return CreatePowerABIBuiltinVaListDecl(Context); |
8404 | case TargetInfo::X86_64ABIBuiltinVaList: |
8405 | return CreateX86_64ABIBuiltinVaListDecl(Context); |
8406 | case TargetInfo::PNaClABIBuiltinVaList: |
8407 | return CreatePNaClABIBuiltinVaListDecl(Context); |
8408 | case TargetInfo::AAPCSABIBuiltinVaList: |
8409 | return CreateAAPCSABIBuiltinVaListDecl(Context); |
8410 | case TargetInfo::SystemZBuiltinVaList: |
8411 | return CreateSystemZBuiltinVaListDecl(Context); |
8412 | case TargetInfo::HexagonBuiltinVaList: |
8413 | return CreateHexagonBuiltinVaListDecl(Context); |
8414 | } |
8415 | |
8416 | llvm_unreachable("Unhandled __builtin_va_list type kind")__builtin_unreachable(); |
8417 | } |
8418 | |
8419 | TypedefDecl *ASTContext::getBuiltinVaListDecl() const { |
8420 | if (!BuiltinVaListDecl) { |
8421 | BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); |
8422 | assert(BuiltinVaListDecl->isImplicit())(static_cast<void> (0)); |
8423 | } |
8424 | |
8425 | return BuiltinVaListDecl; |
8426 | } |
8427 | |
8428 | Decl *ASTContext::getVaListTagDecl() const { |
8429 | // Force the creation of VaListTagDecl by building the __builtin_va_list |
8430 | // declaration. |
8431 | if (!VaListTagDecl) |
8432 | (void)getBuiltinVaListDecl(); |
8433 | |
8434 | return VaListTagDecl; |
8435 | } |
8436 | |
8437 | TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { |
8438 | if (!BuiltinMSVaListDecl) |
8439 | BuiltinMSVaListDecl = CreateMSVaListDecl(this); |
8440 | |
8441 | return BuiltinMSVaListDecl; |
8442 | } |
8443 | |
8444 | bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { |
8445 | return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); |
8446 | } |
8447 | |
8448 | void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { |
8449 | assert(ObjCConstantStringType.isNull() &&(static_cast<void> (0)) |
8450 | "'NSConstantString' type already set!")(static_cast<void> (0)); |
8451 | |
8452 | ObjCConstantStringType = getObjCInterfaceType(Decl); |
8453 | } |
8454 | |
8455 | /// Retrieve the template name that corresponds to a non-empty |
8456 | /// lookup. |
8457 | TemplateName |
8458 | ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, |
8459 | UnresolvedSetIterator End) const { |
8460 | unsigned size = End - Begin; |
8461 | assert(size > 1 && "set is not overloaded!")(static_cast<void> (0)); |
8462 | |
8463 | void *memory = Allocate(sizeof(OverloadedTemplateStorage) + |
8464 | size * sizeof(FunctionTemplateDecl*)); |
8465 | auto *OT = new (memory) OverloadedTemplateStorage(size); |
8466 | |
8467 | NamedDecl **Storage = OT->getStorage(); |
8468 | for (UnresolvedSetIterator I = Begin; I != End; ++I) { |
8469 | NamedDecl *D = *I; |
8470 | assert(isa<FunctionTemplateDecl>(D) ||(static_cast<void> (0)) |
8471 | isa<UnresolvedUsingValueDecl>(D) ||(static_cast<void> (0)) |
8472 | (isa<UsingShadowDecl>(D) &&(static_cast<void> (0)) |
8473 | isa<FunctionTemplateDecl>(D->getUnderlyingDecl())))(static_cast<void> (0)); |
8474 | *Storage++ = D; |
8475 | } |
8476 | |
8477 | return TemplateName(OT); |
8478 | } |
8479 | |
8480 | /// Retrieve a template name representing an unqualified-id that has been |
8481 | /// assumed to name a template for ADL purposes. |
8482 | TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { |
8483 | auto *OT = new (*this) AssumedTemplateStorage(Name); |
8484 | return TemplateName(OT); |
8485 | } |
8486 | |
8487 | /// Retrieve the template name that represents a qualified |
8488 | /// template name such as \c std::vector. |
8489 | TemplateName |
8490 | ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, |
8491 | bool TemplateKeyword, |
8492 | TemplateDecl *Template) const { |
8493 | assert(NNS && "Missing nested-name-specifier in qualified template name")(static_cast<void> (0)); |
8494 | |
8495 | // FIXME: Canonicalization? |
8496 | llvm::FoldingSetNodeID ID; |
8497 | QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); |
8498 | |
8499 | void *InsertPos = nullptr; |
8500 | QualifiedTemplateName *QTN = |
8501 | QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); |
8502 | if (!QTN) { |
8503 | QTN = new (*this, alignof(QualifiedTemplateName)) |
8504 | QualifiedTemplateName(NNS, TemplateKeyword, Template); |
8505 | QualifiedTemplateNames.InsertNode(QTN, InsertPos); |
8506 | } |
8507 | |
8508 | return TemplateName(QTN); |
8509 | } |
8510 | |
8511 | /// Retrieve the template name that represents a dependent |
8512 | /// template name such as \c MetaFun::template apply. |
8513 | TemplateName |
8514 | ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, |
8515 | const IdentifierInfo *Name) const { |
8516 | assert((!NNS || NNS->isDependent()) &&(static_cast<void> (0)) |
8517 | "Nested name specifier must be dependent")(static_cast<void> (0)); |
8518 | |
8519 | llvm::FoldingSetNodeID ID; |
8520 | DependentTemplateName::Profile(ID, NNS, Name); |
8521 | |
8522 | void *InsertPos = nullptr; |
8523 | DependentTemplateName *QTN = |
8524 | DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); |
8525 | |
8526 | if (QTN) |
8527 | return TemplateName(QTN); |
8528 | |
8529 | NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); |
8530 | if (CanonNNS == NNS) { |
8531 | QTN = new (*this, alignof(DependentTemplateName)) |
8532 | DependentTemplateName(NNS, Name); |
8533 | } else { |
8534 | TemplateName Canon = getDependentTemplateName(CanonNNS, Name); |
8535 | QTN = new (*this, alignof(DependentTemplateName)) |
8536 | DependentTemplateName(NNS, Name, Canon); |
8537 | DependentTemplateName *CheckQTN = |
8538 | DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); |
8539 | assert(!CheckQTN && "Dependent type name canonicalization broken")(static_cast<void> (0)); |
8540 | (void)CheckQTN; |
8541 | } |
8542 | |
8543 | DependentTemplateNames.InsertNode(QTN, InsertPos); |
8544 | return TemplateName(QTN); |
8545 | } |
8546 | |
8547 | /// Retrieve the template name that represents a dependent |
8548 | /// template name such as \c MetaFun::template operator+. |
8549 | TemplateName |
8550 | ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, |
8551 | OverloadedOperatorKind Operator) const { |
8552 | assert((!NNS || NNS->isDependent()) &&(static_cast<void> (0)) |
8553 | "Nested name specifier must be dependent")(static_cast<void> (0)); |
8554 | |
8555 | llvm::FoldingSetNodeID ID; |
8556 | DependentTemplateName::Profile(ID, NNS, Operator); |
8557 | |
8558 | void *InsertPos = nullptr; |
8559 | DependentTemplateName *QTN |
8560 | = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); |
8561 | |
8562 | if (QTN) |
8563 | return TemplateName(QTN); |
8564 | |
8565 | NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); |
8566 | if (CanonNNS == NNS) { |
8567 | QTN = new (*this, alignof(DependentTemplateName)) |
8568 | DependentTemplateName(NNS, Operator); |
8569 | } else { |
8570 | TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); |
8571 | QTN = new (*this, alignof(DependentTemplateName)) |
8572 | DependentTemplateName(NNS, Operator, Canon); |
8573 | |
8574 | DependentTemplateName *CheckQTN |
8575 | = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); |
8576 | assert(!CheckQTN && "Dependent template name canonicalization broken")(static_cast<void> (0)); |
8577 | (void)CheckQTN; |
8578 | } |
8579 | |
8580 | DependentTemplateNames.InsertNode(QTN, InsertPos); |
8581 | return TemplateName(QTN); |
8582 | } |
8583 | |
8584 | TemplateName |
8585 | ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, |
8586 | TemplateName replacement) const { |
8587 | llvm::FoldingSetNodeID ID; |
8588 | SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); |
8589 | |
8590 | void *insertPos = nullptr; |
8591 | SubstTemplateTemplateParmStorage *subst |
8592 | = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); |
8593 | |
8594 | if (!subst) { |
8595 | subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); |
8596 | SubstTemplateTemplateParms.InsertNode(subst, insertPos); |
8597 | } |
8598 | |
8599 | return TemplateName(subst); |
8600 | } |
8601 | |
8602 | TemplateName |
8603 | ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, |
8604 | const TemplateArgument &ArgPack) const { |
8605 | auto &Self = const_cast<ASTContext &>(*this); |
8606 | llvm::FoldingSetNodeID ID; |
8607 | SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); |
8608 | |
8609 | void *InsertPos = nullptr; |
8610 | SubstTemplateTemplateParmPackStorage *Subst |
8611 | = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); |
8612 | |
8613 | if (!Subst) { |
8614 | Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, |
8615 | ArgPack.pack_size(), |
8616 | ArgPack.pack_begin()); |
8617 | SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); |
8618 | } |
8619 | |
8620 | return TemplateName(Subst); |
8621 | } |
8622 | |
8623 | /// getFromTargetType - Given one of the integer types provided by |
8624 | /// TargetInfo, produce the corresponding type. The unsigned @p Type |
8625 | /// is actually a value of type @c TargetInfo::IntType. |
8626 | CanQualType ASTContext::getFromTargetType(unsigned Type) const { |
8627 | switch (Type) { |
8628 | case TargetInfo::NoInt: return {}; |
8629 | case TargetInfo::SignedChar: return SignedCharTy; |
8630 | case TargetInfo::UnsignedChar: return UnsignedCharTy; |
8631 | case TargetInfo::SignedShort: return ShortTy; |
8632 | case TargetInfo::UnsignedShort: return UnsignedShortTy; |
8633 | case TargetInfo::SignedInt: return IntTy; |
8634 | case TargetInfo::UnsignedInt: return UnsignedIntTy; |
8635 | case TargetInfo::SignedLong: return LongTy; |
8636 | case TargetInfo::UnsignedLong: return UnsignedLongTy; |
8637 | case TargetInfo::SignedLongLong: return LongLongTy; |
8638 | case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; |
8639 | } |
8640 | |
8641 | llvm_unreachable("Unhandled TargetInfo::IntType value")__builtin_unreachable(); |
8642 | } |
8643 | |
8644 | //===----------------------------------------------------------------------===// |
8645 | // Type Predicates. |
8646 | //===----------------------------------------------------------------------===// |
8647 | |
8648 | /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's |
8649 | /// garbage collection attribute. |
8650 | /// |
8651 | Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { |
8652 | if (getLangOpts().getGC() == LangOptions::NonGC) |
8653 | return Qualifiers::GCNone; |
8654 | |
8655 | assert(getLangOpts().ObjC)(static_cast<void> (0)); |
8656 | Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); |
8657 | |
8658 | // Default behaviour under objective-C's gc is for ObjC pointers |
8659 | // (or pointers to them) be treated as though they were declared |
8660 | // as __strong. |
8661 | if (GCAttrs == Qualifiers::GCNone) { |
8662 | if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) |
8663 | return Qualifiers::Strong; |
8664 | else if (Ty->isPointerType()) |
8665 | return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); |
8666 | } else { |
8667 | // It's not valid to set GC attributes on anything that isn't a |
8668 | // pointer. |
8669 | #ifndef NDEBUG1 |
8670 | QualType CT = Ty->getCanonicalTypeInternal(); |
8671 | while (const auto *AT = dyn_cast<ArrayType>(CT)) |
8672 | CT = AT->getElementType(); |
8673 | assert(CT->isAnyPointerType() || CT->isBlockPointerType())(static_cast<void> (0)); |
8674 | #endif |
8675 | } |
8676 | return GCAttrs; |
8677 | } |
8678 | |
8679 | //===----------------------------------------------------------------------===// |
8680 | // Type Compatibility Testing |
8681 | //===----------------------------------------------------------------------===// |
8682 | |
8683 | /// areCompatVectorTypes - Return true if the two specified vector types are |
8684 | /// compatible. |
8685 | static bool areCompatVectorTypes(const VectorType *LHS, |
8686 | const VectorType *RHS) { |
8687 | assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified())(static_cast<void> (0)); |
8688 | return LHS->getElementType() == RHS->getElementType() && |
8689 | LHS->getNumElements() == RHS->getNumElements(); |
8690 | } |
8691 | |
8692 | /// areCompatMatrixTypes - Return true if the two specified matrix types are |
8693 | /// compatible. |
8694 | static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, |
8695 | const ConstantMatrixType *RHS) { |
8696 | assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified())(static_cast<void> (0)); |
8697 | return LHS->getElementType() == RHS->getElementType() && |
8698 | LHS->getNumRows() == RHS->getNumRows() && |
8699 | LHS->getNumColumns() == RHS->getNumColumns(); |
8700 | } |
8701 | |
8702 | bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, |
8703 | QualType SecondVec) { |
8704 | assert(FirstVec->isVectorType() && "FirstVec should be a vector type")(static_cast<void> (0)); |
8705 | assert(SecondVec->isVectorType() && "SecondVec should be a vector type")(static_cast<void> (0)); |
8706 | |
8707 | if (hasSameUnqualifiedType(FirstVec, SecondVec)) |
8708 | return true; |
8709 | |
8710 | // Treat Neon vector types and most AltiVec vector types as if they are the |
8711 | // equivalent GCC vector types. |
8712 | const auto *First = FirstVec->castAs<VectorType>(); |
8713 | const auto *Second = SecondVec->castAs<VectorType>(); |
8714 | if (First->getNumElements() == Second->getNumElements() && |
8715 | hasSameType(First->getElementType(), Second->getElementType()) && |
8716 | First->getVectorKind() != VectorType::AltiVecPixel && |
8717 | First->getVectorKind() != VectorType::AltiVecBool && |
8718 | Second->getVectorKind() != VectorType::AltiVecPixel && |
8719 | Second->getVectorKind() != VectorType::AltiVecBool && |
8720 | First->getVectorKind() != VectorType::SveFixedLengthDataVector && |
8721 | First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && |
8722 | Second->getVectorKind() != VectorType::SveFixedLengthDataVector && |
8723 | Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) |
8724 | return true; |
8725 | |
8726 | return false; |
8727 | } |
8728 | |
8729 | /// getSVETypeSize - Return SVE vector or predicate register size. |
8730 | static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { |
8731 | assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type")(static_cast<void> (0)); |
8732 | return Ty->getKind() == BuiltinType::SveBool |
8733 | ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth() |
8734 | : Context.getLangOpts().ArmSveVectorBits; |
8735 | } |
8736 | |
8737 | bool ASTContext::areCompatibleSveTypes(QualType FirstType, |
8738 | QualType SecondType) { |
8739 | assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||(static_cast<void> (0)) |
8740 | (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&(static_cast<void> (0)) |
8741 | "Expected SVE builtin type and vector type!")(static_cast<void> (0)); |
8742 | |
8743 | auto IsValidCast = [this](QualType FirstType, QualType SecondType) { |
8744 | if (const auto *BT = FirstType->getAs<BuiltinType>()) { |
8745 | if (const auto *VT = SecondType->getAs<VectorType>()) { |
8746 | // Predicates have the same representation as uint8 so we also have to |
8747 | // check the kind to make these types incompatible. |
8748 | if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) |
8749 | return BT->getKind() == BuiltinType::SveBool; |
8750 | else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) |
8751 | return VT->getElementType().getCanonicalType() == |
8752 | FirstType->getSveEltType(*this); |
8753 | else if (VT->getVectorKind() == VectorType::GenericVector) |
8754 | return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && |
8755 | hasSameType(VT->getElementType(), |
8756 | getBuiltinVectorTypeInfo(BT).ElementType); |
8757 | } |
8758 | } |
8759 | return false; |
8760 | }; |
8761 | |
8762 | return IsValidCast(FirstType, SecondType) || |
8763 | IsValidCast(SecondType, FirstType); |
8764 | } |
8765 | |
8766 | bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, |
8767 | QualType SecondType) { |
8768 | assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||(static_cast<void> (0)) |
8769 | (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&(static_cast<void> (0)) |
8770 | "Expected SVE builtin type and vector type!")(static_cast<void> (0)); |
8771 | |
8772 | auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { |
8773 | const auto *BT = FirstType->getAs<BuiltinType>(); |
8774 | if (!BT) |
8775 | return false; |
8776 | |
8777 | const auto *VecTy = SecondType->getAs<VectorType>(); |
8778 | if (VecTy && |
8779 | (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || |
8780 | VecTy->getVectorKind() == VectorType::GenericVector)) { |
8781 | const LangOptions::LaxVectorConversionKind LVCKind = |
8782 | getLangOpts().getLaxVectorConversions(); |
8783 | |
8784 | // Can not convert between sve predicates and sve vectors because of |
8785 | // different size. |
8786 | if (BT->getKind() == BuiltinType::SveBool && |
8787 | VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) |
8788 | return false; |
8789 | |
8790 | // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. |
8791 | // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly |
8792 | // converts to VLAT and VLAT implicitly converts to GNUT." |
8793 | // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and |
8794 | // predicates. |
8795 | if (VecTy->getVectorKind() == VectorType::GenericVector && |
8796 | getTypeSize(SecondType) != getSVETypeSize(*this, BT)) |
8797 | return false; |
8798 | |
8799 | // If -flax-vector-conversions=all is specified, the types are |
8800 | // certainly compatible. |
8801 | if (LVCKind == LangOptions::LaxVectorConversionKind::All) |
8802 | return true; |
8803 | |
8804 | // If -flax-vector-conversions=integer is specified, the types are |
8805 | // compatible if the elements are integer types. |
8806 | if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) |
8807 | return VecTy->getElementType().getCanonicalType()->isIntegerType() && |
8808 | FirstType->getSveEltType(*this)->isIntegerType(); |
8809 | } |
8810 | |
8811 | return false; |
8812 | }; |
8813 | |
8814 | return IsLaxCompatible(FirstType, SecondType) || |
8815 | IsLaxCompatible(SecondType, FirstType); |
8816 | } |
8817 | |
8818 | bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { |
8819 | while (true) { |
8820 | // __strong id |
8821 | if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { |
8822 | if (Attr->getAttrKind() == attr::ObjCOwnership) |
8823 | return true; |
8824 | |
8825 | Ty = Attr->getModifiedType(); |
8826 | |
8827 | // X *__strong (...) |
8828 | } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { |
8829 | Ty = Paren->getInnerType(); |
8830 | |
8831 | // We do not want to look through typedefs, typeof(expr), |
8832 | // typeof(type), or any other way that the type is somehow |
8833 | // abstracted. |
8834 | } else { |
8835 | return false; |
8836 | } |
8837 | } |
8838 | } |
8839 | |
8840 | //===----------------------------------------------------------------------===// |
8841 | // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. |
8842 | //===----------------------------------------------------------------------===// |
8843 | |
8844 | /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the |
8845 | /// inheritance hierarchy of 'rProto'. |
8846 | bool |
8847 | ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, |
8848 | ObjCProtocolDecl *rProto) const { |
8849 | if (declaresSameEntity(lProto, rProto)) |
8850 | return true; |
8851 | for (auto *PI : rProto->protocols()) |
8852 | if (ProtocolCompatibleWithProtocol(lProto, PI)) |
8853 | return true; |
8854 | return false; |
8855 | } |
8856 | |
8857 | /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and |
8858 | /// Class<pr1, ...>. |
8859 | bool ASTContext::ObjCQualifiedClassTypesAreCompatible( |
8860 | const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { |
8861 | for (auto *lhsProto : lhs->quals()) { |
8862 | bool match = false; |
8863 | for (auto *rhsProto : rhs->quals()) { |
8864 | if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { |
8865 | match = true; |
8866 | break; |
8867 | } |
8868 | } |
8869 | if (!match) |
8870 | return false; |
8871 | } |
8872 | return true; |
8873 | } |
8874 | |
8875 | /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an |
8876 | /// ObjCQualifiedIDType. |
8877 | bool ASTContext::ObjCQualifiedIdTypesAreCompatible( |
8878 | const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, |
8879 | bool compare) { |
8880 | // Allow id<P..> and an 'id' in all cases. |
8881 | if (lhs->isObjCIdType() || rhs->isObjCIdType()) |
8882 | return true; |
8883 | |
8884 | // Don't allow id<P..> to convert to Class or Class<P..> in either direction. |
8885 | if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || |
8886 | rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) |
8887 | return false; |
8888 | |
8889 | if (lhs->isObjCQualifiedIdType()) { |
8890 | if (rhs->qual_empty()) { |
8891 | // If the RHS is a unqualified interface pointer "NSString*", |
8892 | // make sure we check the class hierarchy. |
8893 | if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { |
8894 | for (auto *I : lhs->quals()) { |
8895 | // when comparing an id<P> on lhs with a static type on rhs, |
8896 | // see if static class implements all of id's protocols, directly or |
8897 | // through its super class and categories. |
8898 | if (!rhsID->ClassImplementsProtocol(I, true)) |
8899 | return false; |
8900 | } |
8901 | } |
8902 | // If there are no qualifiers and no interface, we have an 'id'. |
8903 | return true; |
8904 | } |
8905 | // Both the right and left sides have qualifiers. |
8906 | for (auto *lhsProto : lhs->quals()) { |
8907 | bool match = false; |
8908 | |
8909 | // when comparing an id<P> on lhs with a static type on rhs, |
8910 | // see if static class implements all of id's protocols, directly or |
8911 | // through its super class and categories. |
8912 | for (auto *rhsProto : rhs->quals()) { |
8913 | if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || |
8914 | (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { |
8915 | match = true; |
8916 | break; |
8917 | } |
8918 | } |
8919 | // If the RHS is a qualified interface pointer "NSString<P>*", |
8920 | // make sure we check the class hierarchy. |
8921 | if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { |
8922 | for (auto *I : lhs->quals()) { |
8923 | // when comparing an id<P> on lhs with a static type on rhs, |
8924 | // see if static class implements all of id's protocols, directly or |
8925 | // through its super class and categories. |
8926 | if (rhsID->ClassImplementsProtocol(I, true)) { |
8927 | match = true; |
8928 | break; |
8929 | } |
8930 | } |
8931 | } |
8932 | if (!match) |
8933 | return false; |
8934 | } |
8935 | |
8936 | return true; |
8937 | } |
8938 | |
8939 | assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>")(static_cast<void> (0)); |
8940 | |
8941 | if (lhs->getInterfaceType()) { |
8942 | // If both the right and left sides have qualifiers. |
8943 | for (auto *lhsProto : lhs->quals()) { |
8944 | bool match = false; |
8945 | |
8946 | // when comparing an id<P> on rhs with a static type on lhs, |
8947 | // see if static class implements all of id's protocols, directly or |
8948 | // through its super class and categories. |
8949 | // First, lhs protocols in the qualifier list must be found, direct |
8950 | // or indirect in rhs's qualifier list or it is a mismatch. |
8951 | for (auto *rhsProto : rhs->quals()) { |
8952 | if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || |
8953 | (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { |
8954 | match = true; |
8955 | break; |
8956 | } |
8957 | } |
8958 | if (!match) |
8959 | return false; |
8960 | } |
8961 | |
8962 | // Static class's protocols, or its super class or category protocols |
8963 | // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. |
8964 | if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { |
8965 | llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; |
8966 | CollectInheritedProtocols(lhsID, LHSInheritedProtocols); |
8967 | // This is rather dubious but matches gcc's behavior. If lhs has |
8968 | // no type qualifier and its class has no static protocol(s) |
8969 | // assume that it is mismatch. |
8970 | if (LHSInheritedProtocols.empty() && lhs->qual_empty()) |
8971 | return false; |
8972 | for (auto *lhsProto : LHSInheritedProtocols) { |
8973 | bool match = false; |
8974 | for (auto *rhsProto : rhs->quals()) { |
8975 | if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || |
8976 | (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { |
8977 | match = true; |
8978 | break; |
8979 | } |
8980 | } |
8981 | if (!match) |
8982 | return false; |
8983 | } |
8984 | } |
8985 | return true; |
8986 | } |
8987 | return false; |
8988 | } |
8989 | |
8990 | /// canAssignObjCInterfaces - Return true if the two interface types are |
8991 | /// compatible for assignment from RHS to LHS. This handles validation of any |
8992 | /// protocol qualifiers on the LHS or RHS. |
8993 | bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, |
8994 | const ObjCObjectPointerType *RHSOPT) { |
8995 | const ObjCObjectType* LHS = LHSOPT->getObjectType(); |
8996 | const ObjCObjectType* RHS = RHSOPT->getObjectType(); |
8997 | |
8998 | // If either type represents the built-in 'id' type, return true. |
8999 | if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) |
9000 | return true; |
9001 | |
9002 | // Function object that propagates a successful result or handles |
9003 | // __kindof types. |
9004 | auto finish = [&](bool succeeded) -> bool { |
9005 | if (succeeded) |
9006 | return true; |
9007 | |
9008 | if (!RHS->isKindOfType()) |
9009 | return false; |
9010 | |
9011 | // Strip off __kindof and protocol qualifiers, then check whether |
9012 | // we can assign the other way. |
9013 | return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), |
9014 | LHSOPT->stripObjCKindOfTypeAndQuals(*this)); |
9015 | }; |
9016 | |
9017 | // Casts from or to id<P> are allowed when the other side has compatible |
9018 | // protocols. |
9019 | if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { |
9020 | return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); |
9021 | } |
9022 | |
9023 | // Verify protocol compatibility for casts from Class<P1> to Class<P2>. |
9024 | if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { |
9025 | return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); |
9026 | } |
9027 | |
9028 | // Casts from Class to Class<Foo>, or vice-versa, are allowed. |
9029 | if (LHS->isObjCClass() && RHS->isObjCClass()) { |
9030 | return true; |
9031 | } |
9032 | |
9033 | // If we have 2 user-defined types, fall into that path. |
9034 | if (LHS->getInterface() && RHS->getInterface()) { |
9035 | return finish(canAssignObjCInterfaces(LHS, RHS)); |
9036 | } |
9037 | |
9038 | return false; |
9039 | } |
9040 | |
9041 | /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written |
9042 | /// for providing type-safety for objective-c pointers used to pass/return |
9043 | /// arguments in block literals. When passed as arguments, passing 'A*' where |
9044 | /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is |
9045 | /// not OK. For the return type, the opposite is not OK. |
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
                                         const ObjCObjectPointerType *LHSOPT,
                                         const ObjCObjectPointerType *RHSOPT,
                                         bool BlockReturnType) {

  // Function object that propagates a successful result or handles
  // __kindof types.
  auto finish = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;

    // The block's declared ("expected") type is the return type (RHS) when
    // checking a block result, and the parameter type (LHS) otherwise. Only
    // a __kindof expected type gets the relaxed, direction-reversed retry.
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;

    // Strip off __kindof and protocol qualifiers, then check whether
    // we can assign the other way.
    return canAssignObjCInterfacesInBlockPointer(
             RHSOPT->stripObjCKindOfTypeAndQuals(*this),
             LHSOPT->stripObjCKindOfTypeAndQuals(*this),
             BlockReturnType);
  };

  // A builtin RHS, or an unqualified 'id' LHS, is always assignable.
  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  // A builtin LHS accepts only a builtin or a qualified-id RHS.
  if (LHSOPT->isObjCBuiltinType()) {
    return finish(RHSOPT->isObjCBuiltinType() ||
                  RHSOPT->isObjCQualifiedIdType());
  }

  // Qualified-id ('id<P>') on either side: defer to the protocol-based
  // compatibility check; the direction depends on parameter vs. return
  // position and on the compatibility language option.
  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
    if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
      // Use for block parameters previous type checking for compatibility.
      return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
                    // Or corrected type checking as in non-compat mode.
                    (!BlockReturnType &&
                     ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
    else
      return finish(ObjCQualifiedIdTypesAreCompatible(
          (BlockReturnType ? LHSOPT : RHSOPT),
          (BlockReturnType ? RHSOPT : LHSOPT), false));
  }

  // Two user-defined interface types: the subclass relationship decides,
  // with the accepted direction flipped for block return types (covariant
  // returns, contravariant parameters).
  const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // We have 2 user-defined types.
    if (LHS != RHS) {
      if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
        return finish(BlockReturnType);
      if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
        return finish(!BlockReturnType);
    }
    else
      return true;
  }
  return false;
}
9104 | |
9105 | /// Comparison routine for Objective-C protocols to be used with |
9106 | /// llvm::array_pod_sort. |
9107 | static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, |
9108 | ObjCProtocolDecl * const *rhs) { |
9109 | return (*lhs)->getName().compare((*rhs)->getName()); |
9110 | } |
9111 | |
/// getIntersectionOfProtocols - This routine finds the intersection of set
/// of protocols inherited from two distinct objective-c pointer objects with
/// the given common base.
/// It is used to build composite qualifier list of the composite type of
/// the conditional expression involving two objective-c pointer objects.
///
/// \param CommonBase the common base interface of \p LHSOPT and \p RHSOPT.
/// \param IntersectionSet filled with the protocols inherited by both sides,
///        minus those already implied by \p CommonBase, sorted by name.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCInterfaceDecl *CommonBase,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {

  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base")(static_cast<void> (0));
  assert(RHS->getInterface() && "RHS must have an interface base")(static_cast<void> (0));

  // Add all of the protocols for the LHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto proto : LHS->quals()) {
    Context.CollectInheritedProtocols(proto, LHSProtocolSet);
  }

  // Also add the protocols associated with the LHS interface.
  Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);

  // Add all of the protocols for the RHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;

  // Start with the protocol qualifiers.
  for (auto proto : RHS->quals()) {
    Context.CollectInheritedProtocols(proto, RHSProtocolSet);
  }

  // Also add the protocols associated with the RHS interface.
  Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);

  // Compute the intersection of the collected protocol sets.
  for (auto proto : LHSProtocolSet) {
    if (RHSProtocolSet.count(proto))
      IntersectionSet.push_back(proto);
  }

  // Compute the set of protocols that is implied by either the common type or
  // the protocols within the intersection.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
  Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);

  // Remove any implied protocols from the list of inherited protocols.
  if (!ImpliedProtocols.empty()) {
    IntersectionSet.erase(
      std::remove_if(IntersectionSet.begin(),
                     IntersectionSet.end(),
                     [&](ObjCProtocolDecl *proto) -> bool {
                       return ImpliedProtocols.count(proto) > 0;
                     }),
      IntersectionSet.end());
  }

  // Sort the remaining protocols by name.
  llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
                       compareObjCProtocolsByName);
}
9177 | |
9178 | /// Determine whether the first type is a subtype of the second. |
9179 | static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, |
9180 | QualType rhs) { |
9181 | // Common case: two object pointers. |
9182 | const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); |
9183 | const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); |
9184 | if (lhsOPT && rhsOPT) |
9185 | return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); |
9186 | |
9187 | // Two block pointers. |
9188 | const auto *lhsBlock = lhs->getAs<BlockPointerType>(); |
9189 | const auto *rhsBlock = rhs->getAs<BlockPointerType>(); |
9190 | if (lhsBlock && rhsBlock) |
9191 | return ctx.typesAreBlockPointerCompatible(lhs, rhs); |
9192 | |
9193 | // If either is an unqualified 'id' and the other is a block, it's |
9194 | // acceptable. |
9195 | if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || |
9196 | (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) |
9197 | return true; |
9198 | |
9199 | return false; |
9200 | } |
9201 | |
9202 | // Check that the given Objective-C type argument lists are equivalent. |
9203 | static bool sameObjCTypeArgs(ASTContext &ctx, |
9204 | const ObjCInterfaceDecl *iface, |
9205 | ArrayRef<QualType> lhsArgs, |
9206 | ArrayRef<QualType> rhsArgs, |
9207 | bool stripKindOf) { |
9208 | if (lhsArgs.size() != rhsArgs.size()) |
9209 | return false; |
9210 | |
9211 | ObjCTypeParamList *typeParams = iface->getTypeParamList(); |
9212 | for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { |
9213 | if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) |
9214 | continue; |
9215 | |
9216 | switch (typeParams->begin()[i]->getVariance()) { |
9217 | case ObjCTypeParamVariance::Invariant: |
9218 | if (!stripKindOf || |
9219 | !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), |
9220 | rhsArgs[i].stripObjCKindOfType(ctx))) { |
9221 | return false; |
9222 | } |
9223 | break; |
9224 | |
9225 | case ObjCTypeParamVariance::Covariant: |
9226 | if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) |
9227 | return false; |
9228 | break; |
9229 | |
9230 | case ObjCTypeParamVariance::Contravariant: |
9231 | if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) |
9232 | return false; |
9233 | break; |
9234 | } |
9235 | } |
9236 | |
9237 | return true; |
9238 | } |
9239 | |
/// Compute the composite pointer type for two Objective-C object pointers
/// (e.g. for the arms of a conditional expression), or a null QualType if
/// they share no common base interface.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();

  // Both sides must be rooted in an interface type.
  if (!LDecl || !RDecl)
    return {};

  // When either LHS or RHS is a kindof type, we should return a kindof type.
  // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
  // kindof(A).
  bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();

  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;

    if (declaresSameEntity(LHS->getInterface(), RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If anything in the LHS will have changed, build a new result type.
      // If we need to return a kindof type but LHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || LHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(LHS->getInterface());
        Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
                                   anyKindOf || LHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      // Nothing changed: the LHS object type itself is the common base.
      return getObjCObjectPointerType(QualType(LHS, 0));
    }

    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;

    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }

  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors. This loop mirrors the
  // one above with the roles of LHS and RHS swapped.
  while (true) {
    auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      LHS = KnownLHS->second;

      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return {};
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = {};
        anyChanges = true;
      }

      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;

      // If we need to return a kindof type but RHS is not a kindof type, we
      // build a new result type.
      if (anyChanges || RHS->isKindOfType() != anyKindOf) {
        QualType Result = getObjCInterfaceType(RHS->getInterface());
        Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
                                   anyKindOf || RHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }

      // Nothing changed: the RHS object type itself is the common base.
      return getObjCObjectPointerType(QualType(RHS, 0));
    }

    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;

    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }

  // No common base interface exists.
  return {};
}
9362 | |
/// Determine whether an object of RHS's interface type can be assigned to
/// an lvalue of LHS's interface type: RHS must be a subclass of LHS, RHS
/// must satisfy LHS's protocol qualifiers, and any LHS type arguments must
/// be compatible with RHS's (substituted) type arguments.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type")(static_cast<void> (0));
  assert(RHS->getInterface() && "RHS is not an interface type")(static_cast<void> (0));

  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
  if (!IsSuperClass)
    return false;

  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there is no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    // Every LHS protocol must be found (by name) somewhere in the RHS's
    // inherited protocol set.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}
9424 | |
9425 | bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { |
9426 | // get the "pointed to" types |
9427 | const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); |
9428 | const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); |
9429 | |
9430 | if (!LHSOPT || !RHSOPT) |
9431 | return false; |
9432 | |
9433 | return canAssignObjCInterfaces(LHSOPT, RHSOPT) || |
9434 | canAssignObjCInterfaces(RHSOPT, LHSOPT); |
9435 | } |
9436 | |
9437 | bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { |
9438 | return canAssignObjCInterfaces( |
9439 | getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(), |
9440 | getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>()); |
9441 | } |
9442 | |
9443 | /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, |
9444 | /// both shall have the identically qualified version of a compatible type. |
9445 | /// C99 6.2.7p1: Two types have compatible types if their types are the |
9446 | /// same. See 6.7.[2,3,5] for additional rules. |
9447 | bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, |
9448 | bool CompareUnqualified) { |
9449 | if (getLangOpts().CPlusPlus) |
9450 | return hasSameType(LHS, RHS); |
9451 | |
9452 | return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); |
9453 | } |
9454 | |
/// Property type compatibility currently defers entirely to ordinary type
/// compatibility.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
9458 | |
/// Two block pointer types are compatible iff they can be merged with
/// block-pointer merging rules.
bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, /*OfBlockPointer=*/true).isNull();
}
9462 | |
9463 | /// mergeTransparentUnionType - if T is a transparent union type and a member |
9464 | /// of T is compatible with SubType, return the merged type, else return |
9465 | /// QualType() |
9466 | QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, |
9467 | bool OfBlockPointer, |
9468 | bool Unqualified) { |
9469 | if (const RecordType *UT = T->getAsUnionType()) { |
9470 | RecordDecl *UD = UT->getDecl(); |
9471 | if (UD->hasAttr<TransparentUnionAttr>()) { |
9472 | for (const auto *I : UD->fields()) { |
9473 | QualType ET = I->getType().getUnqualifiedType(); |
9474 | QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); |
9475 | if (!MT.isNull()) |
9476 | return MT; |
9477 | } |
9478 | } |
9479 | } |
9480 | |
9481 | return {}; |
9482 | } |
9483 | |
9484 | /// mergeFunctionParameterTypes - merge two types which appear as function |
9485 | /// parameter types |
9486 | QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, |
9487 | bool OfBlockPointer, |
9488 | bool Unqualified) { |
9489 | // GNU extension: two types are compatible if they appear as a function |
9490 | // argument, one of the types is a transparent union type and the other |
9491 | // type is compatible with a union member |
9492 | QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, |
9493 | Unqualified); |
9494 | if (!lmerge.isNull()) |
9495 | return lmerge; |
9496 | |
9497 | QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, |
9498 | Unqualified); |
9499 | if (!rmerge.isNull()) |
9500 | return rmerge; |
9501 | |
9502 | return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); |
9503 | } |
9504 | |
/// Merge two function types into a single compatible function type, or
/// return a null QualType if they are incompatible. When possible the
/// original \p lhs or \p rhs is returned unchanged (tracked via allLTypes /
/// allRTypes) to preserve typedef and sugar information.
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer, bool Unqualified,
                                        bool AllowCXX) {
  const auto *lbase = lhs->castAs<FunctionType>();
  const auto *rbase = rhs->castAs<FunctionType>();
  const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
  const auto *rproto = dyn_cast<FunctionProtoType>(rbase);
  // Whether the merged type is canonically identical to lhs / rhs; if either
  // survives to the end, that side's (sugared) type is returned directly.
  bool allLTypes = true;
  bool allRTypes = true;

  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    // Drop qualifiers when only the LHS return type has them.
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
                         Unqualified);
  if (retType.isNull())
    return {};

  if (Unqualified)
    retType = retType.getUnqualifiedType();

  CanQualType LRetType = getCanonicalType(lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }

  // Record whether the merged return type diverged from either side.
  if (getCanonicalType(retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(retType) != RRetType)
    allRTypes = false;

  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();

  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return {};

  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return {};
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return {};

  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return {};
  if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
    return {};
  if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
    return {};

  // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
  bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();

  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;

  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);

  if (lproto && rproto) { // two C99 style function prototypes
    assert((AllowCXX ||(static_cast<void> (0))
            (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&(static_cast<void> (0))
           "C++ shouldn't be here")(static_cast<void> (0));
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return {};

    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return {};

    if (lproto->getMethodQuals() != rproto->getMethodQuals())
      return {};

    SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
    bool canUseLeft, canUseRight;
    if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
                               newParamInfos))
      return {};

    if (!canUseLeft)
      allLTypes = false;
    if (!canUseRight)
      allRTypes = false;

    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lParamType, rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return {};

      if (Unqualified)
        paramType = paramType.getUnqualifiedType();

      types.push_back(paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }

      // Record whether the merged parameter diverged from either side.
      if (getCanonicalType(paramType) != getCanonicalType(lParamType))
        allLTypes = false;
      if (getCanonicalType(paramType) != getCanonicalType(rParamType))
        allRTypes = false;
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    // Neither side survived intact; build a fresh prototype from the merged
    // return/parameter types and combined extended info.
    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    EPI.ExtParameterInfos =
        newParamInfos.empty() ? nullptr : newParamInfos.data();
    return getFunctionType(retType, types, EPI);
  }

  // Exactly one side has a prototype: the no-proto side can't match it
  // exactly.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;

  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here")(static_cast<void> (0));
    if (proto->isVariadic())
      return {};
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);

      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const auto *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return {};
      }

      if (paramTy->isPromotableIntegerType() ||
          getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
        return {};
    }

    if (allLTypes) return lhs;
    if (allRTypes) return rhs;

    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    return getFunctionType(retType, proto->getParamTypes(), EPI);
  }

  // Neither side has a prototype.
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  return getFunctionNoProtoType(retType, einfo);
}
9682 | |
9683 | /// Given that we have an enum type and a non-enum type, try to merge them. |
9684 | static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, |
9685 | QualType other, bool isBlockReturnType) { |
9686 | // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, |
9687 | // a signed integer type, or an unsigned integer type. |
9688 | // Compatibility is based on the underlying type, not the promotion |
9689 | // type. |
9690 | QualType underlyingType = ET->getDecl()->getIntegerType(); |
9691 | if (underlyingType.isNull()) |
9692 | return {}; |
9693 | if (Context.hasSameType(underlyingType, other)) |
9694 | return other; |
9695 | |
9696 | // In block return types, we're more permissive and accept any |
9697 | // integral type of the same size. |
9698 | if (isBlockReturnType && other->isIntegerType() && |
9699 | Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) |
9700 | return other; |
9701 | |
9702 | return {}; |
9703 | } |
9704 | |
9705 | QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, |
9706 | bool OfBlockPointer, |
9707 | bool Unqualified, bool BlockReturnType) { |
9708 | // For C++ we will not reach this code with reference types (see below), |
9709 | // for OpenMP variant call overloading we might. |
9710 | // |
9711 | // C++ [expr]: If an expression initially has the type "reference to T", the |
9712 | // type is adjusted to "T" prior to any further analysis, the expression |
9713 | // designates the object or function denoted by the reference, and the |
9714 | // expression is an lvalue unless the reference is an rvalue reference and |
9715 | // the expression is a function call (possibly inside parentheses). |
9716 | if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() && |
9717 | RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass()) |
9718 | return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(), |
9719 | RHS->getAs<ReferenceType>()->getPointeeType(), |
9720 | OfBlockPointer, Unqualified, BlockReturnType); |
9721 | if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>()) |
9722 | return {}; |
9723 | |
9724 | if (Unqualified) { |
9725 | LHS = LHS.getUnqualifiedType(); |
9726 | RHS = RHS.getUnqualifiedType(); |
9727 | } |
9728 | |
9729 | QualType LHSCan = getCanonicalType(LHS), |
9730 | RHSCan = getCanonicalType(RHS); |
9731 | |
9732 | // If two types are identical, they are compatible. |
9733 | if (LHSCan == RHSCan) |
9734 | return LHS; |
9735 | |
9736 | // If the qualifiers are different, the types aren't compatible... mostly. |
9737 | Qualifiers LQuals = LHSCan.getLocalQualifiers(); |
9738 | Qualifiers RQuals = RHSCan.getLocalQualifiers(); |
9739 | if (LQuals != RQuals) { |
9740 | // If any of these qualifiers are different, we have a type |
9741 | // mismatch. |
9742 | if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || |
9743 | LQuals.getAddressSpace() != RQuals.getAddressSpace() || |
9744 | LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || |
9745 | LQuals.hasUnaligned() != RQuals.hasUnaligned()) |
9746 | return {}; |
9747 | |
9748 | // Exactly one GC qualifier difference is allowed: __strong is |
9749 | // okay if the other type has no GC qualifier but is an Objective |
9750 | // C object pointer (i.e. implicitly strong by default). We fix |
9751 | // this by pretending that the unqualified type was actually |
9752 | // qualified __strong. |
9753 | Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); |
9754 | Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); |
9755 | assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements")(static_cast<void> (0)); |
9756 | |
9757 | if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) |
9758 | return {}; |
9759 | |
9760 | if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { |
9761 | return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); |
9762 | } |
9763 | if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { |
9764 | return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); |
9765 | } |
9766 | return {}; |
9767 | } |
9768 | |
9769 | // Okay, qualifiers are equal. |
9770 | |
9771 | Type::TypeClass LHSClass = LHSCan->getTypeClass(); |
9772 | Type::TypeClass RHSClass = RHSCan->getTypeClass(); |
9773 | |
9774 | // We want to consider the two function types to be the same for these |
9775 | // comparisons, just force one to the other. |
9776 | if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; |
9777 | if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; |
9778 | |
9779 | // Same as above for arrays |
9780 | if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) |
9781 | LHSClass = Type::ConstantArray; |
9782 | if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) |
9783 | RHSClass = Type::ConstantArray; |
9784 | |
9785 | // ObjCInterfaces are just specialized ObjCObjects. |
9786 | if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; |
9787 | if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; |
9788 | |
9789 | // Canonicalize ExtVector -> Vector. |
9790 | if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; |
9791 | if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; |
9792 | |
9793 | // If the canonical type classes don't match. |
9794 | if (LHSClass != RHSClass) { |
9795 | // Note that we only have special rules for turning block enum |
9796 | // returns into block int returns, not vice-versa. |
9797 | if (const auto *ETy = LHS->getAs<EnumType>()) { |
9798 | return mergeEnumWithInteger(*this, ETy, RHS, false); |
9799 | } |
9800 | if (const EnumType* ETy = RHS->getAs<EnumType>()) { |
9801 | return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); |
9802 | } |
9803 | // allow block pointer type to match an 'id' type. |
9804 | if (OfBlockPointer && !BlockReturnType) { |
9805 | if (LHS->isObjCIdType() && RHS->isBlockPointerType()) |
9806 | return LHS; |
9807 | if (RHS->isObjCIdType() && LHS->isBlockPointerType()) |
9808 | return RHS; |
9809 | } |
9810 | |
9811 | return {}; |
9812 | } |
9813 | |
9814 | // The canonical type classes match. |
9815 | switch (LHSClass) { |
9816 | #define TYPE(Class, Base) |
9817 | #define ABSTRACT_TYPE(Class, Base) |
9818 | #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: |
9819 | #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: |
9820 | #define DEPENDENT_TYPE(Class, Base) case Type::Class: |
9821 | #include "clang/AST/TypeNodes.inc" |
9822 | llvm_unreachable("Non-canonical and dependent types shouldn't get here")__builtin_unreachable(); |
9823 | |
9824 | case Type::Auto: |
9825 | case Type::DeducedTemplateSpecialization: |
9826 | case Type::LValueReference: |
9827 | case Type::RValueReference: |
9828 | case Type::MemberPointer: |
9829 | llvm_unreachable("C++ should never be in mergeTypes")__builtin_unreachable(); |
9830 | |
9831 | case Type::ObjCInterface: |
9832 | case Type::IncompleteArray: |
9833 | case Type::VariableArray: |
9834 | case Type::FunctionProto: |
9835 | case Type::ExtVector: |
9836 | llvm_unreachable("Types are eliminated above")__builtin_unreachable(); |
9837 | |
9838 | case Type::Pointer: |
9839 | { |
9840 | // Merge two pointer types, while trying to preserve typedef info |
9841 | QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); |
9842 | QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); |
9843 | if (Unqualified) { |
9844 | LHSPointee = LHSPointee.getUnqualifiedType(); |
9845 | RHSPointee = RHSPointee.getUnqualifiedType(); |
9846 | } |
9847 | QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, |
9848 | Unqualified); |
9849 | if (ResultType.isNull()) |
9850 | return {}; |
9851 | if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) |
9852 | return LHS; |
9853 | if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) |
9854 | return RHS; |
9855 | return getPointerType(ResultType); |
9856 | } |
9857 | case Type::BlockPointer: |
9858 | { |
9859 | // Merge two block pointer types, while trying to preserve typedef info |
9860 | QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); |
9861 | QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); |
9862 | if (Unqualified) { |
9863 | LHSPointee = LHSPointee.getUnqualifiedType(); |
9864 | RHSPointee = RHSPointee.getUnqualifiedType(); |
9865 | } |
9866 | if (getLangOpts().OpenCL) { |
9867 | Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); |
9868 | Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); |
9869 | // Blocks can't be an expression in a ternary operator (OpenCL v2.0 |
9870 | // 6.12.5) thus the following check is asymmetric. |
9871 | if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) |
9872 | return {}; |
9873 | LHSPteeQual.removeAddressSpace(); |
9874 | RHSPteeQual.removeAddressSpace(); |
9875 | LHSPointee = |
9876 | QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); |
9877 | RHSPointee = |
9878 | QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); |
9879 | } |
9880 | QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, |
9881 | Unqualified); |
9882 | if (ResultType.isNull()) |
9883 | return {}; |
9884 | if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) |
9885 | return LHS; |
9886 | if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) |
9887 | return RHS; |
9888 | return getBlockPointerType(ResultType); |
9889 | } |
9890 | case Type::Atomic: |
9891 | { |
9892 | // Merge two pointer types, while trying to preserve typedef info |
9893 | QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); |
9894 | QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); |
9895 | if (Unqualified) { |
9896 | LHSValue = LHSValue.getUnqualifiedType(); |
9897 | RHSValue = RHSValue.getUnqualifiedType(); |
9898 | } |
9899 | QualType ResultType = mergeTypes(LHSValue, RHSValue, false, |
9900 | Unqualified); |
9901 | if (ResultType.isNull()) |
9902 | return {}; |
9903 | if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) |
9904 | return LHS; |
9905 | if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) |
9906 | return RHS; |
9907 | return getAtomicType(ResultType); |
9908 | } |
9909 | case Type::ConstantArray: |
9910 | { |
9911 | const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); |
9912 | const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); |
9913 | if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) |
9914 | return {}; |
9915 | |
9916 | QualType LHSElem = getAsArrayType(LHS)->getElementType(); |
9917 | QualType RHSElem = getAsArrayType(RHS)->getElementType(); |
9918 | if (Unqualified) { |
9919 | LHSElem = LHSElem.getUnqualifiedType(); |
9920 | RHSElem = RHSElem.getUnqualifiedType(); |
9921 | } |
9922 | |
9923 | QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); |
9924 | if (ResultType.isNull()) |
9925 | return {}; |
9926 | |
9927 | const VariableArrayType* LVAT = getAsVariableArrayType(LHS); |
9928 | const VariableArrayType* RVAT = getAsVariableArrayType(RHS); |
9929 | |
9930 | // If either side is a variable array, and both are complete, check whether |
9931 | // the current dimension is definite. |
9932 | if (LVAT || RVAT) { |
9933 | auto SizeFetch = [this](const VariableArrayType* VAT, |
9934 | const ConstantArrayType* CAT) |
9935 | -> std::pair<bool,llvm::APInt> { |
9936 | if (VAT) { |
9937 | Optional<llvm::APSInt> TheInt; |
9938 | Expr *E = VAT->getSizeExpr(); |
9939 | if (E && (TheInt = E->getIntegerConstantExpr(*this))) |
9940 | return std::make_pair(true, *TheInt); |
9941 | return std::make_pair(false, llvm::APSInt()); |
9942 | } |
9943 | if (CAT) |
9944 | return std::make_pair(true, CAT->getSize()); |
9945 | return std::make_pair(false, llvm::APInt()); |
9946 | }; |
9947 | |
9948 | bool HaveLSize, HaveRSize; |
9949 | llvm::APInt LSize, RSize; |
9950 | std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); |
9951 | std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); |
9952 | if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) |
9953 | return {}; // Definite, but unequal, array dimension |
9954 | } |
9955 | |
9956 | if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) |
9957 | return LHS; |
9958 | if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) |
9959 | return RHS; |
9960 | if (LCAT) |
9961 | return getConstantArrayType(ResultType, LCAT->getSize(), |
9962 | LCAT->getSizeExpr(), |
9963 | ArrayType::ArraySizeModifier(), 0); |
9964 | if (RCAT) |
9965 | return getConstantArrayType(ResultType, RCAT->getSize(), |
9966 | RCAT->getSizeExpr(), |
9967 | ArrayType::ArraySizeModifier(), 0); |
9968 | if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) |
9969 | return LHS; |
9970 | if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) |
9971 | return RHS; |
9972 | if (LVAT) { |
9973 | // FIXME: This isn't correct! But tricky to implement because |
9974 | // the array's size has to be the size of LHS, but the type |
9975 | // has to be different. |
9976 | return LHS; |
9977 | } |
9978 | if (RVAT) { |
9979 | // FIXME: This isn't correct! But tricky to implement because |
9980 | // the array's size has to be the size of RHS, but the type |
9981 | // has to be different. |
9982 | return RHS; |
9983 | } |
9984 | if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; |
9985 | if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; |
9986 | return getIncompleteArrayType(ResultType, |
9987 | ArrayType::ArraySizeModifier(), 0); |
9988 | } |
9989 | case Type::FunctionNoProto: |
9990 | return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); |
9991 | case Type::Record: |
9992 | case Type::Enum: |
9993 | return {}; |
9994 | case Type::Builtin: |
9995 | // Only exactly equal builtin types are compatible, which is tested above. |
9996 | return {}; |
9997 | case Type::Complex: |
9998 | // Distinct complex types are incompatible. |
9999 | return {}; |
10000 | case Type::Vector: |
10001 | // FIXME: The merged type should be an ExtVector! |
10002 | if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), |
10003 | RHSCan->castAs<VectorType>())) |
10004 | return LHS; |
10005 | return {}; |
10006 | case Type::ConstantMatrix: |
10007 | if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), |
10008 | RHSCan->castAs<ConstantMatrixType>())) |
10009 | return LHS; |
10010 | return {}; |
10011 | case Type::ObjCObject: { |
10012 | // Check if the types are assignment compatible. |
10013 | // FIXME: This should be type compatibility, e.g. whether |
10014 | // "LHS x; RHS x;" at global scope is legal. |
10015 | if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), |
10016 | RHS->castAs<ObjCObjectType>())) |
10017 | return LHS; |
10018 | return {}; |
10019 | } |
10020 | case Type::ObjCObjectPointer: |
10021 | if (OfBlockPointer) { |
10022 | if (canAssignObjCInterfacesInBlockPointer( |
10023 | LHS->castAs<ObjCObjectPointerType>(), |
10024 | RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) |
10025 | return LHS; |
10026 | return {}; |
10027 | } |
10028 | if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), |
10029 | RHS->castAs<ObjCObjectPointerType>())) |
10030 | return LHS; |
10031 | return {}; |
10032 | case Type::Pipe: |
10033 | assert(LHS != RHS &&(static_cast<void> (0)) |
10034 | "Equivalent pipe types should have already been handled!")(static_cast<void> (0)); |
10035 | return {}; |
10036 | case Type::ExtInt: { |
10037 | // Merge two ext-int types, while trying to preserve typedef info. |
10038 | bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned(); |
10039 | bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned(); |
10040 | unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits(); |
10041 | unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits(); |
10042 | |
10043 | // Like unsigned/int, shouldn't have a type if they dont match. |
10044 | if (LHSUnsigned != RHSUnsigned) |
10045 | return {}; |
10046 | |
10047 | if (LHSBits != RHSBits) |
10048 | return {}; |
10049 | return LHS; |
10050 | } |
10051 | } |
10052 | |
10053 | llvm_unreachable("Invalid Type::Class!")__builtin_unreachable(); |
10054 | } |
10055 | |
/// Merge the ExtParameterInfo lists of two function prototypes.
///
/// Returns true when the two lists are mergeable, i.e. every per-parameter
/// info matches except possibly the noescape flag. On success:
///  - NewParamInfos receives the merged per-parameter infos, unless the
///    merged list carries no information at all, in which case it is left
///    empty;
///  - CanUseFirst / CanUseSecond report whether the corresponding function
///    type's own infos already agree with the merged result.
bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty")(static_cast<void> (0));
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if neither type has ext parameter infos, they trivially
  // match and there is nothing to merge.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    // A side with no info list contributes default-constructed (empty)
    // ExtParameterInfos for every parameter.
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false))
      return false;

    // The merged parameter is noescape only if both sides are noescape.
    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape));
    // Track whether any merged info is non-trivial; if none is, the list
    // is dropped below.
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  // An all-default merged list carries no information; report success with
  // an empty list instead.
  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}
10102 | |
/// Drop the cached Objective-C layout entry for \p CD; a subsequent layout
/// query will presumably recompute it (cache population happens elsewhere).
void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
  ObjCLayouts[CD] = nullptr;
}
10106 | |
10107 | /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and |
10108 | /// 'RHS' attributes and returns the merged version; including for function |
10109 | /// return types. |
10110 | QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { |
10111 | QualType LHSCan = getCanonicalType(LHS), |
10112 | RHSCan = getCanonicalType(RHS); |
10113 | // If two types are identical, they are compatible. |
10114 | if (LHSCan == RHSCan) |
10115 | return LHS; |
10116 | if (RHSCan->isFunctionType()) { |
10117 | if (!LHSCan->isFunctionType()) |
10118 | return {}; |
10119 | QualType OldReturnType = |
10120 | cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); |
10121 | QualType NewReturnType = |
10122 | cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); |
10123 | QualType ResReturnType = |
10124 | mergeObjCGCQualifiers(NewReturnType, OldReturnType); |
10125 | if (ResReturnType.isNull()) |
10126 | return {}; |
10127 | if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { |
10128 | // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); |
10129 | // In either case, use OldReturnType to build the new function type. |
10130 | const auto *F = LHS->castAs<FunctionType>(); |
10131 | if (const auto *FPT = cast<FunctionProtoType>(F)) { |
10132 | FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); |
10133 | EPI.ExtInfo = getFunctionExtInfo(LHS); |
10134 | QualType ResultType = |
10135 | getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); |
10136 | return ResultType; |
10137 | } |
10138 | } |
10139 | return {}; |
10140 | } |
10141 | |
10142 | // If the qualifiers are different, the types can still be merged. |
10143 | Qualifiers LQuals = LHSCan.getLocalQualifiers(); |
10144 | Qualifiers RQuals = RHSCan.getLocalQualifiers(); |
10145 | if (LQuals != RQuals) { |
10146 | // If any of these qualifiers are different, we have a type mismatch. |
10147 | if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || |
10148 | LQuals.getAddressSpace() != RQuals.getAddressSpace()) |
10149 | return {}; |
10150 | |
10151 | // Exactly one GC qualifier difference is allowed: __strong is |
10152 | // okay if the other type has no GC qualifier but is an Objective |
10153 | // C object pointer (i.e. implicitly strong by default). We fix |
10154 | // this by pretending that the unqualified type was actually |
10155 | // qualified __strong. |
10156 | Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); |
10157 | Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); |
10158 | assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements")(static_cast<void> (0)); |
10159 | |
10160 | if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) |
10161 | return {}; |
10162 | |
10163 | if (GC_L == Qualifiers::Strong) |
10164 | return LHS; |
10165 | if (GC_R == Qualifiers::Strong) |
10166 | return RHS; |
10167 | return {}; |
10168 | } |
10169 | |
10170 | if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { |
10171 | QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); |
10172 | QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); |
10173 | QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); |
10174 | if (ResQT == LHSBaseQT) |
10175 | return LHS; |
10176 | if (ResQT == RHSBaseQT) |
10177 | return RHS; |
10178 | } |
10179 | return {}; |
10180 | } |
10181 | |
10182 | //===----------------------------------------------------------------------===// |
10183 | // Integer Predicates |
10184 | //===----------------------------------------------------------------------===// |
10185 | |
10186 | unsigned ASTContext::getIntWidth(QualType T) const { |
10187 | if (const auto *ET = T->getAs<EnumType>()) |
10188 | T = ET->getDecl()->getIntegerType(); |
10189 | if (T->isBooleanType()) |
10190 | return 1; |
10191 | if(const auto *EIT = T->getAs<ExtIntType>()) |
10192 | return EIT->getNumBits(); |
10193 | // For builtin types, just use the standard type sizing method |
10194 | return (unsigned)getTypeSize(T); |
10195 | } |
10196 | |
/// Map a signed integer, fixed-point, vector, _ExtInt, or enum type to the
/// unsigned type of the same width/kind.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&(static_cast<void> (0))
         "Unexpected type")(static_cast<void> (0));

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _ExtInt, return an unsigned _ExtInt with same width.
  if (const auto *EITy = T->getAs<ExtIntType>())
    return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  // Exhaustive mapping over the signed builtin kinds; anything else is a
  // caller error (see the assertion above).
  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  // Fixed-point types: map each (possibly saturating) signed _Accum/_Fract
  // kind to its unsigned counterpart of the same rank.
  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    llvm_unreachable("Unexpected signed integer or fixed point type")__builtin_unreachable();
  }
}
10263 | |
/// Map an unsigned integer, fixed-point, vector, _ExtInt, or enum type to
/// the signed type of the same width/kind (mirror of
/// getCorrespondingUnsignedType).
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasUnsignedIntegerRepresentation() ||(static_cast<void> (0))
          T->isUnsignedFixedPointType()) &&(static_cast<void> (0))
         "Unexpected type")(static_cast<void> (0));

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _ExtInt, return a signed _ExtInt with same width.
  if (const auto *EITy = T->getAs<ExtIntType>())
    return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type signchanging code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  // Exhaustive mapping over the unsigned builtin kinds; anything else is a
  // caller error (see the assertion above).
  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
  case BuiltinType::WChar_U:
    return getSignedWCharType();

  // Fixed-point types: map each (possibly saturating) unsigned _Accum/_Fract
  // kind to its signed counterpart of the same rank.
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned integer or fixed point type")__builtin_unreachable();
  }
}
10331 | |
// Out-of-line destructor definition anchors the listener's vtable here.
ASTMutationListener::~ASTMutationListener() = default;

// No-op by default; listeners interested in a function's deduced return type
// being completed are expected to override this (declared in
// ASTMutationListener — not visible in this file).
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
10336 | |
10337 | //===----------------------------------------------------------------------===// |
10338 | // Builtin Type Computation |
10339 | //===----------------------------------------------------------------------===// |
10340 | |
10341 | /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the |
10342 | /// pointer over the consumed characters. This returns the resultant type. If |
10343 | /// AllowTypeModifiers is false then modifier like * are not parsed, just basic |
10344 | /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of |
10345 | /// a vector of "i*". |
10346 | /// |
10347 | /// RequiresICE is filled in on return to indicate whether the value is required |
10348 | /// to be an Integer Constant Expression. |
10349 | static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, |
10350 | ASTContext::GetBuiltinTypeError &Error, |
10351 | bool &RequiresICE, |
10352 | bool AllowTypeModifiers) { |
10353 | // Modifiers. |
10354 | int HowLong = 0; |
10355 | bool Signed = false, Unsigned = false; |
10356 | RequiresICE = false; |
10357 | |
10358 | // Read the prefixed modifiers first. |
10359 | bool Done = false; |
10360 | #ifndef NDEBUG1 |
10361 | bool IsSpecial = false; |
10362 | #endif |
10363 | while (!Done) { |
10364 | switch (*Str++) { |
10365 | default: Done = true; --Str; break; |
10366 | case 'I': |
10367 | RequiresICE = true; |
10368 | break; |
10369 | case 'S': |
10370 | assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!")(static_cast<void> (0)); |
10371 | assert(!Signed && "Can't use 'S' modifier multiple times!")(static_cast<void> (0)); |
10372 | Signed = true; |
10373 | break; |
10374 | case 'U': |
10375 | assert(!Signed && "Can't use both 'S' and 'U' modifiers!")(static_cast<void> (0)); |
10376 | assert(!Unsigned && "Can't use 'U' modifier multiple times!")(static_cast<void> (0)); |
10377 | Unsigned = true; |
10378 | break; |
10379 | case 'L': |
10380 | assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers")(static_cast<void> (0)); |
10381 | assert(HowLong <= 2 && "Can't have LLLL modifier")(static_cast<void> (0)); |
10382 | ++HowLong; |
10383 | break; |
10384 | case 'N': |
10385 | // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. |
10386 | assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")(static_cast<void> (0)); |
10387 | assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!")(static_cast<void> (0)); |
10388 | #ifndef NDEBUG1 |
10389 | IsSpecial = true; |
10390 | #endif |
10391 | if (Context.getTargetInfo().getLongWidth() == 32) |
10392 | ++HowLong; |
10393 | break; |
10394 | case 'W': |
10395 | // This modifier represents int64 type. |
10396 | assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")(static_cast<void> (0)); |
10397 | assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!")(static_cast<void> (0)); |
10398 | #ifndef NDEBUG1 |
10399 | IsSpecial = true; |
10400 | #endif |
10401 | switch (Context.getTargetInfo().getInt64Type()) { |
10402 | default: |
10403 | llvm_unreachable("Unexpected integer type")__builtin_unreachable(); |
10404 | case TargetInfo::SignedLong: |
10405 | HowLong = 1; |
10406 | break; |
10407 | case TargetInfo::SignedLongLong: |
10408 | HowLong = 2; |
10409 | break; |
10410 | } |
10411 | break; |
10412 | case 'Z': |
10413 | // This modifier represents int32 type. |
10414 | assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")(static_cast<void> (0)); |
10415 | assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!")(static_cast<void> (0)); |
10416 | #ifndef NDEBUG1 |
10417 | IsSpecial = true; |
10418 | #endif |
10419 | switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { |
10420 | default: |
10421 | llvm_unreachable("Unexpected integer type")__builtin_unreachable(); |
10422 | case TargetInfo::SignedInt: |
10423 | HowLong = 0; |
10424 | break; |
10425 | case TargetInfo::SignedLong: |
10426 | HowLong = 1; |
10427 | break; |
10428 | case TargetInfo::SignedLongLong: |
10429 | HowLong = 2; |
10430 | break; |
10431 | } |
10432 | break; |
10433 | case 'O': |
10434 | assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!")(static_cast<void> (0)); |
10435 | assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!")(static_cast<void> (0)); |
10436 | #ifndef NDEBUG1 |
10437 | IsSpecial = true; |
10438 | #endif |
10439 | if (Context.getLangOpts().OpenCL) |
10440 | HowLong = 1; |
10441 | else |
10442 | HowLong = 2; |
10443 | break; |
10444 | } |
10445 | } |
10446 | |
10447 | QualType Type; |
10448 | |
10449 | // Read the base type. |
10450 | switch (*Str++) { |
10451 | default: llvm_unreachable("Unknown builtin type letter!")__builtin_unreachable(); |
10452 | case 'x': |
10453 | assert(HowLong == 0 && !Signed && !Unsigned &&(static_cast<void> (0)) |
10454 | "Bad modifiers used with 'x'!")(static_cast<void> (0)); |
10455 | Type = Context.Float16Ty; |
10456 | break; |
10457 | case 'y': |
10458 | assert(HowLong == 0 && !Signed && !Unsigned &&(static_cast<void> (0)) |
10459 | "Bad modifiers used with 'y'!")(static_cast<void> (0)); |
10460 | Type = Context.BFloat16Ty; |
10461 | break; |
10462 | case 'v': |
10463 | assert(HowLong == 0 && !Signed && !Unsigned &&(static_cast<void> (0)) |
10464 | "Bad modifiers used with 'v'!")(static_cast<void> (0)); |
10465 | Type = Context.VoidTy; |
10466 | break; |
10467 | case 'h': |
10468 | assert(HowLong == 0 && !Signed && !Unsigned &&(static_cast<void> (0)) |
10469 | "Bad modifiers used with 'h'!")(static_cast<void> (0)); |
10470 | Type = Context.HalfTy; |
10471 | break; |
10472 | case 'f': |
10473 | assert(HowLong == 0 && !Signed && !Unsigned &&(static_cast<void> (0)) |
10474 | "Bad modifiers used with 'f'!")(static_cast<void> (0)); |
10475 | Type = Context.FloatTy; |
10476 | break; |
10477 | case 'd': |
10478 | assert(HowLong < 3 && !Signed && !Unsigned &&(static_cast<void> (0)) |
10479 | "Bad modifiers used with 'd'!")(static_cast<void> (0)); |
10480 | if (HowLong == 1) |
10481 | Type = Context.LongDoubleTy; |
10482 | else if (HowLong == 2) |
10483 | Type = Context.Float128Ty; |
10484 | else |
10485 | Type = Context.DoubleTy; |
10486 | break; |
10487 | case 's': |
10488 | assert(HowLong == 0 && "Bad modifiers used with 's'!")(static_cast<void> (0)); |
10489 | if (Unsigned) |
10490 | Type = Context.UnsignedShortTy; |
10491 | else |
10492 | Type = Context.ShortTy; |
10493 | break; |
10494 | case 'i': |
10495 | if (HowLong == 3) |
10496 | Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; |
10497 | else if (HowLong == 2) |
10498 | Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; |
10499 | else if (HowLong == 1) |
10500 | Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; |
10501 | else |
10502 | Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy; |
10503 | break; |
10504 | case 'c': |
10505 | assert(HowLong == 0 && "Bad modifiers used with 'c'!")(static_cast<void> (0)); |
10506 | if (Signed) |
10507 | Type = Context.SignedCharTy; |
10508 | else if (Unsigned) |
10509 | Type = Context.UnsignedCharTy; |
10510 | else |
10511 | Type = Context.CharTy; |
10512 | break; |
10513 | case 'b': // boolean |
10514 | assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!")(static_cast<void> (0)); |
10515 | Type = Context.BoolTy; |
10516 | break; |
10517 | case 'z': // size_t. |
10518 | assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!")(static_cast<void> (0)); |
10519 | Type = Context.getSizeType(); |
10520 | break; |
10521 | case 'w': // wchar_t. |
10522 | assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!")(static_cast<void> (0)); |
10523 | Type = Context.getWideCharType(); |
10524 | break; |
10525 | case 'F': |
10526 | Type = Context.getCFConstantStringType(); |
10527 | break; |
10528 | case 'G': |
10529 | Type = Context.getObjCIdType(); |
10530 | break; |
10531 | case 'H': |
10532 | Type = Context.getObjCSelType(); |
10533 | break; |
10534 | case 'M': |
10535 | Type = Context.getObjCSuperType(); |
10536 | break; |
10537 | case 'a': |
10538 | Type = Context.getBuiltinVaListType(); |
10539 | assert(!Type.isNull() && "builtin va list type not initialized!")(static_cast<void> (0)); |
10540 | break; |
10541 | case 'A': |
10542 | // This is a "reference" to a va_list; however, what exactly |
10543 | // this means depends on how va_list is defined. There are two |
10544 | // different kinds of va_list: ones passed by value, and ones |
10545 | // passed by reference. An example of a by-value va_list is |
10546 | // x86, where va_list is a char*. An example of by-ref va_list |
10547 | // is x86-64, where va_list is a __va_list_tag[1]. For x86, |
10548 | // we want this argument to be a char*&; for x86-64, we want |
10549 | // it to be a __va_list_tag*. |
10550 | Type = Context.getBuiltinVaListType(); |
10551 | assert(!Type.isNull() && "builtin va list type not initialized!")(static_cast<void> (0)); |
10552 | if (Type->isArrayType()) |
10553 | Type = Context.getArrayDecayedType(Type); |
10554 | else |
10555 | Type = Context.getLValueReferenceType(Type); |
10556 | break; |
10557 | case 'q': { |
10558 | char *End; |
10559 | unsigned NumElements = strtoul(Str, &End, 10); |
10560 | assert(End != Str && "Missing vector size")(static_cast<void> (0)); |
10561 | Str = End; |
10562 | |
10563 | QualType ElementType = DecodeTypeFromStr(Str, Context, Error, |
10564 | RequiresICE, false); |
10565 | assert(!RequiresICE && "Can't require vector ICE")(static_cast<void> (0)); |
10566 | |
10567 | Type = Context.getScalableVectorType(ElementType, NumElements); |
10568 | break; |
10569 | } |
10570 | case 'V': { |
10571 | char *End; |
10572 | unsigned NumElements = strtoul(Str, &End, 10); |
10573 | assert(End != Str && "Missing vector size")(static_cast<void> (0)); |
10574 | Str = End; |
10575 | |
10576 | QualType ElementType = DecodeTypeFromStr(Str, Context, Error, |
10577 | RequiresICE, false); |
10578 | assert(!RequiresICE && "Can't require vector ICE")(static_cast<void> (0)); |
10579 | |
10580 | // TODO: No way to make AltiVec vectors in builtins yet. |
10581 | Type = Context.getVectorType(ElementType, NumElements, |
10582 | VectorType::GenericVector); |
10583 | break; |
10584 | } |
10585 | case 'E': { |
10586 | char *End; |
10587 | |
10588 | unsigned NumElements = strtoul(Str, &End, 10); |
10589 | assert(End != Str && "Missing vector size")(static_cast<void> (0)); |
10590 | |
10591 | Str = End; |
10592 | |
10593 | QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, |
10594 | false); |
10595 | Type = Context.getExtVectorType(ElementType, NumElements); |
10596 | break; |
10597 | } |
10598 | case 'X': { |
10599 | QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, |
10600 | false); |
10601 | assert(!RequiresICE && "Can't require complex ICE")(static_cast<void> (0)); |
10602 | Type = Context.getComplexType(ElementType); |
10603 | break; |
10604 | } |
10605 | case 'Y': |
10606 | Type = Context.getPointerDiffType(); |
10607 | break; |
10608 | case 'P': |
10609 | Type = Context.getFILEType(); |
10610 | if (Type.isNull()) { |
10611 | Error = ASTContext::GE_Missing_stdio; |
10612 | return {}; |
10613 | } |
10614 | break; |
10615 | case 'J': |
10616 | if (Signed) |
10617 | Type = Context.getsigjmp_bufType(); |
10618 | else |
10619 | Type = Context.getjmp_bufType(); |
10620 | |
10621 | if (Type.isNull()) { |
10622 | Error = ASTContext::GE_Missing_setjmp; |
10623 | return {}; |
10624 | } |
10625 | break; |
10626 | case 'K': |
10627 | assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!")(static_cast<void> (0)); |
10628 | Type = Context.getucontext_tType(); |
10629 | |
10630 | if (Type.isNull()) { |
10631 | Error = ASTContext::GE_Missing_ucontext; |
10632 | return {}; |
10633 | } |
10634 | break; |
10635 | case 'p': |
10636 | Type = Context.getProcessIDType(); |
10637 | break; |
10638 | } |
10639 | |
10640 | // If there are modifiers and if we're allowed to parse them, go for it. |
10641 | Done = !AllowTypeModifiers; |
10642 | while (!Done) { |
10643 | switch (char c = *Str++) { |
10644 | default: Done = true; --Str; break; |
10645 | case '*': |
10646 | case '&': { |
10647 | // Both pointers and references can have their pointee types |
10648 | // qualified with an address space. |
10649 | char *End; |
10650 | unsigned AddrSpace = strtoul(Str, &End, 10); |
10651 | if (End != Str) { |
10652 | // Note AddrSpace == 0 is not the same as an unspecified address space. |
10653 | Type = Context.getAddrSpaceQualType( |
10654 | Type, |
10655 | Context.getLangASForBuiltinAddressSpace(AddrSpace)); |
10656 | Str = End; |
10657 | } |
10658 | if (c == '*') |
10659 | Type = Context.getPointerType(Type); |
10660 | else |
10661 | Type = Context.getLValueReferenceType(Type); |
10662 | break; |
10663 | } |
10664 | // FIXME: There's no way to have a built-in with an rvalue ref arg. |
10665 | case 'C': |
10666 | Type = Type.withConst(); |
10667 | break; |
10668 | case 'D': |
10669 | Type = Context.getVolatileType(Type); |
10670 | break; |
10671 | case 'R': |
10672 | Type = Type.withRestrict(); |
10673 | break; |
10674 | } |
10675 | } |
10676 | |
10677 | assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&(static_cast<void> (0)) |
10678 | "Integer constant 'I' type must be an integer")(static_cast<void> (0)); |
10679 | |
10680 | return Type; |
10681 | } |
10682 | |
10683 | // On some targets such as PowerPC, some of the builtins are defined with custom |
10684 | // type decriptors for target-dependent types. These descriptors are decoded in |
10685 | // other functions, but it may be useful to be able to fall back to default |
10686 | // descriptor decoding to define builtins mixing target-dependent and target- |
10687 | // independent types. This function allows decoding one type descriptor with |
10688 | // default decoding. |
/// Decode one type descriptor from \p Str using the default (target-
/// independent) decoding, advancing \p Str past the consumed characters.
///
/// Thin forwarder to the file-local DecodeTypeFromStr; see the comment above
/// for why targets need this entry point.
QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
                                   GetBuiltinTypeError &Error, bool &RequireICE,
                                   bool AllowTypeModifiers) const {
  return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers);
}
10694 | |
/// GetBuiltinType - Return the type for the specified builtin.
///
/// \param Id builtin ID whose type string (from BuiltinInfo) is decoded.
/// \param Error set to a GE_Missing_* value when a required type cannot be
///        built; GE_None on success.
/// \param IntegerConstantArgs if non-null, receives a bitmask with bit N set
///        when argument N is required to be an integer constant expression.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.getTypeString(Id);
  // An empty type string means the builtin's type could not be described.
  if (TypeStr[0] == '\0') {
    Error = GE_Missing_type;
    return {};
  }

  SmallVector<QualType, 8> ArgTypes;

  bool RequiresICE = false;
  Error = GE_None;
  // The first descriptor in the string is the result type.
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return {};

  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE")(static_cast<void> (0));

  // Remaining descriptors (up to an optional trailing '.') are the argument
  // types, decoded in order.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return {};

    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();

    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);

    ArgTypes.push_back(Ty);
  }

  if (Id == Builtin::BI__GetExceptionInfo)
    return {};

  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&(static_cast<void> (0))
         "'.' should only occur at end of builtin type list!")(static_cast<void> (0));

  // A trailing '.' marks the builtin as variadic.
  bool Variadic = (TypeStr[0] == '.');

  FunctionType::ExtInfo EI(getDefaultCallingConvention(
      Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);


  // We really shouldn't be making a no-proto type here.
  if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
    return getFunctionNoProtoType(ResType, EI);

  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // In C++, nothrow builtins get an exception specification matching the
  // language mode (noexcept vs. throw()).
  if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
    EPI.ExceptionSpec.Type =
        getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;

  return getFunctionType(ResType, ArgTypes, EPI);
}
10759 | |
/// Compute the base GVALinkage for a function from its visibility, template
/// specialization kind, and inline semantics, before any attribute- or
/// external-source-based adjustments are applied.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;

  // Non-user-provided functions get emitted as weak definitions with every
  // use, no matter whether they've been explicitly instantiated etc.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isUserProvided())
      return GVA_DiscardableODR;

  // Linkage to use if the function turns out not to be inline (see below).
  GVALinkage External;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }

  if (!FD->isInlined())
    return External;

  // GNU inline (gnu_inline attribute), and plain C inline outside of the
  // Microsoft ABI, follow GNU/C99 inline rules rather than C++ ones.
  if ((!Context.getLangOpts().CPlusPlus &&
       !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.

    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;

    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }

  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;

  return GVA_DiscardableODR;
}
10821 | |
10822 | static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, |
10823 | const Decl *D, GVALinkage L) { |
10824 | // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx |
10825 | // dllexport/dllimport on inline functions. |
10826 | if (D->hasAttr<DLLImportAttr>()) { |
10827 | if (L == GVA_DiscardableODR || L == GVA_StrongODR) |
10828 | return GVA_AvailableExternally; |
10829 | } else if (D->hasAttr<DLLExportAttr>()) { |
10830 | if (L == GVA_DiscardableODR) |
10831 | return GVA_StrongODR; |
10832 | } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { |
10833 | // Device-side functions with __global__ attribute must always be |
10834 | // visible externally so they can be launched from host. |
10835 | if (D->hasAttr<CUDAGlobalAttr>() && |
10836 | (L == GVA_DiscardableODR || L == GVA_Internal)) |
10837 | return GVA_StrongODR; |
10838 | // Single source offloading languages like CUDA/HIP need to be able to |
10839 | // access static device variables from host code of the same compilation |
10840 | // unit. This is done by externalizing the static variable with a shared |
10841 | // name between the host and device compilation which is the same for the |
10842 | // same compilation unit whereas different among different compilation |
10843 | // units. |
10844 | if (Context.shouldExternalizeStaticVar(D)) |
10845 | return GVA_StrongExternal; |
10846 | } |
10847 | return L; |
10848 | } |
10849 | |
10850 | /// Adjust the GVALinkage for a declaration based on what an external AST source |
10851 | /// knows about whether there can be other definitions of this declaration. |
10852 | static GVALinkage |
10853 | adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, |
10854 | GVALinkage L) { |
10855 | ExternalASTSource *Source = Ctx.getExternalSource(); |
10856 | if (!Source) |
10857 | return L; |
10858 | |
10859 | switch (Source->hasExternalDefinitions(D)) { |
10860 | case ExternalASTSource::EK_Never: |
10861 | // Other translation units rely on us to provide the definition. |
10862 | if (L == GVA_DiscardableODR) |
10863 | return GVA_StrongODR; |
10864 | break; |
10865 | |
10866 | case ExternalASTSource::EK_Always: |
10867 | return GVA_AvailableExternally; |
10868 | |
10869 | case ExternalASTSource::EK_ReplyHazy: |
10870 | break; |
10871 | } |
10872 | return L; |
10873 | } |
10874 | |
10875 | GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { |
10876 | return adjustGVALinkageForExternalDefinitionKind(*this, FD, |
10877 | adjustGVALinkageForAttributes(*this, FD, |
10878 | basicGVALinkageForFunction(*this, FD))); |
10879 | } |
10880 | |
/// Compute the base GVALinkage for a variable from its visibility, enclosing
/// function (for static locals), inline-variable kind, and template
/// specialization kind, before attribute/external-source adjustments.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    // Walk up to the nearest enclosing FunctionDecl, skipping any
    // non-function lexical scopes in between.
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  // Finally, let the template specialization kind refine the linkage.
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!")__builtin_unreachable();
}
10957 | |
10958 | GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { |
10959 | return adjustGVALinkageForExternalDefinitionKind(*this, VD, |
10960 | adjustGVALinkageForAttributes(*this, VD, |
10961 | basicGVALinkageForVariable(*this, VD))); |
10962 | } |
10963 | |
/// Determine whether declaration \p D must be emitted in this translation
/// unit, i.e. cannot be deferred or discarded.
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // First filter by declaration kind. Variables and functions fall through to
  // the detailed checks below; other kinds are decided immediately.
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    // Templates and partial specializations are patterns, not objects.
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  // Only variables remain at this point (see the kind dispatch above).
  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var")(static_cast<void> (0));

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  // Pure declarations need no emission (except the MSVC in-class static data
  // member case).
  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
      // We can get a value-dependent initializer during error recovery.
      (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
    for (const auto *BD : DD->bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;

  return false;
}
11079 | |
11080 | void ASTContext::forEachMultiversionedFunctionVersion( |
11081 | const FunctionDecl *FD, |
11082 | llvm::function_ref<void(FunctionDecl *)> Pred) const { |
11083 | assert(FD->isMultiVersion() && "Only valid for multiversioned functions")(static_cast<void> (0)); |
11084 | llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; |
11085 | FD = FD->getMostRecentDecl(); |
11086 | // FIXME: The order of traversal here matters and depends on the order of |
11087 | // lookup results, which happens to be (mostly) oldest-to-newest, but we |
11088 | // shouldn't rely on that. |
11089 | for (auto *CurDecl : |
11090 | FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { |
11091 | FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); |
11092 | if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && |
11093 | std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) { |
11094 | SeenDecls.insert(CurFD); |
11095 | Pred(CurFD); |
11096 | } |
11097 | } |
11098 | } |
11099 | |
11100 | CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, |
11101 | bool IsCXXMethod, |
11102 | bool IsBuiltin) const { |
11103 | // Pass through to the C++ ABI object |
11104 | if (IsCXXMethod) |
11105 | return ABI->getDefaultMethodCallConv(IsVariadic); |
11106 | |
11107 | // Builtins ignore user-specified default calling convention and remain the |
11108 | // Target's default calling convention. |
11109 | if (!IsBuiltin) { |
11110 | switch (LangOpts.getDefaultCallingConv()) { |
11111 | case LangOptions::DCC_None: |
11112 | break; |
11113 | case LangOptions::DCC_CDecl: |
11114 | return CC_C; |
11115 | case LangOptions::DCC_FastCall: |
11116 | if (getTargetInfo().hasFeature("sse2") && !IsVariadic) |
11117 | return CC_X86FastCall; |
11118 | break; |
11119 | case LangOptions::DCC_StdCall: |
11120 | if (!IsVariadic) |
11121 | return CC_X86StdCall; |
11122 | break; |
11123 | case LangOptions::DCC_VectorCall: |
11124 | // __vectorcall cannot be applied to variadic functions. |
11125 | if (!IsVariadic) |
11126 | return CC_X86VectorCall; |
11127 | break; |
11128 | case LangOptions::DCC_RegCall: |
11129 | // __regcall cannot be applied to variadic functions. |
11130 | if (!IsVariadic) |
11131 | return CC_X86RegCall; |
11132 | break; |
11133 | } |
11134 | } |
11135 | return Target->getDefaultCallingConv(); |
11136 | } |
11137 | |
/// Returns whether \p RD is a "nearly empty" class; delegates to the C++ ABI
/// object, which owns the ABI-specific definition.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
11142 | |
11143 | VTableContextBase *ASTContext::getVTableContext() { |
11144 | if (!VTContext.get()) { |
11145 | auto ABI = Target->getCXXABI(); |
11146 | if (ABI.isMicrosoft()) |
11147 | VTContext.reset(new MicrosoftVTableContext(*this)); |
11148 | else { |
11149 | auto ComponentLayout = getLangOpts().RelativeCXXABIVTables |
11150 | ? ItaniumVTableContext::Relative |
11151 | : ItaniumVTableContext::Pointer; |
11152 | VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); |
11153 | } |
11154 | } |
11155 | return VTContext.get(); |
11156 | } |
11157 | |
/// Create a new MangleContext for the ABI of \p T (defaulting to the
/// ASTContext's own target when \p T is null). Caller owns the result.
MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  // All Itanium-family ABIs share the Itanium mangler; only the Microsoft
  // ABI uses its own scheme.
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI")__builtin_unreachable();
}
11178 | |
/// Create a MangleContext for device-side (offloading) compilation targeting
/// \p T. Caller owns the result.
MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&(static_cast<void> (0))
         "Device mangle context does not support Microsoft mangling.")(static_cast<void> (0));
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    // Override lambda numbering to use the device-side lambda mangling
    // number recorded on the enclosing CXXRecordDecl.
    return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
            return RD->getDeviceLambdaManglingNumber();
          return llvm::None;
        });
  case TargetCXXABI::Microsoft:
    // Unreachable per the assertion above; kept for switch coverage.
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI")__builtin_unreachable();
}
11205 | |
// Out-of-line defaulted destructor for CXXABI (interface in CXXABI.h).
CXXABI::~CXXABI() = default;
11207 | |
11208 | size_t ASTContext::getSideTableAllocatedMemory() const { |
11209 | return ASTRecordLayouts.getMemorySize() + |
11210 | llvm::capacity_in_bytes(ObjCLayouts) + |
11211 | llvm::capacity_in_bytes(KeyFunctions) + |
11212 | llvm::capacity_in_bytes(ObjCImpls) + |
11213 | llvm::capacity_in_bytes(BlockVarCopyInits) + |
11214 | llvm::capacity_in_bytes(DeclAttrs) + |
11215 | llvm::capacity_in_bytes(TemplateOrInstantiation) + |
11216 | llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + |
11217 | llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + |
11218 | llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + |
11219 | llvm::capacity_in_bytes(OverriddenMethods) + |
11220 | llvm::capacity_in_bytes(Types) + |
11221 | llvm::capacity_in_bytes(VariableArrayTypes); |
11222 | } |
11223 | |
11224 | /// getIntTypeForBitwidth - |
11225 | /// sets integer QualTy according to specified details: |
11226 | /// bitwidth, signed/unsigned. |
11227 | /// Returns empty type if there is no appropriate target types. |
11228 | QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, |
11229 | unsigned Signed) const { |
11230 | TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); |
11231 | CanQualType QualTy = getFromTargetType(Ty); |
11232 | if (!QualTy && DestWidth == 128) |
11233 | return Signed ? Int128Ty : UnsignedInt128Ty; |
11234 | return QualTy; |
11235 | } |
11236 | |
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// \param ExplicitIEEE presumably selects an explicitly-IEEE representation
///        when several exist for the width — confirm against
///        TargetInfo::getRealTypeByWidth.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            bool ExplicitIEEE) const {
  TargetInfo::RealType Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
  switch (Ty) {
  case TargetInfo::Float:
    return FloatTy;
  case TargetInfo::Double:
    return DoubleTy;
  case TargetInfo::LongDouble:
    return LongDoubleTy;
  case TargetInfo::Float128:
    return Float128Ty;
  case TargetInfo::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value")__builtin_unreachable();
}
11259 | |
11260 | void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { |
11261 | if (Number > 1) |
11262 | MangleNumbers[ND] = Number; |
11263 | } |
11264 | |
11265 | unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const { |
11266 | auto I = MangleNumbers.find(ND); |
11267 | return I != MangleNumbers.end() ? I->second : 1; |
11268 | } |
11269 | |
11270 | void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { |
11271 | if (Number > 1) |
11272 | StaticLocalNumbers[VD] = Number; |
11273 | } |
11274 | |
11275 | unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { |
11276 | auto I = StaticLocalNumbers.find(VD); |
11277 | return I != StaticLocalNumbers.end() ? I->second : 1; |
11278 | } |
11279 | |
11280 | MangleNumberingContext & |
11281 | ASTContext::getManglingNumberContext(const DeclContext *DC) { |
11282 | assert(LangOpts.CPlusPlus)(static_cast<void> (0)); // We don't need mangling numbers for plain C. |
11283 | std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; |
11284 | if (!MCtx) |
11285 | MCtx = createMangleNumberingContext(); |
11286 | return *MCtx; |
11287 | } |
11288 | |
11289 | MangleNumberingContext & |
11290 | ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { |
11291 | assert(LangOpts.CPlusPlus)(static_cast<void> (0)); // We don't need mangling numbers for plain C. |
11292 | std::unique_ptr<MangleNumberingContext> &MCtx = |
11293 | ExtraMangleNumberingContexts[D]; |
11294 | if (!MCtx) |
11295 | MCtx = createMangleNumberingContext(); |
11296 | return *MCtx; |
11297 | } |
11298 | |
/// Create a fresh mangling-number context via the C++ ABI object.
std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
11303 | |
/// Look up the copy constructor registered for exception objects of type
/// \p RD via the C++ ABI object; see the matching add* below.
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  // Key on the first declaration so all redeclarations share one entry.
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}
11309 | |
/// Register \p CD as the copy constructor to use for exception objects of
/// type \p RD, via the C++ ABI object.
void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  // Key both decls on their first declaration so redeclarations agree.
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}
11316 | |
/// Record \p DD as the typedef name associated with the unnamed tag \p TD
/// (delegates to the C++ ABI object).
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}
11321 | |
/// Retrieve the typedef name previously recorded for the unnamed tag \p TD,
/// if any (delegates to the C++ ABI object).
TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}
11326 | |
/// Record \p DD as the declarator associated with the unnamed tag \p TD
/// (delegates to the C++ ABI object).
void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}
11331 | |
/// Retrieve the declarator previously recorded for the unnamed tag \p TD,
/// if any (delegates to the C++ ABI object).
DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}
11335 | |
/// Record the index of parameter \p D; retrieved via getParameterIndex.
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}
11339 | |
11340 | unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { |
11341 | ParameterIndexTable::const_iterator I = ParamIndices.find(D); |
11342 | assert(I != ParamIndices.end() &&(static_cast<void> (0)) |
11343 | "ParmIndices lacks entry set by ParmVarDecl")(static_cast<void> (0)); |
11344 | return I->second; |
11345 | } |
11346 | |
11347 | QualType ASTContext::getStringLiteralArrayType(QualType EltTy, |
11348 | unsigned Length) const { |
11349 | // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). |
11350 | if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) |
11351 | EltTy = EltTy.withConst(); |
11352 | |
11353 | EltTy = adjustStringLiteralBaseType(EltTy); |
11354 | |
11355 | // Get an array type for the string, according to C99 6.4.5. This includes |
11356 | // the null terminator character. |
11357 | return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, |
11358 | ArrayType::Normal, /*IndexTypeQuals*/ 0); |
11359 | } |
11360 | |
11361 | StringLiteral * |
11362 | ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { |
11363 | StringLiteral *&Result = StringLiteralCache[Key]; |
11364 | if (!Result) |
11365 | Result = StringLiteral::Create( |
11366 | *this, Key, StringLiteral::Ascii, |
11367 | /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), |
11368 | SourceLocation()); |
11369 | return Result; |
11370 | } |
11371 | |
11372 | MSGuidDecl * |
11373 | ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { |
11374 | assert(MSGuidTagDecl && "building MS GUID without MS extensions?")(static_cast<void> (0)); |
11375 | |
11376 | llvm::FoldingSetNodeID ID; |
11377 | MSGuidDecl::Profile(ID, Parts); |
11378 | |
11379 | void *InsertPos; |
11380 | if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) |
11381 | return Existing; |
11382 | |
11383 | QualType GUIDType = getMSGuidType().withConst(); |
11384 | MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); |
11385 | MSGuidDecls.InsertNode(New, InsertPos); |
11386 | return New; |
11387 | } |
11388 | |
11389 | TemplateParamObjectDecl * |
11390 | ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { |
11391 | assert(T->isRecordType() && "template param object of unexpected type")(static_cast<void> (0)); |
11392 | |
11393 | // C++ [temp.param]p8: |
11394 | // [...] a static storage duration object of type 'const T' [...] |
11395 | T.addConst(); |
11396 | |
11397 | llvm::FoldingSetNodeID ID; |
11398 | TemplateParamObjectDecl::Profile(ID, T, V); |
11399 | |
11400 | void *InsertPos; |
11401 | if (TemplateParamObjectDecl *Existing = |
11402 | TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) |
11403 | return Existing; |
11404 | |
11405 | TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); |
11406 | TemplateParamObjectDecls.InsertNode(New, InsertPos); |
11407 | return New; |
11408 | } |
11409 | |
11410 | bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { |
11411 | const llvm::Triple &T = getTargetInfo().getTriple(); |
11412 | if (!T.isOSDarwin()) |
11413 | return false; |
11414 | |
11415 | if (!(T.isiOS() && T.isOSVersionLT(7)) && |
11416 | !(T.isMacOSX() && T.isOSVersionLT(10, 9))) |
11417 | return false; |
11418 | |
11419 | QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); |
11420 | CharUnits sizeChars = getTypeSizeInChars(AtomicTy); |
11421 | uint64_t Size = sizeChars.getQuantity(); |
11422 | CharUnits alignChars = getTypeAlignInChars(AtomicTy); |
11423 | unsigned Align = alignChars.getQuantity(); |
11424 | unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); |
11425 | return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); |
11426 | } |
11427 | |
11428 | bool |
11429 | ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, |
11430 | const ObjCMethodDecl *MethodImpl) { |
11431 | // No point trying to match an unavailable/deprecated mothod. |
11432 | if (MethodDecl->hasAttr<UnavailableAttr>() |
11433 | || MethodDecl->hasAttr<DeprecatedAttr>()) |
11434 | return false; |
11435 | if (MethodDecl->getObjCDeclQualifier() != |
11436 | MethodImpl->getObjCDeclQualifier()) |
11437 | return false; |
11438 | if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) |
11439 | return false; |
11440 | |
11441 | if (MethodDecl->param_size() != MethodImpl->param_size()) |
11442 | return false; |
11443 | |
11444 | for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), |
11445 | IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), |
11446 | EF = MethodDecl->param_end(); |
11447 | IM != EM && IF != EF; ++IM, ++IF) { |
11448 | const ParmVarDecl *DeclVar = (*IF); |
11449 | const ParmVarDecl *ImplVar = (*IM); |
11450 | if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) |
11451 | return false; |
11452 | if (!hasSameType(DeclVar->getType(), ImplVar->getType())) |
11453 | return false; |
11454 | } |
11455 | |
11456 | return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); |
11457 | } |
11458 | |
11459 | uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { |
11460 | LangAS AS; |
11461 | if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) |
11462 | AS = LangAS::Default; |
11463 | else |
11464 | AS = QT->getPointeeType().getAddressSpace(); |
11465 | |
11466 | return getTargetInfo().getNullPointerValue(AS); |
11467 | } |
11468 | |
11469 | unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { |
11470 | if (isTargetAddressSpace(AS)) |
11471 | return toTargetAddressSpace(AS); |
11472 | else |
11473 | return (*AddrSpaceMap)[(unsigned)AS]; |
11474 | } |
11475 | |
11476 | QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { |
11477 | assert(Ty->isFixedPointType())(static_cast<void> (0)); |
11478 | |
11479 | if (Ty->isSaturatedFixedPointType()) return Ty; |
11480 | |
11481 | switch (Ty->castAs<BuiltinType>()->getKind()) { |
11482 | default: |
11483 | llvm_unreachable("Not a fixed point type!")__builtin_unreachable(); |
11484 | case BuiltinType::ShortAccum: |
11485 | return SatShortAccumTy; |
11486 | case BuiltinType::Accum: |
11487 | return SatAccumTy; |
11488 | case BuiltinType::LongAccum: |
11489 | return SatLongAccumTy; |
11490 | case BuiltinType::UShortAccum: |
11491 | return SatUnsignedShortAccumTy; |
11492 | case BuiltinType::UAccum: |
11493 | return SatUnsignedAccumTy; |
11494 | case BuiltinType::ULongAccum: |
11495 | return SatUnsignedLongAccumTy; |
11496 | case BuiltinType::ShortFract: |
11497 | return SatShortFractTy; |
11498 | case BuiltinType::Fract: |
11499 | return SatFractTy; |
11500 | case BuiltinType::LongFract: |
11501 | return SatLongFractTy; |
11502 | case BuiltinType::UShortFract: |
11503 | return SatUnsignedShortFractTy; |
11504 | case BuiltinType::UFract: |
11505 | return SatUnsignedFractTy; |
11506 | case BuiltinType::ULongFract: |
11507 | return SatUnsignedLongFractTy; |
11508 | } |
11509 | } |
11510 | |
11511 | LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { |
11512 | if (LangOpts.OpenCL) |
11513 | return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); |
11514 | |
11515 | if (LangOpts.CUDA) |
11516 | return getTargetInfo().getCUDABuiltinAddressSpace(AS); |
11517 | |
11518 | return getLangASFromTargetAS(AS); |
11519 | } |
11520 | |
11521 | // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
11522 | // doesn't include ASTContext.h
// (Out-of-line explicit instantiation definition of makeValue; no code here,
// it only forces the template body to be emitted in this TU.)
11523 | template
11524 | clang::LazyGenerationalUpdatePtr<
11525 |     const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
11526 | clang::LazyGenerationalUpdatePtr<
11527 |     const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
11528 |         const clang::ASTContext &Ctx, Decl *Value);
11529 | |
11530 | unsigned char ASTContext::getFixedPointScale(QualType Ty) const { |
11531 | assert(Ty->isFixedPointType())(static_cast<void> (0)); |
11532 | |
11533 | const TargetInfo &Target = getTargetInfo(); |
11534 | switch (Ty->castAs<BuiltinType>()->getKind()) { |
11535 | default: |
11536 | llvm_unreachable("Not a fixed point type!")__builtin_unreachable(); |
11537 | case BuiltinType::ShortAccum: |
11538 | case BuiltinType::SatShortAccum: |
11539 | return Target.getShortAccumScale(); |
11540 | case BuiltinType::Accum: |
11541 | case BuiltinType::SatAccum: |
11542 | return Target.getAccumScale(); |
11543 | case BuiltinType::LongAccum: |
11544 | case BuiltinType::SatLongAccum: |
11545 | return Target.getLongAccumScale(); |
11546 | case BuiltinType::UShortAccum: |
11547 | case BuiltinType::SatUShortAccum: |
11548 | return Target.getUnsignedShortAccumScale(); |
11549 | case BuiltinType::UAccum: |
11550 | case BuiltinType::SatUAccum: |
11551 | return Target.getUnsignedAccumScale(); |
11552 | case BuiltinType::ULongAccum: |
11553 | case BuiltinType::SatULongAccum: |
11554 | return Target.getUnsignedLongAccumScale(); |
11555 | case BuiltinType::ShortFract: |
11556 | case BuiltinType::SatShortFract: |
11557 | return Target.getShortFractScale(); |
11558 | case BuiltinType::Fract: |
11559 | case BuiltinType::SatFract: |
11560 | return Target.getFractScale(); |
11561 | case BuiltinType::LongFract: |
11562 | case BuiltinType::SatLongFract: |
11563 | return Target.getLongFractScale(); |
11564 | case BuiltinType::UShortFract: |
11565 | case BuiltinType::SatUShortFract: |
11566 | return Target.getUnsignedShortFractScale(); |
11567 | case BuiltinType::UFract: |
11568 | case BuiltinType::SatUFract: |
11569 | return Target.getUnsignedFractScale(); |
11570 | case BuiltinType::ULongFract: |
11571 | case BuiltinType::SatULongFract: |
11572 | return Target.getUnsignedLongFractScale(); |
11573 | } |
11574 | } |
11575 | |
11576 | unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { |
11577 | assert(Ty->isFixedPointType())(static_cast<void> (0)); |
11578 | |
11579 | const TargetInfo &Target = getTargetInfo(); |
11580 | switch (Ty->castAs<BuiltinType>()->getKind()) { |
11581 | default: |
11582 | llvm_unreachable("Not a fixed point type!")__builtin_unreachable(); |
11583 | case BuiltinType::ShortAccum: |
11584 | case BuiltinType::SatShortAccum: |
11585 | return Target.getShortAccumIBits(); |
11586 | case BuiltinType::Accum: |
11587 | case BuiltinType::SatAccum: |
11588 | return Target.getAccumIBits(); |
11589 | case BuiltinType::LongAccum: |
11590 | case BuiltinType::SatLongAccum: |
11591 | return Target.getLongAccumIBits(); |
11592 | case BuiltinType::UShortAccum: |
11593 | case BuiltinType::SatUShortAccum: |
11594 | return Target.getUnsignedShortAccumIBits(); |
11595 | case BuiltinType::UAccum: |
11596 | case BuiltinType::SatUAccum: |
11597 | return Target.getUnsignedAccumIBits(); |
11598 | case BuiltinType::ULongAccum: |
11599 | case BuiltinType::SatULongAccum: |
11600 | return Target.getUnsignedLongAccumIBits(); |
11601 | case BuiltinType::ShortFract: |
11602 | case BuiltinType::SatShortFract: |
11603 | case BuiltinType::Fract: |
11604 | case BuiltinType::SatFract: |
11605 | case BuiltinType::LongFract: |
11606 | case BuiltinType::SatLongFract: |
11607 | case BuiltinType::UShortFract: |
11608 | case BuiltinType::SatUShortFract: |
11609 | case BuiltinType::UFract: |
11610 | case BuiltinType::SatUFract: |
11611 | case BuiltinType::ULongFract: |
11612 | case BuiltinType::SatULongFract: |
11613 | return 0; |
11614 | } |
11615 | } |
11616 | |
11617 | llvm::FixedPointSemantics |
11618 | ASTContext::getFixedPointSemantics(QualType Ty) const { |
11619 | assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&(static_cast<void> (0)) |
11620 | "Can only get the fixed point semantics for a "(static_cast<void> (0)) |
11621 | "fixed point or integer type.")(static_cast<void> (0)); |
11622 | if (Ty->isIntegerType()) |
11623 | return llvm::FixedPointSemantics::GetIntegerSemantics( |
11624 | getIntWidth(Ty), Ty->isSignedIntegerType()); |
11625 | |
11626 | bool isSigned = Ty->isSignedFixedPointType(); |
11627 | return llvm::FixedPointSemantics( |
11628 | static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, |
11629 | Ty->isSaturatedFixedPointType(), |
11630 | !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); |
11631 | } |
11632 | |
11633 | llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { |
11634 | assert(Ty->isFixedPointType())(static_cast<void> (0)); |
11635 | return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); |
11636 | } |
11637 | |
11638 | llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { |
11639 | assert(Ty->isFixedPointType())(static_cast<void> (0)); |
11640 | return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); |
11641 | } |
11642 | |
11643 | QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { |
11644 | assert(Ty->isUnsignedFixedPointType() &&(static_cast<void> (0)) |
11645 | "Expected unsigned fixed point type")(static_cast<void> (0)); |
11646 | |
11647 | switch (Ty->castAs<BuiltinType>()->getKind()) { |
11648 | case BuiltinType::UShortAccum: |
11649 | return ShortAccumTy; |
11650 | case BuiltinType::UAccum: |
11651 | return AccumTy; |
11652 | case BuiltinType::ULongAccum: |
11653 | return LongAccumTy; |
11654 | case BuiltinType::SatUShortAccum: |
11655 | return SatShortAccumTy; |
11656 | case BuiltinType::SatUAccum: |
11657 | return SatAccumTy; |
11658 | case BuiltinType::SatULongAccum: |
11659 | return SatLongAccumTy; |
11660 | case BuiltinType::UShortFract: |
11661 | return ShortFractTy; |
11662 | case BuiltinType::UFract: |
11663 | return FractTy; |
11664 | case BuiltinType::ULongFract: |
11665 | return LongFractTy; |
11666 | case BuiltinType::SatUShortFract: |
11667 | return SatShortFractTy; |
11668 | case BuiltinType::SatUFract: |
11669 | return SatFractTy; |
11670 | case BuiltinType::SatULongFract: |
11671 | return SatLongFractTy; |
11672 | default: |
11673 | llvm_unreachable("Unexpected unsigned fixed point type")__builtin_unreachable(); |
11674 | } |
11675 | } |
11676 | |
11677 | ParsedTargetAttr |
11678 | ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { |
11679 | assert(TD != nullptr)(static_cast<void> (0)); |
11680 | ParsedTargetAttr ParsedAttr = TD->parse(); |
11681 | |
11682 | ParsedAttr.Features.erase( |
11683 | llvm::remove_if(ParsedAttr.Features, |
11684 | [&](const std::string &Feat) { |
11685 | return !Target->isValidFeatureName( |
11686 | StringRef{Feat}.substr(1)); |
11687 | }), |
11688 | ParsedAttr.Features.end()); |
11689 | return ParsedAttr; |
11690 | } |
11691 | |
11692 | void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, |
11693 | const FunctionDecl *FD) const { |
11694 | if (FD) |
11695 | getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); |
11696 | else |
11697 | Target->initFeatureMap(FeatureMap, getDiagnostics(), |
11698 | Target->getTargetOpts().CPU, |
11699 | Target->getTargetOpts().Features); |
11700 | } |
11701 | |
11702 | // Fills in the supplied string map with the set of target features for the |
11703 | // passed in function. |
11704 | void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, |
11705 | GlobalDecl GD) const { |
11706 | StringRef TargetCPU = Target->getTargetOpts().CPU; |
11707 | const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
11708 | if (const auto *TD = FD->getAttr<TargetAttr>()) { |
11709 | ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); |
11710 | |
11711 | // Make a copy of the features as passed on the command line into the |
11712 | // beginning of the additional features from the function to override. |
11713 | ParsedAttr.Features.insert( |
11714 | ParsedAttr.Features.begin(), |
11715 | Target->getTargetOpts().FeaturesAsWritten.begin(), |
11716 | Target->getTargetOpts().FeaturesAsWritten.end()); |
11717 | |
11718 | if (ParsedAttr.Architecture != "" && |
11719 | Target->isValidCPUName(ParsedAttr.Architecture)) |
11720 | TargetCPU = ParsedAttr.Architecture; |
11721 | |
11722 | // Now populate the feature map, first with the TargetCPU which is either |
11723 | // the default or a new one from the target attribute string. Then we'll use |
11724 | // the passed in features (FeaturesAsWritten) along with the new ones from |
11725 | // the attribute. |
11726 | Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, |
11727 | ParsedAttr.Features); |
11728 | } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) { |
11729 | llvm::SmallVector<StringRef, 32> FeaturesTmp; |
11730 | Target->getCPUSpecificCPUDispatchFeatures( |
11731 | SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); |
11732 | std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end()); |
11733 | Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); |
11734 | } else { |
11735 | FeatureMap = Target->getTargetOpts().FeatureMap; |
11736 | } |
11737 | } |
11738 | |
11739 | OMPTraitInfo &ASTContext::getNewOMPTraitInfo() { |
11740 | OMPTraitInfoVector.emplace_back(new OMPTraitInfo()); |
11741 | return *OMPTraitInfoVector.back(); |
11742 | } |
11743 | |
11744 | const StreamingDiagnostic &clang:: |
11745 | operator<<(const StreamingDiagnostic &DB, |
11746 | const ASTContext::SectionInfo &Section) { |
11747 | if (Section.Decl) |
11748 | return DB << Section.Decl; |
11749 | return DB << "a prior #pragma section"; |
11750 | } |
11751 | |
11752 | bool ASTContext::mayExternalizeStaticVar(const Decl *D) const { |
11753 | bool IsStaticVar = |
11754 | isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static; |
11755 | bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() && |
11756 | !D->getAttr<CUDADeviceAttr>()->isImplicit()) || |
11757 | (D->hasAttr<CUDAConstantAttr>() && |
11758 | !D->getAttr<CUDAConstantAttr>()->isImplicit()); |
11759 | // CUDA/HIP: static managed variables need to be externalized since it is |
11760 | // a declaration in IR, therefore cannot have internal linkage. |
11761 | return IsStaticVar && |
11762 | (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar); |
11763 | } |
11764 | |
11765 | bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const { |
11766 | return mayExternalizeStaticVar(D) && |
11767 | (D->hasAttr<HIPManagedAttr>() || |
11768 | CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); |
11769 | } |
11770 | |
11771 | StringRef ASTContext::getCUIDHash() const { |
11772 | if (!CUIDHash.empty()) |
11773 | return CUIDHash; |
11774 | if (LangOpts.CUID.empty()) |
11775 | return StringRef(); |
11776 | CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); |
11777 | return CUIDHash; |
11778 | } |
11779 | |
11780 | // Get the closest named parent, so we can order the sycl naming decls somewhere |
11781 | // that mangling is meaningful. |
11782 | static const DeclContext *GetNamedParent(const CXXRecordDecl *RD) { |
11783 | const DeclContext *DC = RD->getDeclContext(); |
11784 | |
11785 | while (!isa<NamedDecl, TranslationUnitDecl>(DC)) |
11786 | DC = DC->getParent(); |
11787 | return DC; |
11788 | } |
11789 | |
11790 | void ASTContext::AddSYCLKernelNamingDecl(const CXXRecordDecl *RD) { |
11791 | assert(getLangOpts().isSYCL() && "Only valid for SYCL programs")(static_cast<void> (0)); |
11792 | RD = RD->getCanonicalDecl(); |
11793 | const DeclContext *DC = GetNamedParent(RD); |
11794 | |
11795 | assert(RD->getLocation().isValid() &&(static_cast<void> (0)) |
11796 | "Invalid location on kernel naming decl")(static_cast<void> (0)); |
11797 | |
11798 | (void)SYCLKernelNamingTypes[DC].insert(RD); |
11799 | } |
11800 | |
11801 | bool ASTContext::IsSYCLKernelNamingDecl(const NamedDecl *ND) const { |
11802 | assert(getLangOpts().isSYCL() && "Only valid for SYCL programs")(static_cast<void> (0)); |
11803 | const auto *RD = dyn_cast<CXXRecordDecl>(ND); |
11804 | if (!RD) |
11805 | return false; |
11806 | RD = RD->getCanonicalDecl(); |
11807 | const DeclContext *DC = GetNamedParent(RD); |
11808 | |
11809 | auto Itr = SYCLKernelNamingTypes.find(DC); |
11810 | |
11811 | if (Itr == SYCLKernelNamingTypes.end()) |
11812 | return false; |
11813 | |
11814 | return Itr->getSecond().count(RD); |
11815 | } |
11816 | |
11817 | // Filters the Decls list to those that share the lambda mangling with the |
11818 | // passed RD. |
11819 | void ASTContext::FilterSYCLKernelNamingDecls( |
11820 | const CXXRecordDecl *RD, |
11821 | llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls) { |
11822 | |
11823 | if (!SYCLKernelFilterContext) |
11824 | SYCLKernelFilterContext.reset( |
11825 | ItaniumMangleContext::create(*this, getDiagnostics())); |
11826 | |
11827 | llvm::SmallString<128> LambdaSig; |
11828 | llvm::raw_svector_ostream Out(LambdaSig); |
11829 | SYCLKernelFilterContext->mangleLambdaSig(RD, Out); |
11830 | |
11831 | llvm::erase_if(Decls, [this, &LambdaSig](const CXXRecordDecl *LocalRD) { |
11832 | llvm::SmallString<128> LocalLambdaSig; |
11833 | llvm::raw_svector_ostream LocalOut(LocalLambdaSig); |
11834 | SYCLKernelFilterContext->mangleLambdaSig(LocalRD, LocalOut); |
11835 | return LambdaSig != LocalLambdaSig; |
11836 | }); |
11837 | } |
11838 | |
11839 | unsigned ASTContext::GetSYCLKernelNamingIndex(const NamedDecl *ND) { |
11840 | assert(getLangOpts().isSYCL() && "Only valid for SYCL programs")(static_cast<void> (0)); |
11841 | assert(IsSYCLKernelNamingDecl(ND) &&(static_cast<void> (0)) |
11842 | "Lambda not involved in mangling asked for a naming index?")(static_cast<void> (0)); |
11843 | |
11844 | const CXXRecordDecl *RD = cast<CXXRecordDecl>(ND)->getCanonicalDecl(); |
11845 | const DeclContext *DC = GetNamedParent(RD); |
11846 | |
11847 | auto Itr = SYCLKernelNamingTypes.find(DC); |
11848 | assert(Itr != SYCLKernelNamingTypes.end() && "Not a valid DeclContext?")(static_cast<void> (0)); |
11849 | |
11850 | const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &Set = Itr->getSecond(); |
11851 | |
11852 | llvm::SmallVector<const CXXRecordDecl *> Decls{Set.begin(), Set.end()}; |
11853 | |
11854 | FilterSYCLKernelNamingDecls(RD, Decls); |
11855 | |
11856 | llvm::sort(Decls, [](const CXXRecordDecl *LHS, const CXXRecordDecl *RHS) { |
11857 | return LHS->getLambdaManglingNumber() < RHS->getLambdaManglingNumber(); |
11858 | }); |
11859 | |
11860 | return llvm::find(Decls, RD) - Decls.begin(); |
11861 | } |